| _id (string, 2–7 chars) | title (string, 1–88 chars) | partition (string, 3 classes) | text (string, 75–19.8k chars) | language (string, 1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q7400
|
BunningsProductSpider.parse_detail
|
train
|
def parse_detail(self, response):
    """Parse an individual product's detail page."""
    # Product information (a start)
    product_data = {
        'url': response.url,
        'name': response.css('div.page-title h1::text').extract_first(),
    }
    # Inventory number: trailing digits of the product-in span
    inventory_number = re.search(
        r'(?P<inv_num>\d+)$',
        response.css('span.product-in::text').extract_first(),
    ).group('inv_num')
    product_data.update({'in': inventory_number})
    # Specifications (arbitrary key:value pairs)
    specs_table = response.css('#tab-specs dl')
    for row in specs_table.css('div.spec-row'):
        keys = row.css('dt::text').extract()
        values = row.css('dd::text').extract()
        product_data.update({
            key: value
            for (key, value) in zip(keys, values)
        })
    self.logger.info(product_data['name'])
    yield product_data
|
python
|
{
"resource": ""
}
|
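A quick standalone check of the trailing-digits pattern used above for the inventory number (the sample string is hypothetical; the real `span.product-in` text may differ):

```python
import re

sample = 'I/N: 0034567'  # hypothetical span text
re.search(r'(?P<inv_num>\d+)$', sample).group('inv_num')  # -> '0034567'
```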
q7401
|
_Stator.mate_top
|
train
|
def mate_top(self):
    """Mate at the top of the stator."""
    return Mate(self, CoordSystem(
        origin=(0, 0, self.length / 2),
        xDir=(0, 1, 0),
        normal=(0, 0, 1)
    ))
|
python
|
{
"resource": ""
}
|
q7402
|
_Stator.mate_bottom
|
train
|
def mate_bottom(self):
    """Mate at the bottom of the stator."""
    return Mate(self, CoordSystem(
        origin=(0, 0, -self.length / 2),
        xDir=(1, 0, 0),
        normal=(0, 0, -1)
    ))
|
python
|
{
"resource": ""
}
|
q7403
|
Stepper.mount_points
|
train
|
def mount_points(self):
    """Return the mount-point vertices."""
    wp = cq.Workplane("XY")
    h = wp.rect(self.hole_spacing, self.hole_spacing,
                forConstruction=True).vertices()
    return h.objects
|
python
|
{
"resource": ""
}
|
q7404
|
ParametricObject.class_param_names
|
train
|
def class_param_names(cls, hidden=True):
    """
    Return the names of all class parameters.
    :param hidden: if ``False``, excludes parameters with a ``_`` prefix.
    :type hidden: :class:`bool`
    :return: set of parameter names
    :rtype: :class:`set`
    """
    param_names = set(
        k for (k, v) in cls.__dict__.items()
        if isinstance(v, Parameter)
    )
    for parent in cls.__bases__:
        if hasattr(parent, 'class_param_names'):
            param_names |= parent.class_param_names(hidden=hidden)
    if not hidden:
        param_names = set(n for n in param_names if not n.startswith('_'))
    return param_names
|
python
|
{
"resource": ""
}
|
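A minimal sketch of how `class_param_names` behaves, assuming `cqparts.params` exposes `ParametricObject` and a `Parameter` subclass such as `PositiveFloat` (as cqparts does):

```python
# A sketch, assuming cqparts.params provides ParametricObject & PositiveFloat.
from cqparts.params import ParametricObject, PositiveFloat

class Box(ParametricObject):
    width = PositiveFloat(10)    # public parameter
    _ratio = PositiveFloat(0.5)  # hidden parameter (underscore prefix)

Box.class_param_names()              # includes both 'width' and '_ratio'
Box.class_param_names(hidden=False)  # excludes the '_'-prefixed name
```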
q7405
|
ParametricObject.serialize_parameters
|
train
|
def serialize_parameters(self):
    """
    Get the parameter data in its serialized form.
    Data is serialized by each parameter's :meth:`Parameter.serialize`
    implementation.
    :return: serialized parameter data in the form: ``{<name>: <serial data>, ...}``
    :rtype: :class:`dict`
    """
    # Get parameter data
    class_params = self.class_params()
    instance_params = self.params()
    # Serialize each parameter
    serialized = {}
    for name in class_params.keys():
        param = class_params[name]
        value = instance_params[name]
        serialized[name] = param.serialize(value)
    return serialized
|
python
|
{
"resource": ""
}
|
q7406
|
ParametricObject.deserialize
|
train
|
def deserialize(data):
    """
    Create an instance from serialized data.
    """
    # Import module & get class
    try:
        module = import_module(data.get('class').get('module'))
        cls = getattr(module, data.get('class').get('name'))
    except ImportError:
        raise ImportError("No module named: %r" % data.get('class').get('module'))
    except AttributeError:
        raise ImportError("module %r does not contain class %r" % (
            data.get('class').get('module'),
            data.get('class').get('name')
        ))
    # Deserialize parameters
    class_params = cls.class_params(hidden=True)
    params = dict(
        (name, class_params[name].deserialize(value))
        for (name, value) in data.get('params').items()
    )
    # Instantiate new instance
    return cls(**params)
|
python
|
{
"resource": ""
}
|
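A hedged round-trip sketch; it assumes a counterpart `serialize()` method (not shown in this section) produces the structure `deserialize` consumes, as the `serialize_parameters` entry above suggests:

```python
# Hypothetical round-trip: `obj.serialize()` is assumed to produce the
# {'class': {...}, 'params': {...}} structure that deserialize() reads.
data = obj.serialize()
clone = ParametricObject.deserialize(data)
assert type(clone) is type(obj)
```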
q7407
|
common_criteria
|
train
|
def common_criteria(**common):
    """
    Wrap a function to always call with the given ``common`` named parameters.
    :param common: criteria common to your function call
    :return: decorator function
    :rtype: :class:`function`
    .. doctest::
        >>> import cqparts
        >>> from cqparts.search import register, search, find
        >>> from cqparts.search import common_criteria
        >>> # Somebody else's (boring) library may register with...
        >>> @register(a='one', b='two')
        ... class BoringThing(cqparts.Part):
        ...     pass
        >>> # But your library is awesome; only registering with unique criteria...
        >>> lib_criteria = {
        ...     'author': 'your_name',
        ...     'libname': 'awesome_things',
        ... }
        >>> awesome_register = common_criteria(**lib_criteria)(register)
        >>> @awesome_register(a='one', b='two')  # identical to BoringThing
        ... class AwesomeThing(cqparts.Part):
        ...     pass
        >>> # So let's try a search
        >>> len(search(a='one', b='two'))  # doctest: +SKIP
        2
        >>> # oops, that returned both classes
        >>> # To narrow it down, we add something unique:
        >>> len(search(a='one', b='two', libname='awesome_things'))  # finds only yours # doctest: +SKIP
        1
        >>> # or, we could use common_criteria again...
        >>> awesome_search = common_criteria(**lib_criteria)(search)
        >>> awesome_find = common_criteria(**lib_criteria)(find)
        >>> len(awesome_search(a='one', b='two'))  # doctest: +SKIP
        1
        >>> awesome_find(a='one', b='two').__name__
        'AwesomeThing'
    A good universal way to apply unique criteria is with:
    .. testcode::
        import cadquery, cqparts
        from cqparts.search import register, common_criteria
        _register = common_criteria(module=__name__)(register)
        @_register(shape='cube', scale='unit')
        class Cube(cqparts.Part):
            # just an example...
            def make(self):
                return cadquery.Workplane('XY').box(1, 1, 1)
    """
    def decorator(func):
        def inner(*args, **kwargs):
            merged_kwargs = copy(common)
            merged_kwargs.update(kwargs)
            return func(*args, **merged_kwargs)
        return inner
    return decorator
|
python
|
{
"resource": ""
}
|
q7408
|
merge_boundboxes
|
train
|
def merge_boundboxes(*bb_list):
    """
    Combine bounding boxes to result in a single BoundBox that encloses
    all of them.
    :param bb_list: list of bounding boxes
    :type bb_list: :class:`list` of :class:`cadquery.BoundBox`
    """
    # Verify types
    if not all(isinstance(x, cadquery.BoundBox) for x in bb_list):
        raise TypeError(
            "parameters must be cadquery.BoundBox instances: {!r}".format(bb_list)
        )
    if len(bb_list) <= 1:
        return bb_list[0]  # if only 1, nothing to merge; simply return it
    # Find the smallest bounding box that encloses each of those given
    min_params = list(min(*vals) for vals in zip(  # minimum for each axis
        *((bb.xmin, bb.ymin, bb.zmin) for bb in bb_list)
    ))
    max_params = list(max(*vals) for vals in zip(  # maximum for each axis
        *((bb.xmax, bb.ymax, bb.zmax) for bb in bb_list)
    ))
    # Create new object with combined parameters
    WrappedType = type(bb_list[0].wrapped)  # assuming they're all the same
    wrapped_bb = WrappedType(*(min_params + max_params))
    return cadquery.BoundBox(wrapped_bb)
|
python
|
{
"resource": ""
}
|
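A quick usage sketch with two unit boxes (assumes the FreeCAD-backed cadquery API used throughout this library):

```python
import cadquery

# Bounding boxes of two unit cubes, 5 units apart along x
bb_a = cadquery.Workplane('XY').box(1, 1, 1).findSolid().BoundingBox()
bb_b = cadquery.Workplane('XY', origin=(5, 0, 0)).box(1, 1, 1).findSolid().BoundingBox()

merged = merge_boundboxes(bb_a, bb_b)
(merged.xmin, merged.xmax)  # spans both: (-0.5, 5.5)
```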
q7409
|
CoordSystem.random
|
train
|
def random(cls, span=1, seed=None):
    """
    Creates a randomized coordinate system.
    Useful for confirming that an *assembly* does not rely on its
    origin coordinate system to remain intact.
    For example, the :class:`CoordSysIndicator` *assembly* aligns 3 boxes
    along each of the :math:`XYZ` axes.
    Positioning it randomly by setting its ``world_coords`` shows that each
    box is always positioned orthogonally to the other two.
    .. doctest::
        from cqparts_misc.basic.indicators import CoordSysIndicator
        from cqparts.display import display
        from cqparts.utils import CoordSystem
        cs = CoordSysIndicator()
        cs.world_coords = CoordSystem.random()
        display(cs)  # doctest: +SKIP
    :param span: origin of return will be :math:`\pm span` per axis
    :param seed: if supplied, return is pseudorandom (repeatable)
    :type seed: hashable object
    :return: randomized coordinate system
    :rtype: :class:`CoordSystem`
    """
    if seed is not None:
        random.seed(seed)

    def rand_vect(min, max):
        return (
            random.uniform(min, max),
            random.uniform(min, max),
            random.uniform(min, max),
        )

    while True:
        try:
            return cls(
                origin=rand_vect(-span, span),
                xDir=rand_vect(-1, 1),
                normal=rand_vect(-1, 1),
            )
        except RuntimeError:  # Base.FreeCADError inherits from RuntimeError
            # Raised if xDir & normal vectors are parallel.
            # (the chance is very low, but it could happen)
            continue
|
python
|
{
"resource": ""
}
|
q7410
|
VectorEffect.start_point
|
train
|
def start_point(self):
    """
    Start vertex of effect.
    :return: vertex (as vector)
    :rtype: :class:`cadquery.Vector`
    """
    edge = self.result.wire().val().Edges()[0]
    return edge.Vertices()[0].Center()
|
python
|
{
"resource": ""
}
|
q7411
|
VectorEffect.start_coordsys
|
train
|
def start_coordsys(self):
    """
    Coordinate system at start of effect.
    All axes are parallel to the original vector evaluation location, with
    the origin moved to this effect's start point.
    :return: coordinate system at start of effect
    :rtype: :class:`CoordSys`
    """
    coordsys = copy(self.location)
    coordsys.origin = self.start_point
    return coordsys
|
python
|
{
"resource": ""
}
|
q7412
|
VectorEffect.end_coordsys
|
train
|
def end_coordsys(self):
    """
    Coordinate system at end of effect.
    All axes are parallel to the original vector evaluation location, with
    the origin moved to this effect's end point.
    :return: coordinate system at end of effect
    :rtype: :class:`CoordSys`
    """
    coordsys = copy(self.location)
    coordsys.origin = self.end_point
    return coordsys
|
python
|
{
"resource": ""
}
|
q7413
|
VectorEvaluator.perform_evaluation
|
train
|
def perform_evaluation(self):
    """
    Determine which parts lie along the given vector, and over what length.
    :return: effects on the given parts (in order of distance from
        the start point)
    :rtype: list(:class:`VectorEffect`)
    """
    # Create effect vector (with max length)
    if not self.max_effect_length:
        # no effect is possible, return an empty list
        return []
    edge = cadquery.Edge.makeLine(
        self.location.origin,
        self.location.origin + (self.location.zDir * -(self.max_effect_length + 1))  # +1 to avoid rounding errors
    )
    wire = cadquery.Wire.assembleEdges([edge])
    wp = cadquery.Workplane('XY').newObject([wire])
    effect_list = []  # list of self.effect_class instances
    for part in self.parts:
        solid = part.world_obj.translate((0, 0, 0))
        intersection = solid.intersect(copy(wp))
        effect = self.effect_class(
            location=self.location,
            part=part,
            result=intersection,
        )
        if effect:
            effect_list.append(effect)
    return sorted(effect_list)
|
python
|
{
"resource": ""
}
|
q7414
|
indicate_last
|
train
|
def indicate_last(items):
    """
    Iterate through a list and indicate which item is the last; intended to
    assist tree displays of hierarchical content.
    :return: generator yielding (<bool>, <item>), where bool is True only on the last entry
    :rtype: generator
    """
    last_index = len(items) - 1
    for (i, item) in enumerate(items):
        yield (i == last_index, item)
|
python
|
{
"resource": ""
}
|
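Since `indicate_last` is pure Python, its behaviour is easy to pin down:

```python
>>> list(indicate_last(['a', 'b', 'c']))
[(False, 'a'), (False, 'b'), (True, 'c')]
>>> list(indicate_last([]))
[]
```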
q7415
|
Thread.get_radii
|
train
|
def get_radii(self):
    """
    Get the inner and outer radii of the thread.
    :return: (<inner radius>, <outer radius>)
    :rtype: :class:`tuple`
    .. note::
        Ideally this method is overridden in inheriting classes to
        mathematically determine the radii.
        Default action is to generate the profile, then use the
        bounding box to determine min & max radii. However, this method is
        prone to small numeric error.
    """
    bb = self.profile.val().BoundingBox()
    return (bb.xmin, bb.xmax)
|
python
|
{
"resource": ""
}
|
q7416
|
Thread.make_simple
|
train
|
def make_simple(self):
    """
    Return a cylinder with the thread's average radius & length.
    :math:`radius = (inner_radius + outer_radius) / 2`
    """
    (inner_radius, outer_radius) = self.get_radii()
    radius = (inner_radius + outer_radius) / 2
    return cadquery.Workplane('XY') \
        .circle(radius).extrude(self.length)
|
python
|
{
"resource": ""
}
|
q7417
|
Thread.make_pilothole_cutter
|
train
|
def make_pilothole_cutter(self):
    """
    Make a solid to subtract from an interfacing solid to bore a pilot-hole.
    """
    # get pilothole radius
    # note: not done in .initialize_parameters() because that would cause
    # the thread's profile to be created at initialisation (by default).
    pilothole_radius = self.pilothole_radius
    if pilothole_radius is None:
        (inner_radius, outer_radius) = self.get_radii()
        pilothole_radius = inner_radius + self.pilothole_ratio * (outer_radius - inner_radius)
    return cadquery.Workplane('XY') \
        .circle(pilothole_radius) \
        .extrude(self.length)
|
python
|
{
"resource": ""
}
|
q7418
|
Assembly.tree_str
|
train
|
def tree_str(self, name=None, prefix='', add_repr=False, _depth=0):
    u"""
    Return a string recursively listing the assembly hierarchy.
    :param name: if set, names the tree's trunk, otherwise the object's :meth:`repr` names the tree
    :type name: :class:`str`
    :param prefix: string prefixed to each line, can be used to indent
    :type prefix: :class:`str`
    :param add_repr: if set, each *component*'s :meth:`repr` is put after its name
    :type add_repr: :class:`bool`
    :return: Printable string of an assembly's component hierarchy.
    :rtype: :class:`str`
    Example output from `block_tree.py <https://github.com/fragmuffin/cqparts/blob/master/tests/manual/block_tree.py>`_
    ::
        >>> log = logging.getLogger(__name__)
        >>> isinstance(block_tree, Assembly)
        True
        >>> log.info(block_tree.tree_str(name="block_tree"))
        block_tree
        \u251c\u25cb branch_lb
        \u251c\u25cb branch_ls
        \u251c\u2500 branch_r
        \u2502   \u251c\u25cb L
        \u2502   \u251c\u25cb R
        \u2502   \u251c\u25cb branch
        \u2502   \u251c\u2500 house
        \u2502   \u2502   \u251c\u25cb bar
        \u2502   \u2502   \u2514\u25cb foo
        \u2502   \u2514\u25cb split
        \u251c\u25cb trunk
        \u2514\u25cb trunk_split
    Where:
    * ``\u2500`` denotes an :class:`Assembly`, and
    * ``\u25cb`` denotes a :class:`Part`
    """
    # unicode characters
    c_t = u'\u251c'
    c_l = u'\u2514'
    c_dash = u'\u2500'
    c_o = u'\u25cb'
    c_span = u'\u2502'
    output = u''
    if not _depth:  # first line
        output = prefix
        if name:
            output += (name + u': ') if add_repr else name
        if add_repr or not name:
            output += repr(self)
        output += '\n'
    # build tree
    for (is_last, (name, component)) in indicate_last(sorted(self.components.items(), key=lambda x: x[0])):
        branch_chr = c_l if is_last else c_t
        if isinstance(component, Assembly):
            # Assembly: also list nested components
            output += prefix + ' ' + branch_chr + c_dash + u' ' + name
            if add_repr:
                output += ': ' + repr(component)
            output += '\n'
            output += component.tree_str(
                prefix=(prefix + (u'   ' if is_last else (u' ' + c_span + ' '))),
                add_repr=add_repr,
                _depth=_depth + 1,
            )
        else:
            # Part (assumed): leaf node
            output += prefix + ' ' + branch_chr + c_o + u' ' + name
            if add_repr:
                output += ': ' + repr(component)
            output += '\n'
    return output
|
python
|
{
"resource": ""
}
|
q7419
|
_relative_path_to
|
train
|
def _relative_path_to(path_list, filename):
    """Get a neat relative path to files, relative to the CWD."""
    return os.path.join(
        os.path.relpath(os.path.join(*path_list), os.getcwd()),
        filename
    )
|
python
|
{
"resource": ""
}
|
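For example (the working directory here is hypothetical):

```python
>>> # with os.getcwd() == '/home/user/project' (hypothetical)
>>> _relative_path_to(['/home/user/project', 'src', 'parts'], 'box.py')
'src/parts/box.py'
```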
q7420
|
TrapezoidalGear._make_tooth_template
|
train
|
def _make_tooth_template(self):
    """
    Builds a single tooth, including the cylinder with tooth faces
    tangential to its circumference.
    """
    # parameters
    period_arc = (2 * pi) / self.tooth_count
    tooth_arc = period_arc * self.spacing_ratio  # the arc between faces at effective_radius
    outer_radius = self.effective_radius + (self.tooth_height / 2)
    face_angle_rad = radians(self.face_angle)
    # cartesian isosceles trapezoid dimensions
    side_angle = face_angle_rad - (tooth_arc / 2)
    side_tangent_radius = sin(face_angle_rad) * self.effective_radius
    extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)
    tooth = cadquery.Workplane('XY', origin=(0, 0, -self.width / 2)) \
        .moveTo(
            side_tangent_radius * cos(side_angle),
            side_tangent_radius * sin(side_angle)
        )
    opposite_point = (
        -side_tangent_radius * cos(side_angle),
        side_tangent_radius * sin(side_angle)
    )
    if self.face_angle:
        tooth = tooth.lineTo(*opposite_point)
    # tooth = tooth.threePointArc(
    #     (0, -side_tangent_radius),
    #     opposite_point
    # )
    tooth = tooth.lineTo(
        -cos(extra_side_angle) * outer_radius,
        sin(extra_side_angle) * outer_radius
    )
    opposite_point = (
        cos(extra_side_angle) * outer_radius,
        sin(extra_side_angle) * outer_radius
    )
    if self.flat_top:
        tooth = tooth.lineTo(*opposite_point)
    else:
        tooth = tooth.threePointArc((0, outer_radius), opposite_point)
    tooth = tooth.close().extrude(self.width)
    return tooth
|
python
|
{
"resource": ""
}
|
q7421
|
_Ring.get_mate_center
|
train
|
def get_mate_center(self, angle=0):
    """
    Mate at ring's center, rotated ``angle`` degrees.
    :param angle: rotation around z-axis (unit: deg)
    :type angle: :class:`float`
    :return: mate in ring's center, rotated about z-axis
    :rtype: :class:`Mate <cqparts.constraint.Mate>`
    """
    return Mate(self, CoordSystem.from_plane(
        cadquery.Plane(
            origin=(0, 0, self.width / 2),
            xDir=(1, 0, 0),
            normal=(0, 0, 1),
        ).rotated((0, 0, angle))  # rotate about z-axis
    ))
|
python
|
{
"resource": ""
}
|
q7422
|
_BallRing.get_max_ballcount
|
train
|
def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
    """
    The maximum number of balls, given ``rolling_radius`` and ``ball_diam``.
    :param min_gap: minimum gap between balls (measured along the vector
        between spherical centers)
    :type min_gap: :class:`float`
    :return: maximum ball count
    :rtype: :class:`int`
    """
    min_arc = asin(((ball_diam + min_gap) / 2) / rolling_radius) * 2
    return int((2 * pi) / min_arc)
|
python
|
{
"resource": ""
}
|
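The formula packs ball centers around the rolling circle; a worked example with made-up dimensions:

```python
from math import asin, pi

ball_diam, rolling_radius = 6.0, 20.0                  # hypothetical bearing dimensions
min_arc = asin((ball_diam / 2) / rolling_radius) * 2   # ~0.3011 rad claimed per ball
int((2 * pi) / min_arc)                                # -> 20 balls fit
```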
q7423
|
map_environment
|
train
|
def map_environment(**kwargs):
    """
    Decorator to map a DisplayEnvironment for displaying components.
    The decorated environment will be chosen if its condition is ``True`` and
    its order is the smallest.
    :param add_to: if set to ``globals()``, the display environment's constructor
        may reference its own type.
    :type add_to: :class:`dict`
    Any additional named parameters will be passed to the constructor of
    the decorated DisplayEnvironment.
    See :class:`DisplayEnvironment` for example usage.
    **NameError on importing**
    The following code::
        @map_environment(
            name='abc', order=10, condition=lambda: True,
        )
        class SomeDisplayEnv(DisplayEnvironment):
            def __init__(self, *args, **kwargs):
                super(SomeDisplayEnv, self).__init__(*args, **kwargs)
    Will raise the exception::
        NameError: global name 'SomeDisplayEnv' is not defined
    Because this ``map_environment`` decorator attempts to instantiate
    this class before it's returned to populate the ``globals()`` dict.
    To circumvent this problem, set ``add_to`` to ``globals()``::
        @map_environment(
            name='abc', order=10, condition=lambda: True,
            add_to=globals(),
        )
        class SomeDisplayEnv(DisplayEnvironment):
            ... as above
    """
    def inner(cls):
        global display_environments
        assert issubclass(cls, DisplayEnvironment), "can only map DisplayEnvironment classes"
        # Add class to its local globals() so the constructor can reference
        # its own type
        add_to = kwargs.pop('add_to', {})
        add_to[cls.__name__] = cls
        # Create display environment
        disp_env = cls(**kwargs)
        # is it already mapped?
        try:
            i = display_environments.index(disp_env)  # raises ValueError
            # report duplicate
            raise RuntimeError(
                ("environment %r already mapped, " % display_environments[i]) +
                ("can't map duplicate %r" % disp_env)
            )
        except ValueError:
            pass  # as expected
        # map class
        display_environments = sorted(display_environments + [disp_env])
        return cls
    return inner
|
python
|
{
"resource": ""
}
|
q7424
|
DisplayEnvironment.display_callback
|
train
|
def display_callback(self, component, **kwargs):
    """
    Display given component in this environment.
    .. note::
        To be overridden by inheriting classes
    An example of introducing a custom display environment.
    .. doctest::
        import cqparts
        from cqparts.display.environment import DisplayEnvironment, map_environment
        def is_text_env():
            # function that returns True if it's run in the
            # desired environment.
            import sys
            # Python 2.x
            if sys.version_info[0] == 2:
                return isinstance(sys.stdout, file)
            # Python 3.x
            import io
            return isinstance(sys.stdout, io.TextIOWrapper)
        @map_environment(
            name="text",
            order=0,  # force display to be first priority
            condition=is_text_env,
        )
        class TextDisplay(DisplayEnvironment):
            def display_callback(self, component, **kwargs):
                # Print component details to STDOUT
                if isinstance(component, cqparts.Assembly):
                    sys.stdout.write(component.tree_str(add_repr=True))
                else:  # assumed to be a cqparts.Part
                    sys.stdout.write("%r\\n" % (component))
    ``is_text_env()`` checks if there's a valid ``sys.stdout`` to write to,
    ``TextDisplay`` defines how to display any given component,
    and the ``@map_environment`` decorator adds the display paired with
    its environment test function.
    When using :meth:`display() <cqparts.display.display>`, this display
    will be used if ``is_text_env()`` returns ``True``, and no previously
    mapped environment with a smaller ``order`` tested ``True``:
    .. doctest::
        # create component to display
        from cqparts_misc.basic.primatives import Cube
        cube = Cube()
        # display component
        from cqparts.display import display
        display(cube)
    The ``display_callback`` will be called via
    :meth:`display() <DisplayEnvironment.display>`. So to call this
    display method directly:
    .. doctest::
        TextDisplay().display(cube)
    :raises: NotImplementedError if not overridden
    """
    if type(self) is DisplayEnvironment:
        raise RuntimeError(
            ("%r is not a functional display environment, " % (type(self))) +
            "it's meant to be inherited by an implemented environment"
        )
    raise NotImplementedError(
        "display_callback function not overridden by %r" % (type(self))
    )
|
python
|
{
"resource": ""
}
|
q7425
|
ShapeBuffer.add_vertex
|
train
|
def add_vertex(self, x, y, z):
    """
    Add a ``VEC3`` of ``float`` values to the ``vert_data`` buffer.
    """
    self.vert_data.write(
        struct.pack('<f', x) +
        struct.pack('<f', y) +
        struct.pack('<f', z)
    )
    # retain min/max values
    self.vert_min = _list3_min(self.vert_min, (x, y, z))
    self.vert_max = _list3_max(self.vert_max, (x, y, z))
|
python
|
{
"resource": ""
}
|
q7426
|
ShapeBuffer.add_poly_index
|
train
|
def add_poly_index(self, i, j, k):
    """
    Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer.
    """
    self.idx_data.write(
        struct.pack(self.idx_fmt, i) +
        struct.pack(self.idx_fmt, j) +
        struct.pack(self.idx_fmt, k)
    )
|
python
|
{
"resource": ""
}
|
q7427
|
ShapeBuffer.buffer_iter
|
train
|
def buffer_iter(self, block_size=1024):
    """
    Iterate through chunks of the vertices and indices buffers seamlessly.
    .. note::
        To see a usage example, look at the :class:`ShapeBuffer` description.
    """
    streams = (
        self.vert_data,
        self.idx_data,
    )
    # Chain streams seamlessly
    for stream in streams:
        stream.seek(0)
        while True:
            chunk = stream.read(block_size)
            if chunk:
                yield chunk
            else:
                break
|
python
|
{
"resource": ""
}
|
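A hedged streaming sketch, assuming a populated `ShapeBuffer` instance named `buff`:

```python
# Stream the mesh to disk without duplicating it in memory;
# vertex bytes are yielded first, then index bytes, in block_size chunks.
with open('mesh.bin', 'wb') as f:
    for chunk in buff.buffer_iter(block_size=4096):
        f.write(chunk)
```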
q7428
|
ShapeBuffer.read
|
train
|
def read(self):
    """
    Read the buffer out as a single stream.
    .. warning::
        Avoid using this function!
        **Why?** This is a *convenience* function; it doesn't encourage good
        memory management.
        All memory required for a mesh is duplicated, and returned as a
        single :class:`str`. So at best, using this function will double
        the memory required for a single model.
        **Instead:** Wherever possible, please use :meth:`buffer_iter`.
    """
    buffer = BytesIO()
    for chunk in self.buffer_iter():
        log.debug('buffer.write(%r)', chunk)
        buffer.write(chunk)
    buffer.seek(0)
    return buffer.read()
|
python
|
{
"resource": ""
}
|
q7429
|
Part.bounding_box
|
train
|
def bounding_box(self):
    """
    Generate a bounding box based on the full-complexity part.
    :return: bounding box of part
    :rtype: cadquery.BoundBox
    """
    if self.world_coords:
        return self.world_obj.findSolid().BoundingBox()
    return self.local_obj.findSolid().BoundingBox()
|
python
|
{
"resource": ""
}
|
q7430
|
MaleFastenerPart.make_cutter
|
train
|
def make_cutter(self):
    """
    Makes a shape to be used as a negative; it can be cut away from other
    shapes to make a perfectly shaped pocket for this part.
    For example, for a countersunk screw with a neck, the following
    cutter would be generated.
    .. image:: /_static/img/fastenerpart/male.cutter.png
    If the head were an externally driven shape (like a hex bolt), then the
    cutter's head would be wide enough to accommodate a tool to fasten it.
    """
    # head
    obj = self.head.make_cutter()
    # neck
    if self.neck_length:
        # neck cut diameter (if the thread is larger than the neck, the thread must fit through)
        (inner_radius, outer_radius) = self.thread.get_radii()
        neck_cut_radius = max(outer_radius, self.neck_diam / 2)
        neck = cadquery.Workplane(
            'XY', origin=(0, 0, -self.neck_length)
        ).circle(neck_cut_radius).extrude(self.neck_length)
        obj = obj.union(neck)
    # thread (pilot hole)
    pilot_hole = self.thread.make_pilothole_cutter() \
        .translate((0, 0, -self.length))
    obj = obj.union(pilot_hole)
    return obj
|
python
|
{
"resource": ""
}
|
q7431
|
_Cup.get_cutout
|
train
|
def get_cutout(self, clearance=0):
    """Get the cutout for the shaft."""
    return cq.Workplane('XY', origin=(0, 0, 0)) \
        .circle((self.diam / 2) + clearance) \
        .extrude(10)
|
python
|
{
"resource": ""
}
|
q7432
|
_Cup.mate_bottom
|
train
|
def mate_bottom(self):
    """Mate connecting to the bottom of the cup."""
    return Mate(self, CoordSystem(
        origin=(0, 0, -self.height),
        xDir=(1, 0, 0),
        normal=(0, 0, 1)))
|
python
|
{
"resource": ""
}
|
q7433
|
Controller._construct_api_path
|
train
|
def _construct_api_path(self, version):
    """Return the valid base API path for the given controller version.
    The base API path for the URL is different depending on the UniFi
    server version. Defaults to the correct path for the latest known
    stable working versions.
    """
    V2_PATH = 'api/'
    V3_PATH = 'api/s/' + self.site_id + '/'
    if version == 'v2':
        return V2_PATH
    if version in ('v3', 'v4', 'v5'):
        return V3_PATH
    return V2_PATH
|
python
|
{
"resource": ""
}
|
q7434
|
Controller.get_alerts_unarchived
|
train
|
def get_alerts_unarchived(self):
    """Return a list of unarchived Alerts."""
    js = json.dumps({'_sort': '-time', 'archived': False})
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'list/alarm', params)
|
python
|
{
"resource": ""
}
|
q7435
|
Controller.get_statistics_24h
|
train
|
def get_statistics_24h(self, endtime):
    """Return statistical data for the 24 hours preceding ``endtime``."""
    js = json.dumps(
        {'attrs': ["bytes", "num_sta", "time"],
         'start': int(endtime - 86400) * 1000,
         'end': int(endtime - 3600) * 1000})
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'stat/report/hourly.system', params)
|
python
|
{
"resource": ""
}
|
q7436
|
Controller.archive_all_alerts
|
train
|
def archive_all_alerts(self):
    """Archive all Alerts."""
    js = json.dumps({'cmd': 'archive-all-alarms'})
    params = urllib.urlencode({'json': js})
    self._read(self.api_url + 'cmd/evtmgr', params)
|
python
|
{
"resource": ""
}
|
q7437
|
Controller.create_backup
|
train
|
def create_backup(self):
    """Ask the controller to create a backup archive file; the response
    contains the path to the backup file.
    Warning: This process puts significant load on the controller and may
    render it partially unresponsive for other requests.
    """
    js = json.dumps({'cmd': 'backup'})
    params = urllib.urlencode({'json': js})
    answer = self._read(self.api_url + 'cmd/system', params)
    return answer[0].get('url')
|
python
|
{
"resource": ""
}
|
q7438
|
Controller.get_backup
|
train
|
def get_backup(self, target_file='unifi-backup.unf'):
    """Get a backup archive from a controller.
    Arguments:
    target_file -- Filename or full path to download the backup archive to;
        should have a .unf extension for restore.
    """
    download_path = self.create_backup()
    opener = self.opener.open(self.url + download_path)
    unifi_archive = opener.read()
    # write in binary mode: the archive is not text
    with open(target_file, 'wb') as backupfile:
        backupfile.write(unifi_archive)
|
python
|
{
"resource": ""
}
|
q7439
|
Controller.authorize_guest
|
train
|
def authorize_guest(self, guest_mac, minutes, up_bandwidth=None, down_bandwidth=None, byte_quota=None, ap_mac=None):
    """
    Authorize a guest based on its MAC address.
    Arguments:
    guest_mac -- the guest MAC address: aa:bb:cc:dd:ee:ff
    minutes -- duration of the authorization in minutes
    up_bandwidth -- up speed allowed in kbps (optional)
    down_bandwidth -- down speed allowed in kbps (optional)
    byte_quota -- quantity of bytes allowed in MB (optional)
    ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
    """
    cmd = 'authorize-guest'
    js = {'mac': guest_mac, 'minutes': minutes}
    if up_bandwidth:
        js['up'] = up_bandwidth
    if down_bandwidth:
        js['down'] = down_bandwidth
    if byte_quota:
        js['bytes'] = byte_quota
    if ap_mac and self.version != 'v2':
        js['ap_mac'] = ap_mac
    return self._run_command(cmd, params=js)
|
python
|
{
"resource": ""
}
|
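A hedged usage sketch for the guest methods, assuming the `Controller` constructor from the `unifi` package (host and credentials below are placeholders):

```python
from unifi.controller import Controller  # assumed package layout

c = Controller('192.168.1.1', 'admin', 'p4ssw0rd')  # placeholder credentials
c.authorize_guest('aa:bb:cc:dd:ee:ff', minutes=60, down_bandwidth=2048)
c.unauthorize_guest('aa:bb:cc:dd:ee:ff')
```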
q7440
|
Controller.unauthorize_guest
|
train
|
def unauthorize_guest(self, guest_mac):
    """
    Unauthorize a guest based on its MAC address.
    Arguments:
    guest_mac -- the guest MAC address: aa:bb:cc:dd:ee:ff
    """
    cmd = 'unauthorize-guest'
    js = {'mac': guest_mac}
    return self._run_command(cmd, params=js)
|
python
|
{
"resource": ""
}
|
q7441
|
download
|
train
|
def download(gfile, wks_name=None, col_names=False, row_names=False,
             credentials=None, start_cell='A1'):
    """
    Download a Google Spreadsheet and convert it to a Pandas DataFrame.
    :param gfile: path to Google Spreadsheet or gspread ID
    :param wks_name: worksheet name
    :param col_names: assign the top row to column names of the Pandas DataFrame
    :param row_names: assign the left column to row names of the Pandas DataFrame
    :param credentials: provide own credentials
    :param start_cell: specify where to start capturing the DataFrame; default is A1
    :type gfile: str
    :type wks_name: str
    :type col_names: bool
    :type row_names: bool
    :type credentials: class 'oauth2client.client.OAuth2Credentials'
    :type start_cell: str
    :returns: Pandas DataFrame
    :rtype: class 'pandas.core.frame.DataFrame'
    :Example:
        >>> from df2gspread import gspread2df as g2d
        >>> df = g2d.download(gfile="1U-kSDyeD-...", col_names=True, row_names=True)
        >>> df
                col1 col2
        field1     1    2
        field2     3    4
    """
    # access credentials
    credentials = get_credentials(credentials)
    # auth for gspread
    gc = gspread.authorize(credentials)
    try:
        # if gfile is file_id
        gc.open_by_key(gfile).__repr__()
        gfile_id = gfile
    except Exception:
        # else look for file_id in drive
        gfile_id = get_file_id(credentials, gfile)
    if gfile_id is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible spreadsheet")
    wks = get_worksheet(gc, gfile_id, wks_name)
    if wks is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible worksheet")
    raw_data = wks.get_all_values()
    if not raw_data:
        raise ValueError(
            'Worksheet is empty or invalid.')
    start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)
    rows, cols = np.shape(raw_data)
    if start_col_int > cols or (row_names and start_col_int + 1 > cols):
        raise RuntimeError(
            "Start col (%s) out of the table columns (%s)" % (start_col_int +
                                                              row_names, cols))
    if start_row_int > rows or (col_names and start_row_int + 1 > rows):
        raise RuntimeError(
            "Start row (%s) out of the table rows (%s)" % (start_row_int +
                                                           col_names, rows))
    raw_data = [row[start_col_int - 1:] for row in raw_data[start_row_int - 1:]]
    if row_names and col_names:
        row_names = [row[0] for row in raw_data[1:]]
        col_names = raw_data[0][1:]
        raw_data = [row[1:] for row in raw_data[1:]]
    elif row_names:
        row_names = [row[0] for row in raw_data]
        col_names = np.arange(len(raw_data[0]) - 1)
        raw_data = [row[1:] for row in raw_data]
    elif col_names:
        row_names = np.arange(len(raw_data) - 1)
        col_names = raw_data[0]
        raw_data = raw_data[1:]
    else:
        row_names = np.arange(len(raw_data))
        col_names = np.arange(len(raw_data[0]))
    df = pd.DataFrame([pd.Series(row) for row in raw_data], index=row_names)
    df.columns = col_names
    return df
|
python
|
{
"resource": ""
}
|
q7442
|
get_credentials
|
train
|
def get_credentials(credentials=None, client_secret_file=CLIENT_SECRET_FILE, refresh_token=None):
    """Consistently returns a valid credentials object.
    See Also:
        https://developers.google.com/drive/web/quickstart/python
    Args:
        client_secret_file (str): path to client secrets file, defaults to .gdrive_private
        refresh_token (str): path to a user-provided refresh token that is already
            pre-authenticated
        credentials (`~oauth2client.client.OAuth2Credentials`, optional): handle direct
            input of credentials, which will be checked for valid type and
            returned
    Returns:
        `~oauth2client.client.OAuth2Credentials`: google credentials object
    """
    # if the utility was provided credentials just return those
    if credentials:
        if _is_valid_credentials(credentials):
            # auth for gspread
            return credentials
        else:
            print("Invalid credentials supplied. Will generate from default token.")
    token = refresh_token or DEFAULT_TOKEN
    dir_name = os.path.dirname(DEFAULT_TOKEN)
    try:
        os.makedirs(dir_name)
    except OSError:
        if not os.path.isdir(dir_name):
            raise
    store = file.Storage(token)
    credentials = store.get()
    try:
        import argparse
        flags = argparse.ArgumentParser(
            parents=[tools.argparser]).parse_known_args()[0]
    except ImportError:
        flags = None
        logr.error(
            'Unable to parse oauth2client args; `pip install argparse`')
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(
            client_secret_file, SCOPES)
        flow.redirect_uri = client.OOB_CALLBACK_URN
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        logr.info('Storing credentials to ' + DEFAULT_TOKEN)
    return credentials
|
python
|
{
"resource": ""
}
|
q7443
|
create_service_credentials
|
train
|
def create_service_credentials(private_key_file=None, client_email=None,
                               client_secret_file=CLIENT_SECRET_FILE):
    """Create credentials from service account information.
    See Also:
        https://developers.google.com/api-client-library/python/auth/service-accounts
    Args:
        client_secret_file (str): path to json file with just the client_email when
            providing the `private_key_file` separately, or this file can have both the
            `client_email` and `private_key` contained in it. Defaults to .gdrive_private
        client_email (str): service email account
        private_key_file (str): path to the p12 private key, defaults to the same name
            of the file used for regular authentication
    Returns:
        `~oauth2client.client.OAuth2Credentials`: google credentials object
    """
    if private_key_file is not None:
        with open(os.path.expanduser(private_key_file)) as f:
            private_key = f.read()
    else:
        private_key = None
    if client_email is None:
        with open(os.path.expanduser(client_secret_file)) as client_file:
            client_data = json.load(client_file)
            if 'installed' in client_data:
                # handle regular json format where key is separate
                client_email = client_data['installed']['client_id']
                if private_key is None:
                    raise RuntimeError('You must have the private key file '
                                       'with the regular json file. Try creating a new '
                                       'public/private key pair and downloading as json.')
            else:
                # handle newer case where json file has everything in it
                client_email = client_data['client_email']
                private_key = client_data['private_key']
    if client_email is None or private_key is None:
        raise RuntimeError(
            'Client email and/or private key not provided by inputs.')
    credentials = client.SignedJwtAssertionCredentials(
        client_email, private_key, SCOPES)
    return credentials
|
python
|
{
"resource": ""
}
|
q7444
|
get_file_id
|
train
|
def get_file_id(credentials, gfile, write_access=False):
    """
    Get file ID by provided path. If the file does not exist and
    `write_access` is True, it will create the whole path for you.
    :param credentials: provide own credentials
    :param gfile: path to Google Spreadsheet
    :param write_access: allows creating the full path if the file does not exist
    :type credentials: class 'oauth2client.client.OAuth2Credentials'
    :type gfile: str
    :type write_access: boolean
    :returns: file ID
    :rtype: str
    :Example:
        >>> from df2gspread.gfiles import get_file_id
        >>> from df2gspread.utils import get_credentials
        >>> gfile = '/some/folder/with/file'
        >>> credentials = get_credentials()
        >>> get_file_id(credentials=credentials, gfile=gfile, write_access=True)
        u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD'
    """
    # auth for apiclient
    http = credentials.authorize(Http())
    service = discovery.build('drive', 'v3', http=http, cache_discovery=False)
    file_id = service.files().get(fileId='root', fields='id').execute().get('id')
    # folder/folder/folder/spreadsheet
    pathway = gfile.strip('/').split('/')
    for idx, name in enumerate(pathway):
        files = service.files().list(
            q="name = '{}' and trashed = false and '{}' in parents".format(name, file_id)).execute()['files']
        if len(files) > 0:
            # take the first match if several entries share the same name
            file_id = files[0].get('id')
        elif write_access:
            body = {
                'mimeType': 'application/vnd.google-apps.' + ('spreadsheet' if idx == len(pathway) - 1 else 'folder'),
                'name': name,
                'parents': [file_id]
            }
            file_id = service.files().create(body=body, fields='id').execute().get('id')
        else:
            return None
    return file_id
|
python
|
{
"resource": ""
}
|
q7445
|
agg_conc
|
train
|
def agg_conc(original_countries,
             aggregates,
             missing_countries='test',
             merge_multiple_string='_&_',
             log_missing_countries=None,
             log_merge_multiple_strings=None,
             coco=None,
             as_dataframe='sparse',
             original_countries_class=None):
    """ Builds an aggregation concordance dict, vec or matrix
    Parameters
    ----------
    original_countries: list or str
        List of countries to be aggregated; also accepts any valid column
        name of CountryConverter.data
    aggregates: list of dict or str
        List of aggregation information. This can either be a dict mapping
        the names of 'original_countries' to aggregates, or a valid column
        name of CountryConverter.data. Aggregation happens in the order given
        in this parameter. Thus, countries assigned to an aggregate are not
        re-assigned by the following aggregation information.
    missing_countries: str, boolean, None
        Entry to fill in for countries in 'original_countries' which do not
        appear in 'aggregates'.
        str: use the given name for all missing countries
        True: use the name in original_countries for missing countries
        False: skip these countries
        None: use None for these countries
    merge_multiple_string: str or None, optional
        If multiple correspondence entries are given in one of the aggregates,
        join them with the given string (default: '_&_'). To skip these
        entries, pass None.
    log_missing_countries: function, optional
        This function is called with the country name if the country is in
        'original_countries' but missing in all 'aggregates'.
        For example, pass
        lambda x: logging.error('Country {} missing'.format(x))
        to log errors for such countries. Default: do nothing
    log_merge_multiple_strings: function, optional
        Function to call for logging multiple strings, see
        log_missing_countries. Default: do nothing
    coco: instance of CountryConverter, optional
        CountryConverter instance used for the conversion. Pass a custom one
        if additional data is needed in addition to the custom country
        converter file. If None (default), the bare CountryConverter is used
    as_dataframe: boolean or str, optional
        If False, output as OrderedDict. If True or str, output as pandas
        dataframe. If str and 'full', output as a full matrix; otherwise only
        two columns with the original and aggregated names are returned.
    original_countries_class: str, optional
        Valid column name of CountryConverter.data. This parameter is needed
        if a list of countries is passed to 'original_countries' and strings
        corresponding to data in CountryConverter.data are used subsequently.
        Can be omitted otherwise.
    Returns
    -------
    OrderedDict or DataFrame (defined by 'as_dataframe')
    """
    if coco is None:
        coco = CountryConverter()
    if type(original_countries) is str:
        original_countries_class = original_countries
        original_countries = coco.data[original_countries].values
    else:
        original_countries_class = (original_countries_class or
                                    coco._get_input_format_from_name(
                                        original_countries[0]))
    if type(aggregates) is not list:
        aggregates = [aggregates]
    correspond = OrderedDict.fromkeys(original_countries)
    for agg in aggregates:
        if type(agg) is str:
            agg = coco.get_correspondance_dict(original_countries_class,
                                               agg)
        for country in original_countries:
            if correspond.get(country) is None:
                try:
                    entry = agg[country]
                except KeyError:
                    entry = None
                if type(entry) is list:
                    if 1 < len(entry):
                        if merge_multiple_string:
                            entry = merge_multiple_string.join(
                                [str(e) for e in entry])
                        else:
                            entry = None
                            if log_merge_multiple_strings:
                                log_merge_multiple_strings(country)
                    else:
                        entry = entry[0]
                correspond[country] = entry
    for country in original_countries:
        if correspond.get(country) is None:
            if missing_countries is True:
                correspond[country] = country
            elif missing_countries is False:
                del correspond[country]
            else:
                correspond[country] = missing_countries
            if log_missing_countries:
                log_missing_countries(country)
    if as_dataframe:
        correspond = pd.DataFrame.from_dict(
            correspond, orient='index').reset_index()
        correspond.columns = ['original', 'aggregated']
        if ((type(as_dataframe) is str) and
                (as_dataframe[0].lower() == 'f')):
            _co_list = correspond.original
            correspond['val'] = 1
            correspond = correspond.set_index(
                ['original', 'aggregated']).unstack().fillna(0)['val']
            correspond = correspond.loc[_co_list]
    return correspond
|
python
|
{
"resource": ""
}
|
q7446
|
match
|
train
|
def match(list_a, list_b, not_found='not_found', enforce_sublist=False,
          country_data=COUNTRY_DATA_FILE, additional_data=None):
    """ Matches the country names given in two lists into a dictionary.
    This function matches names given in list_a to the ones provided in
    list_b using regular expressions defined in country_data.
    Parameters
    ----------
    list_a : list
        Names of countries to identify
    list_b : list
        Master list of names for countries
    not_found : str, optional
        Fill-in value for not-found entries. If None, keep the input value
        (default: 'not_found')
    enforce_sublist : boolean, optional
        If True, all entries in both lists are lists.
        If False (default), only multiple matches are lists; the rest are strings
    country_data : Pandas DataFrame or path to data file (optional)
        This is by default set to COUNTRY_DATA_FILE - the standard (tested)
        country list for coco.
    additional_data: (list of) Pandas DataFrames or data files (optional)
        Additional data to include for a specific analysis.
        This must be given in the same format as specified in the
        country_data file. (utf-8 encoded tab-separated data, same
        column headers in all files)
    Returns
    -------
    dict:
        A dictionary with a key for every entry in list_a. The value
        corresponds to the matching entry in list_b if found. If there is
        a 1:1 correspondence, the value is a str (if enforce_sublist is False),
        otherwise multiple entries are given as a list.
    """
    if isinstance(list_a, str):
        list_a = [list_a]
    if isinstance(list_b, str):
        list_b = [list_b]
    if isinstance(list_a, tuple):
        list_a = list(list_a)
    if isinstance(list_b, tuple):
        list_b = list(list_b)
    coco = CountryConverter(country_data, additional_data)
    name_dict_a = dict()
    match_dict_a = dict()
    for name_a in list_a:
        name_dict_a[name_a] = []
        match_dict_a[name_a] = []
        for regex in coco.regexes:
            if regex.search(name_a):
                match_dict_a[name_a].append(regex)
        if len(match_dict_a[name_a]) == 0:
            logging.warning('Could not identify {} in list_a'.format(name_a))
            _not_found_entry = name_a if not not_found else not_found
            name_dict_a[name_a].append(_not_found_entry)
            if not enforce_sublist:
                name_dict_a[name_a] = name_dict_a[name_a][0]
            continue
        if len(match_dict_a[name_a]) > 1:
            logging.warning(
                'Multiple matches for name {} in list_a'.format(name_a))
        for match_case in match_dict_a[name_a]:
            b_matches = 0
            for name_b in list_b:
                if match_case.search(name_b):
                    b_matches += 1
                    name_dict_a[name_a].append(name_b)
            if b_matches == 0:
                logging.warning(
                    'Could not find any '
                    'correspondence for {} in list_b'.format(name_a))
                _not_found_entry = name_a if not not_found else not_found
                name_dict_a[name_a].append(_not_found_entry)
            if b_matches > 1:
                logging.warning('Multiple matches for '
                                'name {} in list_b'.format(name_a))
        if not enforce_sublist and (len(name_dict_a[name_a]) == 1):
            name_dict_a[name_a] = name_dict_a[name_a][0]
    return name_dict_a
|
python
|
{
"resource": ""
}
|
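A hedged sketch of matching two ad-hoc lists (output is illustrative and depends on the regexes shipped in country_data):

```python
list_a = ['United States', 'Germany']
list_b = ['USA', 'Germany', 'France']
match(list_a, list_b)
# e.g. {'United States': 'USA', 'Germany': 'Germany'}
```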
q7447
|
_parse_arg
|
train
|
def _parse_arg(valid_classifications):
    """ Command line parser for coco
    Parameters
    ----------
    valid_classifications: list
        Available classifications, used for checking input parameters.
    Returns
    -------
    args : ArgumentParser namespace
    """
    parser = argparse.ArgumentParser(
        description=('The country converter (coco): a Python package for '
                     'converting country names between '
                     'different classification schemes. '
                     'Version: {}'.format(__version__)
                     ), prog='coco', usage=('%(prog)s --names --src --to'))
    parser.add_argument('names',
                        help=('List of countries to convert '
                              '(space separated; country names consisting of '
                              'multiple words must be put in quotation marks). '
                              'Possible classifications: ' +
                              ', '.join(valid_classifications) +
                              '; NB: long, official and short are provided '
                              'as shortcuts for the name classifications'
                              ), nargs='*')
    parser.add_argument('-s', '--src', '--source', '-f', '--from',
                        help=('Classification of the names given '
                              '(default: inferred from names)'))
    parser.add_argument('-t', '--to',
                        help=('Required classification of the passed names '
                              '(default: "ISO3")'))
    parser.add_argument('-o', '--output_sep',
                        help=('Separator for output names '
                              '(default: space), e.g. ","'))
    parser.add_argument('-n', '--not_found',
                        default='not found',
                        help=('Fill-in value for none-found entries. '
                              'If "None" (string), keep the input value '
                              '(default: not found)'))
    parser.add_argument('-a', '--additional_data',
                        help=('Data file with additional country data '
                              '(same format as the original data file - '
                              'utf-8 encoded tab-separated data, same '
                              'column headers as in the general country '
                              'data file).'))
    args = parser.parse_args()
    args.src = args.src or None
    args.to = args.to or 'ISO3'
    args.not_found = args.not_found if args.not_found != 'None' else None
    args.output_sep = args.output_sep or ' '
    return args
|
python
|
{
"resource": ""
}
|
q7448
|
main
|
train
|
def main():
    """ Main entry point - used for command line call
    """
    args = _parse_arg(CountryConverter().valid_class)
    coco = CountryConverter(additional_data=args.additional_data)
    converted_names = coco.convert(
        names=args.names,
        src=args.src,
        to=args.to,
        enforce_list=False,
        not_found=args.not_found)
    print(args.output_sep.join(
        [str(etr) for etr in converted_names] if
        isinstance(converted_names, list) else [str(converted_names)]))
|
python
|
{
"resource": ""
}
|
q7449
|
CountryConverter._separate_exclude_cases
|
train
|
def _separate_exclude_cases(name, exclude_prefix):
    """ Splits the name at the given exclusion prefixes.
    Parameters
    ----------
    name : str
        Name of the country/region to convert.
    exclude_prefix : list of valid regex strings
        List of indicators which negate the subsequent country/region.
        These prefixes and everything following will not be converted.
        E.g. 'Asia excluding China' becomes 'Asia' and
        'China excluding Hong Kong' becomes 'China' prior to conversion
    Returns
    -------
    dict with
        'clean_name' : str
            name without anything following exclude_prefix
        'excluded_countries' : list
            list of excluded countries
    """
    excluder = re.compile('|'.join(exclude_prefix))
    split_entries = excluder.split(name)
    return {'clean_name': split_entries[0],
            'excluded_countries': split_entries[1:]}
|
python
|
{
"resource": ""
}
|
q7450
|
CountryConverter.convert
|
train
|
def convert(self, names, src=None, to='ISO3', enforce_list=False,
            not_found='not found',
            exclude_prefix=['excl\\w.*', 'without', 'w/o']):
    """ Convert names from a list to another list.
    Note
    ----
    A lot of the functionality can also be done directly in Pandas
    DataFrames.
    For example:
        coco = CountryConverter()
        names = ['USA', 'SWZ', 'PRI']
        coco.data[coco.data['ISO3'].isin(names)][['ISO2', 'continent']]
    Parameters
    ----------
    names : str or list like
        Countries in 'src' classification to convert
        to 'to' classification
    src : str, optional
        Source classification. If None (default), each passed name is
        checked if it is a number (assuming UNnumeric) or 2 (ISO2) or
        3 (ISO3) characters long; for longer names 'regex' is assumed.
    to : str, optional
        Output classification (valid index of the country_data file),
        default: ISO3
    enforce_list : boolean, optional
        If True, enforces the output to be a list (if only one name was
        passed) or to be a list of lists (if multiple names were passed).
        If False (default), the output will be a string (if only one name
        was passed) or a list of str and/or lists (str for a one-to-one
        matching, list otherwise).
    not_found : str, optional
        Fill-in value for none-found entries. If None, keep the input value
        (default: 'not found')
    exclude_prefix : list of valid regex strings
        List of indicators which negate the subsequent country/region.
        These prefixes and everything following will not be converted.
        E.g. 'Asia excluding China' becomes 'Asia' and
        'China excluding Hong Kong' becomes 'China' prior to conversion
        Default: ['excl\\w.*', 'without', 'w/o']
    Returns
    -------
    list or str, depending on enforce_list
    """
    # The list to tuple conversion is necessary for the Matlab interface
    names = list(names) if (
        isinstance(names, tuple) or
        isinstance(names, set)) else names
    names = names if isinstance(names, list) else [names]
    names = [str(n) for n in names]
    outlist = names.copy()
    to = [self._validate_input_para(to, self.data.columns)]
    exclude_split = {name: self._separate_exclude_cases(name,
                                                        exclude_prefix)
                     for name in names}
    for ind_names, current_name in enumerate(names):
        spec_name = exclude_split[current_name]['clean_name']
        if src is None:
            src_format = self._get_input_format_from_name(spec_name)
        else:
            src_format = self._validate_input_para(src, self.data.columns)
        if src_format.lower() == 'regex':
            result_list = []
            for ind_regex, ccregex in enumerate(self.regexes):
                if ccregex.search(spec_name):
                    result_list.append(
                        self.data.ix[ind_regex, to].values[0])
            if len(result_list) > 1:
                logging.warning('More than one regular expression '
                                'match for {}'.format(spec_name))
        else:
            _match_col = self.data[src_format].astype(
                str).str.replace('\\..*', '')
            result_list = [etr[0] for etr in
                           self.data[_match_col.str.contains(
                               '^' + spec_name + '$', flags=re.IGNORECASE,
                               na=False)][to].values]
        if len(result_list) == 0:
            logging.warning(
                '{} not found in {}'.format(spec_name, src_format))
            _fillin = not_found or spec_name
            outlist[ind_names] = [_fillin] if enforce_list else _fillin
        else:
            outlist[ind_names] = []
            for etr in result_list:
                try:
                    conv_etr = int(etr)
                except ValueError:
                    conv_etr = etr
                outlist[ind_names].append(conv_etr)
            if len(outlist[ind_names]) == 1 and enforce_list is False:
                outlist[ind_names] = outlist[ind_names][0]
    if (len(outlist) == 1) and not enforce_list:
        return outlist[0]
    else:
        return outlist
|
python
|
{
"resource": ""
}
|
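A short usage sketch; the outputs follow from the inference rules in the docstring (2 characters → ISO2, 3 → ISO3):

```python
coco = CountryConverter()
coco.convert(names=['US', 'DE'], to='name_short')  # -> ['United States', 'Germany']
coco.convert(names='DEU', to='ISO2')               # single name in -> 'DE' out
coco.convert(names=['US', 'DE'], to='ISO3', enforce_list=True)
# -> [['USA'], ['DEU']]
```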
q7451
|
CountryConverter.EU28as
|
train
|
def EU28as(self, to='name_short'):
    """
    Return EU28 countries in the specified classification
    Parameters
    ----------
    to : str, optional
        Output classification (valid str for an index of the
        country_data file), default: name_short
    Returns
    -------
    Pandas DataFrame
    """
    if isinstance(to, str):
        to = [to]
    return self.data[self.data.EU < 2015][to]
|
python
|
{
"resource": ""
}
|
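The membership getters below all share one pattern: filter `self.data` on a membership column, then project the requested classification columns. For instance:

```python
cc = CountryConverter()
cc.EU28as('ISO2')            # DataFrame with a single 'ISO2' column
cc.EU28as(['ISO2', 'ISO3'])  # a list of classifications also works
```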
q7452
|
CountryConverter.EU27as
|
train
|
def EU27as(self, to='name_short'):
    """
    Return EU27 countries in the specified classification
    Parameters
    ----------
    to : str, optional
        Output classification (valid str for an index of the
        country_data file), default: name_short
    Returns
    -------
    Pandas DataFrame
    """
    if isinstance(to, str):
        to = [to]
    return self.data[self.data.EU < 2013][to]
|
python
|
{
"resource": ""
}
|
q7453
|
CountryConverter.OECDas
|
train
|
def OECDas(self, to='name_short'):
    """
    Return OECD member states in the specified classification
    Parameters
    ----------
    to : str, optional
        Output classification (valid str for an index of the
        country_data file), default: name_short
    Returns
    -------
    Pandas DataFrame
    """
    if isinstance(to, str):
        to = [to]
    return self.data[self.data.OECD > 0][to]
|
python
|
{
"resource": ""
}
|
q7454
|
CountryConverter.UNas
|
train
|
def UNas(self, to='name_short'):
    """
    Return UN member states in the specified classification
    Parameters
    ----------
    to : str, optional
        Output classification (valid str for an index of the
        country_data file), default: name_short
    Returns
    -------
    Pandas DataFrame
    """
    if isinstance(to, str):
        to = [to]
    return self.data[self.data.UNmember > 0][to]
|
python
|
{
"resource": ""
}
|
q7455
|
CountryConverter.obsoleteas
|
train
|
def obsoleteas(self, to='name_short'):
    """
    Return obsolete countries in the specified classification
    Parameters
    ----------
    to : str, optional
        Output classification (valid str for an index of the
        country_data file), default: name_short
    Returns
    -------
    Pandas DataFrame
    """
    if isinstance(to, str):
        to = [to]
    return self.data[self.data.obsolete > 0][to]
|
python
|
{
"resource": ""
}
|
q7456
|
CountryConverter.get_correspondance_dict
|
train
|
def get_correspondance_dict(self, classA, classB,
                            restrict=None,
                            replace_numeric=True):
    """ Returns a correspondence between classification A and B as dict
    Parameters
    ----------
    classA: str
        Valid classification (column name of data)
    classB: str
        Valid classification (column name of data).
    restrict: boolean vector of size cc.data, optional
        where cc is the name of the CountryConverter instance. Used to
        restrict the data sheet if necessary. E.g. to convert to countries
        which were OECD members before 1970 use
        cc.get_correspondance_dict('ISO3', 'OECD', restrict=cc.data.OECD < 1970)
    replace_numeric: boolean, optional
        If True (default), replace numeric values with the column header.
        This can be used to get a correspondence to, for example, 'OECD'
        instead of to the OECD membership years. Set to False if the actual
        numbers are required (as for UNcode).
    Returns
    -------
    dict with
        keys: based on classA
        items: list of corresponding entries in classB, or None
    """
    result = {nn: None for nn in self.data[classA].values}
    if restrict is None:
        df = self.data.copy()
    else:
        df = self.data[restrict].copy()
    if replace_numeric and df[classB].dtype.kind in 'bifc':
        df.loc[~df[classB].isnull(), classB] = classB
        df.loc[df[classB].isnull(), classB] = None
    result.update(df.groupby(classA)
                  .aggregate(lambda x: list(x.unique()))
                  .to_dict()[classB])
    return result
|
python
|
{
"resource": ""
}
|
q7457
|
CountryConverter._validate_input_para
|
train
|
def _validate_input_para(self, para, column_names):
    """ Convert the input classification para to the correct df column name
    Parameters
    ----------
    para : string
    column_names : list of strings
    Returns
    -------
    validated_para : string
        Converted to the case used in the country file
    """
    lower_case_valid_class = [et.lower() for et in self.valid_class]
    alt_valid_names = {
        'name_short': ['short', 'short_name', 'name', 'names'],
        'name_official': ['official', 'long_name', 'long'],
        'UNcode': ['un', 'unnumeric'],
        'ISOnumeric': ['isocode'],
    }
    for item in alt_valid_names.items():
        if para.lower() in item[1]:
            para = item[0]
    try:
        validated_para = self.valid_class[
            lower_case_valid_class.index(para.lower())]
    except ValueError:
        raise KeyError(
            '{} is not a valid country classification'.format(para))
    return validated_para
|
python
|
{
"resource": ""
}
|
q7458
|
CountryConverter._get_input_format_from_name
|
train
|
def _get_input_format_from_name(self, name):
    """ Determines the input format based on the given country name
    Parameters
    ----------
    name : string
    Returns
    -------
    string : valid input format
    """
    try:
        int(name)
        src_format = 'ISOnumeric'
    except ValueError:
        if len(name) == 2:
            src_format = 'ISO2'
        elif len(name) == 3:
            src_format = 'ISO3'
        else:
            src_format = 'regex'
    return src_format
|
python
|
{
"resource": ""
}
|
q7459
|
Bernstein
|
train
|
def Bernstein(n, k):
    """Bernstein polynomial.
    """
    coeff = binom(n, k)

    def _bpoly(x):
        return coeff * x ** k * (1 - x) ** (n - k)

    return _bpoly
|
python
|
{
"resource": ""
}
|
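Two quick sanity checks on the closure returned by `Bernstein` (assuming `binom` comes from `scipy.special`, as the snippet implies):

```python
from scipy.special import binom  # assumed source of `binom`

Bernstein(2, 1)(0.5)                         # 2 * 0.5 * 0.5 = 0.5
sum(Bernstein(3, k)(0.4) for k in range(4))  # the basis sums to 1.0 at any x
```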
q7460
|
_read_in_thread
|
train
|
def _read_in_thread(address, pty, blocking):
    """Read data from the pty in a thread.
    """
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(address)
    while 1:
        data = pty.read(4096, blocking=blocking)
        if not data and not pty.isalive():
            while not data and not pty.iseof():
                data += pty.read(4096, blocking=blocking)
            if not data:
                try:
                    client.send(b'')
                except socket.error:
                    pass
                break
        try:
            client.send(data)
        except socket.error:
            break
    client.close()
|
python
|
{
"resource": ""
}
|
q7461
|
PtyProcess.read
|
train
|
def read(self, size=1024):
    """Read and return at most ``size`` characters from the pty.
    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.
    """
    data = self.fileobj.recv(size)
    if not data:
        self.flag_eof = True
        raise EOFError('Pty is closed')
    return self.decoder.decode(data, final=False)
|
python
|
{
"resource": ""
}
|
q7462
|
PtyProcess.readline
|
train
|
def readline(self):
    """Read one line from the pseudoterminal, as a string.
    Can block if there is nothing to read. Raises :exc:`EOFError` if the
    terminal was closed.
    """
    buf = []
    while 1:
        try:
            ch = self.read(1)
        except EOFError:
            return ''.join(buf)
        buf.append(ch)
        if ch == '\n':
            return ''.join(buf)
|
python
|
{
"resource": ""
}
|
q7463
|
PtyProcess.write
|
train
|
def write(self, s):
    """Write the string ``s`` to the pseudoterminal.
    Returns the number of bytes written.
    """
    if not self.isalive():
        raise EOFError('Pty is closed')
    if PY2:
        s = _unicode(s)
    success, nbytes = self.pty.write(s)
    if not success:
        raise IOError('Write failed')
    return nbytes
|
python
|
{
"resource": ""
}
|
q7464
|
PtyProcess.terminate
|
train
|
def terminate(self, force=False):
"""This forces a child process to terminate."""
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
|
python
|
{
"resource": ""
}
|
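The escalation pattern (polite signal, short wait, then optional SIGKILL) carries over directly to `subprocess` on POSIX; a sketch for readers without a Windows pty at hand, not the winpty implementation:

import signal
import subprocess
import time

def terminate(proc, delay=0.1, force=False):
    # Polite interrupt first; escalate to SIGKILL only when forced.
    if proc.poll() is not None:
        return True
    proc.send_signal(signal.SIGINT)
    time.sleep(delay)
    if proc.poll() is not None:
        return True
    if force:
        proc.kill()
        time.sleep(delay)
    return proc.poll() is not None

p = subprocess.Popen(['sleep', '30'])
assert terminate(p, force=True)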
q7465
|
PtyProcess.setwinsize
|
train
|
def setwinsize(self, rows, cols):
"""Set the terminal window size of the child tty.
"""
self._winsize = (rows, cols)
self.pty.set_size(cols, rows)
|
python
|
{
"resource": ""
}
|
q7466
|
PTY.read
|
train
|
def read(self, length=1000, blocking=False):
"""
Read ``length`` bytes from current process output stream.
        Note: this method is not truly non-blocking, but it behaves
        like one by checking the pipe's available size before reading.
"""
size_p = PLARGE_INTEGER(LARGE_INTEGER(0))
if not blocking:
windll.kernel32.GetFileSizeEx(self.conout_pipe, size_p)
size = size_p[0]
length = min(size, length)
data = ctypes.create_string_buffer(length)
if length > 0:
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
ReadFile(self.conout_pipe, data, length, num_bytes, None)
return data.value
|
python
|
{
"resource": ""
}
|
q7467
|
PTY.write
|
train
|
def write(self, data):
"""Write string data to current process input stream."""
data = data.encode('utf-8')
data_p = ctypes.create_string_buffer(data)
num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
bytes_to_write = len(data)
success = WriteFile(self.conin_pipe, data_p,
bytes_to_write, num_bytes, None)
return success, num_bytes[0]
|
python
|
{
"resource": ""
}
|
q7468
|
PTY.close
|
train
|
def close(self):
"""Close all communication process streams."""
windll.kernel32.CloseHandle(self.conout_pipe)
windll.kernel32.CloseHandle(self.conin_pipe)
|
python
|
{
"resource": ""
}
|
q7469
|
PTY.iseof
|
train
|
def iseof(self):
"""Check if current process streams are still open."""
succ = windll.kernel32.PeekNamedPipe(
self.conout_pipe, None, None, None, None, None
)
return not bool(succ)
|
python
|
{
"resource": ""
}
|
q7470
|
IP.bin
|
train
|
def bin(self):
"""Full-length binary representation of the IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.bin())
01111111000000000000000000000001
"""
        bits = 32 if self.v == 4 else 128
return bin(self.ip).split('b')[1].rjust(bits, '0')
|
python
|
{
"resource": ""
}
|
q7471
|
IP.info
|
train
|
def info(self):
"""Show IANA allocation information for the current IP address.
>>> ip = IP("127.0.0.1")
>>> print(ip.info())
LOOPBACK
"""
b = self.bin()
for i in range(len(b), 0, -1):
if b[:i] in self._range[self.v]:
return self._range[self.v][b[:i]]
return 'UNKNOWN'
|
python
|
{
"resource": ""
}
|
q7472
|
IP._dqtoi
|
train
|
def _dqtoi(self, dq):
"""Convert dotquad or hextet to long."""
# hex notation
if dq.startswith('0x'):
return self._dqtoi_hex(dq)
# IPv6
if ':' in dq:
return self._dqtoi_ipv6(dq)
elif len(dq) == 32:
# Assume full heximal notation
self.v = 6
return int(dq, 16)
# IPv4
if '.' in dq:
return self._dqtoi_ipv4(dq)
raise ValueError('Invalid address input')
|
python
|
{
"resource": ""
}
|
q7473
|
IP._itodq
|
train
|
def _itodq(self, n):
"""Convert long to dotquad or hextet."""
if self.v == 4:
return '.'.join(map(str, [
(n >> 24) & 0xff,
(n >> 16) & 0xff,
(n >> 8) & 0xff,
n & 0xff,
]))
else:
n = '%032x' % n
return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8))
|
python
|
{
"resource": ""
}
|
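The IPv4 half of the two conversions above, restated as plain functions with a round-trip check (names are local to this sketch):

def ipv4_to_long(dq):
    a, b, c, d = (int(part) for part in dq.split('.'))
    return (a << 24) | (b << 16) | (c << 8) | d

def long_to_ipv4(n):
    return '.'.join(str((n >> shift) & 0xff) for shift in (24, 16, 8, 0))

assert ipv4_to_long('127.0.0.1') == 2130706433
assert long_to_ipv4(ipv4_to_long('192.0.2.42')) == '192.0.2.42'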
q7474
|
IP.to_compressed
|
train
|
def to_compressed(self):
"""
        Compress an IP address to its shortest possible form.
>>> print(IP('127.0.0.1').to_compressed())
127.1
>>> print(IP('127.1.0.1').to_compressed())
127.1.1
>>> print(IP('127.0.1.1').to_compressed())
127.0.1.1
>>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
2001:1234::5678
>>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed())
1234:0:0:beef::5678
>>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed())
::1
>>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
fe80::
"""
if self.v == 4:
quads = self.dq.split('.')
try:
zero = quads.index('0')
if zero == 1 and quads.index('0', zero + 1):
quads.pop(zero)
quads.pop(zero)
return '.'.join(quads)
elif zero == 2:
quads.pop(zero)
return '.'.join(quads)
except ValueError: # No zeroes
pass
return self.dq
else:
quads = map(lambda q: '%x' % (int(q, 16)), self.dq.split(':'))
quadc = ':%s:' % (':'.join(quads),)
zeros = [0, -1]
# Find the largest group of zeros
for match in re.finditer(r'(:[:0]+)', quadc):
count = len(match.group(1)) - 1
if count > zeros[0]:
zeros = [count, match.start(1)]
count, where = zeros
if count:
quadc = quadc[:where] + ':' + quadc[where + count:]
quadc = re.sub(r'((^:)|(:$))', '', quadc)
quadc = re.sub(r'((^:)|(:$))', '::', quadc)
return quadc
|
python
|
{
"resource": ""
}
|
q7475
|
IP.from_bin
|
train
|
def from_bin(cls, value):
"""Initialize a new network from binary notation."""
        if value.startswith('0b'):
            value = value[2:]
if len(value) == 32:
return cls(int(value, 2))
elif len(value) == 128:
return cls(int(value, 2))
else:
            raise ValueError('%r: invalid binary notation' % (value,))
|
python
|
{
"resource": ""
}
|
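With the `raise` fix above in place, `from_bin` round-trips with `bin()`; a usage sketch assuming the `ipcalc` package (from which these methods are taken) is installed:

from ipcalc import IP  # assumes python-ipcalc is installed

ip = IP('127.0.0.1')
assert len(ip.bin()) == 32
assert str(IP.from_bin(ip.bin())) == '127.0.0.1'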
q7476
|
IP.from_hex
|
train
|
def from_hex(cls, value):
"""Initialize a new network from hexadecimal notation."""
if len(value) == 8:
return cls(int(value, 16))
elif len(value) == 32:
return cls(int(value, 16))
else:
raise ValueError('%r: invalid hexadecimal notation' % (value,))
|
python
|
{
"resource": ""
}
|
q7477
|
IP.to_reverse
|
train
|
def to_reverse(self):
"""Convert the IP address to a PTR record.
Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses.
>>> ip = IP('192.0.2.42')
>>> print(ip.to_reverse())
42.2.0.192.in-addr.arpa
>>> print(ip.to_ipv6().to_reverse())
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
"""
if self.v == 4:
return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa'])
else:
return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa'])
|
python
|
{
"resource": ""
}
|
q7478
|
Network.netmask_long
|
train
|
def netmask_long(self):
"""
Network netmask derived from subnet size, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.netmask_long())
4278190080
"""
if self.version() == 4:
return (MAX_IPV4 >> (32 - self.mask)) << (32 - self.mask)
else:
return (MAX_IPV6 >> (128 - self.mask)) << (128 - self.mask)
|
python
|
{
"resource": ""
}
|
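The shift trick is easy to verify standalone; `MAX_IPV4` is assumed to be 2**32 - 1, as in ipcalc:

MAX_IPV4 = 0xffffffff  # assumed constant

def netmask_long(mask):
    # Keep the top `mask` bits set, clear the rest.
    return (MAX_IPV4 >> (32 - mask)) << (32 - mask)

assert netmask_long(8) == 4278190080      # 255.0.0.0, matches the doctest above
assert netmask_long(24) == 0xffffff00     # 255.255.255.0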
q7479
|
Network.broadcast_long
|
train
|
def broadcast_long(self):
"""
Broadcast address, as long.
>>> localnet = Network('127.0.0.1/8')
>>> print(localnet.broadcast_long())
2147483647
"""
if self.version() == 4:
return self.network_long() | (MAX_IPV4 - self.netmask_long())
else:
return self.network_long() \
| (MAX_IPV6 - self.netmask_long())
|
python
|
{
"resource": ""
}
|
q7480
|
Network.host_first
|
train
|
def host_first(self):
"""First available host in this subnet."""
if (self.version() == 4 and self.mask > 30) or \
(self.version() == 6 and self.mask > 126):
return self
else:
return IP(self.network_long() + 1, version=self.version())
|
python
|
{
"resource": ""
}
|
q7481
|
Network.host_last
|
train
|
def host_last(self):
"""Last available host in this subnet."""
if (self.version() == 4 and self.mask == 32) or \
(self.version() == 6 and self.mask == 128):
return self
elif (self.version() == 4 and self.mask == 31) or \
(self.version() == 6 and self.mask == 127):
return IP(int(self) + 1, version=self.version())
else:
return IP(self.broadcast_long() - 1, version=self.version())
|
python
|
{
"resource": ""
}
|
q7482
|
Network.check_collision
|
train
|
def check_collision(self, other):
"""Check another network against the given network."""
other = Network(other)
return self.network_long() <= other.network_long() <= self.broadcast_long() or \
other.network_long() <= self.network_long() <= other.broadcast_long()
|
python
|
{
"resource": ""
}
|
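Because overlap is symmetric, the check tests containment in both directions; a usage sketch assuming the `ipcalc` package:

from ipcalc import Network  # assumes python-ipcalc is installed

assert Network('192.168.0.0/24').check_collision('192.168.0.128/25')
assert not Network('192.168.0.0/24').check_collision('192.168.1.0/24')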
q7483
|
CmsModelList.init_with_context
|
train
|
def init_with_context(self, context):
"""
Initialize the menu.
"""
# Apply the include/exclude patterns:
listitems = self._visible_models(context['request'])
        # Convert to a data structure similar to the one the dashboard icons use.
        # This allows sorting the items identically.
models = [
{'name': model._meta.model_name,
'app_name': model._meta.app_label,
'title': capfirst(model._meta.verbose_name_plural),
'url': self._get_admin_change_url(model, context)
}
for model, perms in listitems if self.is_item_visible(model, perms)
]
# Sort models.
sort_cms_models(models)
# Convert to items
for model in models:
self.children.append(items.MenuItem(title=model['title'], url=model['url']))
|
python
|
{
"resource": ""
}
|
q7484
|
ReturnToSiteItem.get_edited_object
|
train
|
def get_edited_object(self, request):
"""
Return the object which is currently being edited.
Returns ``None`` if the match could not be made.
"""
resolvermatch = urls.resolve(request.path_info)
if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
# In "appname_modelname_change" view of the admin.
# Extract the appname and model from the url name.
# For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
match = RE_CHANGE_URL.match(resolvermatch.url_name)
if not match:
return None
# object_id can be string (e.g. a country code as PK).
try:
object_id = resolvermatch.kwargs['object_id'] # Django 2.0+
except KeyError:
object_id = resolvermatch.args[0]
return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
return None
|
python
|
{
"resource": ""
}
|
q7485
|
get_application_groups
|
train
|
def get_application_groups():
"""
Return the applications of the system, organized in various groups.
    These groups are not tied to specific application names,
    but rather to patterns of applications.
"""
groups = []
for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
# Allow to pass all possible arguments to the DashboardModule class.
module_kwargs = groupdict.copy()
# However, the 'models' is treated special, to have catch-all support.
if '*' in groupdict['models']:
default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
del module_kwargs['models']
else:
default_module = 'CmsAppIconList'
        # Get module to display, can be an alias for known variations.
module = groupdict.get('module', default_module)
if module in MODULE_ALIASES:
module = MODULE_ALIASES[module]
module_kwargs['module'] = module
groups.append((title, module_kwargs),)
return groups
|
python
|
{
"resource": ""
}
|
q7486
|
is_cms_app
|
train
|
def is_cms_app(app_name):
"""
Return whether the given application is a CMS app
"""
for pat in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES:
if fnmatch(app_name, pat):
return True
return False
|
python
|
{
"resource": ""
}
|
q7487
|
get_cms_model_order
|
train
|
def get_cms_model_order(model_name):
"""
Return a numeric ordering for a model name.
"""
for (name, order) in iteritems(appsettings.FLUENT_DASHBOARD_CMS_MODEL_ORDER):
if name in model_name:
return order
return 999
|
python
|
{
"resource": ""
}
|
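Note the ordering uses substring matching, so one pattern can cover several model names; a toy illustration with a hypothetical order dict:

MODEL_ORDER = {'page': 1, 'article': 2}  # hypothetical settings value

def get_order(model_name):
    for name, order in MODEL_ORDER.items():
        if name in model_name:  # substring match, not equality
            return order
    return 999  # unknown models sort last

assert get_order('flatpage') == 1
assert get_order('newsarticle') == 2
assert get_order('comment') == 999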
q7488
|
FluentMenu.init_with_context
|
train
|
def init_with_context(self, context):
"""
Initialize the menu items.
"""
site_name = get_admin_site_name(context)
self.children += [
items.MenuItem(_('Dashboard'), reverse('{0}:index'.format(site_name))),
items.Bookmarks(),
]
for title, kwargs in get_application_groups():
if kwargs.get('enabled', True):
self.children.append(CmsModelList(title, **kwargs))
self.children += [
ReturnToSiteItem()
]
|
python
|
{
"resource": ""
}
|
q7489
|
PersonalModule.init_with_context
|
train
|
def init_with_context(self, context):
"""
Initializes the link list.
"""
super(PersonalModule, self).init_with_context(context)
current_user = context['request'].user
if django.VERSION < (1, 5):
current_username = current_user.first_name or current_user.username
else:
current_username = current_user.get_short_name() or current_user.get_username()
site_name = get_admin_site_name(context)
# Personalize
self.title = _('Welcome,') + ' ' + (current_username)
# Expose links
self.pages_link = None
self.pages_title = None
self.password_link = reverse('{0}:password_change'.format(site_name))
self.logout_link = reverse('{0}:logout'.format(site_name))
if self.cms_page_model:
try:
app_label, model_name = self.cms_page_model
model = apps.get_model(app_label, model_name)
pages_title = model._meta.verbose_name_plural.lower()
pages_link = reverse('{site}:{app}_{model}_changelist'.format(site=site_name, app=app_label.lower(), model=model_name.lower()))
except AttributeError:
                raise ImproperlyConfigured("The value {0} of the FLUENT_DASHBOARD_CMS_PAGE_MODEL setting (or cms_page_model value) does not refer to an existing model.".format(self.cms_page_model))
except NoReverseMatch:
pass
else:
# Also check if the user has permission to view the module.
# TODO: When there are modules that use Django 1.8's has_module_permission, add the support here.
permission_name = 'change_{0}'.format(model._meta.model_name.lower())
if current_user.has_perm('{0}.{1}'.format(model._meta.app_label, permission_name)):
self.pages_title = pages_title
self.pages_link = pages_link
|
python
|
{
"resource": ""
}
|
q7490
|
AppIconList.get_icon_url
|
train
|
def get_icon_url(self, icon):
"""
Replaces the "icon name" with a full usable URL.
* When the icon is an absolute URL, it is used as-is.
* When the icon contains a slash, it is relative from the ``STATIC_URL``.
* Otherwise, it's relative to the theme url folder.
"""
if not icon.startswith('/') \
and not icon.startswith('http://') \
and not icon.startswith('https://'):
if '/' in icon:
return self.icon_static_root + icon
else:
return self.icon_theme_root + icon
else:
return icon
|
python
|
{
"resource": ""
}
|
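The three URL-resolution branches, restated as a free function with hypothetical roots so the rules can be checked directly:

ICON_STATIC_ROOT = '/static/'        # hypothetical value of icon_static_root
ICON_THEME_ROOT = '/static/theme/'   # hypothetical value of icon_theme_root

def icon_url(icon):
    if not icon.startswith(('/', 'http://', 'https://')):
        return (ICON_STATIC_ROOT if '/' in icon else ICON_THEME_ROOT) + icon
    return icon  # absolute path or full URL: use as-is

assert icon_url('https://cdn.example/x.png') == 'https://cdn.example/x.png'
assert icon_url('myapp/icon.png') == '/static/myapp/icon.png'
assert icon_url('icon.png') == '/static/theme/icon.png'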
q7491
|
CacheStatusGroup.init_with_context
|
train
|
def init_with_context(self, context):
"""
Initializes the status list.
"""
super(CacheStatusGroup, self).init_with_context(context)
if 'dashboardmods' in settings.INSTALLED_APPS:
import dashboardmods
memcache_mods = dashboardmods.get_memcache_dash_modules()
try:
varnish_mods = dashboardmods.get_varnish_dash_modules()
except (socket.error, KeyError) as e:
# dashboardmods 2.2 throws KeyError for 'cache_misses' when the Varnish cache is empty.
# Socket errors are also ignored, to work similar to the memcache stats.
logger.exception("Unable to request Varnish stats: {0}".format(str(e)))
varnish_mods = []
except ImportError:
varnish_mods = []
self.children = memcache_mods + varnish_mods
|
python
|
{
"resource": ""
}
|
q7492
|
get_requirements
|
train
|
def get_requirements():
"""Get the dependencies."""
with open("requirements/project.txt") as f:
requirements = []
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
requirements.append(line)
return requirements
|
python
|
{
"resource": ""
}
|
q7493
|
_cached_css_compile
|
train
|
def _cached_css_compile(pattern, namespaces, custom, flags):
"""Cached CSS compile."""
custom_selectors = process_custom(custom)
return cm.SoupSieve(
pattern,
CSSParser(pattern, custom=custom_selectors, flags=flags).process_selectors(),
namespaces,
custom,
flags
)
|
python
|
{
"resource": ""
}
|
q7494
|
process_custom
|
train
|
def process_custom(custom):
"""Process custom."""
custom_selectors = {}
if custom is not None:
for key, value in custom.items():
name = util.lower(key)
if RE_CUSTOM.match(name) is None:
raise SelectorSyntaxError("The name '{}' is not a valid custom pseudo-class name".format(name))
if name in custom_selectors:
raise KeyError("The custom selector '{}' has already been registered".format(name))
custom_selectors[css_unescape(name)] = value
return custom_selectors
|
python
|
{
"resource": ""
}
|
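This validation backs soupsieve's public custom-selector support; a usage sketch assuming `soupsieve` and `beautifulsoup4` are installed (the `:--heading` name is made up for the demo):

import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup('<h1>a</h1><p>b</p><h2>c</h2>', 'html.parser')
custom = {':--heading': 'h1, h2, h3'}  # hypothetical alias
print([tag.name for tag in sv.select(':--heading', soup, custom=custom)])
# ['h1', 'h2']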
q7495
|
css_unescape
|
train
|
def css_unescape(content, string=False):
"""
Unescape CSS value.
    String values may span multiple lines by escaping the newline.
"""
def replace(m):
"""Replace with the appropriate substitute."""
if m.group(1):
codepoint = int(m.group(1)[1:], 16)
if codepoint == 0:
codepoint = UNICODE_REPLACEMENT_CHAR
value = util.uchr(codepoint)
elif m.group(2):
value = m.group(2)[1:]
elif m.group(3):
value = '\ufffd'
else:
value = ''
return value
return (RE_CSS_ESC if not string else RE_CSS_STR_ESC).sub(replace, content)
|
python
|
{
"resource": ""
}
|
q7496
|
escape
|
train
|
def escape(ident):
"""Escape identifier."""
string = []
length = len(ident)
start_dash = length > 0 and ident[0] == '-'
if length == 1 and start_dash:
# Need to escape identifier that is a single `-` with no other characters
string.append('\\{}'.format(ident))
else:
for index, c in enumerate(ident):
codepoint = util.uord(c)
if codepoint == 0x00:
string.append('\ufffd')
elif (0x01 <= codepoint <= 0x1F) or codepoint == 0x7F:
string.append('\\{:x} '.format(codepoint))
elif (index == 0 or (start_dash and index == 1)) and (0x30 <= codepoint <= 0x39):
string.append('\\{:x} '.format(codepoint))
elif (
                codepoint in (0x2D, 0x5F) or codepoint >= 0x80 or (0x30 <= codepoint <= 0x39) or
                (0x41 <= codepoint <= 0x5A) or (0x61 <= codepoint <= 0x7A)
):
string.append(c)
else:
string.append('\\{}'.format(c))
return ''.join(string)
|
python
|
{
"resource": ""
}
|
q7497
|
SpecialPseudoPattern.match
|
train
|
def match(self, selector, index):
"""Match the selector."""
pseudo = None
m = self.re_pseudo_name.match(selector, index)
if m:
name = util.lower(css_unescape(m.group('name')))
pattern = self.patterns.get(name)
if pattern:
pseudo = pattern.match(selector, index)
if pseudo:
self.matched_name = pattern
return pseudo
|
python
|
{
"resource": ""
}
|
q7498
|
_Selector._freeze_relations
|
train
|
def _freeze_relations(self, relations):
"""Freeze relation."""
if relations:
sel = relations[0]
sel.relations.extend(relations[1:])
return ct.SelectorList([sel.freeze()])
else:
return ct.SelectorList()
|
python
|
{
"resource": ""
}
|
q7499
|
_Selector.freeze
|
train
|
def freeze(self):
"""Freeze self."""
if self.no_match:
return ct.SelectorNull()
else:
return ct.Selector(
self.tag,
tuple(self.ids),
tuple(self.classes),
tuple(self.attributes),
tuple(self.nth),
tuple(self.selectors),
self._freeze_relations(self.relations),
self.rel_type,
tuple(self.contains),
tuple(self.lang),
self.flags
)
|
python
|
{
"resource": ""
}
|