code
string
signature
string
docstring
string
loss_without_docstring
float64
loss_with_docstring
float64
factor
float64
def solve(self):
    """
    Run the solver and assign the solution's :class:`CoordSystem` instances
    as the corresponding part's world coordinates.
    """
    if self.world_coords is None:
        # solving still proceeds below; the solver receives ``None`` as the
        # reference coordinate system — TODO confirm solver handles this
        log.warning("solving for Assembly without world coordinates set: %r", self)
    # solver yields (component, CoordSystem) pairs; assign each solution
    for (component, world_coords) in solver(self.constraints, self.world_coords):
        component.world_coords = world_coords
10.192736
6.960108
1.464451
def verify_components(components):
    """
    Verify values returned from :meth:`make_components`.

    Used internally during the :meth:`build` process.

    :param components: value returned from :meth:`make_components`
    :type components: :class:`dict`
    :raises ValueError: if verification fails
    """
    # verify returned type from user-defined function
    if not isinstance(components, dict):
        raise ValueError(
            "invalid type returned by make_components(): %r (must be a dict)" % components
        )

    # check types for (name, component) pairs in dict
    for (name, component) in components.items():
        # name is a string
        if not isinstance(name, str):
            raise ValueError((
                "invalid name from make_components(): (%r, %r) "
                "(must be a (str, Component))"
            ) % (name, component))
        # component is a Component instance
        if not isinstance(component, Component):
            raise ValueError((
                "invalid component type from make_components(): (%r, %r) "
                "(must be a (str, Component))"
            ) % (name, component))
        # check component name validity
        invalid_chars = set(name) - VALID_NAME_CHARS
        if invalid_chars:
            raise ValueError(
                "component name {!r} invalid; cannot include {!r}".format(
                    name, invalid_chars
                )
            )
2.9538
2.919541
1.011734
def verify_constraints(constraints):
    """
    Verify values returned from :meth:`make_constraints`.

    Used internally during the :meth:`build` process.

    :param constraints: value returned from :meth:`make_constraints`
    :type constraints: :class:`list`
    :raises ValueError: if verification fails
    """
    # verify return is a list
    if not isinstance(constraints, list):
        raise ValueError(
            "invalid type returned by make_constraints: %r (must be a list)" % constraints
        )

    # verify each list element is a Constraint instance
    for constraint in constraints:
        if not isinstance(constraint, Constraint):
            # bugfix: error message previously misspelled "Constriant"
            raise ValueError(
                "invalid constraint type: %r (must be a Constraint)" % constraint
            )
3.739749
3.645635
1.025816
def build(self, recursive=True):
    """
    Building an assembly buffers the :meth:`components` and :meth:`constraints`.

    Running ``build()`` is optional, it's automatically run when requesting
    :meth:`components` or :meth:`constraints`.

    Mostly it's used to test that there aren't any critical runtime issues
    with its construction, but doing anything like *displaying* or
    *exporting* will ultimately run a build anyway.

    :param recursive: if set, iterates through child components and builds
                      those as well.
    :type recursive: :class:`bool`
    """
    # initialize values
    self._components = {}
    self._constraints = []

    def genwrap(obj, name, iter_type=None):
        # Force obj to act like a generator.
        # this wrapper will always yield at least once.
        if isinstance(obj, GeneratorType):
            for i in obj:
                if (iter_type is not None) and (not isinstance(i, iter_type)):
                    raise TypeError("%s must yield a %r" % (name, iter_type))
                yield i
        else:
            if (iter_type is not None) and (not isinstance(obj, iter_type)):
                raise TypeError("%s must return a %r" % (name, iter_type))
            yield obj

    # Make Components
    components_iter = genwrap(self.make_components(), "make_components", dict)
    new_components = next(components_iter)
    self.verify_components(new_components)
    self._components.update(new_components)

    # Make Constraints
    # bugfix: label was "make_components" (copy-paste), which produced
    # misleading TypeError messages for bad make_constraints() values.
    constraints_iter = genwrap(self.make_constraints(), "make_constraints", list)
    new_constraints = next(constraints_iter)
    self.verify_constraints(new_constraints)
    self._constraints += new_constraints

    # Run solver : sets components' world coordinates
    self.solve()

    # Make Alterations
    alterations_iter = genwrap(self.make_alterations(), "make_alterations")
    next(alterations_iter)  # return value is ignored

    # keep cycling the three generators until they are all exhausted
    while True:
        (s1, s2, s3) = (True, True, True)  # stages

        # Make Components
        new_components = None
        try:
            new_components = next(components_iter)
            self.verify_components(new_components)
            self._components.update(new_components)
        except StopIteration:
            s1 = False

        # Make Constraints
        new_constraints = None
        try:
            new_constraints = next(constraints_iter)
            self.verify_constraints(new_constraints)
            self._constraints += new_constraints
        except StopIteration:
            s2 = False

        # Run solver : sets components' world coordinates
        if new_components or new_constraints:
            self.solve()

        # Make Alterations
        try:
            next(alterations_iter)  # return value is ignored
        except StopIteration:
            s3 = False

        # end loop when all iters are finished
        if not any((s1, s2, s3)):
            break

    if recursive:
        for (name, component) in self._components.items():
            component.build(recursive=recursive)
2.546577
2.506175
1.016121
def find(self, keys, _index=0):
    """
    Find a nested :class:`Component` by a "." separated list of names.

    for example::

        >>> motor.find('bearing.outer_ring')

    would return the Part instance of the motor bearing's outer ring.

    :param keys: key path. ``'a.b'`` is equivalent to ``['a', 'b']``
    :type keys: :class:`str` or :class:`list`
    :raises AssemblyFindError: if any intermediate key does not reference
        an :class:`Assembly`, or a key is not a component name.

    .. note::
        For a key path of ``a.b.c`` the ``c`` key can reference any
        :class:`Component` type; everything prior must reference an
        :class:`Assembly`.
    """
    # accept a string path; note the regex splits on '.' AND '-'
    if isinstance(keys, six.string_types):
        keys = re.split(r'[\.-]+', keys)
    if _index >= len(keys):
        return self

    key = keys[_index]

    if key in self.components:
        component = self.components[key]
        if isinstance(component, Assembly):
            # recurse into the sub-assembly with the next key
            return component.find(keys, _index=(_index + 1))
        elif _index == len(keys) - 1:
            # this is the last search key; component is a leaf, return it
            return component
        else:
            # a non-Assembly component cannot be traversed further
            raise AssemblyFindError(
                "could not find '%s' (invalid type at [%i]: %r)" % (
                    '.'.join(keys), _index, component
                )
            )
    else:
        raise AssemblyFindError(
            "could not find '%s', '%s' is not a component of %r" % (
                '.'.join(keys), key, self
            )
        )
3.095485
2.996077
1.033179
def tree_str(self, name=None, prefix='', add_repr=False, _depth=0):
    u"""
    Return string listing recursively the assembly hierarchy.

    :param name: if set, names the tree's trunk, otherwise the object's
        :meth:`repr` names the tree
    :type name: :class:`str`
    :param prefix: string prefixed to each line, can be used to indent
    :type prefix: :class:`str`
    :param add_repr: if set, *component* :meth:`repr` is put after their names
    :type add_repr: :class:`bool`
    :return: Printable string of an assembly's component hierarchy.
    :rtype: :class:`str`

    In the output, ``─`` denotes an :class:`Assembly` and ``○`` denotes
    a :class:`Part` leaf.
    """
    # unicode characters (box-drawing glyphs for the tree)
    c_t = u'\u251c'     # ├ tee branch
    c_l = u'\u2514'     # └ last branch
    c_dash = u'\u2500'  # ─ assembly marker
    c_o = u'\u25cb'     # ○ leaf (Part) marker
    c_span = u'\u2502'  # │ vertical continuation

    output = u''
    if not _depth:  # first line (the trunk)
        output = prefix
        if name:
            output += (name + u': ') if add_repr else name
        if add_repr or not name:
            output += repr(self)
        output += '\n'

    # build tree: children sorted by name for a stable listing
    for (is_last, (name, component)) in indicate_last(sorted(self.components.items(), key=lambda x: x[0])):
        branch_chr = c_l if is_last else c_t
        if isinstance(component, Assembly):
            # Assembly: also list nested components
            output += prefix + ' ' + branch_chr + c_dash + u' ' + name
            if add_repr:
                output += ': ' + repr(component)
            output += '\n'
            output += component.tree_str(
                prefix=(prefix + (u' ' if is_last else (u' ' + c_span + ' '))),
                add_repr=add_repr,
                _depth=_depth + 1,
            )
        else:
            # Part (assumed): leaf node
            output += prefix + ' ' + branch_chr + c_o + u' ' + name
            if add_repr:
                output += ': ' + repr(component)
            output += '\n'
    return output
3.390485
3.336873
1.016067
return os.path.join( os.path.relpath(os.path.join(*path_list), os.getcwd()), filename )
def _relative_path_to(path_list, filename)
Get a neat relative path to files relative to the CWD
2.604459
2.333533
1.116101
def _make_tooth_template(self):
    """
    Builds a single tooth including the cylinder with tooth faces
    tangential to its circumference.

    :return: a single extruded tooth profile
    :rtype: :class:`cadquery.Workplane`
    """
    # parameters
    period_arc = (2 * pi) / self.tooth_count
    tooth_arc = period_arc * self.spacing_ratio  # the arc between faces at effective_radius
    outer_radius = self.effective_radius + (self.tooth_height / 2)
    face_angle_rad = radians(self.face_angle)

    # cartesian isosceles trapezoid dimensions
    side_angle = face_angle_rad - (tooth_arc / 2)
    side_tangent_radius = sin(face_angle_rad) * self.effective_radius
    extra_side_angle = side_angle + acos(side_tangent_radius / outer_radius)

    # profile drawn on XY plane, centred on Z so extrude spans the width
    tooth = cadquery.Workplane('XY', origin=(0, 0, -self.width / 2)) \
        .moveTo(
            side_tangent_radius * cos(side_angle),
            side_tangent_radius * sin(side_angle)
        )
    opposite_point = (
        -side_tangent_radius * cos(side_angle),
        side_tangent_radius * sin(side_angle)
    )
    if self.face_angle:
        # straight inner edge; arc alternative left disabled below
        tooth = tooth.lineTo(*opposite_point)
        #tooth = tooth.threePointArc(
        #    (0, -side_tangent_radius),
        #    opposite_point
        #)
    tooth = tooth.lineTo(
        -cos(extra_side_angle) * outer_radius,
        sin(extra_side_angle) * outer_radius
    )
    opposite_point = (
        cos(extra_side_angle) * outer_radius,
        sin(extra_side_angle) * outer_radius
    )
    if self.flat_top:
        tooth = tooth.lineTo(*opposite_point)
    else:
        # round the tooth tip along the outer radius
        tooth = tooth.threePointArc((0, outer_radius), opposite_point)

    tooth = tooth.close().extrude(self.width)
    return tooth
2.96726
2.869408
1.034102
def get_mate_center(self, angle=0):
    """
    Mate at ring's center rotated ``angle`` degrees.

    :param angle: rotation around z-axis (unit: deg)
    :type angle: :class:`float`
    :return: mate in ring's center rotated about z-axis
    :rtype: :class:`Mate <cqparts.constraint.Mate>`
    """
    return Mate(self, CoordSystem.from_plane(
        cadquery.Plane(
            # origin on the z-axis, at the top face (half the width up)
            origin=(0, 0, self.width / 2),
            xDir=(1, 0, 0),
            normal=(0, 0, 1),
        ).rotated((0, 0, angle))  # rotate about z-axis
    ))
5.012469
4.36501
1.148329
def get_max_ballcount(cls, ball_diam, rolling_radius, min_gap=0.):
    """
    The maximum number of balls given ``rolling_radius`` and ``ball_diam``.

    :param ball_diam: diameter of each ball
    :type ball_diam: :class:`float`
    :param rolling_radius: radius of the circle the ball centers roll on
    :type rolling_radius: :class:`float`
    :param min_gap: minimum gap between balls (measured along vector
                    between spherical centers)
    :type min_gap: :class:`float`
    :return: maximum ball count
    :rtype: :class:`int`
    """
    # smallest arc (radians) one ball (plus gap) occupies on the rolling circle:
    # half-chord of (diam + gap) subtends asin(chord/2 / radius) on each side
    half_chord = (ball_diam + min_gap) / 2
    arc_per_ball = 2 * asin(half_chord / rolling_radius)
    # whole balls that fit around the full circle
    return int((2 * pi) / arc_per_ball)
5.356009
6.482172
0.826268
def map_environment(**kwargs):
    """
    Decorator to map a DisplayEnvironment for displaying components.

    The decorated environment will be chosen if its condition is ``True``,
    and its order is the smallest.

    :param add_to: if set to ``globals()``, display environment's constructor
                   may reference its own type (avoids ``NameError`` during
                   construction, see module docs)
    :type add_to: :class:`dict`

    Any additional named parameters will be passed to the constructor of
    the decorated DisplayEnvironment.

    :raises RuntimeError: if an equivalent environment is already mapped
    """
    def inner(cls):
        # mutates the module-level registry of mapped environments
        global display_environments
        assert issubclass(cls, DisplayEnvironment), "can only map DisplayEnvironment classes"

        # Add class to it's local globals() so constructor can reference
        # its own type
        add_to = kwargs.pop('add_to', {})
        add_to[cls.__name__] = cls

        # Create display environment
        disp_env = cls(**kwargs)

        # is already mappped?
        try:
            i = display_environments.index(disp_env)  # raises ValueError
            # report duplicate
            raise RuntimeError(
                ("environment %r already mapped, " % display_environments[i]) +
                ("can't map duplicate %r" % disp_env)
            )
        except ValueError:
            pass  # as expected

        # map class (registry kept sorted, presumably by environment order)
        display_environments = sorted(display_environments + [disp_env])
        return cls
    return inner
6.640506
5.555499
1.195303
def display_callback(self, component, **kwargs):
    """
    Display given component in this environment.

    .. note::
        To be overridden by inheriting classes; the base class
        implementation always raises.

    :param component: the component to display
    :raises RuntimeError: if called on the ``DisplayEnvironment`` base
        class itself (it is not a functional environment)
    :raises NotImplementedError: if the inheriting class did not
        override this method
    """
    # exact type check (not isinstance): only the base class itself is barred
    if type(self) is DisplayEnvironment:
        raise RuntimeError(
            ("%r is not a functional display environment, " % (type(self))) +
            "it's meant to be inherited by an implemented environment"
        )
    raise NotImplementedError(
        "display_callback function not overridden by %r" % (type(self))
    )
7.60907
6.642797
1.145462
def add_vertex(self, x, y, z):
    """
    Add a ``VEC3`` of ``floats`` to the ``vert_data`` buffer.

    :param x: x coordinate
    :param y: y coordinate
    :param z: z coordinate
    """
    # little-endian float32 triplet appended to the vertex buffer
    self.vert_data.write(
        struct.pack('<f', x) +
        struct.pack('<f', y) +
        struct.pack('<f', z)
    )

    # retain min/max values (component-wise bounds of all vertices so far)
    self.vert_min = _list3_min(self.vert_min, (x, y, z))
    self.vert_max = _list3_max(self.vert_max, (x, y, z))
2.840235
2.613825
1.08662
def add_poly_index(self, i, j, k):
    """
    Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer.

    :param i: first vertex index of the triangle
    :param j: second vertex index of the triangle
    :param k: third vertex index of the triangle
    """
    # pack each index with the buffer's configured format, write as one chunk
    packed = b''.join(struct.pack(self.idx_fmt, value) for value in (i, j, k))
    self.idx_data.write(packed)
2.783998
2.045841
1.360809
def buffer_iter(self, block_size=1024):
    """
    Iterate through chunks of the vertices, and indices buffers seamlessly.

    :param block_size: maximum size of each yielded chunk
    :type block_size: :class:`int`

    .. note::
        To see a usage example, look at the :class:`ShapeBuffer` description.
    """
    # chain both buffers into one continuous chunk stream
    for stream in (self.vert_data, self.idx_data):
        stream.seek(0)
        chunk = stream.read(block_size)
        while chunk:
            yield chunk
            chunk = stream.read(block_size)
4.992037
4.206048
1.186871
def read(self):
    """
    Read buffer out as a single stream.

    .. warning::
        Avoid using this function!  It duplicates all the memory required
        for a mesh and returns it as a single :class:`str`/:class:`bytes`,
        at best doubling the memory needed for one model.

        Wherever possible, please use :meth:`buffer_iter` instead.

    :return: all buffered chunks concatenated
    """
    buffer = BytesIO()
    for chunk in self.buffer_iter():
        log.debug('buffer.write(%r)', chunk)
        buffer.write(chunk)
    # rewind so the whole accumulated content is returned
    buffer.seek(0)
    return buffer.read()
4.349849
4.2964
1.01244
def world_coords(self):
    """
    :return: world coordinates of mate.
    :rtype: :class:`CoordSystem <cqparts.utils.geometry.CoordSystem>`
    :raises ValueError: if ``.component`` does not have valid world
        coordinates.

    If ``.component`` is ``None``, then a copy of ``.local_coords``
    is returned.
    """
    # no component: world == local (copied so the caller can't mutate ours)
    if self.component is None:
        return copy(self.local_coords)

    component_origin = self.component.world_coords
    if component_origin is None:
        raise ValueError(
            "mate's component does not have world coordinates; "
            "cannot get mate's world coordinates"
        )
    # offset the component's origin by this mate's local coordinates
    return component_origin + self.local_coords
5.611469
3.94015
1.424177
def make_simple(self):
    """
    Create and return *simplified* solid part.

    The default behaviour returns the full complexity object's bounding
    box — note that this still requires generating the full-complexity
    object first, so overriding this method is highly recommended for
    expensive parts (e.g. threaded fasteners).

    :return: box solid spanning the part's bounding box
    :rtype: :class:`cadquery.Workplane`
    """
    complex_obj = self.make()
    bb = complex_obj.findSolid().BoundingBox()
    # box anchored at the bounding box's minimum corner (not centered)
    simple_obj = cadquery.Workplane('XY', origin=(bb.xmin, bb.ymin, bb.zmin)) \
        .box(bb.xlen, bb.ylen, bb.zlen, centered=(False, False, False))
    return simple_obj
4.238244
3.748719
1.130585
def local_obj(self):
    """
    Buffered result of :meth:`make` which is (probably) a
    :class:`cadquery.Workplane` instance.
    If ``_simple`` is ``True``, then :meth:`make_simple` is returned instead.

    .. note::
        This is usually the correct way to get your part's object for
        rendering, exporting, or measuring.  Only call :meth:`make`
        directly to deliberately re-generate the model from scratch.

    :raises MakeError: if the generated object is not a
        :class:`cadquery.CQ` instance
    """
    # lazily generate & cache; subsequent calls return the buffered object
    if self._local_obj is None:
        # Simplified or Complex
        if self._simple:
            value = self.make_simple()
        else:
            value = self.make()
        # Verify type
        if not isinstance(value, cadquery.CQ):
            raise MakeError("invalid object type returned by make(): %r" % value)
        # Buffer object
        self._local_obj = value
    return self._local_obj
6.342872
4.6636
1.360081
def world_obj(self):
    """
    The :meth:`local_obj <local_obj>` object in the
    :meth:`world_coords <Component.world_coords>` coordinate system.

    .. note::
        Generated lazily on first access; remains ``None`` until both the
        local object and world coordinates are available.
    """
    if self._world_obj is None:
        part = self.local_obj
        coords = self.world_coords
        # only buildable once both the geometry and its placement exist
        if not (part is None or coords is None):
            # copy local object, transformed into its world position
            self._world_obj = coords + part
    return self._world_obj
4.167368
3.663378
1.137575
def bounding_box(self):
    """
    Generate a bounding box based on the full complexity part.

    :return: bounding box of part
    :rtype: cadquery.BoundBox
    """
    # use the world-placed object when world coordinates are set,
    # otherwise fall back to the local object
    obj = self.world_obj if self.world_coords else self.local_obj
    return obj.findSolid().BoundingBox()
10.194729
7.265739
1.403123
def make_cutter(self):
    """
    Makes a shape to be used as a negative; it can be cut away from other
    shapes to make a perfectly shaped pocket for this part.

    For example, for a countersunk screw with a neck, the cutter is the
    union of the head's cutter, a neck cylinder, and a pilot-hole cylinder.

    :return: cutter solid
    :rtype: :class:`cadquery.Workplane`
    """
    # head
    obj = self.head.make_cutter()

    # neck
    if self.neck_length:
        # neck cut diameter (if thread is larger than the neck, thread must fit through)
        (inner_radius, outer_radius) = self.thread.get_radii()
        neck_cut_radius = max(outer_radius, self.neck_diam / 2)
        neck = cadquery.Workplane(
            'XY', origin=(0, 0, -self.neck_length)
        ).circle(neck_cut_radius).extrude(self.neck_length)
        obj = obj.union(neck)

    # thread (pilot hole), shifted down the full fastener length
    pilot_hole = self.thread.make_pilothole_cutter() \
        .translate((0, 0, -self.length))
    obj = obj.union(pilot_hole)

    return obj
3.902965
4.023652
0.970006
def get_cutout(self, clearance=0):
    """
    Get the cutout for the shaft.

    :param clearance: radial clearance added to the shaft's radius
    :type clearance: :class:`float`
    :return: cutout cylinder
    :rtype: :class:`cadquery.Workplane`
    """
    # NOTE(review): extrusion length is hard-coded to 10 — confirm units/intent
    return cq.Workplane('XY', origin=(0, 0, 0)) \
        .circle((self.diam / 2) + clearance) \
        .extrude(10)
5.472136
4.494912
1.217407
def mate_bottom(self):
    """
    Connect to the bottom of the cup.

    :return: mate at the cup's bottom (z = -height), z-axis up
    :rtype: :class:`Mate`
    """
    return Mate(self, CoordSystem(
        origin=(0, 0, -self.height),
        xDir=(1, 0, 0),
        normal=(0, 0, 1)))
6.552974
5.378432
1.21838
def as_parameter(nullable=True, strict=True):
    """
    Decorate a container class as a functional :class:`Parameter` class
    for a :class:`ParametricObject`.

    :param nullable: if set, parameter's value may be Null
    :type nullable: :class:`bool`
    :param strict: currently unused — TODO confirm intended semantics
    :type strict: :class:`bool`

    .. doctest::

        >>> from cqparts.params import as_parameter, ParametricObject
        >>> @as_parameter(nullable=True)
        ... class Stuff(object):
        ...     def __init__(self, a=1, b=2, c=3):
        ...         self.a = a
        ...         self.b = b
        ...         self.c = c
    """
    def decorator(cls):
        # nullable parameters inherit Parameter, otherwise NonNullParameter
        base_class = Parameter if nullable else NonNullParameter

        # build a new Parameter subclass that carries the wrapped class's
        # identity & documentation
        return type(cls.__name__, (base_class,), {
            # Preserve text for documentation
            '__name__': cls.__name__,
            '__doc__': cls.__doc__,
            '__module__': cls.__module__,

            # Sphinx doc type string
            '_doc_type': ":class:`{class_name} <{module}.{class_name}>`".format(
                class_name=cls.__name__, module=__name__
            ),

            # NOTE(review): the 'type' cast is commented out here — confirm
            # values are cast to `cls` elsewhere, or this is a latent gap.
            #'type': lambda self, value: cls(**value)
        })
    return decorator
4.934908
5.418645
0.910727
V2_PATH = 'api/' V3_PATH = 'api/s/' + self.site_id + '/' if(version == 'v2'): return V2_PATH if(version == 'v3'): return V3_PATH if(version == 'v4'): return V3_PATH if(version == 'v5'): return V3_PATH else: return V2_PATH
def _construct_api_path(self, version)
Returns valid base API path based on version given The base API path for the URL is different depending on UniFi server version. Default returns correct path for latest known stable working versions.
2.625477
2.574824
1.019672
def get_alerts_unarchived(self):
    """
    Return a list of Alerts unarchived.

    :return: decoded controller response (list of alert dicts)
    """
    # newest first, unarchived only
    js = json.dumps({'_sort': '-time', 'archived': False})
    # NOTE: urllib.urlencode is Python 2 API (urllib.parse.urlencode in py3)
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'list/alarm', params)
8.496346
7.304046
1.163238
def get_statistics_24h(self, endtime):
    """
    Return statistical data last 24h from time.

    :param endtime: end of the reporting window (unix seconds)
    :return: decoded controller response (hourly system report)
    """
    # NOTE(review): window is [endtime-86400, endtime-3600] in milliseconds —
    # it ends one hour *before* `endtime`; confirm this offset is intended.
    js = json.dumps(
        {'attrs': ["bytes", "num_sta", "time"],
         'start': int(endtime - 86400) * 1000,
         'end': int(endtime - 3600) * 1000})
    params = urllib.urlencode({'json': js})
    return self._read(self.api_url + 'stat/report/hourly.system', params)
5.6348
5.51478
1.021763
def get_aps(self):
    """
    Return a list of all AP:s, with significant information about each.

    :return: decoded controller response (list of device dicts)
    """
    # Set test to 0 instead of NULL
    params = json.dumps({'_depth': 2, 'test': 0})
    # NOTE(review): sibling endpoints wrap the JSON via
    # urllib.urlencode({'json': ...}); this one posts raw JSON — confirm
    # the stat/device endpoint really expects an un-encoded body.
    return self._read(self.api_url + 'stat/device', params)
17.3976
14.351721
1.212231
def restart_ap_name(self, name):
    """
    Restart an access point (by name).

    Arguments:
        name -- the name address of the AP to restart.

    :raises APIError: if ``name`` is empty/falsy
    """
    if not name:
        raise APIError('%s is not a valid name' % str(name))
    for ap in self.get_aps():
        # only restart APs that are connected ('state' == 1) and match by name
        connected = ap.get('state', 0) == 1
        if connected and ap.get('name', None) == name:
            self.restart_ap(ap['mac'])
3.703029
3.893657
0.951041
def archive_all_alerts(self):
    """
    Archive all Alerts.
    """
    js = json.dumps({'cmd': 'archive-all-alarms'})
    params = urllib.urlencode({'json': js})
    # NOTE(review): response is read but discarded; confirm callers never
    # need the answer (sibling create_backup() does return its answer).
    answer = self._read(self.api_url + 'cmd/evtmgr', params)
8.685262
7.736373
1.122653
def create_backup(self):
    """
    Ask controller to create a backup archive file; response contains the
    path to the backup file.

    .. warning::
        This process puts significant load on the controller and may
        render it partially unresponsive for other requests.

    :return: server-side path ('url') of the created backup archive
    :rtype: :class:`str`
    """
    js = json.dumps({'cmd': 'backup'})
    params = urllib.urlencode({'json': js})
    answer = self._read(self.api_url + 'cmd/system', params)
    # first element of the response carries the backup file's path
    return answer[0].get('url')
8.110043
6.35058
1.277055
def get_backup(self, target_file='unifi-backup.unf'):
    """
    Get a backup archive from a controller.

    Arguments:
        target_file -- Filename or full path to download the backup
                       archive to, should have .unf extension for restore.
    """
    download_path = self.create_backup()
    response = self.opener.open(self.url + download_path)
    unifi_archive = response.read()
    # bugfix: the archive is binary — write with 'wb' (text mode corrupts
    # it on Windows and fails outright on Python 3), and use a context
    # manager so the file handle is closed even on error.
    with open(target_file, 'wb') as backupfile:
        backupfile.write(unifi_archive)
3.715725
4.279233
0.868316
def authorize_guest(self, guest_mac, minutes, up_bandwidth=None,
                    down_bandwidth=None, byte_quota=None, ap_mac=None):
    """
    Authorize a guest based on his MAC address.

    Arguments:
        guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
        minutes -- duration of the authorization in minutes
        up_bandwidth -- up speed allowed in kbps (optional)
        down_bandwidth -- down speed allowed in kbps (optional)
        byte_quota -- quantity of bytes allowed in MB (optional)
        ap_mac -- access point MAC address (UniFi >= 3.x) (optional)
    """
    payload = {'mac': guest_mac, 'minutes': minutes}
    # only include the limits the caller actually supplied (non-falsy)
    for key, value in (('up', up_bandwidth),
                       ('down', down_bandwidth),
                       ('bytes', byte_quota)):
        if value:
            payload[key] = value
    # v2 controllers don't accept an AP MAC hint
    if ap_mac and self.version != 'v2':
        payload['ap_mac'] = ap_mac
    return self._run_command('authorize-guest', params=payload)
2.329654
2.562781
0.909033
def unauthorize_guest(self, guest_mac):
    """
    Unauthorize a guest based on his MAC address.

    Arguments:
        guest_mac -- the guest MAC address : aa:bb:cc:dd:ee:ff
    """
    payload = {'mac': guest_mac}
    return self._run_command('unauthorize-guest', params=payload)
6.586578
7.85052
0.838999
def download(gfile, wks_name=None, col_names=False, row_names=False,
             credentials=None, start_cell='A1'):
    """
    Download Google Spreadsheet and convert it to Pandas DataFrame.

    :param gfile: path to Google Spreadsheet or gspread ID
    :param wks_name: worksheet name
    :param col_names: assign top row to column names for Pandas DataFrame
    :param row_names: assign left column to row names for Pandas DataFrame
    :param credentials: provide own credentials
    :param start_cell: where to start capturing the DataFrame (default 'A1')
    :type gfile: str
    :type wks_name: str
    :type col_names: bool
    :type row_names: bool
    :type credentials: class 'oauth2client.client.OAuth2Credentials'
    :type start_cell: str
    :returns: Pandas DataFrame
    :rtype: class 'pandas.core.frame.DataFrame'
    :raises RuntimeError: spreadsheet/worksheet missing, or start cell
        outside the table
    :raises ValueError: worksheet is empty or invalid
    """
    # access credentials
    credentials = get_credentials(credentials)
    # auth for gspread
    gc = gspread.authorize(credentials)

    try:
        # if gfile is file_id
        gc.open_by_key(gfile).__repr__()
        gfile_id = gfile
    # bugfix: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception (open_by_key failure means
    # gfile is a path, not an ID).
    except Exception:
        # else look for file_id in drive
        gfile_id = get_file_id(credentials, gfile)
    if gfile_id is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible spreadsheet")

    wks = get_worksheet(gc, gfile_id, wks_name)
    if wks is None:
        raise RuntimeError(
            "Trying to open non-existent or inaccessible worksheet")

    raw_data = wks.get_all_values()
    if not raw_data:
        raise ValueError(
            'Worksheet is empty or invalid.'
        )

    start_row_int, start_col_int = gspread.utils.a1_to_rowcol(start_cell)

    # bounds check: the start cell (plus a header row/column if requested)
    # must lie inside the populated table
    rows, cols = np.shape(raw_data)
    if start_col_int > cols or (row_names and start_col_int + 1 > cols):
        raise RuntimeError(
            "Start col (%s) out of the table columns(%s)" % (start_col_int + row_names, cols))
    if start_row_int > rows or (col_names and start_row_int + 1 > rows):
        raise RuntimeError(
            "Start row (%s) out of the table rows(%s)" % (start_row_int + col_names, rows))
    # crop everything above/left of the start cell (a1_to_rowcol is 1-based)
    raw_data = [row[start_col_int-1:] for row in raw_data[start_row_int-1:]]

    # peel off header row and/or index column as requested
    if row_names and col_names:
        row_names = [row[0] for row in raw_data[1:]]
        col_names = raw_data[0][1:]
        raw_data = [row[1:] for row in raw_data[1:]]
    elif row_names:
        row_names = [row[0] for row in raw_data]
        col_names = np.arange(len(raw_data[0]) - 1)
        raw_data = [row[1:] for row in raw_data]
    elif col_names:
        row_names = np.arange(len(raw_data) - 1)
        col_names = raw_data[0]
        raw_data = raw_data[1:]
    else:
        row_names = np.arange(len(raw_data))
        col_names = np.arange(len(raw_data[0]))

    df = pd.DataFrame([pd.Series(row) for row in raw_data], index=row_names)
    df.columns = col_names

    return df
2.063717
2.127438
0.970048
def get_credentials(credentials=None, client_secret_file=CLIENT_SECRET_FILE, refresh_token=None):
    """
    Consistently returns valid credentials object.

    See Also:
        https://developers.google.com/drive/web/quickstart/python

    Args:
        credentials (`~oauth2client.client.OAuth2Credentials`, optional):
            direct input of credentials; validated and returned as-is
        client_secret_file (str): path to client secrets file,
            defaults to .gdrive_private
        refresh_token (str): path to a user provided refresh token that
            is already pre-authenticated

    Returns:
        `~oauth2client.client.OAuth2Credentials`: google credentials object
    """
    # if the utility was provided credentials just return those
    if credentials:
        if _is_valid_credentials(credentials):
            # auth for gspread
            return credentials
        else:
            # invalid: fall through and regenerate from the token store
            print("Invalid credentials supplied. Will generate from default token.")

    token = refresh_token or DEFAULT_TOKEN

    # ensure the token's directory exists (race-safe: ignore "already exists")
    dir_name = os.path.dirname(DEFAULT_TOKEN)
    try:
        os.makedirs(dir_name)
    except OSError:
        if not os.path.isdir(dir_name):
            raise

    store = file.Storage(token)
    credentials = store.get()
    try:
        import argparse
        flags = argparse.ArgumentParser(
            parents=[tools.argparser]).parse_known_args()[0]
    except ImportError:
        flags = None
        logr.error(
            'Unable to parse oauth2client args; `pip install argparse`')
    if not credentials or credentials.invalid:
        # no stored/valid token: run the interactive OAuth2 flow
        flow = client.flow_from_clientsecrets(
            client_secret_file, SCOPES)
        flow.redirect_uri = client.OOB_CALLBACK_URN
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:
            # Needed only for compatability with Python 2.6
            credentials = tools.run(flow, store)
        logr.info('Storing credentials to ' + DEFAULT_TOKEN)
    return credentials
3.183464
3.157486
1.008227
def create_service_credentials(private_key_file=None, client_email=None,
                               client_secret_file=CLIENT_SECRET_FILE):
    """
    Create credentials from service account information.

    See Also:
        https://developers.google.com/api-client-library/python/auth/service-accounts

    Args:
        client_secret_file (str): path to json file with just the
            client_email when providing the `private_key_file` separately,
            or this file can have both the `client_email` and `private_key`
            contained in it. Defaults to .gdrive_private
        client_email (str): service email account
        private_key_file (str): path to the p12 private key

    Returns:
        `~oauth2client.client.OAuth2Credentials`: google credentials object

    .. note::
        NOTE(review): ``client.SignedJwtAssertionCredentials`` only exists
        in old oauth2client releases — confirm the pinned dependency.
    """
    if private_key_file is not None:
        with open(os.path.expanduser(private_key_file)) as f:
            private_key = f.read()
    else:
        private_key = None

    if client_email is None:
        with open(os.path.expanduser(client_secret_file)) as client_file:
            client_data = json.load(client_file)

        if 'installed' in client_data:
            # handle regular json format where key is separate
            client_email = client_data['installed']['client_id']
            if private_key is None:
                raise RuntimeError('You must have the private key file \
with the regular json file. Try creating a new \
public/private key pair and downloading as json.')
        else:
            # handle newer case where json file has everything in it
            client_email = client_data['client_email']
            private_key = client_data['private_key']

    if client_email is None or private_key is None:
        raise RuntimeError(
            'Client email and/or private key not provided by inputs.')

    credentials = client.SignedJwtAssertionCredentials(
        client_email, private_key, SCOPES)
    return credentials
3.392282
3.377579
1.004353
# auth for apiclient http = credentials.authorize(Http()) service = discovery.build('drive', 'v3', http=http, cache_discovery=False) file_id = service.files().get(fileId='root', fields='id').execute().get('id') # folder/folder/folder/spreadsheet pathway = gfile.strip('/').split('/') for idx, name in enumerate(pathway): files = service.files().list( q="name = '{}' and trashed = false and '{}' in parents".format(name, file_id)).execute()['files'] if len(files) > 0: # Why do you ever need to use several folders with the same name?! file_id = files[0].get('id') elif write_access == True: body = { 'mimeType': 'application/vnd.google-apps.' + ('spreadsheet' if idx == len(pathway)-1 else 'folder'), 'name': name, 'parents': [file_id] } file_id = service.files().create(body=body, fields='id').execute().get('id') else: return None return file_id
def get_file_id(credentials, gfile, write_access=False)
Get file ID by provided path. If file does not exist and `write_access` is true, it will create whole path for you. :param credentials: provide own credentials :param gfile: path to Google Spreadsheet :param write_access: allows to create full path if file does not exist :type credentials: class 'oauth2client.client.OAuth2Credentials' :type gfile: str :type write_access: boolean :returns: file ID :rtype: str :Example: >>> from df2gspread.gfiles import get_file_id >>> from df2gspread.utils import get_credentials >>> gfile = '/some/folder/with/file' >>> credentials = get_credentials() >>> get_file_id(credentials=credentials, gfile=gfile, write_access=True) u'78asbcsSND8sdSACNsa7ggcasca8shscaSACVD'
2.735503
2.993113
0.913933
spsh = gc.open_by_key(gfile_id) # if worksheet name is not provided , take first worksheet if wks_name is None: wks = spsh.sheet1 # if worksheet name provided and exist in given spreadsheet else: try: wks = spsh.worksheet(wks_name) except: #rows, cols = new_sheet_dimensions wks = spsh.add_worksheet( wks_name, *new_sheet_dimensions) if write_access == True else None return wks
def get_worksheet(gc, gfile_id, wks_name, write_access=False, new_sheet_dimensions=(1000, 100))
Return the worksheet named ``wks_name`` from the spreadsheet ``gfile_id``. Falls back to the first sheet when no name is given; when the worksheet does not exist it is created (with ``new_sheet_dimensions``) if ``write_access`` is true, otherwise ``None`` is returned.
3.183744
3.06051
1.040266
# Authorize against the Drive v3 API and delete the file by id.
try:
    http = credentials.authorize(Http())
    service = discovery.build(
        'drive', 'v3', http=http, cache_discovery=False)
    service.files().delete(fileId=file_id).execute()
except errors.HttpError as e:
    # Log the API error, then let the caller decide how to handle it.
    logr.error(e)
    raise
def delete_file(credentials, file_id)
Delete the Google Drive file identified by ``file_id`` using the given credentials. HTTP errors from the Drive API are logged and re-raised.
2.414703
2.34062
1.031651
values = wks.get_all_values() if values: df_ = pd.DataFrame(index=range(len(values)), columns=range(len(values[0]))) df_ = df_.fillna('') wks = upload(df_, gfile_id, wks_name=wks_name, col_names=False, row_names=False, clean=False, credentials=credentials) return wks
def clean_worksheet(wks, gfile_id, wks_name, credentials)
Clear all populated cells of the worksheet by uploading an empty DataFrame of the same shape. Returns the refreshed worksheet.
3.56321
3.48072
1.023699
# Normalise scalar / tuple inputs to plain lists.
if isinstance(list_a, str):
    list_a = [list_a]
if isinstance(list_b, str):
    list_b = [list_b]
if isinstance(list_a, tuple):
    list_a = list(list_a)
if isinstance(list_b, tuple):
    list_b = list(list_b)

coco = CountryConverter(country_data, additional_data)

name_dict_a = dict()
match_dict_a = dict()

for name_a in list_a:
    name_dict_a[name_a] = []
    match_dict_a[name_a] = []

    # Collect every country regex that recognises this name.
    for regex in coco.regexes:
        if regex.search(name_a):
            match_dict_a[name_a].append(regex)

    # No regex matched: record the fill-in value (or the input itself).
    if len(match_dict_a[name_a]) == 0:
        logging.warning('Could not identify {} in list_a'.format(name_a))
        _not_found_entry = name_a if not not_found else not_found
        name_dict_a[name_a].append(_not_found_entry)
        if not enforce_sublist:
            name_dict_a[name_a] = name_dict_a[name_a][0]
        continue

    if len(match_dict_a[name_a]) > 1:
        logging.warning(
            'Multiple matches for name {} in list_a'.format(name_a))

    # For every matching regex, find the corresponding names in list_b.
    for match_case in match_dict_a[name_a]:
        b_matches = 0
        for name_b in list_b:
            if match_case.search(name_b):
                b_matches += 1
                name_dict_a[name_a].append(name_b)

        if b_matches == 0:
            logging.warning(
                'Could not find any '
                'correspondence for {} in list_b'.format(name_a))
            _not_found_entry = name_a if not not_found else not_found
            name_dict_a[name_a].append(_not_found_entry)

        if b_matches > 1:
            logging.warning('Multiple matches for '
                            'name {} in list_b'.format(name_a))

    # Unwrap single matches unless the caller wants lists everywhere.
    if not enforce_sublist and (len(name_dict_a[name_a]) == 1):
        name_dict_a[name_a] = name_dict_a[name_a][0]

return name_dict_a
def match(list_a, list_b, not_found='not_found', enforce_sublist=False, country_data=COUNTRY_DATA_FILE, additional_data=None)
Matches the country names given in two lists into a dictionary. This function matches names given in list_a to the one provided in list_b using regular expressions defined in country_data. Parameters ---------- list_a : list Names of countries to identify list_b : list Master list of names for countries not_found : str, optional Fill in value for not found entries. If None, keep the input value (default: 'not found') enforce_sublist : boolean, optional If True, all entries in both list are list. If False(default), only multiple matches are list, rest are strings country_data : Pandas DataFrame or path to data file (optional) This is by default set to COUNTRY_DATA_FILE - the standard (tested) country list for coco. additional_data: (list of) Pandas DataFrames or data files (optional) Additional data to include for a specific analysis. This must be given in the same format as specified in the country_data file. (utf-8 encoded tab separated data, same column headers in all files) Returns ------- dict: A dictionary with a key for every entry in list_a. The value correspond to the matching entry in list_b if found. If there is a 1:1 correspondence, the value is a str (if enforce_sublist is False), otherwise multiple entries as list.
1.777799
1.764279
1.007663
init = {'country_data': COUNTRY_DATA_FILE, 'additional_data': None, 'only_UNmember': False, 'include_obsolete': False} init.update({kk: kargs.get(kk) for kk in init.keys() if kk in kargs}) coco = CountryConverter(**init) kargs = {kk: ii for kk, ii in kargs.items() if kk not in init.keys()} return coco.convert(*args, **kargs)
def convert(*args, **kargs)
Wrapper around CountryConverter.convert() Uses the same parameters. This function has the same performance as CountryConverter.convert for one call; for multiple calls it is better to instantiate a common CountryConverter (this avoid loading the source data file multiple times). Note ---- A lot of the functionality can also be done directly in Pandas DataFrames. For example: cc = CountryConverter() names = ['USA', 'SWZ', 'PRI'] cc.data[cc.data['ISO3'].isin(names)][['ISO2', 'continent']] Parameters ---------- names : str or list like Countries in 'src' classification to convert to 'to' classification src : str, optional Source classification to : str, optional Output classification (valid str for an index of the country data file), default: name_short enforce_list : boolean, optional If True, enforces the output to be list (if only one name was passed) or to be a list of lists (if multiple names were passed). If False (default), the output will be a string (if only one name was passed) or a list of str and/or lists (str if a one to one matching, list otherwise). not_found : str, optional Fill in value for not found entries. If None, keep the input value (default: 'not found') country_data : Pandas DataFrame or path to data file (optional) This is by default set to COUNTRY_DATA_FILE - the standard (tested) country list for coco. additional_data: (list of) Pandas DataFrames or data files (optional) Additional data to include for a specific analysis. This must be given in the same format as specified in the country_data_file. (utf-8 encoded tab separated data, same column headers as in the general country data file) Returns ------- list or str, depending on enforce_list
5.376709
4.642571
1.158132
parser = argparse.ArgumentParser(
    description=('The country converter (coco): a Python package for '
                 'converting country names between '
                 'different classifications schemes. '
                 'Version: {}'.format(__version__)
                 ),
    prog='coco',
    usage=('%(prog)s --names --src --to]'))

parser.add_argument('names',
                    help=('List of countries to convert '
                          '(space separated, country names consisting of '
                          'multiple words must be put in quotation marks).'
                          'Possible classifications: ' +
                          ', '.join(valid_classifications) +
                          '; NB: long, official and short are provided '
                          'as shortcuts for the names classifications'
                          ), nargs='*')
parser.add_argument('-s', '--src', '--source', '-f', '--from',
                    help=('Classification of the names given, '
                          '(default: inferred from names)'))
parser.add_argument('-t', '--to',
                    help=('Required classification of the passed names'
                          '(default: "ISO3"'))
parser.add_argument('-o', '--output_sep',
                    help=('Seperator for output names '
                          '(default: space), e.g. "," '))
parser.add_argument('-n', '--not_found', default='not found',
                    help=('Fill in value for none found entries. '
                          'If "None" (string), keep the input value '
                          '(default: not found)'))
parser.add_argument('-a', '--additional_data',
                    help=('Data file with additional country data'
                          '(Same format as the original data file - '
                          'utf-8 encoded tab separated data, same '
                          'column headers as in the general country '
                          'data file; default: not found)'))

args = parser.parse_args()
# Post-process: empty strings become defaults; the literal string "None"
# for --not_found means "keep the input value".
args.src = args.src or None
args.to = args.to or 'ISO3'
args.not_found = args.not_found if args.not_found != 'None' else None
args.output_sep = args.output_sep or ' '

return args
def _parse_arg(valid_classifications)
Command line parser for coco Parameters ---------- valid_classifications: list Available classifications, used for checking input parameters. Returns ------- args : ArgumentParser namespace
5.51321
5.412762
1.018558
# Parse CLI arguments against the classifications known to the converter.
args = _parse_arg(CountryConverter().valid_class)
converter = CountryConverter(additional_data=args.additional_data)
result = converter.convert(
    names=args.names, src=args.src, to=args.to,
    enforce_list=False, not_found=args.not_found)
# Normalise to a list of strings before printing with the chosen separator.
if isinstance(result, list):
    printable = [str(entry) for entry in result]
else:
    printable = [str(result)]
print(args.output_sep.join(printable))
def main()
Main entry point - used for command line call
6.874198
6.427686
1.069467
excluder = re.compile('|'.join(exclude_prefix)) split_entries = excluder.split(name) return {'clean_name': split_entries[0], 'excluded_countries': split_entries[1:]}
def _separate_exclude_cases(name, exclude_prefix)
Splits the excluded Parameters ---------- name : str Name of the country/region to convert. exclude_prefix : list of valid regex strings List of indicators which negate the subsequent country/region. These prefixes and everything following will not be converted. E.g. 'Asia excluding China' becomes 'Asia' and 'China excluding Hong Kong' becomes 'China' prior to conversion Returns ------- dict with 'clean_name' : str as name without anything following exclude_prefix 'excluded_countries' : list list of excluded countries
5.848025
3.535189
1.654233
if type(to) is str: to = [to] return self.data[self.data.EU < 2015][to]
def EU28as(self, to='name_short')
Return EU28 countries in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
6.027252
11.718089
0.514355
if isinstance(to, str): to = [to] return self.data[self.data.EU < 2013][to]
def EU27as(self, to='name_short')
Return EU27 countries in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
6.208893
11.319423
0.548517
if isinstance(to, str): to = [to] return self.data[self.data.OECD > 0][to]
def OECDas(self, to='name_short')
Return OECD member states in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
5.111752
8.596473
0.594634
if isinstance(to, str): to = [to] return self.data[self.data.UNmember > 0][to]
def UNas(self, to='name_short')
Return UN member states in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
7.318176
9.562958
0.765263
if isinstance(to, str): to = [to] return self.data[self.data.obsolete > 0][to]
def obsoleteas(self, to='name_short')
Return obsolete countries in the specified classification Parameters ---------- to : str, optional Output classification (valid str for an index of country_data file), default: name_short Returns ------- Pandas DataFrame
4.435348
7.315449
0.606299
# Pre-seed every classA entry with None so unmatched keys survive.
result = {nn: None for nn in self.data[classA].values}
if restrict is None:
    df = self.data.copy()
else:
    df = self.data[restrict].copy()
# For numeric classB columns (e.g. membership years), replace present
# values with the column name itself so the mapping reads as a label.
if replace_numeric and df[classB].dtype.kind in 'bifc':
    df.loc[~df[classB].isnull(), classB] = classB
    df.loc[df[classB].isnull(), classB] = None
# Group classB entries by classA key; each value becomes a list of the
# unique corresponding entries.
result.update(df.groupby(classA)
              .aggregate(lambda x: list(x.unique()))
              .to_dict()[classB])
return result
def get_correspondance_dict(self, classA, classB, restrict=None, replace_numeric=True)
Returns a correspondance between classification A and B as dict Parameters ---------- classA: str Valid classification (column name of data) classB: str Valid classification (column name of data). restrict: boolean vector of size cc.data, optional where cc is the name of the CountryConverter instance. Used to restrict the data sheet if necessary. E.g. to convert to countries which were OECD members before 1970 use cc.get_correspondance_dict('ISO3', 'OECD', restrict=cc.data.OECD < 1970) replace_numeric: boolean, optional If True (default) replace numeric values with the column header. This can be used if get a correspondance to, for example, 'OECD' instead of to the OECD membership years. Set to False if the actual numbers are required (as for UNcode). Returns ------- dict with keys: based on classA items: list of correspoding entries in classB or None
2.917904
3.064189
0.95226
# Case-insensitive lookup table of the valid classification columns.
# NOTE(review): the `column_names` parameter appears unused here; the
# valid names come from self.valid_class — confirm against callers.
lower_case_valid_class = [et.lower() for et in self.valid_class]

# Common aliases mapped onto the canonical column names.
alt_valid_names = {
    'name_short': ['short', 'short_name', 'name', 'names'],
    'name_official': ['official', 'long_name', 'long'],
    'UNcode': ['un', 'unnumeric'],
    'ISOnumeric': ['isocode'],
}

for item in alt_valid_names.items():
    if para.lower() in item[1]:
        para = item[0]

# Resolve to the exact casing used in the country data file.
try:
    validated_para = self.valid_class[
        lower_case_valid_class.index(para.lower())]
except ValueError:
    raise KeyError(
        '{} is not a valid country classification'.format(para))

return validated_para
def _validate_input_para(self, para, column_names)
Convert the input classificaton para to the correct df column name Parameters ---------- para : string column_names : list of strings Returns ------- validated_para : string Converted to the case used in the country file
5.004489
4.548271
1.100306
try: int(name) src_format = 'ISOnumeric' except ValueError: if len(name) == 2: src_format = 'ISO2' elif len(name) == 3: src_format = 'ISO3' else: src_format = 'regex' return src_format
def _get_input_format_from_name(self, name)
Determines the input format based on the given country name Parameters ---------- name : string Returns ------- string : valid input format
3.671144
3.390846
1.082663
coeff = binom(n, k) def _bpoly(x): return coeff * x ** k * (1 - x) ** (n - k) return _bpoly
def Bernstein(n, k)
Bernstein polynomial.
3.824974
3.835788
0.997181
at = np.asarray(at) at_flat = at.ravel() N = len(points) curve = np.zeros((at_flat.shape[0], 2)) for ii in range(N): curve += np.outer(Bernstein(N - 1, ii)(at_flat), points[ii]) return curve.reshape(at.shape + (2,))
def Bezier(points, at)
Build Bézier curve from points. Deprecated. CatmulClark builds nicer splines
3.058589
3.174301
0.963547
# Connect back to the local socket that the main thread is reading from,
# then pump pty output into it until the child exits.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)

while 1:
    data = pty.read(4096, blocking=blocking)
    if not data and not pty.isalive():
        # Child died: drain whatever is still buffered in the pty.
        while not data and not pty.iseof():
            data += pty.read(4096, blocking=blocking)

        if not data:
            # Nothing left; send an empty frame so the reader unblocks,
            # ignoring a peer that already hung up.
            try:
                client.send(b'')
            except socket.error:
                pass
            break

    try:
        client.send(data)
    except socket.error:
        # Reader side closed the socket: stop pumping.
        break

client.close()
def _read_in_thread(address, pty, blocking)
Read data from the pty in a thread.
2.532738
2.5015
1.012488
# Accept a command string as well as an argv sequence.
if isinstance(argv, str):
    argv = shlex.split(argv, posix=False)

if not isinstance(argv, (list, tuple)):
    raise TypeError("Expected a list or tuple for argv, got %r" % argv)

# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ

# Resolve the executable on PATH so the child can actually be launched.
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
    raise FileNotFoundError(
        'The command was not found or was not ' +
        'executable: %s.' % command
    )
command = command_with_path
argv[0] = command

cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()

proc = PTY(dimensions[1], dimensions[0])

# Create the environemnt string (NUL-separated KEY=VALUE pairs,
# double-NUL terminated, as the Windows API expects).
envStrs = []
for (key, value) in env.items():
    envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'

if PY2:
    command = _unicode(command)
    cwd = _unicode(cwd)
    cmdline = _unicode(cmdline)
    env = _unicode(env)

# Only pass a cmdline when there are actual arguments.
if len(argv) == 1:
    proc.spawn(command, cwd=cwd, env=env)
else:
    proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)

inst = cls(proc)
inst._winsize = dimensions

# Set some informational attributes
inst.argv = argv
if env is not None:
    inst.env = env
if cwd is not None:
    inst.launch_dir = cwd

return inst
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80))
Start the given command in a child process in a pseudo terminal. This does all the setting up the pty, and returns an instance of PtyProcess. Dimensions of the psuedoterminal used for the subprocess can be specified as a tuple (rows, cols), or the default (24, 80) will be used.
2.913357
2.910092
1.001122
# Idempotent: calling close() on an already-closed instance is a no-op.
if not self.closed:
    self.pty.close()
    self.fileobj.close()
    self._server.close()
    # Give kernel time to update process status.
    time.sleep(self.delayafterclose)
    if self.isalive():
        # Child survived the pty teardown; escalate via terminate().
        if not self.terminate(force):
            raise IOError('Could not terminate the child.')
    self.fd = -1
    self.closed = True
    del self.pty
    self.pty = None
def close(self, force=False)
This closes the connection with the child application. Note that calling close() more than once is valid. This emulates standard Python behavior with files. Set force to True if you want to make sure that the child is terminated (SIGKILL is sent if the child ignores SIGINT).
5.591215
5.138527
1.088097
data = self.fileobj.recv(size) if not data: self.flag_eof = True raise EOFError('Pty is closed') return self.decoder.decode(data, final=False)
def read(self, size=1024)
Read and return at most ``size`` characters from the pty. Can block if there is nothing to read. Raises :exc:`EOFError` if the terminal was closed.
6.416997
5.721834
1.121493
buf = [] while 1: try: ch = self.read(1) except EOFError: return ''.join(buf) buf.append(ch) if ch == '\n': return ''.join(buf)
def readline(self)
Read one line from the pseudoterminal as a string. Can block if there is nothing to read. Returns the data read so far (without raising) if the terminal is closed mid-line.
2.443588
2.712216
0.900956
if not self.isalive(): raise EOFError('Pty is closed') if PY2: s = _unicode(s) success, nbytes = self.pty.write(s) if not success: raise IOError('Write failed') return nbytes
def write(self, s)
Write the string ``s`` to the pseudoterminal. Returns the number of bytes written.
5.799431
5.105132
1.136
if not self.isalive(): return True self.kill(signal.SIGINT) time.sleep(self.delayafterterminate) if not self.isalive(): return True if force: self.kill(signal.SIGKILL) time.sleep(self.delayafterterminate) if not self.isalive(): return True else: return False
def terminate(self, force=False)
This forces a child process to terminate.
2.293795
2.209177
1.038303
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D).  For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
    child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
# Letters a-z map to control codes 1-26 (Ctrl-A .. Ctrl-Z).
if 97 <= a <= 122:
    a = a - ord('a') + 1
    byte = bytes([a])
    return self.pty.write(byte.decode('utf-8')), byte
# Punctuation characters with a control-key meaning; e.g. Ctrl-[ is ESC.
d = {'@': 0, '`': 0,
     '[': 27, '{': 27,
     '\\': 28, '|': 28,
     ']': 29, '}': 29,
     '^': 30, '~': 30,
     '_': 31,
     '?': 127}
if char not in d:
    # Not a control character: nothing written.
    return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte
def sendcontrol(self, char)
Helper method that wraps send() with mnemonic access for sending control character to the child (such as Ctrl-C or Ctrl-D). For example, to send Ctrl-G (ASCII 7, bell, '\a'):: child.sendcontrol('g') See also, sendintr() and sendeof().
4.249848
2.208593
1.924233
# Cache the size as (rows, cols); the pty API expects (cols, rows).
self._winsize = (rows, cols)
self.pty.set_size(cols, rows)
def setwinsize(self, rows, cols)
Set the terminal window size of the child tty.
6.159956
4.461937
1.380556
size_p = PLARGE_INTEGER(LARGE_INTEGER(0))
# In non-blocking mode only read as much as is already available, so the
# subsequent ReadFile cannot stall.
if not blocking:
    windll.kernel32.GetFileSizeEx(self.conout_pipe, size_p)
    size = size_p[0]
    length = min(size, length)
data = ctypes.create_string_buffer(length)
if length > 0:
    num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0))
    ReadFile(self.conout_pipe, data, length, num_bytes, None)
return data.value
def read(self, length=1000, blocking=False)
Read ``length`` bytes from current process output stream. Note: This method is not fully non-blocking, however it behaves like one.
4.251016
4.322642
0.98343
data = data.encode('utf-8') data_p = ctypes.create_string_buffer(data) num_bytes = PLARGE_INTEGER(LARGE_INTEGER(0)) bytes_to_write = len(data) success = WriteFile(self.conin_pipe, data_p, bytes_to_write, num_bytes, None) return success, num_bytes[0]
def write(self, data)
Write string data to current process input stream.
5.712055
5.607478
1.01865
# Release both ends of the console pipe pair.
for handle in (self.conout_pipe, self.conin_pipe):
    windll.kernel32.CloseHandle(handle)
def close(self)
Close all communication process streams.
4.273942
3.377825
1.265294
# PeekNamedPipe returns zero (failure) once the child's output pipe has
# been closed, which we treat as end-of-file.
succ = windll.kernel32.PeekNamedPipe(
    self.conout_pipe,
    None, None, None, None, None
)
return not bool(succ)
def iseof(self)
Check if current process streams are still open.
8.375601
6.604301
1.268204
# Fetch the public profile page and return it parsed as an lxml tree.
# Returns None (implicitly) when the profile is missing or unreachable.
try:
    url = "https://www.instagram.com/{}/".format(username)
    page = requests.get(url)
    # Raise error for 404 cause by a bad profile name
    page.raise_for_status()
    return html.fromstring(page.content)
except HTTPError:
    logging.exception('user profile "{}" not found'.format(username))
except (ConnectionError, socket_error) as e:
    logging.exception("instagram.com unreachable")
def instagram_scrap_profile(username)
Scrap an instagram profile page :param username: :return:
4.1235
4.319949
0.954525
try: tree = instagram_scrap_profile(username) return tree.xpath('//script') except AttributeError: logging.exception("scripts not found") return None
def instagram_profile_js(username)
Retrieve the script tags from the parsed page. :param username: :return:
7.425647
7.897005
0.940312
scripts = instagram_profile_js(username)
source = None

if scripts:
    for script in scripts:
        if script.text:
            # Look for the shared-data bootstrap script.
            # NOTE(review): SCRIPT_JSON_PREFIX is presumably the length of
            # "window._sharedData" and SCRIPT_JSON_DATA_INDEX the offset of
            # the JSON payload — confirm against the constants' definitions.
            if script.text[0:SCRIPT_JSON_PREFIX] == "window._sharedData":
                # Drop the assignment prefix and the trailing semicolon.
                source = script.text[SCRIPT_JSON_DATA_INDEX:-1]

return source
def instagram_profile_json(username)
Get the JSON data string from the scripts. :param username: :return:
5.407798
5.595721
0.966417
try: edges = profile['entry_data']['ProfilePage'][page]['graphql']['user']['edge_owner_to_timeline_media']['edges'] return [edge['node'] for edge in edges] except KeyError: logging.exception("path to profile media not found")
def get_profile_media(profile, page = 0)
Parse a generated media object :param profile: :param page: :return:
3.641125
3.737483
0.974219
try: tagname, username = token.split_contents() return InstagramUserRecentMediaNode(username) except ValueError: raise template.TemplateSyntaxError( "%r tag requires a single argument" % token.contents.split()[0] )
def instagram_user_recent_media(parser, token)
Tag for getting data about recent media of an user. :param parser: :param token: :return:
1.988488
2.180016
0.912144
bits = self.v == 4 and 32 or 128 return bin(self.ip).split('b')[1].rjust(bits, '0')
def bin(self)
Full-length binary representation of the IP address. >>> ip = IP("127.0.0.1") >>> print(ip.bin()) 01111111000000000000000000000001
9.006375
8.05466
1.118157
b = self.bin() for i in range(len(b), 0, -1): if b[:i] in self._range[self.v]: return self._range[self.v][b[:i]] return 'UNKNOWN'
def info(self)
Show IANA allocation information for the current IP address. >>> ip = IP("127.0.0.1") >>> print(ip.info()) LOOPBACK
5.630705
6.35152
0.886513
# hex notation if dq.startswith('0x'): return self._dqtoi_hex(dq) # IPv6 if ':' in dq: return self._dqtoi_ipv6(dq) elif len(dq) == 32: # Assume full heximal notation self.v = 6 return int(dq, 16) # IPv4 if '.' in dq: return self._dqtoi_ipv4(dq) raise ValueError('Invalid address input')
def _dqtoi(self, dq)
Convert dotquad or hextet to long.
4.452216
4.257342
1.045774
if self.v == 4: return '.'.join(map(str, [ (n >> 24) & 0xff, (n >> 16) & 0xff, (n >> 8) & 0xff, n & 0xff, ])) else: n = '%032x' % n return ':'.join(n[4 * x:4 * x + 4] for x in range(0, 8))
def _itodq(self, n)
Convert long to dotquad or hextet.
2.643075
2.362269
1.118871
if self.v == 4:
    quads = self.dq.split('.')
    try:
        zero = quads.index('0')
        # Two consecutive zero quads in positions 1-2 can both be dropped
        # (e.g. 127.0.0.1 -> 127.1).
        if zero == 1 and quads.index('0', zero + 1):
            quads.pop(zero)
            quads.pop(zero)
            return '.'.join(quads)
        elif zero == 2:
            # A single zero in position 2 can be dropped (e.g. 127.1.0.1 -> 127.1.1).
            quads.pop(zero)
            return '.'.join(quads)
    except ValueError:  # No zeroes
        pass

    return self.dq
else:
    # Strip leading zeros from each hextet, then collapse the single
    # largest run of zero hextets into '::'.
    quads = map(lambda q: '%x' % (int(q, 16)), self.dq.split(':'))
    quadc = ':%s:' % (':'.join(quads),)
    zeros = [0, -1]

    # Find the largest group of zeros
    for match in re.finditer(r'(:[:0]+)', quadc):
        count = len(match.group(1)) - 1
        if count > zeros[0]:
            zeros = [count, match.start(1)]

    count, where = zeros
    if count:
        quadc = quadc[:where] + ':' + quadc[where + count:]

    # Remove the sentinel colons added above, restoring '::' if needed.
    quadc = re.sub(r'((^:)|(:$))', '', quadc)
    quadc = re.sub(r'((^:)|(:$))', '::', quadc)

    return quadc
def to_compressed(self)
Compress an IP address to its shortest possible compressed form. >>> print(IP('127.0.0.1').to_compressed()) 127.1 >>> print(IP('127.1.0.1').to_compressed()) 127.1.1 >>> print(IP('127.0.1.1').to_compressed()) 127.0.1.1 >>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed()) 2001:1234::5678 >>> print(IP('1234:0000:0000:beef:0000:0000:0000:5678').to_compressed()) 1234:0:0:beef::5678 >>> print(IP('0000:0000:0000:0000:0000:0000:0000:0001').to_compressed()) ::1 >>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed()) fe80::
3.358556
3.27431
1.025729
if self.v == 4: return self else: if self.bin().startswith('0' * 96): return IP(int(self), version=4) elif self.bin().startswith('0' * 80 + '1' * 16): return IP(int(self) & MAX_IPV4, version=4) elif int(self) & BASE_6TO4: return IP((int(self) - BASE_6TO4) >> 80, version=4) else: return ValueError('%s: IPv6 address is not IPv4 compatible or mapped, ' 'nor an 6-to-4 IP' % self.dq)
def to_ipv4(self)
Convert (an IPv6) IP address to an IPv4 address, if possible. Only works for IPv4-compat (::/96), IPv4-mapped (::ffff/96), and 6-to-4 (2002::/16) addresses. >>> ip = IP('2002:c000:022a::') >>> print(ip.to_ipv4()) 192.0.2.42
5.213228
4.59464
1.134633
value = value.lstrip('b') if len(value) == 32: return cls(int(value, 2)) elif len(value) == 128: return cls(int(value, 2)) else: return ValueError('%r: invalid binary notation' % (value,))
def from_bin(cls, value)
Initialize a new network from binary notation.
3.04769
2.604125
1.170332
if len(value) == 8: return cls(int(value, 16)) elif len(value) == 32: return cls(int(value, 16)) else: raise ValueError('%r: invalid hexadecimal notation' % (value,))
def from_hex(cls, value)
Initialize a new network from hexadecimal notation.
2.638585
2.34483
1.125277
# NOTE(review): assert is stripped under -O; validation would be safer as
# an explicit raise, but callers may rely on AssertionError here.
assert ip_type in ['6-to-4', 'compat', 'mapped'], 'Conversion ip_type not supported'
if self.v == 4:
    if ip_type == '6-to-4':
        # 2002::/16 with the IPv4 address in bits 80-111.
        return IP(BASE_6TO4 | int(self) << 80, version=6)
    elif ip_type == 'compat':
        # IPv4-compatible: ::a.b.c.d
        return IP(int(self), version=6)
    elif ip_type == 'mapped':
        # IPv4-mapped: ::ffff:a.b.c.d
        return IP(0xffff << 32 | int(self), version=6)
else:
    # Already IPv6: return unchanged.
    return self
def to_ipv6(self, ip_type='6-to-4')
Convert (an IPv4) IP address to an IPv6 address. >>> ip = IP('192.0.2.42') >>> print(ip.to_ipv6()) 2002:c000:022a:0000:0000:0000:0000:0000 >>> print(ip.to_ipv6('compat')) 0000:0000:0000:0000:0000:0000:c000:022a >>> print(ip.to_ipv6('mapped')) 0000:0000:0000:0000:0000:ffff:c000:022a
3.706442
3.832609
0.967081
if self.v == 4: return '.'.join(list(self.dq.split('.')[::-1]) + ['in-addr', 'arpa']) else: return '.'.join(list(self.hex())[::-1] + ['ip6', 'arpa'])
def to_reverse(self)
Convert the IP address to a PTR record. Using the .in-addr.arpa zone for IPv4 and .ip6.arpa for IPv6 addresses. >>> ip = IP('192.0.2.42') >>> print(ip.to_reverse()) 42.2.0.192.in-addr.arpa >>> print(ip.to_ipv6().to_reverse()) 0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.a.2.2.0.0.0.0.c.2.0.0.2.ip6.arpa
6.930242
4.168448
1.662547
if self.version() == 4: return (MAX_IPV4 >> (32 - self.mask)) << (32 - self.mask) else: return (MAX_IPV6 >> (128 - self.mask)) << (128 - self.mask)
def netmask_long(self)
Network netmask derived from subnet size, as long. >>> localnet = Network('127.0.0.1/8') >>> print(localnet.netmask_long()) 4278190080
2.804533
3.018532
0.929105
if self.version() == 4: return self.network_long() | (MAX_IPV4 - self.netmask_long()) else: return self.network_long() \ | (MAX_IPV6 - self.netmask_long())
def broadcast_long(self)
Broadcast address, as long. >>> localnet = Network('127.0.0.1/8') >>> print(localnet.broadcast_long()) 2147483647
5.239156
4.312519
1.214871
if (self.version() == 4 and self.mask > 30) or \ (self.version() == 6 and self.mask > 126): return self else: return IP(self.network_long() + 1, version=self.version())
def host_first(self)
First available host in this subnet.
4.592599
3.778474
1.215464
if (self.version() == 4 and self.mask == 32) or \ (self.version() == 6 and self.mask == 128): return self elif (self.version() == 4 and self.mask == 31) or \ (self.version() == 6 and self.mask == 127): return IP(int(self) + 1, version=self.version()) else: return IP(self.broadcast_long() - 1, version=self.version())
def host_last(self)
Last available host in this subnet.
2.641354
2.38286
1.10848
other = Network(other) return self.network_long() <= other.network_long() <= self.broadcast_long() or \ other.network_long() <= self.network_long() <= other.broadcast_long()
def check_collision(self, other)
Check another network against the given network.
5.671286
4.251403
1.33398
# Apply the include/exclude patterns: listitems = self._visible_models(context['request']) # Convert to a similar data structure like the dashboard icons have. # This allows sorting the items identically. models = [ {'name': model._meta.model_name, 'app_name': model._meta.app_label, 'title': capfirst(model._meta.verbose_name_plural), 'url': self._get_admin_change_url(model, context) } for model, perms in listitems if self.is_item_visible(model, perms) ] # Sort models. sort_cms_models(models) # Convert to items for model in models: self.children.append(items.MenuItem(title=model['title'], url=model['url']))
def init_with_context(self, context)
Initialize the menu.
5.96543
6.11469
0.97559
def init_with_context(self, context):
    """Find the current URL based on the context.

    It uses :func:`get_edited_object` to find the model, and calls
    ``get_absolute_url()`` to get the frontend URL.
    """
    super(ReturnToSiteItem, self).init_with_context(context)

    # See if the current page is being edited, update URL accordingly.
    edited_model = self.get_edited_object(context['request'])
    if edited_model:
        try:
            url = edited_model.get_absolute_url()
        except (AttributeError, urls.NoReverseMatch):
            # Model has no frontend URL (or it can't be reversed);
            # keep the default "return to site" URL.
            pass
        else:
            if url:
                self.url = url
4.521208
3.537112
1.27822
def get_edited_object(self, request):
    """Return the object which is currently being edited.

    Returns ``None`` if the match could not be made.
    """
    resolvermatch = urls.resolve(request.path_info)
    # Only act on admin change views, i.e. "appname_modelname_change".
    if resolvermatch.namespace == 'admin' and resolvermatch.url_name and resolvermatch.url_name.endswith('_change'):
        # In "appname_modelname_change" view of the admin.
        # Extract the appname and model from the url name.
        # For some custom views, url_name might not be filled in (e.g. django-polymorphic's subclass_view)
        match = RE_CHANGE_URL.match(resolvermatch.url_name)
        if not match:
            return None

        # object_id can be string (e.g. a country code as PK).
        try:
            object_id = resolvermatch.kwargs['object_id']  # Django 2.0+
        except KeyError:
            # Older URL patterns pass the PK positionally.
            object_id = resolvermatch.args[0]
        return self.get_object_by_natural_key(match.group(1), match.group(2), object_id)
    return None
4.804374
4.73176
1.015346
def get_object_by_natural_key(self, app_label, model_name, object_id):
    """Return a model based on a natural key.

    This is a utility function for :func:`get_edited_object`.
    Returns ``None`` when the content type or the object can't be found,
    or when the model defines no frontend URL.
    """
    try:
        content_type = ContentType.objects.get_by_natural_key(app_label, model_name)
    except ContentType.DoesNotExist:
        return None

    # Pointless to fetch the object if there is no URL to generate;
    # checking the class first avoids another database query.
    if not hasattr(content_type.model_class(), 'get_absolute_url'):
        return None

    try:
        return content_type.get_object_for_this_type(pk=object_id)
    except ObjectDoesNotExist:
        return None
3.039439
3.16
0.961848
def get_personal_module(self):
    """Instantiate the :class:`~fluent_dashboard.modules.PersonalModule` for use in the dashboard."""
    # Rendered inline and pinned: not draggable, deletable or collapsible.
    module_kwargs = dict(
        layout='inline',
        draggable=False,
        deletable=False,
        collapsible=False,
    )
    return PersonalModule(**module_kwargs)
11.863875
6.739509
1.760347
def get_application_modules(self):
    """Instantiate all application modules (i.e. :class:`~admin_tools.dashboard.modules.AppList`, :class:`~fluent_dashboard.modules.AppIconList` and :class:`~fluent_dashboard.modules.CmsAppIconList`) for use in the dashboard."""
    # Each group names its module class (e.g. CmsAppIconList, AppIconList,
    # AppList); resolve it and pass the remaining kwargs straight through.
    return [
        get_class(group_kwargs.pop('module'))(title, **group_kwargs)
        for title, group_kwargs in get_application_groups()
    ]
7.862395
5.611747
1.40106
def get_recent_actions_module(self):
    """Instantiate the :class:`~admin_tools.dashboard.modules.RecentActions` module for use in the application index page."""
    # Limited to this application's content types; starts disabled and
    # is not collapsible.
    recent_actions = modules.RecentActions(
        _('Recent Actions'),
        include_list=self.get_app_content_types(),
        limit=5,
        enabled=False,
        collapsible=False,
    )
    return recent_actions
10.046516
6.31484
1.590938
def get_application_groups():
    """Return the applications of the system, organized in various groups.

    These groups are not connected with the application names, but rather
    with a pattern of applications.
    """
    groups = []
    for title, groupdict in appsettings.FLUENT_DASHBOARD_APP_GROUPS:
        # Every key is passed through to the DashboardModule class;
        # only 'models' receives special catch-all treatment below.
        module_kwargs = groupdict.copy()

        if '*' not in groupdict['models']:
            default_module = 'CmsAppIconList'
        else:
            # Catch-all group: instead of an explicit model list, show
            # everything that is not claimed by a dedicated group.
            default_module = appsettings.FLUENT_DASHBOARD_DEFAULT_MODULE
            module_kwargs['exclude'] = ALL_KNOWN_APPS + list(module_kwargs.get('exclude', []))
            del module_kwargs['models']

        # The configured module may be an alias for a known variation.
        module = groupdict.get('module', default_module)
        module_kwargs['module'] = MODULE_ALIASES.get(module, module)

        groups.append((title, module_kwargs))
    return groups
6.487571
6.676157
0.971752
def sort_cms_models(cms_models):
    """Sort a set of CMS-related models in a custom (predefined) order.

    Sorts the list in place.
    """
    def _sort_key(model):
        # CMS apps get their predefined position; all other apps sort
        # after them (999), alphabetically by app name and title.
        if is_cms_app(model['app_name']):
            order = get_cms_model_order(model['name'])
        else:
            order = 999
        return (order, model['app_name'], model['title'])

    cms_models.sort(key=_sort_key)
3.610047
3.516829
1.026506
def is_cms_app(app_name):
    """Return whether the given application is a CMS app"""
    # The setting holds glob-style patterns (fnmatch), not exact names.
    return any(
        fnmatch(app_name, pattern)
        for pattern in appsettings.FLUENT_DASHBOARD_CMS_APP_NAMES
    )
6.215446
6.051098
1.02716