code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def run(edges, iterations=1000, force_strength=5.0, dampening=0.01, max_velocity=2.0, max_distance=50, is_3d=True): """Runs a force-directed-layout algorithm on the input graph. iterations - Number of FDL iterations to run in coordinate generation force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes) dampening - Multiplier to reduce force applied to nodes max_velocity - Maximum distance a node can move in one step max_distance - The maximum distance considered for interactions """ # Get a list of node ids from the edge data nodes = set(e['source'] for e in edges) | set(e['target'] for e in edges) # Convert to a data-storing object and initialize some values d = 3 if is_3d else 2 nodes = {n: {'velocity': [0.0] * d, 'force': [0.0] * d} for n in nodes} # Repeat n times (is there a more Pythonic way to do this?) for _ in repeat(None, iterations): # Add in Coulomb-esque node-node repulsive forces for node1, node2 in combinations(nodes.values(), 2): _coulomb(node1, node2, force_strength, max_distance) # And Hooke-esque edge spring forces for edge in edges: _hooke(nodes[edge['source']], nodes[edge['target']], force_strength * edge.get('size', 1), max_distance) # Move by resultant force for node in nodes.values(): # Constrain the force to the bounds specified by input parameter force = [_constrain(dampening * f, -max_velocity, max_velocity) for f in node['force']] # Update velocities and reset force node['velocity'] = [v + dv for v, dv in zip(node['velocity'], force)] node['force'] = [0] * d # Clean and return for node in nodes.values(): del node['force'] node['location'] = node['velocity'] del node['velocity'] # Even if it's 2D, let's specify three dimensions if not is_3d: node['location'] += [0.0] return nodes
Runs a force-directed-layout algorithm on the input graph. iterations - Number of FDL iterations to run in coordinate generation force_strength - Strength of Coulomb and Hooke forces (edit this to scale the distance between nodes) dampening - Multiplier to reduce force applied to nodes max_velocity - Maximum distance a node can move in one step max_distance - The maximum distance considered for interactions
def get_event_attendees(self, id, **data):
    """
    GET /events/:id/attendees/
    Returns a :ref:`paginated <pagination>` response with a key of
    ``attendees``, containing a list of :format:`attendee`.
    """
    endpoint = "/events/{0}/attendees/".format(id)
    return self.get(endpoint, data=data)
GET /events/:id/attendees/ Returns a :ref:`paginated <pagination>` response with a key of ``attendees``, containing a list of :format:`attendee`.
def get_context_data(self, **kwargs):
    """
    checks if there is SocialFrind model record for the user
    if not attempt to create one
    if all fail, redirects to the next page
    """
    context = super(FriendListView, self).get_context_data(**kwargs)
    # Flatten the existing social friends of every friend list.
    context['friends'] = [
        friend
        for friend_list in self.social_friend_lists
        for friend in friend_list.existing_social_friends()
    ]
    # Providers the user is currently connected through.
    context['connected_providers'] = [sa.provider for sa in self.social_auths]
    return context
checks if there is SocialFrind model record for the user if not attempt to create one if all fail, redirects to the next page
def _paths_must_exists(path):
    """
    Raises error if path doesn't exist.
    :param path: str path to check
    :return: str same path passed in
    """
    path = to_unicode(path)
    if os.path.exists(path):
        return path
    raise argparse.ArgumentTypeError(
        "{} is not a valid file/folder.".format(path))
Raises error if path doesn't exist. :param path: str path to check :return: str same path passed in
def column_spec_path(cls, project, location, dataset, table_spec, column_spec):
    """Return a fully-qualified column_spec string.

    Expands the AutoML resource path template with the given identifiers:
    projects/{project}/locations/{location}/datasets/{dataset}/
    tableSpecs/{table_spec}/columnSpecs/{column_spec}
    """
    return google.api_core.path_template.expand(
        "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}",
        project=project,
        location=location,
        dataset=dataset,
        table_spec=table_spec,
        column_spec=column_spec,
    )
Return a fully-qualified column_spec string.
def check_update():
    """Return True if a newer ``prof`` release is available on PyPI.

    :return: bool -- True when PyPI advertises a version greater than
        the locally installed ``__version__``.
    """
    # A timeout keeps the CLI from hanging indefinitely when PyPI is
    # unreachable (the original call had no timeout at all).
    r = requests.get("https://pypi.python.org/pypi/prof/json", timeout=10)
    data = r.json()
    # Compare parsed version tuples directly instead of the
    # `if cond: return True / return False` pattern.
    return versiontuple(data['info']['version']) > versiontuple(__version__)
Return True if an update is available on pypi
def _update_assignment_email_status(offer_assignment_id, send_id, status, site_code=None):
    """ Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api.

    Arguments:
        offer_assignment_id (str): Key of the entry in the offer_assignment model.
        send_id (str): Unique message id from Sailthru
        status (str): status to be sent to the api
        site_code (str): site code

    Returns:
        True or False based on model update status from Ecommerce api
    """
    api = get_ecommerce_client(url_postfix='assignment-email/', site_code=site_code)
    post_data = {
        'offer_assignment_id': offer_assignment_id,
        'send_id': send_id,
        'status': status,
    }
    try:
        api_response = api.status().post(post_data)
    except RequestException:
        # Best-effort update: log the failure and report False to the caller.
        logger.exception(
            '[Offer Assignment] An error occurred while updating offer assignment email status for '
            'offer id {token_offer} and message id {token_send_id} via the Ecommerce API.'.format(
                token_offer=offer_assignment_id,
                token_send_id=send_id
            )
        )
        return False
    # Idiom fix: direct boolean expression instead of `True if ... else False`.
    return api_response.get('status') == 'updated'
Update the offer_assignment and offer_assignment_email model using the Ecommerce assignmentemail api. Arguments: offer_assignment_id (str): Key of the entry in the offer_assignment model. send_id (str): Unique message id from Sailthru status (str): status to be sent to the api site_code (str): site code Returns: True or False based on model update status from Ecommerce api
def msg_curse(self, args=None, max_width=None): """Return the dict to display in the curse interface.""" # Init the return message ret = [] # Only process if stats exist and display plugin enable... if not self.stats or self.is_disable(): return ret # Max size for the interface name name_max_width = max_width - 7 # Header msg = '{:{width}}'.format('FOLDERS', width=name_max_width) ret.append(self.curse_add_line(msg, "TITLE")) # Data for i in self.stats: ret.append(self.curse_new_line()) if len(i['path']) > name_max_width: # Cut path if it is too long path = '_' + i['path'][-name_max_width + 1:] else: path = i['path'] msg = '{:{width}}'.format(nativestr(path), width=name_max_width) ret.append(self.curse_add_line(msg)) try: msg = '{:>9}'.format(self.auto_unit(i['size'])) except (TypeError, ValueError): msg = '{:>9}'.format(i['size']) ret.append(self.curse_add_line(msg, self.get_alert(i, header='folder_' + i['indice']))) return ret
Return the dict to display in the curse interface.
def list_available_tools(self):
    """
    Lists all the Benchmarks configuration files found in the
    configuration folders (the alternative directory, when set, is
    scanned before the default one).
    :return: list of BenchmarkToolConfiguration objects
    """
    search_dirs = []
    if self.alternative_config_dir:
        search_dirs.append(self.alternative_config_dir)
    search_dirs.append(self.default_config_dir)
    benchmarks = []
    for config_dir in search_dirs:
        pattern = os.path.join(config_dir, self.BENCHMARKS_DIR, '*.conf')
        benchmarks.extend(BenchmarkToolConfiguration(p)
                          for p in glob.glob(pattern))
    return benchmarks
Lists all the Benchmarks configuration files found in the configuration folders :return:
def inDignities(self, idA, idB):
    """ Returns the dignities of A which belong to B. """
    obj = self.chart.get(idA)
    dign_info = essential.getInfo(obj.sign, obj.signlon)
    # Should we ignore exile and fall?
    matches = []
    for dignity, owner in dign_info.items():
        if owner == idB:
            matches.append(dignity)
    return matches
Returns the dignities of A which belong to B.
def _find_key_cols(df): """Identify columns in a DataFrame that could be a unique key""" keys = [] for col in df: if len(df[col].unique()) == len(df[col]): keys.append(col) return keys
Identify columns in a DataFrame that could be a unique key
def is_seq_of(seq, expected_type, seq_type=None):
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): The sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.

    Returns:
        bool: Whether the sequence is valid.
    """
    if seq_type is None:
        exp_seq_type = collections_abc.Sequence
    else:
        assert isinstance(seq_type, type)
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    # All items must match the expected type.
    return all(isinstance(item, expected_type) for item in seq)
Check whether it is a sequence of some type. Args: seq (Sequence): The sequence to be checked. expected_type (type): Expected type of sequence items. seq_type (type, optional): Expected sequence type. Returns: bool: Whether the sequence is valid.
def _publish_queue_grpc(self):
    """
    Send every message currently in the tx queue to the GRPC manager.
    :return: None
    """
    batch = EventHub_pb2.Messages(msg=self._tx_queue)
    request = EventHub_pb2.PublishRequest(messages=batch)
    self.grpc_manager.send_message(request)
send the messages in the tx queue to the GRPC manager :return: None
def build_api_struct(self):
    """
    Calls the clean method of the class and returns the info in a
    structure that the Atlas API accepts.
    """
    self.clean()
    data = {"type": self.measurement_type}
    # Translate every used option to its v2 API key/value pair.
    for option in self.used_options:
        key, value = self.v2_translator(option)
        data[key] = value
    return data
Calls the clean method of the class and returns the info in a structure that Atlas API is accepting.
def attach_attachment(self, analysis, attachment):
    """
    Attach a file or a given set of files to an analysis

    :param analysis: analysis where the files are to be attached
    :param attachment: files to be attached. This can be either a single file
                       or a list of files
    :return: None
    """
    if not attachment:
        return
    if isinstance(attachment, list):
        # Recurse once per attachment, then stop.
        for attach in attachment:
            self.attach_attachment(analysis, attach)
        return
    # current attachments
    an_atts = analysis.getAttachment()
    atts_filenames = [att.getAttachmentFile().filename for att in an_atts]
    # De-duplicate by filename: only attach when no existing attachment
    # carries the same file name.
    if attachment.getAttachmentFile().filename not in atts_filenames:
        an_atts.append(attachment)
        logger.info(
            "Attaching %s to %s" % (attachment.UID(), analysis))
        analysis.setAttachment([att.UID() for att in an_atts])
        analysis.reindexObject()
    else:
        self.warn("Attachment %s was not linked to analysis %s"
                  % (attachment.UID(), analysis))
Attach a file or a given set of files to an analysis :param analysis: analysis where the files are to be attached :param attachment: files to be attached. This can be either a single file or a list of files :return: None
def setup(self, phase=None, quantity='', conductance='', **kwargs):
    r"""
    Store the essential arguments for running the algorithm in the
    ``settings`` dictionary.

    Parameters
    ----------
    phase : OpenPNM Phase object
        The phase on which the algorithm is to be run.
    quantity : string
        The name of the physical quantity to be calculated.
    conductance : string
        The name of the pore-scale transport conductance values.  These
        are typically calculated by a model attached to a *Physics*
        object associated with the given *Phase*.
    solver : string
        To use the default scipy solver, set this value to `spsolve` or
        `umfpack`.  To use an iterative solver or a non-scipy solver,
        additional arguments are required as described next.
    solver_family : string
        The solver package to use.  OpenPNM currently supports ``scipy``,
        ``pyamg`` and ``petsc`` (if you have it installed).  The default
        is ``scipy``.
    solver_type : string
        The specific solver to use.  For instance, if ``solver_family`` is
        ``scipy`` then you can specify any of the iterative solvers such
        as ``cg`` or ``gmres``.  [More info here]
        (https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html)
    solver_preconditioner : string
        Used by the PETSc solver to specify which preconditioner to use.
        The default is ``jacobi``.
    solver_atol : scalar
        Used to control the accuracy to which the iterative solver aims.
        The default is 1e-6.
    solver_rtol : scalar
        Used by PETSc as an additional tolerance control.  The default is
        1e-6.
    solver_maxiter : scalar
        Limits the number of iterations to attempt before quiting when
        aiming for the specified tolerance.  The default is 5000.
    """
    # Only overwrite a setting when the caller actually supplied a value.
    if phase:
        self.settings['phase'] = phase.name
    for key, value in (('quantity', quantity), ('conductance', conductance)):
        if value:
            self.settings[key] = value
    self.settings.update(**kwargs)
r""" This method takes several arguments that are essential to running the algorithm and adds them to the settings. Parameters ---------- phase : OpenPNM Phase object The phase on which the algorithm is to be run. quantity : string The name of the physical quantity to be calculated. conductance : string The name of the pore-scale transport conductance values. These are typically calculated by a model attached to a *Physics* object associated with the given *Phase*. solver : string To use the default scipy solver, set this value to `spsolve` or `umfpack`. To use an iterative solver or a non-scipy solver, additional arguments are required as described next. solver_family : string The solver package to use. OpenPNM currently supports ``scipy``, ``pyamg`` and ``petsc`` (if you have it installed). The default is ``scipy``. solver_type : string The specific solver to use. For instance, if ``solver_family`` is ``scipy`` then you can specify any of the iterative solvers such as ``cg`` or ``gmres``. [More info here] (https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html) solver_preconditioner : string This is used by the PETSc solver to specify which preconditioner to use. The default is ``jacobi``. solver_atol : scalar Used to control the accuracy to which the iterative solver aims. The default is 1e-6. solver_rtol : scalar Used by PETSc as an additional tolerance control. The default is 1e-6. solver_maxiter : scalar Limits the number of iterations to attempt before quiting when aiming for the specified tolerance. The default is 5000.
def _clip_line(self, line_pt_1, line_pt_2):
    """Clip a line segment to the canvas extent.

    :param line_pt_1: (x, y) first endpoint
    :param line_pt_2: (x, y) second endpoint
    :return: the clipped ((x1, y1), (x2, y2)) pair, or None when the
        segment lies entirely outside the canvas.
    """
    x_min = min(line_pt_1[0], line_pt_2[0])
    x_max = max(line_pt_1[0], line_pt_2[0])
    y_min = min(line_pt_1[1], line_pt_2[1])
    y_max = max(line_pt_1[1], line_pt_2[1])
    extent = self.extent()
    # Vertical line: clamp the y-range only.
    if line_pt_1[0] == line_pt_2[0]:
        return ((line_pt_1[0], max(y_min, extent[1])),
                (line_pt_1[0], min(y_max, extent[3])))
    # Horizontal line: clamp the x-range only.
    if line_pt_1[1] == line_pt_2[1]:
        return ((max(x_min, extent[0]), line_pt_1[1]),
                (min(x_max, extent[2]), line_pt_1[1]))
    # Fully inside: nothing to clip.
    if ((extent[0] <= line_pt_1[0] < extent[2]) and
            (extent[1] <= line_pt_1[1] < extent[3]) and
            (extent[0] <= line_pt_2[0] < extent[2]) and
            (extent[1] <= line_pt_2[1] < extent[3])):
        return line_pt_1, line_pt_2
    # Parametric (Liang-Barsky style) clipping: t values where the line
    # crosses each canvas boundary, plus the segment's own endpoints.
    ts = [0.0,
          1.0,
          float(extent[0] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[2] - line_pt_1[0]) / (line_pt_2[0] - line_pt_1[0]),
          float(extent[1] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1]),
          float(extent[3] - line_pt_1[1]) / (line_pt_2[1] - line_pt_1[1])]
    ts.sort()
    # BUG FIX: the last condition previously re-tested ts[2] >= 1 instead
    # of ts[3] >= 1, so some fully-outside segments were not rejected.
    if (ts[2] < 0) or (ts[2] >= 1) or (ts[3] < 0) or (ts[3] >= 1):
        return None
    result = \
        [(pt_1 + t * (pt_2 - pt_1))
         for t in (ts[2], ts[3])
         for (pt_1, pt_2) in zip(line_pt_1, line_pt_2)]
    return (result[:2], result[2:])
clip line to canvas
def get_stack_index(self, stack_index, plugin_index):
    """Get the real index of the selected item."""
    # Subtract the item counts of every plugin that precedes this one.
    preceding_count = 0
    for tabs in self.plugins_tabs[:plugin_index]:
        preceding_count += tabs[0].count()
    return stack_index - preceding_count
Get the real index of the selected item.
async def analog_write(self, command):
    """
    This method writes a value to an analog pin. It is used to set the
    output of a PWM pin or the angle of a Servo.
    :param command: {"method": "analog_write", "params": [PIN, WRITE_VALUE]}
    :returns: No return message.
    """
    pin, value = int(command[0]), int(command[1])
    await self.core.analog_write(pin, value)
This method writes a value to an analog pin. It is used to set the output of a PWM pin or the angle of a Servo. :param command: {"method": "analog_write", "params": [PIN, WRITE_VALUE]} :returns: No return message.
def multiget(client, keys, **options):
    """Executes a parallel-fetch across multiple threads. Returns a list
    containing :class:`~riak.riak_object.RiakObject` or
    :class:`~riak.datatypes.Datatype` instances, or 4-tuples of
    bucket-type, bucket, key, and the exception raised.

    If a ``pool`` option is included, the request will use the given worker
    pool and not a transient :class:`~riak.client.multi.MultiGetPool`. This
    option will be passed by the client if the ``multiget_pool_size``
    option was set on client initialization.

    :param client: the client to use
    :type client: :class:`~riak.client.RiakClient`
    :param keys: the keys to fetch in parallel
    :type keys: list of three-tuples -- bucket_type/bucket/key
    :param options: request options to
        :meth:`RiakBucket.get <riak.bucket.RiakBucket.get>`
    :type options: dict
    :rtype: list
    """
    transient_pool = False
    outq = Queue()
    # Reuse the caller-supplied pool when given; otherwise create a
    # throwaway pool that is stopped in the finally block below.
    if 'pool' in options:
        pool = options['pool']
        del options['pool']
    else:
        pool = MultiGetPool()
        transient_pool = True
    try:
        pool.start()
        for bucket_type, bucket, key in keys:
            task = Task(client, outq, bucket_type, bucket, key, None,
                        options)
            pool.enq(task)
        results = []
        # Drain exactly one result per requested key.
        for _ in range(len(keys)):
            if pool.stopped():
                raise RuntimeError(
                    'Multi-get operation interrupted by pool '
                    'stopping!')
            results.append(outq.get())
            outq.task_done()
    finally:
        # Only stop pools created here; caller-owned pools stay running.
        if transient_pool:
            pool.stop()
    return results
Executes a parallel-fetch across multiple threads. Returns a list containing :class:`~riak.riak_object.RiakObject` or :class:`~riak.datatypes.Datatype` instances, or 4-tuples of bucket-type, bucket, key, and the exception raised. If a ``pool`` option is included, the request will use the given worker pool and not a transient :class:`~riak.client.multi.MultiGetPool`. This option will be passed by the client if the ``multiget_pool_size`` option was set on client initialization. :param client: the client to use :type client: :class:`~riak.client.RiakClient` :param keys: the keys to fetch in parallel :type keys: list of three-tuples -- bucket_type/bucket/key :param options: request options to :meth:`RiakBucket.get <riak.bucket.RiakBucket.get>` :type options: dict :rtype: list
def info(self, message, domain=None):
    """
    Shortcut function for `utils.loggable.info`

    Args:
        message: see `utils.loggable.info`
        domain: see `utils.loggable.info`
    """
    # Fall back to this extension's name when no domain was given.
    effective_domain = self.extension_name if domain is None else domain
    info(message, effective_domain)
Shortcut function for `utils.loggable.info` Args: message: see `utils.loggable.info` domain: see `utils.loggable.info`
def add_pyspark_path():
    """Add PySpark to the library path based on the value of SPARK_HOME.
    """
    try:
        spark_home = os.environ['SPARK_HOME']
        sys.path.append(os.path.join(spark_home, 'python'))
        lib_dir = os.path.join(spark_home, 'python', 'lib')
        py4j_archives = glob(os.path.join(lib_dir, 'py4j-*-src.zip'))
        if not py4j_archives:
            raise ValueError('py4j source archive not found in %s'
                             % lib_dir)
        # Prefer the lexicographically greatest (newest) archive.
        sys.path.append(sorted(py4j_archives, reverse=True)[0])
    except KeyError:
        logging.error("""SPARK_HOME was not set. please set it. e.g. SPARK_HOME='/home/...' ./bin/pyspark [program]""")
        exit(-1)
    except ValueError as e:
        logging.error(str(e))
        exit(-1)
Add PySpark to the library path based on the value of SPARK_HOME.
def result(self, wait=False):
    """
    Gets the result of the method call. If the call was successful,
    return the result, otherwise, reraise the exception.

    :param wait: Block until the result is available, or just get the
        result.
    :raises: RuntimeError when called and the result is not yet available.
    """
    if wait:
        self._async_resp.wait()
    if not self.finished():
        raise RuntimeError("Result is not ready yet")
    raw = self._async_resp.get()
    # Wrap the raw JSON-RPC payload in a Result object.
    return Result(result=raw["result"],
                  error=raw["error"],
                  id=raw["id"],
                  method_call=self.request)
Gets the result of the method call. If the call was successful, return the result, otherwise, reraise the exception. :param wait: Block until the result is available, or just get the result. :raises: RuntimeError when called and the result is not yet available.
def cmd_follow(self, args):
    '''control following of vehicle'''
    if len(args) >= 2:
        self.map.set_follow(int(args[1]))
    else:
        # No argument supplied: show usage.
        print("map follow 0|1")
control following of vehicle
def _serialize_uint(value, size=32, padding=0):
    """
    Translates a python integral or a BitVec into a 32 byte string, MSB first

    :param value: int, BitVec or EVMAccount to serialize
    :param size: number of value bytes to emit (1..32)
    :param padding: number of leading zero bytes prepended before the value
    :return: bytearray (concrete) or ArrayProxy (symbolic) of
        ``size + padding`` bytes
    :raises ValueError: on invalid size or unsupported value type
    """
    if size <= 0 or size > 32:
        raise ValueError
    from .account import EVMAccount  # because of circular import
    if not isinstance(value, (int, BitVec, EVMAccount)):
        raise ValueError
    if issymbolic(value):
        # FIXME This temporary array variable should be obtained from a
        # specific constraint store
        bytes = ArrayVariable(index_bits=256, index_max=32, value_bits=8,
                              name='temp{}'.format(uuid.uuid1()))
        if value.size <= size * 8:
            value = Operators.ZEXTEND(value, size * 8)
        else:
            # automatically truncate, e.g. if they passed a BitVec(256) for
            # an `address` argument (160 bits)
            value = Operators.EXTRACT(value, 0, size * 8)
        bytes = ArrayProxy(bytes.write_BE(padding, value, size))
    else:
        value = int(value)
        bytes = bytearray()
        # Leading zero padding first, then the value MSB-first.
        for _ in range(padding):
            bytes.append(0)
        for position in reversed(range(size)):
            bytes.append(Operators.EXTRACT(value, position * 8, 8))
    assert len(bytes) == size + padding
    return bytes
Translates a python integral or a BitVec into a 32 byte string, MSB first
def invalidate(self, key):
    """Remove the given data item along with all items that depend on
    it in the graph."""
    if key not in self.data:
        return
    del self.data[key]
    # Recursively invalidate everything produced by a component that
    # consumed this key.
    for cname in self.components:
        if key not in self.depends[cname]:
            continue
        for produced_key in self.provides[cname]:
            self.invalidate(produced_key)
Remove the given data item along with all items that depend on it in the graph.
def from_parmed(cls, path, *args, **kwargs):
    """
    Try to load a file automatically with ParmEd. Not guaranteed to work,
    but might be useful if it succeeds.

    Arguments
    ---------
    path : str
        Path to file that ParmEd can load
    """
    # NOTE(review): kwargs (including any box/velocities/positions
    # overrides) are forwarded to parmed.load_file BEFORE being popped
    # below — confirm ParmEd tolerates those extra keyword arguments.
    st = parmed.load_file(path, structure=True, *args, **kwargs)
    # Caller-supplied values win over those found on the loaded structure.
    box = kwargs.pop('box', getattr(st, 'box', None))
    velocities = kwargs.pop('velocities', getattr(st, 'velocities', None))
    positions = kwargs.pop('positions', getattr(st, 'positions', None))
    return cls(master=st, topology=st.topology, positions=positions,
               box=box, velocities=velocities, path=path, **kwargs)
Try to load a file automatically with ParmEd. Not guaranteed to work, but might be useful if it succeeds. Arguments --------- path : str Path to file that ParmEd can load
def create_textfile_with_contents(filename, contents, encoding='utf-8'):
    """
    Creates a textual file with the provided contents in the workdir.
    Overwrites an existing file.

    :param filename: path of the file to create (parent dirs are created)
    :param contents: text to write; a trailing newline is appended when
        the contents are non-empty and do not already end with one
    :param encoding: text encoding used for the file (default: utf-8)
    """
    ensure_directory_exists(os.path.dirname(filename))
    if os.path.exists(filename):
        os.remove(filename)
    # Context manager guarantees the stream is flushed and closed even
    # if a write raises (the original leaked the handle on error).
    with codecs.open(filename, "w", encoding) as outstream:
        outstream.write(contents)
        if contents and not contents.endswith("\n"):
            outstream.write("\n")
    assert os.path.exists(filename), "ENSURE file exists: %s" % filename
Creates a textual file with the provided contents in the workdir. Overwrites an existing file.
def get_new_document(self, cursor_pos=None):
    """
    Create a `Document` instance that contains the resulting text.

    The result is: text before the cursor, then the selected history
    lines (in line-number order), then text after the cursor.

    :param cursor_pos: desired cursor position; clamped to the text
        length when it points past the end.
    """
    lines = []

    # Original text, before cursor.
    if self.original_document.text_before_cursor:
        lines.append(self.original_document.text_before_cursor)

    # Selected entries from the history, in ascending line order.
    for line_no in sorted(self.selected_lines):
        lines.append(self.history_lines[line_no])

    # Original text, after cursor.
    if self.original_document.text_after_cursor:
        lines.append(self.original_document.text_after_cursor)

    # Create `Document` with cursor at the right position.
    text = '\n'.join(lines)
    if cursor_pos is not None and cursor_pos > len(text):
        cursor_pos = len(text)
    return Document(text, cursor_pos)
Create a `Document` instance that contains the resulting text.
def volume_present(name, bricks, stripe=False, replica=False, device_vg=False,
                   transport='tcp', start=False, force=False, arbiter=False):
    '''
    Ensure that the volume exists

    name
        name of the volume

    bricks
        list of brick paths

    replica
        replica count for volume

    arbiter
        use every third brick as arbiter (metadata only)

        .. versionadded:: 2019.2.0

    start
        ensure that the volume is also started

    .. code-block:: yaml

        myvolume:
          glusterfs.volume_present:
            - bricks:
                - host1:/srv/gluster/drive1
                - host2:/srv/gluster/drive2

        Replicated Volume:
          glusterfs.volume_present:
            - name: volume2
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
            - replica: 2
            - start: True

        Replicated Volume with arbiter brick:
          glusterfs.volume_present:
            - name: volume3
            - bricks:
              - host1:/srv/gluster/drive2
              - host2:/srv/gluster/drive3
              - host3:/srv/gluster/drive4
            - replica: 3
            - arbiter: True
            - start: True
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    # Reject volume names containing characters outside the allowed set.
    if suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        return ret
    volumes = __salt__['glusterfs.list_volumes']()
    if name not in volumes:
        # Volume does not exist yet: create it (or report what would
        # happen in test mode).
        if __opts__['test']:
            comment = 'Volume {0} will be created'.format(name)
            if start:
                comment += ' and started'
            ret['comment'] = comment
            ret['result'] = None
            return ret
        vol_created = __salt__['glusterfs.create_volume'](
            name, bricks, stripe, replica, device_vg, transport, start,
            force, arbiter)
        if not vol_created:
            ret['comment'] = 'Creation of volume {0} failed'.format(name)
            return ret
        old_volumes = volumes
        volumes = __salt__['glusterfs.list_volumes']()
        if name in volumes:
            ret['changes'] = {'new': volumes, 'old': old_volumes}
            ret['comment'] = 'Volume {0} is created'.format(name)
    else:
        ret['comment'] = 'Volume {0} already exists'.format(name)
    if start:
        if __opts__['test']:
            # volume already exists
            ret['comment'] = ret['comment'] + ' and will be started'
            ret['result'] = None
            return ret
        # Status '1' means the volume is already started.
        if int(__salt__['glusterfs.info']()[name]['status']) == 1:
            ret['result'] = True
            ret['comment'] = ret['comment'] + ' and is started'
        else:
            vol_started = __salt__['glusterfs.start_volume'](name)
            if vol_started:
                ret['result'] = True
                ret['comment'] = ret['comment'] + ' and is now started'
                if not ret['changes']:
                    ret['changes'] = {'new': 'started', 'old': 'stopped'}
            else:
                ret['comment'] = ret['comment'] + \
                    ' but failed to start. Check logs for further information'
                return ret
    if __opts__['test']:
        ret['result'] = None
    else:
        ret['result'] = True
    return ret
Ensure that the volume exists name name of the volume bricks list of brick paths replica replica count for volume arbiter use every third brick as arbiter (metadata only) .. versionadded:: 2019.2.0 start ensure that the volume is also started .. code-block:: yaml myvolume: glusterfs.volume_present: - bricks: - host1:/srv/gluster/drive1 - host2:/srv/gluster/drive2 Replicated Volume: glusterfs.volume_present: - name: volume2 - bricks: - host1:/srv/gluster/drive2 - host2:/srv/gluster/drive3 - replica: 2 - start: True Replicated Volume with arbiter brick: glusterfs.volume_present: - name: volume3 - bricks: - host1:/srv/gluster/drive2 - host2:/srv/gluster/drive3 - host3:/srv/gluster/drive4 - replica: 3 - arbiter: True - start: True
def get_user(self, login):
    """ http://confluence.jetbrains.net/display/YTD2/GET+user
    """
    # URL-encode the login so non-ASCII user names survive the request.
    encoded_login = urlquote(login.encode('utf8'))
    return youtrack.User(self._get("/admin/user/" + encoded_login), self)
http://confluence.jetbrains.net/display/YTD2/GET+user
def get_converter(rule):
    """
    Extract the converter from a werkzeug rule.

    parse_rule yields (converter, arguments, variable) triples: the
    static rule part first, then the dynamic part (if any).  Return the
    first non-None converter, or None when the rule is fully static.
    """
    converters = (conv for conv, _, _ in parse_rule(str(rule))
                  if conv is not None)
    return next(converters, None)
Parse rule will extract the converter from the rule as a generator We iterate through the parse_rule results to find the converter parse_url returns the static rule part in the first iteration parse_url returns the dynamic rule part in the second iteration if its dynamic
def _name_to_index(self, channels): """ Return the channel indices for the specified channel names. Integers contained in `channel` are returned unmodified, if they are within the range of ``self.channels``. Parameters ---------- channels : int or str or list of int or list of str Name(s) of the channel(s) of interest. Returns ------- int or list of int Numerical index(ces) of the specified channels. """ # Check if list, then run recursively if hasattr(channels, '__iter__') \ and not isinstance(channels, six.string_types): return [self._name_to_index(ch) for ch in channels] if isinstance(channels, six.string_types): # channels is a string containing a channel name if channels in self.channels: return self.channels.index(channels) else: raise ValueError("{} is not a valid channel name." .format(channels)) if isinstance(channels, int): if (channels < len(self.channels) and channels >= -len(self.channels)): return channels else: raise ValueError("index out of range") else: raise TypeError("input argument should be an integer, string or " "list of integers or strings")
Return the channel indices for the specified channel names. Integers contained in `channel` are returned unmodified, if they are within the range of ``self.channels``. Parameters ---------- channels : int or str or list of int or list of str Name(s) of the channel(s) of interest. Returns ------- int or list of int Numerical index(ces) of the specified channels.
def on_proposal(self, proposal, proto):
    "called to inform about synced peers"
    # NOTE: asserts are internal sanity checks (stripped under -O).
    assert isinstance(proto, HDCProtocol)
    assert isinstance(proposal, Proposal)
    # A proposal at or above our own height means this peer is synced;
    # remember its protocol as the most recently active one.
    if proposal.height >= self.cm.height:
        assert proposal.lockset.is_valid
        self.last_active_protocol = proto
called to inform about synced peers
def buffer(self, frame):
    """Enable buffering for the frame from that point onwards."""
    # Allocate a fresh identifier and emit the list that will collect
    # the buffered output.
    ident = self.temporary_identifier()
    frame.buffer = ident
    self.writeline('%s = []' % ident)
Enable buffering for the frame from that point onwards.
def _create_variables(self, n_features, n_classes):
    """Create the TensorFlow variables for the model.

    Stores the variables on the instance as ``W_`` and ``b_``.  Note the
    method itself returns None (the previous docstring's ":return: self"
    was inaccurate).

    :param n_features: number of features (input dimensionality)
    :param n_classes: number of classes (output dimensionality)
    """
    # Weight matrix, zero-initialized, shape (n_features, n_classes).
    self.W_ = tf.Variable(
        tf.zeros([n_features, n_classes]), name='weights')
    # Per-class bias vector, zero-initialized.
    self.b_ = tf.Variable(
        tf.zeros([n_classes]), name='biases')
Create the TensorFlow variables for the model. :param n_features: number of features :param n_classes: number of classes :return: self
def list_cluster_role_binding(self, **kwargs):  # noqa: E501
    """list_cluster_role_binding  # noqa: E501

    list or watch objects of kind ClusterRoleBinding  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_cluster_role_binding(async_req=True)
    >>> result = thread.get()

    All keyword arguments are forwarded unchanged to
    ``list_cluster_role_binding_with_http_info`` (see that method for the
    full parameter list: include_uninitialized, pretty, _continue,
    field_selector, label_selector, limit, resource_version,
    timeout_seconds, watch, ...).

    :param async_req bool: execute request asynchronously
    :return: V1ClusterRoleBindingList
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Asynchronous callers get the request thread back directly.
    if kwargs.get('async_req'):
        return self.list_cluster_role_binding_with_http_info(**kwargs)  # noqa: E501
    data = self.list_cluster_role_binding_with_http_info(**kwargs)  # noqa: E501
    return data
list_cluster_role_binding # noqa: E501 list or watch objects of kind ClusterRoleBinding # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_cluster_role_binding(async_req=True) >>> result = thread.get() :param async_req bool :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1ClusterRoleBindingList If the method is called asynchronously, returns the request thread.
def _is_junction(arg): ''' Return True, if arg is a junction statement. ''' return isinstance(arg, dict) and len(arg) == 1 and next(six.iterkeys(arg)) == 'junction'
Return True, if arg is a junction statement.
def _save_function_initial_state(self, function_key, function_address, state):
    """
    Save the initial state of a function, and merge it with existing ones if there are any.

    :param FunctionKey function_key:      The key to this function.
    :param int function_address:          Address of the function.
    :param SimState state:                Initial state of the function.
    :return: None
    """
    l.debug('Saving the initial state for function %#08x with function key %s',
            function_address, function_key
            )
    # All initial states recorded so far for this function address.
    states_by_key = self._function_initial_states[function_address]
    if function_key in states_by_key:
        # A state already exists for this key: merge the new one into it.
        merged_state, _, _ = states_by_key[function_key].merge(state)
        states_by_key[function_key] = merged_state
    else:
        states_by_key[function_key] = state
Save the initial state of a function, and merge it with existing ones if there are any. :param FunctionKey function_key: The key to this function. :param int function_address: Address of the function. :param SimState state: Initial state of the function. :return: None
def get_urlclass_from(scheme, assume_local_file=False):
    """Return checker class for given URL scheme.

    If the scheme cannot be matched and assume_local_file is True,
    assume a local file.
    """
    # Explicitly supported schemes, dispatched by lookup table.
    known = {
        "http": httpurl.HttpUrl,
        "https": httpurl.HttpUrl,
        "ftp": ftpurl.FtpUrl,
        "file": fileurl.FileUrl,
        "telnet": telneturl.TelnetUrl,
        "mailto": mailtourl.MailtoUrl,
        "nntp": nntpurl.NntpUrl,
        "news": nntpurl.NntpUrl,
        "snews": nntpurl.NntpUrl,
        "dns": dnsurl.DnsUrl,
        "itms-services": itmsservicesurl.ItmsServicesUrl,
    }
    klass = known.get(scheme)
    if klass is None:
        if scheme and unknownurl.is_unknown_scheme(scheme):
            klass = unknownurl.UnknownUrl
        elif assume_local_file:
            klass = fileurl.FileUrl
        else:
            klass = unknownurl.UnknownUrl
    return klass
Return checker class for given URL scheme. If the scheme cannot be matched and assume_local_file is True, assume a local file.
def set_local_address(ams_netid):
    # type: (Union[str, SAmsNetId]) -> None
    """Set the local NetID (**Linux only**).

    :param str: new AmsNetID
    :rtype: None

    **Usage:**

        >>> import pyads
        >>> pyads.open_port()
        >>> pyads.set_local_address('0.0.0.0.1.1')

    """
    # Accept either a dotted string or an already-built SAmsNetId struct.
    if isinstance(ams_netid, str):
        netid_struct = _parse_ams_netid(ams_netid)
    else:
        netid_struct = ams_netid

    assert isinstance(netid_struct, SAmsNetId)

    if not linux:
        raise ADSError(
            text="SetLocalAddress is not supported for Windows clients."
        )
    return adsSetLocalAddress(netid_struct)
Set the local NetID (**Linux only**). :param str: new AmsNetID :rtype: None **Usage:** >>> import pyads >>> pyads.open_port() >>> pyads.set_local_address('0.0.0.0.1.1')
def add_property(self, prop, objects=()):
    """Add a property to the definition and add ``objects`` as related.

    :param prop: the property to register in ``self._properties``.
    :param objects: iterable of objects to relate to ``prop``; each
        ``(object, prop)`` pair is added to ``self._pairs``.

    NOTE(review): with the default ``objects=()`` this executes
    ``self._objects |= ()``; a plain builtin ``set`` raises TypeError
    for ``|=`` with a tuple, so ``_objects`` is presumably a custom
    set-like type that accepts arbitrary iterables — confirm.
    """
    self._properties.add(prop)
    self._objects |= objects
    self._pairs.update((o, prop) for o in objects)
Add a property to the definition and add ``objects`` as related.
def pathparts(self):
    """A list of the parts of the path, with the root node returning
    an empty list.
    """
    # The root has no usable parent (or name); any AttributeError along
    # the way means we have reached the top of the tree.
    try:
        parts = self.parent.pathparts()
        parts.append(self.name)
    except AttributeError:
        return []
    return parts
A list of the parts of the path, with the root node returning an empty list.
def read_message_handler(stream):
    """
    Send message to user if the opponent has read the message.

    Runs forever: pulls packets from *stream*, marks the referenced
    message as read, and notifies the opponent's websocket (if any)
    with an ``opponent-read-message`` event.
    """
    while True:
        # Each packet must carry the reader's session, the opponent's
        # username and the id of the message being acknowledged.
        packet = yield from stream.get()
        session_id = packet.get('session_key')
        user_opponent = packet.get('username')
        message_id = packet.get('message_id')
        if session_id and user_opponent and message_id is not None:
            user_owner = get_user_from_session(session_id)
            if user_owner:
                message = models.Message.objects.filter(id=message_id).first()
                if message:
                    # Persist the read flag before notifying anyone.
                    message.read = True
                    message.save()
                    logger.debug('Message ' + str(message_id) + ' is now read')
                    # Notify the opponent only if they have an open socket.
                    opponent_socket = ws_connections.get(
                        (user_opponent, user_owner.username))
                    if opponent_socket:
                        yield from target_message(opponent_socket,
                                                  {'type': 'opponent-read-message',
                                                   'username': user_opponent,
                                                   'message_id': message_id})
                else:
                    pass  # message not found
            else:
                pass  # invalid session id
        else:
            pass  # malformed packet: required keys missing — silently ignored
Send message to user if the opponent has read the message
def predict(self, temp_type):
    """
    Transpile the predict method.

    Parameters
    ----------
    :param temp_type : string
        The kind of export type (embedded, separated, exported).

    Returns
    -------
    :return : string
        The transpiled predict method as string; ``None`` for kinds
        without a dedicated template (e.g. 'separated').
    """
    if temp_type == 'exported':
        template = self.temp('exported.class')
        return template.format(class_name=self.class_name,
                               method_name=self.method_name,
                               n_features=self.n_features)
    if temp_type == 'embedded':
        return self.create_class_embedded(self.create_method_embedded())
Transpile the predict method. Parameters ---------- :param temp_type : string The kind of export type (embedded, separated, exported). Returns ------- :return : string The transpiled predict method as string.
def risk(self, domain, **kwargs):
    """Returns back the risk score for a given domain"""
    # Delegate to the shared results helper; the score lives under the
    # 'components' key of the /v1/risk response.
    return self._results(
        'risk',
        '/v1/risk',
        items_path=('components', ),
        domain=domain,
        cls=Reputation,
        **kwargs
    )
Returns back the risk score for a given domain
def _generate_non_lastnames_variations(non_lastnames): """Generate variations for all non-lastnames. E.g. For 'John Richard', this method generates: [ 'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R', ] """ if not non_lastnames: return [] # Generate name transformations in place for all non lastnames. Transformations include: # 1. Drop non last name, 2. use initial, 3. use full non lastname for idx, non_lastname in enumerate(non_lastnames): non_lastnames[idx] = (u'', non_lastname[0], non_lastname) # Generate the cartesian product of the transformed non lastnames and flatten them. return [ (u' '.join(var_elem for var_elem in variation if var_elem)).strip() for variation in product(*non_lastnames) ]
Generate variations for all non-lastnames. E.g. For 'John Richard', this method generates: [ 'John', 'J', 'Richard', 'R', 'John Richard', 'John R', 'J Richard', 'J R', ]
def api_auth(func):
    """
    If the user is not logged in, this decorator looks for basic HTTP
    auth data in the request header.

    Raises Http404 when authentication fails.
    """
    @wraps(func)
    def _decorator(request, *args, **kwargs):
        # Guard clause: bail out with a 404 unless the request authenticates.
        if not APIAuthentication(request).authenticate():
            raise Http404
        return func(request, *args, **kwargs)
    return _decorator
If the user is not logged in, this decorator looks for basic HTTP auth data in the request header.
def exception_info(self, timeout=None):
    """Return a tuple of (exception, traceback) raised by the call that the
    future represents.

    Args:
        timeout: The number of seconds to wait for the exception if the
            future isn't done. If None, then there is no limit on the wait
            time.

    Returns:
        The exception raised by the call that the future represents or None
        if the call completed without raising.

    Raises:
        CancelledError: If the future was cancelled.
        TimeoutError: If the future didn't finish executing before the given
            timeout.
    """
    def _finished():
        # Raises on cancellation; True once the call has finished.
        if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED):
            raise CancelledError()
        return self._state == FINISHED

    with self._condition:
        if _finished():
            return self._exception, self._traceback
        # Not done yet: block until notified or the timeout elapses,
        # then re-check the state once.
        self._condition.wait(timeout)
        if _finished():
            return self._exception, self._traceback
        raise TimeoutError()
Return a tuple of (exception, traceback) raised by the call that the future represents. Args: timeout: The number of seconds to wait for the exception if the future isn't done. If None, then there is no limit on the wait time. Returns: The exception raised by the call that the future represents or None if the call completed without raising. Raises: CancelledError: If the future was cancelled. TimeoutError: If the future didn't finish executing before the given timeout.
def insert(self, i, arg):
    r"""Insert whitespace, an unparsed argument string, or an argument object.

    :param int i: Index to insert argument into
    :param Arg arg: Argument to insert

    >>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]'])
    >>> arguments.insert(1, '[arg1]')
    >>> len(arguments)
    3
    >>> arguments
    [RArg('arg0'), OArg('arg1'), OArg('arg2')]
    >>> arguments.all
    ['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')]
    >>> arguments.insert(10, '[arg3]')
    >>> arguments[3]
    OArg('arg3')
    """
    # Coerce raw strings/whitespace into the proper Arg (or leave as-is).
    arg = self.__coerce(arg)

    # Only real Arg objects live in the list proper (`self`); `self.all`
    # additionally keeps non-Arg items such as whitespace.
    if isinstance(arg, Arg):
        super().insert(i, arg)

    if len(self) <= 1:
        # Nothing (or only this arg) in the list: position in `all` is
        # simply the end.
        self.all.append(arg)
    else:
        # Clamp an out-of-range index, then place the new item in `all`
        # right after the element that precedes position i in `self`.
        if i > len(self):
            i = len(self) - 1

        before = self[i - 1]
        index_before = self.all.index(before)
        self.all.insert(index_before + 1, arg)
r"""Insert whitespace, an unparsed argument string, or an argument object. :param int i: Index to insert argument into :param Arg arg: Argument to insert >>> arguments = TexArgs(['\n', RArg('arg0'), '[arg2]']) >>> arguments.insert(1, '[arg1]') >>> len(arguments) 3 >>> arguments [RArg('arg0'), OArg('arg1'), OArg('arg2')] >>> arguments.all ['\n', RArg('arg0'), OArg('arg1'), OArg('arg2')] >>> arguments.insert(10, '[arg3]') >>> arguments[3] OArg('arg3')
def _mkdirs(d): """ Make all directories up to d. No exception is raised if d exists. """ try: os.makedirs(d) except OSError as e: if e.errno != errno.EEXIST: raise
Make all directories up to d. No exception is raised if d exists.
def normalize(u):
    '''
    normalize(u) yields a vector with the same direction as u but unit
    length, or, if u has zero length, yields u. Norms are taken along
    axis 0, so a 2D input is normalized column-wise.
    '''
    u = np.asarray(u)
    norms = np.sqrt(np.sum(u ** 2, axis=0))
    zero = np.isclose(norms, 0)
    # Scale by 1/norm where the norm is nonzero, and by 0 otherwise
    # (which leaves zero-length vectors as all zeros, i.e. unchanged).
    scale = np.where(zero, 0.0, 1.0 / np.where(zero, 1.0, norms))
    return u * scale
normalize(u) yields a vector with the same direction as u but unit length, or, if u has zero length, yields u.
def fetch(self, minutes=values.unset, start_date=values.unset,
          end_date=values.unset, task_queue_sid=values.unset,
          task_queue_name=values.unset, friendly_name=values.unset,
          task_channel=values.unset):
    """
    Fetch a WorkersStatisticsInstance

    :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
    :param datetime start_date: Filter cumulative statistics by a start date.
    :param datetime end_date: Filter cumulative statistics by a end date.
    :param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode friendly_name: The friendly_name
    :param unicode task_channel: Filter cumulative statistics by TaskChannel.

    :returns: Fetched WorkersStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
    """
    # values.of() drops unset entries so only explicit filters are sent.
    query = values.of({
        'Minutes': minutes,
        'StartDate': serialize.iso8601_datetime(start_date),
        'EndDate': serialize.iso8601_datetime(end_date),
        'TaskQueueSid': task_queue_sid,
        'TaskQueueName': task_queue_name,
        'FriendlyName': friendly_name,
        'TaskChannel': task_channel,
    })

    payload = self._version.fetch('GET', self._uri, params=query)

    return WorkersStatisticsInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
    )
Fetch a WorkersStatisticsInstance :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past. :param datetime start_date: Filter cumulative statistics by a start date. :param datetime end_date: Filter cumulative statistics by a end date. :param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue :param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue :param unicode friendly_name: The friendly_name :param unicode task_channel: Filter cumulative statistics by TaskChannel. :returns: Fetched WorkersStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
def _create_and_add_parameters(params):
    '''
    Parses the configuration and creates Parameter instances, registering
    each on the current option.
    '''
    global _current_parameter
    # A single scalar value: one SimpleParameter and we are done.
    if _is_simple_type(params):
        _current_parameter = SimpleParameter(params)
        _current_option.add_parameter(_current_parameter)
        return
    # Otherwise params must be a list of parameter specs.
    for spec in params:
        if _is_simple_type(spec):
            _current_parameter = SimpleParameter(spec)
        else:
            _current_parameter = TypedParameter()
            _parse_typed_parameter(spec)
        _current_option.add_parameter(_current_parameter)
Parses the configuration and creates Parameter instances.
def _load(self, scale=1.0):
    """Load the SLSTR relative spectral responses.

    Reads the 'wavelength' (multiplied by *scale*) and 'response'
    variables from the requested band file and stores them in self.rsr.

    :param scale: multiplier applied to the wavelength values.
    """
    LOG.debug("File: %s", str(self.requested_band_filename))
    ncf = Dataset(self.requested_band_filename, 'r')
    try:
        wvl = ncf.variables['wavelength'][:] * scale
        resp = ncf.variables['response'][:]
    finally:
        # The previous implementation leaked the open netCDF handle;
        # always close it, even if a variable is missing.
        ncf.close()

    self.rsr = {'wavelength': wvl, 'response': resp}
Load the SLSTR relative spectral responses
def update(self, auth_payload=values.unset):
    """
    Update the ChallengeInstance

    :param unicode auth_payload: Optional payload to verify the Challenge

    :returns: Updated ChallengeInstance
    :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
    """
    # Thin pass-through to the context proxy, which performs the request.
    return self._proxy.update(auth_payload=auth_payload)
Update the ChallengeInstance :param unicode auth_payload: Optional payload to verify the Challenge :returns: Updated ChallengeInstance :rtype: twilio.rest.authy.v1.service.entity.factor.challenge.ChallengeInstance
def as_url(self):
    '''
    Reverse object converted to `web.URL`.

    If Reverse is bound to env:
    * try to build relative URL,
    * use current domain name, port and scheme as default
    '''
    # An empty-name scope means there is a default nested rule to finish.
    if '' in self._scope:
        return self._finalize().as_url
    if not self._is_endpoint:
        raise UrlBuildingError('Not an endpoint {}'.format(repr(self)))
    if self._ready:
        path, host = self._path, self._host
    else:
        # Missing arguments: calling self() fills defaults, then retry.
        return self().as_url

    # XXX there is a little mess with `domain` and `host` terms
    if ':' in host:
        domain, port = host.split(':')
    else:
        domain = host
        port = None

    if self._bound_env:
        request = self._bound_env.request
        # Default port implied by the request scheme (as a string).
        scheme_port = {'http': '80',
                       'https': '443'}.get(request.scheme, '80')
        # Domain to compare with the result of build.
        # If both values are equal, domain part can be hidden from result.
        # Take it from route_state, not from env.request, because
        # route_state contains domain values with aliased replaced by their
        # primary value
        primary_domain = self._bound_env._route_state.primary_domain
        host_split = request.host.split(':')
        request_domain = host_split[0]
        request_port = host_split[1] if len(host_split) > 1 else scheme_port
        port = port or request_port
        # Hide the port when it is the scheme default; show the host only
        # when it differs from the primary domain or port of the request.
        return URL(path, host=domain or request_domain,
                   port=port if port != scheme_port else None,
                   scheme=request.scheme,
                   fragment=self._fragment,
                   show_host=host and (domain != primary_domain \
                                       or port != request_port))
    # Unbound: emit an absolute URL with whatever host/port we parsed.
    return URL(path, host=domain, port=port,
               fragment=self._fragment, show_host=True)
Reverse object converted to `web.URL`. If Reverse is bound to env: * try to build relative URL, * use current domain name, port and scheme as default
def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
         random_shuffler=None, shuffle=False, sort_within_batch=False):
    """Sort within buckets, then batch, then shuffle batches.

    Partitions data into chunks of size 100*batch_size, sorts examples within
    each chunk using sort_key, then batch these examples and shuffle the
    batches.

    :param random_shuffler: callable taking a list of batches and returning
        them in shuffled order. Defaults to a wrapper around
        ``random.shuffle`` that returns the shuffled list.
    """
    if random_shuffler is None:
        # BUG FIX: the previous default was ``random.shuffle`` itself, which
        # shuffles in place and returns None -- so iterating its result
        # raised TypeError whenever shuffle=True was used without an
        # explicit shuffler. Wrap it so the default returns the list.
        def random_shuffler(batches):
            random.shuffle(batches)
            return batches
    for p in batch(data, batch_size * 100, batch_size_fn):
        p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) \
            if sort_within_batch \
            else batch(p, batch_size, batch_size_fn)
        if shuffle:
            for b in random_shuffler(list(p_batch)):
                yield b
        else:
            for b in list(p_batch):
                yield b
Sort within buckets, then batch, then shuffle batches. Partitions data into chunks of size 100*batch_size, sorts examples within each chunk using sort_key, then batch these examples and shuffle the batches.
def ellipsemode(self, mode=None):
    '''
    Set the current ellipse drawing mode.

    :param mode: CORNER, CENTER, CORNERS
    :return: ellipsemode if mode is None or valid.
    '''
    # NOTE(review): ``self.ellipsemode = mode`` creates an instance
    # attribute that shadows this method; after the first successful
    # call, ``self.ellipsemode`` is the stored value and is no longer
    # callable on that instance. Likewise, reading ``self.ellipsemode``
    # before any value was set returns the bound method itself. Confirm
    # whether a descriptor elsewhere makes this intentional.
    if mode in (self.CORNER, self.CENTER, self.CORNERS):
        self.ellipsemode = mode
        return self.ellipsemode
    elif mode is None:
        return self.ellipsemode
    else:
        raise ShoebotError(_("ellipsemode: invalid input"))
Set the current ellipse drawing mode. :param mode: CORNER, CENTER, CORNERS :return: ellipsemode if mode is None or valid.
def read_data(self, variable_instance):
    """Read a value from the device for the given variable.

    Returns the parsed reading, or None when no instrument is connected
    or the variable's device_property is not recognized.
    """
    if self.inst is None:
        return

    # Map each supported device property to its raw query command.
    commands = {
        'PRESENT_VALUE': '?U6P0',
        'PRESENT_VALUE_DCV': '?U6P0F1T3',
        'PRESENT_VALUE_ACV': '?U6P0F2T3',
        'PRESENT_VALUE_DCV+ACV': '?U6P0F3T3',
        'PRESENT_VALUE_2W_OHM': '?U6P0F4T3',
        'PRESENT_VALUE_4W_OHM': '?U6P0F5T3',
    }
    prop = variable_instance.visavariable.device_property.upper()
    command = commands.get(prop)
    if command is None:
        return None
    return self.parse_value(self.inst.query(command))
read values from the device
def load(self, path=None): """ Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded. """ # <word form="great" wordnet_id="a-01123879" pos="JJ" polarity="1.0" subjectivity="1.0" intensity="1.0" /> # <word form="damnmit" polarity="-0.75" subjectivity="1.0" label="profanity" /> if not path: path = self._path if not os.path.exists(path): return words, synsets, labels = {}, {}, {} xml = cElementTree.parse(path) xml = xml.getroot() for w in xml.findall("word"): if self._confidence is None \ or self._confidence <= float(w.attrib.get("confidence", 0.0)): w, pos, p, s, i, label, synset = ( w.attrib.get("form"), w.attrib.get("pos"), w.attrib.get("polarity", 0.0), w.attrib.get("subjectivity", 0.0), w.attrib.get("intensity", 1.0), w.attrib.get("label"), w.attrib.get(self._synset) # wordnet_id, cornetto_id, ... ) psi = (float(p), float(s), float(i)) if w: words.setdefault(w, {}).setdefault(pos, []).append(psi) if w and label: labels[w] = label if synset: synsets.setdefault(synset, []).append(psi) self._language = xml.attrib.get("language", self._language) # Average scores of all word senses per part-of-speech tag. for w in words: words[w] = dict((pos, [avg(each) for each in zip(*psi)]) for pos, psi in words[w].items()) # Average scores of all part-of-speech tags. for w, pos in list(words.items()): words[w][None] = [avg(each) for each in zip(*pos.values())] # Average scores of all synonyms per synset. for id, psi in synsets.items(): synsets[id] = [avg(each) for each in zip(*psi)] dict.update(self, words) dict.update(self.labeler, labels) dict.update(self._synsets, synsets)
Loads the XML-file (with sentiment annotations) from the given path. By default, Sentiment.path is lazily loaded.
def add_subnet(self, subnet_type, quantity=None, vlan_id=None, version=4,
               test_order=False):
    """Orders a new subnet

    :param str subnet_type: Type of subnet to add: private, public, global
    :param int quantity: Number of IPs in the subnet
    :param int vlan_id: VLAN id for the subnet to be placed into
    :param int version: 4 for IPv4, 6 for IPv6
    :param bool test_order: If true, this will only verify the order.
    """
    package = self.client['Product_Package']
    # Defaults correspond to a private IPv4 subnet.
    category = 'sov_sec_ip_addresses_priv'
    desc = ''
    if version == 4:
        if subnet_type == 'global':
            # Global IPs are ordered with quantity 0.
            quantity = 0
            category = 'global_ipv4'
        elif subnet_type == 'public':
            category = 'sov_sec_ip_addresses_pub'
    else:
        category = 'static_ipv6_addresses'
        if subnet_type == 'global':
            quantity = 0
            category = 'global_ipv6'
            desc = 'Global'
        elif subnet_type == 'public':
            desc = 'Portable'

    # In the API, every non-server item is contained within package ID 0.
    # This means that we need to get all of the items and loop through them
    # looking for the items we need based upon the category, quantity, and
    # item description.
    price_id = None
    quantity_str = str(quantity)
    for item in package.getItems(id=0, mask='itemCategory'):
        category_code = utils.lookup(item, 'itemCategory', 'categoryCode')
        if all([category_code == category,
                item.get('capacity') == quantity_str,
                version == 4 or (version == 6 and
                                 desc in item['description'])]):
            price_id = item['prices'][0]['id']
            break

    # NOTE(review): if no item matched, price_id stays None and the order
    # is still submitted with {'id': None} — presumably the server rejects
    # it; confirm whether an explicit error here would be preferable.
    order = {
        'packageId': 0,
        'prices': [{'id': price_id}],
        'quantity': 1,
        # This is necessary in order for the XML-RPC endpoint to select the
        # correct order container
        'complexType': 'SoftLayer_Container_Product_Order_Network_Subnet',
    }
    if subnet_type != 'global':
        order['endPointVlanId'] = vlan_id

    if test_order:
        return self.client['Product_Order'].verifyOrder(order)
    else:
        return self.client['Product_Order'].placeOrder(order)
Orders a new subnet :param str subnet_type: Type of subnet to add: private, public, global :param int quantity: Number of IPs in the subnet :param int vlan_id: VLAN id for the subnet to be placed into :param int version: 4 for IPv4, 6 for IPv6 :param bool test_order: If true, this will only verify the order.
def create_event_handler(event_type, handler):
    """Register a comm and return a serializable object with target name"""
    target_name = '{hash}_{event_type}'.format(hash=hash(handler),
                                               event_type=event_type)

    def handle_comm_opened(comm, msg):
        # Forward every incoming comm message to the user's handler and
        # send back whatever (truthy) value it returns.
        @comm.on_msg
        def _handle_msg(msg):
            event = json.loads(msg['content']['data'])
            reply = handler(event)
            if reply:
                comm.send(reply)

        comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name))

    # Register a new comm for this event handler (only inside IPython).
    ipython = get_ipython()
    if ipython:
        ipython.kernel.comm_manager.register_target(target_name,
                                                    handle_comm_opened)

    # Return a serialized object
    return target_name
Register a comm and return a serializable object with target name
def disconnect(self, connection):
    """The other side has asked us to disconnect.

    Closes the transport of the local protocol mapped to *connection*
    and returns an empty response dict.
    """
    self.getLocalProtocol(connection).transport.loseConnection()
    return {}
The other side has asked us to disconnect.
def camera_position(self, camera_location):
    """ Set camera position of all active render windows """
    if camera_location is None:
        return

    if isinstance(camera_location, str):
        # Named views dispatch to the corresponding view_* helper;
        # unknown names are silently ignored.
        views = {
            'xy': lambda: self.view_xy(),
            'xz': lambda: self.view_xz(),
            'yz': lambda: self.view_yz(),
            'yx': lambda: self.view_xy(True),
            'zx': lambda: self.view_xz(True),
            'zy': lambda: self.view_yz(True),
        }
        action = views.get(camera_location.lower())
        if action is not None:
            action()
        return

    if isinstance(camera_location[0], (int, float)):
        # A flat numeric sequence is treated as a view vector.
        return self.view_vector(camera_location)

    # Otherwise everything is set explicitly:
    # (position, focal point, view-up).
    self.camera.SetPosition(camera_location[0])
    self.camera.SetFocalPoint(camera_location[1])
    self.camera.SetViewUp(camera_location[2])

    # reset clipping range
    self.ResetCameraClippingRange()
    self.camera_set = True
Set camera position of all active render windows
def get_unit_id(unit_name):
    """Return the unit id of the unit named *unit_name*.

    The name is lowercased and matched case-insensitively against the
    LDAP directory; the id is read from the 'uniqueIdentifier' attribute
    of the entry whose DN is rooted at the requested unit.

    :param str unit_name: name (cn) of the unit.
    :raises EpflLdapException: if the unit cannot be found (or the LDAP
        response is malformed).
    :return: the unit's 'uniqueIdentifier' value.
    """
    unit_name = unit_name.lower()
    attribute = 'uniqueIdentifier'
    response = LDAP_search(
        pattern_search='(cn={})'.format(unit_name),
        attribute=attribute
    )

    try:
        for element in response:
            # Only accept entries whose DN is rooted at the requested
            # unit, i.e. 'ou=<unit_name>,...'.
            if 'dn' in element and element['dn'].startswith('ou={},'.format(unit_name)):
                # Return the first match: the original scanned the whole
                # response but kept only one value anyway.
                return element['attributes'][attribute][0]
    except (TypeError, KeyError, IndexError):
        # Malformed response: map to the single "not found" error below,
        # matching the original behaviour of converting any failure into
        # EpflLdapException (previously via a broad 'except Exception'
        # plus a redundant re-raise in 'finally').
        pass
    raise EpflLdapException("The unit named '{}' was not found".format(unit_name))
Return the unit id of the unit 'unit_name'
def clone(self, snapshot_name_or_id=None, mode=library.CloneMode.machine_state, options=None, name=None, uuid=None, groups=None, basefolder='', register=True):
    """Clone this Machine

    Options:
        snapshot_name_or_id - value can be either ISnapshot, name, or id
        mode - set the CloneMode value
        options - define the CloneOptions options
        name - define a name of the new VM
        uuid - set the uuid of the new VM
        groups - specify which groups the new VM will exist under
        basefolder - specify which folder to set the VM up under
        register - register this VM with the server

    Note: Default values create a linked clone from the current machine
    state

    Return a IMachine object for the newly cloned vm
    """
    # Mutable defaults are created per-call; default is a linked clone.
    if options is None:
        options = [library.CloneOptions.link]
    if groups is None:
        groups = []

    vbox = virtualbox.VirtualBox()

    # Resolve the source machine: either a given snapshot (object, name
    # or id), or — for a linked clone — the current snapshot's machine.
    if snapshot_name_or_id is not None:
        if isinstance(snapshot_name_or_id, basestring):
            snapshot = self.find_snapshot(snapshot_name_or_id)
        else:
            snapshot = snapshot_name_or_id
        vm = snapshot.machine
    else:
        # linked clone can only be created from a snapshot...
        # try grabbing the current_snapshot
        if library.CloneOptions.link in options:
            vm = self.current_snapshot.machine
        else:
            vm = self

    if name is None:
        name = "%s Clone" % vm.name

    # Build the settings file
    create_flags = ''
    if uuid is not None:
        create_flags = "UUID=%s" % uuid
    primary_group = ''
    if groups:
        primary_group = groups[0]

    # Make sure this settings file does not already exist by appending
    # " (i)" to the requested name until a free path is found.
    # NOTE(review): if all 999 candidates are taken, the last candidate
    # is used anyway — confirm that is acceptable upstream.
    test_name = name
    settings_file = ''
    for i in range(1, 1000):
        settings_file = vbox.compose_machine_filename(test_name, primary_group, create_flags, basefolder)
        if not os.path.exists(os.path.dirname(settings_file)):
            break
        test_name = "%s (%s)" % (name, i)
    name = test_name

    # Create the new machine and clone it!
    vm_clone = vbox.create_machine(settings_file, name, groups, '', create_flags)
    progress = vm.clone_to(vm_clone, mode, options)
    progress.wait_for_completion(-1)

    if register:
        vbox.register_machine(vm_clone)
    return vm_clone
Clone this Machine Options: snapshot_name_or_id - value can be either ISnapshot, name, or id mode - set the CloneMode value options - define the CloneOptions options name - define a name of the new VM uuid - set the uuid of the new VM groups - specify which groups the new VM will exist under basefolder - specify which folder to set the VM up under register - register this VM with the server Note: Default values create a linked clone from the current machine state Return a IMachine object for the newly cloned vm
def is_stop(self):
    """Return whether either of the stop-processing flags has been set.

    As a side effect, drains the processed-coordinator queue whenever it
    is non-empty.
    """
    if self._processed_coordinators:
        self.free_processed_queue()
    stop_requested = self._cancel_called or self._processing_stop
    return stop_requested
has either of the stop processing flags been set
def get_transition(self,  # suppress(too-many-arguments)
                   line,
                   line_index,
                   column,
                   is_escaped,
                   comment_system_transitions,
                   eof=False):
    """Return the parser transition from InTextParser.

    Queries the comment system for a state change at the current
    position.  When one occurs, hands off to the parser for the new
    state along with the number of columns to skip; otherwise stays on
    this parser and advances a single column.
    """
    next_parser_for = {
        STATE_IN_COMMENT: InCommentParser,
        STATE_IN_QUOTE: InQuoteParser
    }
    state, start_state_from, waiting_until = comment_system_transitions.from_text(
        line,
        line_index,
        column,
        is_escaped
    )

    if state == STATE_IN_TEXT:
        # No state change: advance one column on this parser.
        return (self, 1, None)

    # Move ahead to where the new state begins and hand off.
    return (next_parser_for[state](start_state_from, waiting_until),
            start_state_from[1] - column,
            None)
Get transition from InTextParser.
def write_summary(all_procs, summary_file):
    """Write a tab-delimited summary of all run processes to *summary_file*.

    Does nothing when *summary_file* is falsy.  The file is closed on
    exit via its context manager.
    """
    if not summary_file:
        return
    header = ('directory', 'command', 'start_time', 'end_time',
              'run_time', 'exit_status', 'result')
    with summary_file:
        tsv = csv.writer(summary_file, delimiter='\t', lineterminator='\n')
        tsv.writerow(header)
        for proc in all_procs:
            tsv.writerow((proc.working_dir, ' '.join(proc.command),
                          proc.start_time, proc.end_time,
                          proc.running_time, proc.return_code, proc.status))
Write a summary of all run processes to summary_file in tab-delimited format.
def check_theta(self):
    """Validate the computed theta against the copula specification.

    Raises ValueError when theta lies outside the copula's valid
    interval or is one of its explicitly invalid values.
    """
    lower, upper = self.theta_interval
    within_range = lower <= self.theta <= upper
    if not within_range or self.theta in self.invalid_thetas:
        raise ValueError(
            'The computed theta value {} is out of limits for the given {} copula.'.format(
                self.theta, self.copula_type.name))
Validate the computed theta against the copula specification. This method is used to assert the computed theta is in the valid range for the copula.
def stacked_node_layout(self,EdgeAttribute=None,network=None,NodeAttribute=None,\
    nodeList=None,x_position=None,y_start_position=None,verbose=None):
    """
    Execute the Stacked Node Layout on a network.

    :param EdgeAttribute (string, optional): The name of the edge column
        containing numeric values that will be used as weights in the
        layout algorithm. Only columns containing numeric values are shown.
    :param network (string, optional): Specifies a network by name, or by
        SUID if the prefix SUID: is used. The keyword CURRENT, or a blank
        value can also be used to specify the current network.
    :param NodeAttribute (string, optional): The name of the node column
        containing numeric values that will be used as weights in the
        layout algorithm. Only columns containing numeric values are shown.
    :param nodeList (string, optional): Specifies a list of nodes. The
        keywords all, selected, or unselected can be used to specify nodes
        by their selection state. The pattern COLUMN:VALUE sets this
        parameter to any rows that contain the specified column value; if
        the COLUMN prefix is not used, the NAME column is matched by
        default. A list of COLUMN:VALUE pairs of the format
        COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple
        values.
    :param x_position (string, optional): X start position, in numeric value.
    :param y_start_position (string, optional): Y start position, in
        numeric value.
    :param verbose: print more
    """
    # Resolve the network argument (name/SUID/CURRENT) to a usable id.
    network=check_network(self,network,verbose=verbose)
    # Build the request parameters, dropping unset (None) values.
    PARAMS=set_param(['EdgeAttribute','network','NodeAttribute','nodeList',\
    'x_position','y_start_position'],[EdgeAttribute,network,NodeAttribute,\
    nodeList,x_position,y_start_position])
    response=api(url=self.__url+"/stacked-node-layout", PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
Execute the Stacked Node Layout on a network. :param EdgeAttribute (string, optional): The name of the edge column contai ning numeric values that will be used as weights in the layout algor ithm. Only columns containing numeric values are shown :param network (string, optional): Specifies a network by name, or by SUID if the prefix SUID: is used. The keyword CURRENT, or a blank value c an also be used to specify the current network. :param NodeAttribute (string, optional): The name of the node column contai ning numeric values that will be used as weights in the layout algor ithm. Only columns containing numeric values are shown :param nodeList (string, optional): Specifies a list of nodes. The keywords all, selected, or unselected can be used to specify nodes by their selection state. The pattern COLUMN:VALUE sets this parameter to any rows that contain the specified column value; if the COLUMN prefix is not used, the NAME column is matched by default. A list of COLUMN :VALUE pairs of the format COLUMN1:VALUE1,COLUMN2:VALUE2,... can be used to match multiple values. :param x_position (string, optional): X start position, in numeric value :param y_start_position (string, optional): Y start position, in numeric va lue
def create_api_stage(restApiId, stageName, deploymentId, description='',
                     cacheClusterEnabled=False, cacheClusterSize='0.5', variables=None,
                     region=None, key=None, keyid=None, profile=None):
    '''
    Creates a new API stage for a given restApiId and deploymentId.

    Returns ``{'created': True, 'stage': ...}`` on success, or
    ``{'created': False, 'error': ...}`` when the AWS call fails.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\
        description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'

    '''
    try:
        # Avoid a mutable default argument: variables defaults to {}.
        variables = dict() if variables is None else variables
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        stage = conn.create_stage(restApiId=restApiId, stageName=stageName, deploymentId=deploymentId,
                                  description=description, cacheClusterEnabled=cacheClusterEnabled,
                                  cacheClusterSize=cacheClusterSize, variables=variables)
        # Datetimes are stringified so the result is serializable.
        return {'created': True, 'stage': _convert_datetime_str(stage)}
    except ClientError as e:
        return {'created': False, 'error': __utils__['boto3.get_error'](e)}
Creates a new API stage for a given restApiId and deploymentId. CLI Example: .. code-block:: bash salt myminion boto_apigateway.create_api_stage restApiId stagename deploymentId \\ description='' cacheClusterEnabled=True|False cacheClusterSize='0.5' variables='{"name": "value"}'
def get_all_service_user_objects(self, include_machine = False):
    """
    Fetches all service user objects from the AD and yields MSADUser
    objects.  A service user is a user with the SPN
    (servicePrincipalName) attribute set.

    :param include_machine: when True, machine accounts (sAMAccountName
        ending in '$') are included in the results.
    :return: generator of MSADUser objects.
    """
    logger.debug('Polling AD for all user objects, machine accounts included: %s'% include_machine)

    if include_machine == True:
        ldap_filter = r'(servicePrincipalName=*)'
    else:
        # Exclude machine accounts (sAMAccountName ending in '$').
        # NOTE(review): the spaces in '(sAMAccountName = *$)' are kept
        # verbatim — confirm the target LDAP server tolerates them
        # before touching this filter.
        ldap_filter = r'(&(servicePrincipalName=*)(!(sAMAccountName = *$)))'

    attributes = MSADUser.ATTRS
    for entry in self.pagedsearch(ldap_filter, attributes):
        # TODO: return ldapuser object
        yield MSADUser.from_ldap(entry, self._ldapinfo)
    logger.debug('Finished polling for entries!')
Fetches all service user objects from the AD, and returns MSADUser object. Service user refers to a user with the SPN (servicePrincipalName) attribute set
def _extractall(self, path=".", members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    import copy
    import operator
    from tarfile import ExtractError
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode (448 == 0o700) so we
            # can always write into them; the real metadata is applied
            # afterwards, once the contents exist.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448
        self.extract(tarinfo, path)
    # Reverse-sort directories (deepest first) so metadata is applied
    # to children before their parents.
    if sys.version_info < (2, 4):
        # Python < 2.4: list.sort() has no key/reverse arguments.
        def sorter(dir1, dir2):
            return cmp(dir1.name, dir2.name)
        directories.sort(sorter)
        directories.reverse()
    else:
        directories.sort(key=operator.attrgetter('name'), reverse=True)
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError:
            e = sys.exc_info()[1]
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
Extract all members from the archive to the current working directory and set owner, modification time and permissions on directories afterwards. `path' specifies a different directory to extract to. `members' is optional and must be a subset of the list returned by getmembers().
def run_prepare(*data):
    """
    Run seqcluster prepare to merge all samples into one file.

    Writes ``seqs.ma`` (count matrix) and ``seqs.fastq`` (collapsed
    reads) under <work_dir>/seqcluster/prepare, then records both paths
    on every sample under 'seqcluster_prepare_ma' /
    'seqcluster_prepare_fastq'.  Skips regeneration if seqs.ma exists.
    """
    out_dir = os.path.join(dd.get_work_dir(data[0][0]), "seqcluster", "prepare")
    out_dir = os.path.abspath(safe_makedir(out_dir))
    # NOTE(review): 'prepare_dir' is computed but never used below —
    # confirm before removing.
    prepare_dir = os.path.join(out_dir, "prepare")
    tools = dd.get_expression_caller(data[0][0])
    if len(tools) == 0:
        logger.info("You didn't specify any other expression caller tool."
                    "You can add to the YAML file:"
                    "expression_caller:[trna, seqcluster, mirdeep2]")
    fn = []
    for sample in data:
        name = sample[0]["rgnames"]['sample']
        fn.append("%s\t%s" % (sample[0]['collapse'], name))
    # Mimic the seqcluster CLI namespace: min count 2, lengths 17-40.
    args = namedtuple('args', 'debug print_debug minc minl maxl out')
    args = args(False, False, 2, 17, 40, out_dir)
    ma_out = op.join(out_dir, "seqs.ma")
    seq_out = op.join(out_dir, "seqs.fastq")
    # A sequence must appear in at least ~10% of the samples (min 1).
    min_shared = max(int(len(fn) / 10.0), 1)
    if not file_exists(ma_out):
        seq_l, sample_l = prepare._read_fastq_files(fn, args)
        with file_transaction(ma_out) as ma_tx:
            with open(ma_tx, 'w') as ma_handle:
                with open(seq_out, 'w') as seq_handle:
                    logger.info("Prepare seqs.fastq with -minl 17 -maxl 40 -minc 2 --min_shared 0.1")
                    prepare._create_matrix_uniq_seq(sample_l, seq_l, ma_handle, seq_handle, min_shared)
    for sample in data:
        sample[0]["seqcluster_prepare_ma"] = ma_out
        sample[0]["seqcluster_prepare_fastq"] = seq_out
    return data
Run seqcluster prepare to merge all samples in one file
def _set_properties(self):
    """Set up window properties: icon, minimum-size flag, and safe mode.

    Called during frame initialisation; posts the safe-mode-exit event
    so the application starts outside of safe mode.
    """
    self.set_icon(icons["PyspreadLogo"])

    # Without minimum size, initial size is minimum size in wxGTK
    self.minSizeSet = False

    # Leave save mode
    post_command_event(self, self.SafeModeExitMsg)
Setup title, icon, size, scale, statusbar, main grid
def _validate_checksum(self): """Given a mnemonic word string, confirm seed checksum (last word) matches the computed checksum. :rtype: bool """ phrase = self.phrase.split(" ") if self.word_list.get_checksum(self.phrase) == phrase[-1]: return True raise ValueError("Invalid checksum")
Given a mnemonic word string, confirm seed checksum (last word) matches the computed checksum. :rtype: bool
def deleteAllStyles(self, verbose=None):
    """
    Deletes all visual styles except for the default style.

    :param verbose: print more

    :returns: default: successful operation
    """
    # NOTE(review): the triple-underscore attribute '___url' is kept
    # verbatim — it matches the naming used by this client; confirm it
    # is intentional before renaming.
    response=api(url=self.___url+'styles', method="DELETE", verbose=verbose)
    return response
Deletes all visual styles except for the default style :param verbose: print more :returns: default: successful operation
def _setuie(self, i): """Initialise bitstring with unsigned interleaved exponential-Golomb code for integer i. Raises CreationError if i < 0. """ if i < 0: raise CreationError("Cannot use negative initialiser for unsigned " "interleaved exponential-Golomb.") self._setbin_unsafe('1' if i == 0 else '0' + '0'.join(bin(i + 1)[3:]) + '1')
Initialise bitstring with unsigned interleaved exponential-Golomb code for integer i. Raises CreationError if i < 0.
def is_done(self):
    """Return True when this pump has nothing left to do.

    The read side is done once it hits EOF (or never had
    wait_for_output set); the write side is done when the target
    stream reports no pending bytes via ``needs_write`` (streams
    without that method are assumed to have none).
    """
    read_done = self.eof if self.wait_for_output else True
    write_pending = (hasattr(self.to_stream, 'needs_write')
                     and self.to_stream.needs_write())
    return read_done and not write_pending
Returns True if the read stream is done (either it's returned EOF or the pump doesn't have wait_for_output set), and the write side does not have pending bytes to send.
def POST(self):
    """Add a new todo entry from the submitted form.

    Re-renders the index page with validation errors when the form is
    invalid; otherwise stores the new todo and redirects to '/'.
    """
    form = self.form()
    if not form.validates():
        # Validation failed: show the list again with the error form.
        todos = model.get_todos()
        return render.index(todos, form)
    model.new_todo(form.d.title)
    # Redirect-after-POST to avoid duplicate submissions on refresh.
    raise web.seeother('/')
Add new entry
def upgrade():
    """Upgrade database: create the 'pidrelations_pidrelation' table.

    The table stores typed, optionally ordered parent/child links
    between persistent identifiers.
    """
    op.create_table(
        'pidrelations_pidrelation',
        sa.Column('created', sa.DateTime(), nullable=False),
        sa.Column('updated', sa.DateTime(), nullable=False),
        sa.Column('parent_id', sa.Integer(), nullable=False),
        sa.Column('child_id', sa.Integer(), nullable=False),
        sa.Column('relation_type', sa.SmallInteger(), nullable=False),
        # 'index' orders children within a parent; NULL means unordered.
        sa.Column('index', sa.Integer(), nullable=True),
        # Changes/deletions of a PID cascade into its relations.
        sa.ForeignKeyConstraint(
            ['child_id'], ['pidstore_pid.id'],
            name=op.f('fk_pidrelations_pidrelation_child_id_pidstore_pid'),
            onupdate='CASCADE',
            ondelete='CASCADE'
        ),
        sa.ForeignKeyConstraint(
            ['parent_id'], ['pidstore_pid.id'],
            name=op.f('fk_pidrelations_pidrelation_parent_id_pidstore_pid'),
            onupdate='CASCADE',
            ondelete='CASCADE'),
        sa.PrimaryKeyConstraint(
            'parent_id', 'child_id',
            name=op.f('pk_pidrelations_pidrelation')
        )
    )
Upgrade database.
def parse(cls, value, record_bytes):
    """Parse the pointer label.

    Parameters
    ----------
    value
        Supported forms::

            ^PTR = nnn
            ^PTR = nnn <BYTES>
            ^PTR = "filename"
            ^PTR = ("filename")
            ^PTR = ("filename", nnn)
            ^PTR = ("filename", nnn <BYTES>)

    record_bytes
        Record multiplier value

    Returns
    -------
    Pointer object
    """
    if isinstance(value, six.string_types):
        # Bare filename: data starts at offset 0.
        return cls(value, 0)

    if isinstance(value, list):
        if len(value) not in (1, 2):
            raise ValueError('Unsupported pointer type')
        filename = value[0]
        if len(value) == 1:
            start = 0
        else:
            start = cls._parse_bytes(value[1], record_bytes)
        return cls(filename, start)

    # Plain numeric offset with no filename.
    return cls(None, cls._parse_bytes(value, record_bytes))
Parses the pointer label. Parameters ---------- pointer_data Supported values for `pointer_data` are:: ^PTR = nnn ^PTR = nnn <BYTES> ^PTR = "filename" ^PTR = ("filename") ^PTR = ("filename", nnn) ^PTR = ("filename", nnn <BYTES>) record_bytes Record multiplier value Returns ------- Pointer object
def make_key_url(self, key):
    """Build the request URL for *key*.

    :param key: key path as str or UTF-8 encoded bytes; a leading '/'
        is inserted when missing.
    :return: the full URL produced by ``self.make_url``.
    """
    # Accept bytes transparently.  isinstance replaces the original
    # 'type(key) is bytes' check (also covering bytes subclasses), and
    # plain concatenation replaces an unnecessary StringIO buffer.
    if isinstance(key, bytes):
        key = key.decode('utf-8')
    if not key.startswith(u'/'):
        key = u'/' + key
    return self.make_url(u'keys' + key)
Gets a URL for a key.
def add(self, properties):
    """
    Add a faked HBA resource.

    Parameters:

      properties (dict):
        Resource properties.

        Special handling and requirements for certain properties:

        * 'element-id' will be auto-generated with a unique value across
          all instances of this resource type, if not specified.
        * 'element-uri' will be auto-generated based upon the element ID,
          if not specified.
        * 'class' will be auto-generated to 'hba', if not specified.
        * 'adapter-port-uri' identifies the backing FCP port for this HBA
          and is required to be specified.
        * 'device-number' will be auto-generated with a unique value
          within the partition in the range 0x8000 to 0xFFFF, if not
          specified.

        This method also updates the 'hba-uris' property in the parent
        faked Partition resource, by adding the URI for the faked HBA
        resource.

    Returns:
      :class:`~zhmcclient_mock.FakedHba`: The faked HBA resource.

    Raises:
      :exc:`zhmcclient_mock.InputError`: Some issue with the input
        properties.
    """
    new_hba = super(FakedHbaManager, self).add(properties)

    partition = self.parent

    # Reflect the new HBA in the parent partition's 'hba-uris' list.
    assert 'hba-uris' in partition.properties
    partition.properties['hba-uris'].append(new_hba.uri)

    # Create a default device-number if not specified
    if 'device-number' not in new_hba.properties:
        devno = partition.devno_alloc()
        new_hba.properties['device-number'] = devno

    # Create a default wwpn if not specified
    if 'wwpn' not in new_hba.properties:
        wwpn = partition.wwpn_alloc()
        new_hba.properties['wwpn'] = wwpn
    return new_hba
Add a faked HBA resource. Parameters: properties (dict): Resource properties. Special handling and requirements for certain properties: * 'element-id' will be auto-generated with a unique value across all instances of this resource type, if not specified. * 'element-uri' will be auto-generated based upon the element ID, if not specified. * 'class' will be auto-generated to 'hba', if not specified. * 'adapter-port-uri' identifies the backing FCP port for this HBA and is required to be specified. * 'device-number' will be auto-generated with a unique value within the partition in the range 0x8000 to 0xFFFF, if not specified. This method also updates the 'hba-uris' property in the parent faked Partition resource, by adding the URI for the faked HBA resource. Returns: :class:`~zhmcclient_mock.FakedHba`: The faked HBA resource. Raises: :exc:`zhmcclient_mock.InputError`: Some issue with the input properties.
def stop(self):
    """Stop this process.

    Once stopped, this object should not — and cannot — be used again.

    :return: :py:attr:`~exitcode`.
    """
    # Ask the child to terminate, then release our bookkeeping.
    self.child.terminate()
    self._cleanup()
    exit_code = self.child.exitcode
    return exit_code
Stop this process. Once closed, it should not, and cannot be used again. :return: :py:attr:`~exitcode`.
def ping(self, params=None):
    """
    Returns True if the cluster is up, False otherwise.
    `<http://www.elastic.co/guide/>`_
    """
    try:
        result = self.transport.perform_request("HEAD", "/", params=params)
    except TransportError:
        return False
    return result
Returns True if the cluster is up, False otherwise. `<http://www.elastic.co/guide/>`_
def ntp_authentication_key_encryption_type_md5_type_md5(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config element for
    ``ntp authentication-key <keyid> md5 <md5>`` (Brocade NTP YANG
    model) and hands it to the callback.

    Required kwargs: 'keyid', 'md5'.  Optional kwarg 'callback'
    overrides ``self._callback``.
    """
    config = ET.Element("config")
    ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp")
    authentication_key = ET.SubElement(ntp, "authentication-key")
    keyid_key = ET.SubElement(authentication_key, "keyid")
    keyid_key.text = kwargs.pop('keyid')
    encryption_type = ET.SubElement(authentication_key, "encryption-type")
    md5_type = ET.SubElement(encryption_type, "md5-type")
    md5 = ET.SubElement(md5_type, "md5")
    md5.text = kwargs.pop('md5')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def parse(self):
    """Convert this line into a shape object.

    Runs the fixed parsing pipeline: composite handling, line
    splitting, coordinate conversion, metadata conversion, then shape
    construction.  Each step mutates self in place; the order is
    significant.
    """
    log.debug(self)
    self.parse_composite()
    self.split_line()
    self.convert_coordinates()
    self.convert_meta()
    self.make_shape()
    log.debug(self)
Convert line to shape object
def save_images(self):
    """Save selected images.

    This uses Astropy FITS package to save the outputs no matter
    what user chose to load the images.

    Output names get an optional user suffix plus (optionally) the
    channel name appended before the '.fits' extension.  Existing files
    are skipped unless the 'clobber' setting is True.
    """
    res_dict = self.treeview.get_selected()
    clobber = self.settings.get('clobber', False)
    self.treeview.clear_selection()  # Automatically disables Save button

    # If user gives empty string, no suffix.
    if self.suffix:
        sfx = '_' + self.suffix
    else:
        sfx = ''

    # Also include channel name in suffix. This is useful if user likes to
    # open the same image in multiple channels.
    if self.settings.get('include_chname', True):
        sfx += '_' + self.chname

    # Process each selected file. Each can have multiple edited extensions.
    for infile in res_dict:
        f_pfx = os.path.splitext(infile)[0]  # prefix
        f_ext = '.fits'  # Only FITS supported
        oname = f_pfx + sfx + f_ext
        outfile = os.path.join(self.outdir, oname)

        self.w.status.set_text(
            'Writing out {0} to {1} ...'.format(shorten_name(infile, 10),
                                                shorten_name(oname, 10)))
        self.logger.debug(
            'Writing out {0} to {1} ...'.format(infile, oname))

        if os.path.exists(outfile) and not clobber:
            self.logger.error('{0} already exists'.format(outfile))
            continue

        bnch = res_dict[infile]

        if bnch.path is None or not os.path.isfile(bnch.path):
            # No backing file on disk — presumably an in-memory mosaic;
            # write it out from scratch.  TODO confirm with _write_mosaic.
            self._write_mosaic(f_pfx, outfile)
        else:
            # Copy the original, then rewrite the edited extensions.
            shutil.copyfile(bnch.path, outfile)
            self._write_mef(f_pfx, bnch.extlist, outfile)

        self.logger.info('{0} written'.format(outfile))

    self.w.status.set_text('Saving done, see log')
Save selected images. This uses Astropy FITS package to save the outputs no matter what user chose to load the images.
def get_custom_value(self, field_name):
    """Get a value for a specified custom field on this object.

    field_name - Name of the custom field you want.

    A value row is created on first access (get_or_create), so this
    never raises for a known field.
    """
    field = self.get_custom_field(field_name)
    value_obj, _created = CustomFieldValue.objects.get_or_create(
        field=field, object_id=self.id)
    return value_obj.value
Get a value for a specified custom field field_name - Name of the custom field you want.
def contingency_table(y, z):
    """Compute (tp, tn, fp, fn) between two tensors of 0/1 values.

    Note: both inputs are rounded first, so values that do not round to
    exactly 0 or 1 contribute to neither bucket.
    """
    float_type = K.floatx()
    y = K.cast(K.round(y), float_type)
    z = K.cast(K.round(z), float_type)

    def overlap(mask_a, mask_b):
        # Count positions where both boolean masks are True.
        return K.sum(K.cast(mask_a, float_type) * K.cast(mask_b, float_type))

    one = K.ones_like(y)
    zero = K.zeros_like(y)
    y_pos = K.equal(y, one)
    y_neg = K.equal(y, zero)
    z_pos = K.equal(z, one)
    z_neg = K.equal(z, zero)

    return (overlap(y_pos, z_pos),
            overlap(y_neg, z_neg),
            overlap(y_neg, z_pos),
            overlap(y_pos, z_neg))
Note: if y and z are not rounded to 0 or 1, they are ignored
def add_variable(self, name):
    """Add a new variable (column) to the problem.

    The variable is assigned the next free column index, default bounds
    of (0, None), and an all-zero column in the constraint matrix.
    Raises ValueError when the name is already in use.
    """
    if name in self._variables:
        raise ValueError(
            "A variable named " + name + " already exists."
        )
    next_index = len(self._variables)
    self._variables[name] = next_index
    self.bounds[name] = (0, None)
    zero_column = np.zeros(shape=[len(self._constraints), 1])
    self._add_col_to_A(zero_column)
    self._reset_solution()
Add a variable to the problem
def matrix_to_marching_cubes(matrix, pitch, origin):
    """
    Convert an (n,m,p) matrix into a mesh, using marching_cubes.

    Parameters
    -----------
    matrix: (n,m,p) bool, voxel matrix
    pitch:  float, what pitch was the voxel matrix computed with
    origin: (3,) float, what is the origin of the voxel matrix

    Returns
    ----------
    mesh: Trimesh object, generated by meshing voxels using
          the marching cubes algorithm in skimage
    """
    from skimage import measure
    from .base import Trimesh

    # NOTE(review): np.bool is removed in modern NumPy releases — this
    # requires an older NumPy; confirm the supported version range.
    matrix = np.asanyarray(matrix, dtype=np.bool)
    rev_matrix = np.logical_not(matrix)  # Takes set about 0.
    # Add in padding so marching cubes can function properly with
    # voxels on edge of AABB
    pad_width = 1
    rev_matrix = np.pad(rev_matrix, pad_width=(pad_width), mode='constant', constant_values=(1))

    # pick between old and new API
    if hasattr(measure, 'marching_cubes_lewiner'):
        func = measure.marching_cubes_lewiner
    else:
        func = measure.marching_cubes

    # Run marching cubes.
    meshed = func(volume=rev_matrix, level=.5,  # it is a boolean voxel grid
                  spacing=(pitch, pitch, pitch))

    # allow results from either marching cubes function in skimage
    # binaries available for python 3.3 and 3.4 appear to use the classic
    # method
    if len(meshed) == 2:
        log.warning('using old marching cubes, may not be watertight!')
        vertices, faces = meshed
        normals = None
    elif len(meshed) == 4:
        vertices, faces, normals, vals = meshed

    # Return to the origin, add in the pad_width
    vertices = np.subtract(np.add(vertices, origin), pad_width * pitch)
    # create the mesh
    mesh = Trimesh(vertices=vertices, faces=faces, vertex_normals=normals)
    return mesh
Convert an (n,m,p) matrix into a mesh, using marching_cubes. Parameters ----------- matrix: (n,m,p) bool, voxel matrix pitch: float, what pitch was the voxel matrix computed with origin: (3,) float, what is the origin of the voxel matrix Returns ---------- mesh: Trimesh object, generated by meshing voxels using the marching cubes algorithm in skimage
def find(self, cell_designation, cell_filter=lambda x, c: 'c' in x and x['c'] == c):
    """Find the index of the first spike container matching *cell_designation*.

    cell_designation - value compared against container metadata.
    cell_filter - predicate called as cell_filter(meta, cell_designation);
        the default matches containers whose meta dict has key 'c' equal
        to the designation.

    Returns the index of the first match, or None when nothing matches
    (the original fell off the end and returned None implicitly; this is
    now explicit).
    """
    # Stop at the first hit instead of materializing the full index list
    # just to take its head, as the original did.
    for index, container in enumerate(self.spike_containers):
        if cell_filter(container.meta, cell_designation):
            return index
    return None
finds spike containers in a multi spike containers collection
def rosen_nesterov(self, x, rho=100):
    """Nesterov's smooth Chebyshev-Rosenbrock function.

    Needs an exponential number of steps in a non-increasing
    f-sequence from x_0 = (-1, 1, ..., 1).
    See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock
    Function".
    """
    head_term = 0.25 * (x[0] - 1) ** 2
    chain_penalty = rho * sum((x[1:] - 2 * x[:-1] ** 2 + 1) ** 2)
    return head_term + chain_penalty
needs exponential number of steps in a non-increasing f-sequence. x_0 = (-1,1,...,1) See Jarre (2011) "On Nesterov's Smooth Chebyshev-Rosenbrock Function"
def load(self, modules):
    """Load Python modules and check their usability.

    Each enabled module is imported and validated: it must expose a
    'properties' attribute and a callable 'get_instance'.  Valid pairs
    of (module, python_module) are appended to self.modules_assoc;
    failures are recorded in self.configuration_errors.  Disabled
    modules are only stored in self.modules.

    :param modules: list of the modules that must be loaded
    :return: None
    """
    self.modules_assoc = []
    for module in modules:
        if not module.enabled:
            logger.info("Module %s is declared but not enabled", module.name)
            # Store in our modules list but do not try to load
            # Probably someone else will load this module later...
            self.modules[module.uuid] = module
            continue
        logger.info("Importing Python module '%s' for %s...", module.python_name, module.name)
        try:
            python_module = importlib.import_module(module.python_name)

            # Check existing module properties
            # Todo: check all mandatory properties
            if not hasattr(python_module, 'properties'):  # pragma: no cover
                self.configuration_errors.append("Module %s is missing a 'properties' "
                                                 "dictionary" % module.python_name)
                raise AttributeError
            logger.info("Module properties: %s", getattr(python_module, 'properties'))

            # Check existing module get_instance method
            # NOTE(review): collections.Callable was removed in Python
            # 3.10 (moved to collections.abc) — confirm the supported
            # Python versions before upgrading.
            if not hasattr(python_module, 'get_instance') or \
                    not isinstance(getattr(python_module, 'get_instance'),
                                   collections.Callable):  # pragma: no cover
                self.configuration_errors.append("Module %s is missing a 'get_instance' "
                                                 "function" % module.python_name)
                raise AttributeError

            self.modules_assoc.append((module, python_module))
            logger.info("Imported '%s' for %s", module.python_name, module.name)
        except ImportError as exp:  # pragma: no cover, simple protection
            self.configuration_errors.append("Module %s (%s) can't be loaded, Python "
                                             "importation error: %s" % (module.python_name,
                                                                        module.name,
                                                                        str(exp)))
        except AttributeError:  # pragma: no cover, simple protection
            self.configuration_errors.append("Module %s (%s) can't be loaded, "
                                             "module configuration" % (module.python_name,
                                                                       module.name))
        else:
            logger.info("Loaded Python module '%s' (%s)", module.python_name, module.name)
Load Python modules and check their usability :param modules: list of the modules that must be loaded :return:
def interact(self, **local):
    """
    Drops the user into an interactive Python session with the ``sess``
    variable set to the current session instance. If keyword arguments
    are supplied, these names will also be available within the session.
    """
    import code
    # dict(sess=self, **local) raises TypeError when callers pass
    # sess=..., keeping the session binding unambiguous.
    code.interact(local=dict(sess=self, **local))
Drops the user into an interactive Python session with the ``sess`` variable set to the current session instance. If keyword arguments are supplied, these names will also be available within the session.
def find_distinct(self, collection, key):
    """Search a collection for the distinct values of the given key.

    Args:
        collection: The db collection. See main class documentation.
        key: The name of the key to find distinct values. For example
            with the indicators collection, the key could be "type".

    Returns:
        List of distinct values.
    """
    coll = getattr(self.db, collection)
    return coll.distinct(key)
Search a collection for the distinct key values provided. Args: collection: The db collection. See main class documentation. key: The name of the key to find distinct values. For example with the indicators collection, the key could be "type". Returns: List of distinct values.