code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
async def start(self):
    """Begin discarding media.

    Every registered track that does not yet have a consumer task gets a
    ``blackhole_consume`` task scheduled so its media is drained and dropped.
    """
    for track in self.__tracks:
        if self.__tracks[track] is None:
            self.__tracks[track] = asyncio.ensure_future(blackhole_consume(track))
Start discarding media.
def per_section(it, is_delimiter=lambda x: x.isspace()):
    """Yield groups of consecutive non-delimiter lines from *it*.

    Each yielded group is a list of right-stripped lines; a delimiter line
    (by default a whitespace-only line) ends the current group.  Empty
    groups are never yielded.

    Adapted from http://stackoverflow.com/a/25226944/610569
    """
    section = []
    for line in it:
        if not is_delimiter(line):
            section.append(line.rstrip())
            continue
        if section:
            yield section
        section = []
    if section:
        yield section
From http://stackoverflow.com/a/25226944/610569
def abfGroupFiles(groups, folder):
    """Map each parent key to the files in *folder* matching its IDs.

    ``groups`` maps parent -> list of IDs.  Returns a new dictionary mapping
    each parent to the (lowercased) filenames in *folder* whose name contains
    one of that parent's IDs.  This is how you get the list of files
    belonging to each child for each parent.
    """
    assert os.path.exists(folder)
    filenames = [name.lower() for name in os.listdir(folder)]
    matched = {}
    for parent, ids in groups.items():
        matched[parent] = [name for ID in ids for name in filenames if ID in name]
    return matched
when given a dictionary where every key contains a list of IDs, replace the keys with the list of files matching those IDs. This is how you get a list of files belonging to each child for each parent.
def inc(self, name, value=1):
    """Return a clone with the query parameter *name* incremented by *value*.

    Existing occurrences of *name* get their (integer) value increased;
    if *name* is absent it is appended with *value*.
    """
    clone = self._clone()
    updated = []
    for param, current in clone._qsl:
        if param == name:
            updated.append((param, int(current) + value))
        else:
            updated.append((param, current))
    clone._qsl = updated
    if name not in dict(clone._qsl):
        clone._qsl.append((name, value))
    return clone
Increment value
def start_fitting(self):
    """Launch the fitting routine on a worker QThread.

    Reads the data file path from the UI, creates the fit object via
    ``do_fit``, moves it onto a dedicated QThread and wires the
    start/finish/status signals before starting the thread.
    """
    self.queue = queue.Queue()
    self.peak_vals = []
    # must be assigned as an instance variable, not local, as otherwise thread
    # is garbage collected immediately at the end of the function before it runs
    self.fit_thread = QThread()
    self.fitobj = self.do_fit(str(self.data_filepath.text()), self.matplotlibwidget,
                              self.queue, self.peak_vals, self.peak_locs)
    # run the fit object's work inside the worker thread
    self.fitobj.moveToThread(self.fit_thread)
    self.fit_thread.started.connect(self.fitobj.run)
    # clean up: quit thread after script is finished
    self.fitobj.finished.connect(self.fit_thread.quit)
    # forward status messages to the UI
    self.fitobj.status.connect(self.update_status)
    self.fit_thread.start()
Launches the fitting routine on another thread
def set_transfer_spec(self):
    """Invoke the configured transfer-spec function.

    Returns True on success.  On failure, records an
    ``AsperaTransferSpecError`` via ``notify_exception`` and returns False.
    """
    try:
        self._args.transfer_spec_func(self._args)
    except Exception as ex:
        self.notify_exception(AsperaTransferSpecError(ex), False)
        return False
    return True
run the function to set the transfer spec on error set associated exception
def save_script_file_for_state_and_source_path(state, state_path_full, as_copy=False):
    """Save an ExecutionState's script file into the state's storage directory.

    The destination script name is the SCRIPT_FILE constant.  States that are
    not ExecutionStates have no script and are ignored.

    :param state: The state whose script file should be saved
    :param str state_path_full: The file system storage location of the state
    :param bool as_copy: Temporary storage flag signalling that the given path
        is not the new file_system_path (script path/filename stay untouched)
    """
    from rafcon.core.states.execution_state import ExecutionState
    if not isinstance(state, ExecutionState):
        return
    source_script_file = os.path.join(state.script.path, state.script.filename)
    destination_script_file = os.path.join(state_path_full, SCRIPT_FILE)
    try:
        write_file(destination_script_file, state.script_text)
    except Exception:
        logger.exception("Storing of script file failed: {0} -> {1}".format(
            state.get_path(), destination_script_file))
        raise
    # Only re-point the state's script when this is a real move, not a copy.
    if destination_script_file != source_script_file and not as_copy:
        state.script.filename = SCRIPT_FILE
        state.script.path = state_path_full
Saves the script file for a state to the directory of the state. The script name will be set to the SCRIPT_FILE constant. :param state: The state of which the script file should be saved :param str state_path_full: The path to the file system storage location of the state :param bool as_copy: Temporary storage flag to signal that the given path is not the new file_system_path
def quarantineWorker(self, *args, **kwargs):
    """Quarantine a worker.

    This method takes input: ``v1/quarantine-worker-request.json#``

    This method gives output: ``v1/worker-response.json#``

    This method is ``experimental``
    """
    endpoint_info = self.funcinfo["quarantineWorker"]
    return self._makeApiCall(endpoint_info, *args, **kwargs)
Quarantine a worker Quarantine a worker This method takes input: ``v1/quarantine-worker-request.json#`` This method gives output: ``v1/worker-response.json#`` This method is ``experimental``
def get(self, key, default=None, reraise=False):
    """
    Get the given key from the cache, if present.

    A default value can be provided in case the requested key is not
    present, otherwise, None will be returned.

    :param key: the key to query
    :type key: str
    :param default: the value to return if the key does not exist in cache
    :param reraise: whether an exception should be thrown if no value is
        found, defaults to False.
    :type reraise: bool

    Example usage:

    .. code-block:: python

        cache.set('my_key', 'my_value')
        cache.get('my_key')
        >>> 'my_value'

        cache.get('not_present', 'default_value')
        >>> 'default_value'

        cache.get('not_present', reraise=True)
        >>> raise lifter.exceptions.NotInCache
    """
    # A disabled cache behaves like an always-empty cache.
    if not self.enabled:
        if reraise:
            raise exceptions.DisabledCache()
        return default
    try:
        return self._get(key)
    except exceptions.NotInCache:
        if reraise:
            raise
        return default
Get the given key from the cache, if present. A default value can be provided in case the requested key is not present, otherwise, None will be returned. :param key: the key to query :type key: str :param default: the value to return if the key does not exist in cache :param reraise: whether an exception should be thrown if no value is found, defaults to False. :type reraise: bool Example usage: .. code-block:: python cache.set('my_key', 'my_value') cache.get('my_key') >>> 'my_value' cache.get('not_present', 'default_value') >>> 'default_value' cache.get('not_present', reraise=True) >>> raise lifter.exceptions.NotInCache
def onSave(self, grid):
    """Persist grid edits into the er_magic data object and notify the user."""
    # deselect column, including removing the 'EDIT ALL' label
    if self.drop_down_menu:
        self.drop_down_menu.clean_up()
    # save all changes to the er_magic data object
    self.grid_builder.save_grid_data()
    # don't actually write data to files in this step (time-consuming);
    # files are written once the user is done editing
    # self.er_magic_data.write_files()
    wx.MessageBox('Saved!', 'Info', style=wx.OK | wx.ICON_INFORMATION)
Save grid data in the data object
def upload_path(instance, filename):
    """Sanitize the user-provided file name and prefix it with the current
    date (ISO format) for uniqueness.

    Spaces are replaced with underscores; the name is NFKD-normalized and
    lowercased.  (The docstring previously claimed a timestamp was added —
    only the date is used.)
    """
    filename = filename.replace(" ", "_")
    filename = unicodedata.normalize('NFKD', filename).lower()
    return os.path.join(str(timezone.now().date().isoformat()), filename)
Sanitize the user-provided file name, add a date prefix for uniqueness.
def filter_dict(self, query, **kwargs):
    """Filter for :func:`~ommongo.fields.mapping.DictField`.

    **Examples**: ``query.filter_dict({"User.Fullname": "Oji"})``
    """
    # Validate that the root of every dotted key is a field on the type.
    for dotted_name in query:
        root_field = dotted_name.split(".")[0]
        try:
            getattr(self.type, root_field)
        except AttributeError:
            raise FieldNotFoundException("Field not found %s" % (root_field))
    self.query_bypass(query, raw_output=False, **kwargs)
    return self
Filter for :func:`~ommongo.fields.mapping.DictField`. **Examples**: ``query.filter_dict({"User.Fullname": "Oji"})``
def project_remove_folder(object_id, input_params=None, always_retry=False, **kwargs):
    """Invoke the /project-xxxx/removeFolder API method.

    :param object_id: The project ID (e.g. "project-xxxx")
    :param dict input_params: Input parameters for the API call
    :param bool always_retry: Whether the request may always be retried

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FremoveFolder
    """
    # Avoid the mutable-default-argument pitfall: build a fresh dict per call.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/removeFolder' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
Invokes the /project-xxxx/removeFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FremoveFolder
def positionIf(pred, seq):
    """Return the index of the first element of *seq* satisfying *pred*,
    or -1 if no element does.

    >>> positionIf(lambda x: x > 3, range(10))
    4
    """
    return next((index for index, element in enumerate(seq) if pred(element)), -1)
>>> positionIf(lambda x: x > 3, range(10)) 4
def filter_by_domain(self, domain):
    """Return a copy of this query restricted to the given domain."""
    new_query = self._copy()
    new_query.domain = domain
    return new_query
Apply the given domain to a copy of this query
def vmach2cas(M, h):
    """Convert Mach number *M* to calibrated airspeed at altitude *h*,
    going via true airspeed.
    """
    return vtas2cas(vmach2tas(M, h), h)
Mach to CAS conversion
def _theorem6p4(): """See Theorem 6.4 in paper. Let E(x) denote the edges added when eliminating x. (edges_x below). Prunes (s,b) when (s,a) is explored and E(a) is a subset of E(b). For this theorem we only record E(a) rather than (s,E(a)) because we only need to check for pruning in the same s context (i.e the same level of recursion). """ pruning_set4 = list() def _prune4(edges_b): for edges_a in pruning_set4: if edges_a.issubset(edges_b): return True return False def _explored4(edges_a): pruning_set4.append(edges_a) # (s,E_a) with (s,a) explored return _prune4, _explored4
See Theorem 6.4 in paper. Let E(x) denote the edges added when eliminating x. (edges_x below). Prunes (s,b) when (s,a) is explored and E(a) is a subset of E(b). For this theorem we only record E(a) rather than (s,E(a)) because we only need to check for pruning in the same s context (i.e the same level of recursion).
def hide_me(tb, g=globals()):
    """Return *tb* with leading frames belonging to *g* hidden.

    Frames from other modules are skipped first, then frames whose globals
    are *g* are dropped.  If everything would be hidden, or any error occurs
    while walking the traceback, the original traceback is returned.
    """
    original = tb
    try:
        # advance past foreign frames, then drop this module's frames
        while tb is not None and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb is not None and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        return original
    return tb if tb else original
Hide stack traceback of given stack
async def purgeRequests(self, *args, **kwargs):
    """Open Purge Requests for a provisionerId/workerType pair.

    List of caches that need to be purged if they are from before a
    certain time.  This is safe to be used in automation from workers.

    This method gives output: ``v1/purge-cache-request-list.json#``

    This method is ``stable``
    """
    endpoint_info = self.funcinfo["purgeRequests"]
    return await self._makeApiCall(endpoint_info, *args, **kwargs)
Open Purge Requests for a provisionerId/workerType pair List of caches that need to be purged if they are from before a certain time. This is safe to be used in automation from workers. This method gives output: ``v1/purge-cache-request-list.json#`` This method is ``stable``
def write(self, vals):
    """Overrides orm write method.

    @param self: The object pointer
    @param vals: dictionary of fields value.

    Updates the Hotel Room Reservation line history: when the room product
    or dates change on a reserved line, the matching reservation line is
    rewritten to the new room/dates before delegating to the parent write.
    """
    reservation_line_obj = self.env['hotel.room.reservation.line']
    room_obj = self.env['hotel.room']
    # fall back to the record's current values for fields not in `vals`
    prod_id = vals.get('product_id') or self.product_id.id
    chkin = vals.get('checkin_date') or self.checkin_date
    chkout = vals.get('checkout_date') or self.checkout_date
    is_reserved = self.is_reserved
    if prod_id and is_reserved:
        prod_domain = [('product_id', '=', prod_id)]
        prod_room = room_obj.search(prod_domain, limit=1)
        if (self.product_id and self.checkin_date and self.checkout_date):
            # locate the room currently attached to this line
            old_prd_domain = [('product_id', '=', self.product_id.id)]
            old_prod_room = room_obj.search(old_prd_domain, limit=1)
            if prod_room and old_prod_room:
                # Check for existing room lines.
                srch_rmline = [('room_id', '=', old_prod_room.id),
                               ('check_in', '=', self.checkin_date),
                               ('check_out', '=', self.checkout_date),
                               ]
                rm_lines = reservation_line_obj.search(srch_rmline)
                if rm_lines:
                    # re-point the reservation line(s) at the new room/dates
                    rm_line_vals = {'room_id': prod_room.id,
                                    'check_in': chkin,
                                    'check_out': chkout}
                    rm_lines.write(rm_line_vals)
    return super(HotelFolioLineExt, self).write(vals)
Overrides orm write method. @param self: The object pointer @param vals: dictionary of fields value. Update Hotel Room Reservation line history
def get_policy_configurations(self, project, repository_id=None, ref_name=None, policy_type=None, top=None, continuation_token=None):
    """GetPolicyConfigurations.
    [Preview API] Retrieve a list of policy configurations by a given set of scope/filtering criteria.
    :param str project: Project ID or project name
    :param str repository_id: The repository id.
    :param str ref_name: The fully-qualified Git ref name (e.g. refs/heads/master).
    :param str policy_type: The policy type filter.
    :param int top: Maximum number of policies to return.
    :param str continuation_token: Pass a policy configuration ID to fetch the next page of results, up to top number of results, for this endpoint.
    :rtype: :class:`<GitPolicyConfigurationResponse> <azure.devops.v5_1.git.models.GitPolicyConfigurationResponse>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    # Build the query string, skipping parameters that were not supplied.
    optional_queries = [
        ('repositoryId', 'repository_id', repository_id, 'str'),
        ('refName', 'ref_name', ref_name, 'str'),
        ('policyType', 'policy_type', policy_type, 'str'),
        ('$top', 'top', top, 'int'),
        ('continuationToken', 'continuation_token', continuation_token, 'str'),
    ]
    query_parameters = {}
    for wire_name, param_name, value, value_type in optional_queries:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(param_name, value, value_type)
    response = self._send(http_method='GET',
                          location_id='2c420070-a0a2-49cc-9639-c9f271c5ff07',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    response_object = models.GitPolicyConfigurationResponse()
    response_object.policy_configurations = self._deserialize('[PolicyConfiguration]', self._unwrap_collection(response))
    # continuation token for paging arrives in a response header
    response_object.continuation_token = response.headers.get('x-ms-continuationtoken')
    return response_object
GetPolicyConfigurations. [Preview API] Retrieve a list of policy configurations by a given set of scope/filtering criteria. :param str project: Project ID or project name :param str repository_id: The repository id. :param str ref_name: The fully-qualified Git ref name (e.g. refs/heads/master). :param str policy_type: The policy type filter. :param int top: Maximum number of policies to return. :param str continuation_token: Pass a policy configuration ID to fetch the next page of results, up to top number of results, for this endpoint. :rtype: :class:`<GitPolicyConfigurationResponse> <azure.devops.v5_1.git.models.GitPolicyConfigurationResponse>`
def unicode_key(key):
    """Convert a property value to the quoted name of the same.

    Non-string keys are reported via ``Log.error`` — presumably that call
    raises and the final return is never reached for them; confirm against
    mo_logs' behavior.
    """
    if not isinstance(key, (text_type, binary_type)):
        from mo_logs import Log
        Log.error("{{key|quote}} is not a valid key", key=key)
    return quote(text_type(key))
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
def process_python(self, path):
    """Run pylint on one python file and tally warnings by category."""
    (pylint_stdout, pylint_stderr) = epylint.py_run(
        ' '.join([str(path)] + self.pylint_opts), return_std=True)
    error_counts = {}
    print(pylint_stderr.read())
    for line in pylint_stdout:
        sys.stderr.write(line)
        # category is the trailing "name(...)" token of the pylint line
        category = line.split(':')[-1].split('(')[0].strip()
        if category not in self.pylint_cats:
            continue
        error_counts[category] = error_counts.get(category, 0) + 1
    sys.stderr.write('\n')
    self.python_map[str(path)] = error_counts
Process a python file.
def scan(self, table, scan_filter=None,
         attributes_to_get=None, request_limit=None, max_results=None,
         count=False, exclusive_start_key=None, item_class=Item):
    """
    Perform a scan of DynamoDB.

    :type table: :class:`boto.dynamodb.table.Table`
    :param table: The Table object that is being scanned.

    :type scan_filter: A list of tuples
    :param scan_filter: A list of tuples where each tuple consists
        of an attribute name, a comparison operator, and either a scalar or
        tuple consisting of the values to compare the attribute to.  Valid
        comparison operators are shown below along with the expected number
        of values that should be supplied.

        * EQ - equal (1)
        * NE - not equal (1)
        * LE - less than or equal (1)
        * LT - less than (1)
        * GE - greater than or equal (1)
        * GT - greater than (1)
        * NOT_NULL - attribute exists (0, use None)
        * NULL - attribute does not exist (0, use None)
        * CONTAINS - substring or value in list (1)
        * NOT_CONTAINS - absence of substring or value in list (1)
        * BEGINS_WITH - substring prefix (1)
        * IN - exact match in list (N)
        * BETWEEN - >= first value, <= second value (2)

    :type attributes_to_get: list
    :param attributes_to_get: A list of attribute names.
        If supplied, only the specified attribute names will
        be returned.  Otherwise, all attributes will be returned.

    :type request_limit: int
    :param request_limit: The maximum number of items to retrieve
        from Amazon DynamoDB on each request.  You may want to set
        a specific request_limit based on the provisioned throughput
        of your table.  The default behavior is to retrieve as many
        results as possible per request.

    :type max_results: int
    :param max_results: The maximum number of results that will
        be retrieved from Amazon DynamoDB in total.  For example,
        if you only wanted to see the first 100 results from the
        query, regardless of how many were actually available, you
        could set max_results to 100 and the generator returned
        from the query method will only yield 100 results max.

    :type count: bool
    :param count: If True, Amazon DynamoDB returns a total
        number of items for the Scan operation, even if the
        operation has no matching items for the assigned filter.

    :type exclusive_start_key: list or tuple
    :param exclusive_start_key: Primary key of the item from
        which to continue an earlier query.  This would be
        provided as the LastEvaluatedKey in that query.

    :type item_class: Class
    :param item_class: Allows you to override the class used
        to generate the items. This should be a subclass of
        :class:`boto.dynamodb.item.Item`

    :rtype: generator
    """
    sf = self.dynamize_scan_filter(scan_filter)
    response = True
    n = 0
    while response:
        if response is True:
            pass
        elif 'LastEvaluatedKey' in response:
            # dict.has_key() was removed in Python 3 — use the `in` operator
            exclusive_start_key = response['LastEvaluatedKey']
        else:
            break
        response = self.layer1.scan(table.name, sf, attributes_to_get,
                                    request_limit, count, exclusive_start_key,
                                    object_hook=item_object_hook)
        if response:
            for item in response['Items']:
                if max_results and n == max_results:
                    break
                yield item_class(table, attrs=item)
                n += 1
Perform a scan of DynamoDB. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being scanned. :type scan_filter: A list of tuples :param scan_filter: A list of tuples where each tuple consists of an attribute name, a comparison operator, and either a scalar or tuple consisting of the values to compare the attribute to. Valid comparison operators are shown below along with the expected number of values that should be supplied. * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. 
:type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: generator
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
    """Fill a hole in a Pelican thermostat's data stream.

    Arguments:
        site -- The thermostat's Pelican site name
        username -- The Pelican username for the site
        password -- The Pelican password for the site
        tstat_name -- The name of the thermostat, as identified by Pelican
        start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
        end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"

    Returns:
        A Pandas dataframe with historical Pelican data that falls between
        the specified start and end times, or None if a lookup fails.

        Note that this function assumes the Pelican thermostat's local time
        zone is US/Pacific. It will properly handle PST vs. PDT.
    """
    start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
    end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)

    heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
    if heat_needs_fan is None:
        return None

    # Pelican's API only allows a query covering a time range of up to 1 month
    # So we may need run multiple requests for historical data
    history_blocks = []
    while start < end:
        block_start = start
        block_end = min(start + timedelta(days=30), end)
        blocks = _lookupHistoricalData(site, username, password, tstat_name,
                                       block_start, block_end)
        if blocks is None:
            return None
        history_blocks.extend(blocks)
        start += timedelta(days=30, minutes=1)

    output_rows = []
    for block in history_blocks:
        runStatus = block.find("runStatus").text
        if runStatus.startswith("Heat"):
            # BUG FIX: previously referenced the undefined name "heatNeedsFan",
            # raising NameError whenever heating was active.
            fanState = (heat_needs_fan == "Yes")
        else:
            fanState = (runStatus != "Off")
        api_time = datetime.strptime(block.find("timestamp").text,
                                     "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time)
        # Need to convert seconds to nanoseconds
        timestamp = int(api_time.timestamp() * 10**9)
        output_rows.append({
            "temperature": float(block.find("temperature").text),
            "relative_humidity": float(block.find("humidity").text),
            "heating_setpoint": float(block.find("heatSetting").text),
            "cooling_setpoint": float(block.find("coolSetting").text),
            # Driver explicitly uses "Schedule" field, but we don't have this in history
            "override": block.find("setBy").text != "Schedule",
            "fan": fanState,
            "mode": _mode_name_mappings[block.find("system").text],
            "state": _state_mappings.get(runStatus, 0),
            "time": timestamp,
        })

    df = pd.DataFrame(output_rows)
    df.drop_duplicates(subset="time", keep="first", inplace=True)
    return df
Fill a hole in a Pelican thermostat's data stream. Arguments: site -- The thermostat's Pelican site name username -- The Pelican username for the site password -- The Pelican password for the site tstat_name -- The name of the thermostat, as identified by Pelican start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00" end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00" Returns: A Pandas dataframe with historical Pelican data that falls between the specified start and end times. Note that this function assumes the Pelican thermostat's local time zone is US/Pacific. It will properly handle PST vs. PDT.
def ndarray_to_list_in_structure(item, squeeze=True):
    """Recursively convert numpy arrays inside a structure of lists and
    dicts into plain Python lists.

    :param item: ndarray, list, dict, or any other object (returned as-is)
    :param bool squeeze: if True, squeeze singleton dimensions of each
        ndarray before converting it
    """
    tp = type(item)
    if tp == np.ndarray:
        if squeeze:
            item = item.squeeze()
        item = item.tolist()
    elif tp == list:
        for i in range(len(item)):
            # BUG FIX: propagate `squeeze` so nested arrays honor the
            # caller's choice (previously it silently reset to True).
            item[i] = ndarray_to_list_in_structure(item[i], squeeze)
    elif tp == dict:
        for lab in item:
            item[lab] = ndarray_to_list_in_structure(item[lab], squeeze)
    return item
Change ndarray in structure of lists and dicts into lists.
def discard(self, key):
    """Remove *key* if present; do nothing if it is absent.

    The MutableSet mixin builds .remove() — which *does* raise on a missing
    element — on top of this method.
    """
    if key not in self:
        return
    position = self.map.pop(key)
    del self.items[position]
    # shift the recorded index of every element that followed the removed one
    for element, index in self.map.items():
        if index >= position:
            self.map[element] = index - 1
Remove an element. Do not raise an exception if absent. The MutableSet mixin uses this to implement the .remove() method, which *does* raise an error when asked to remove a non-existent item.
def BFS_Tree(G, start):
    """Return an oriented tree constructed from bfs starting at 'start'."""
    if start not in G.vertices:
        raise GraphInsertError("Vertex %s doesn't exist." % (start,))
    pred = BFS(G, start)
    tree = digraph.DiGraph()
    pending = Queue()
    pending.put(start)
    while pending.qsize() > 0:
        current = pending.get()
        # attach every vertex whose BFS predecessor is `current`
        for vertex in pred:
            if pred[vertex] == current:
                tree.add_edge(current, vertex)
                pending.put(vertex)
    return tree
Return an oriented tree constructed from bfs starting at 'start'.
def get_protocols(self, device):
    """Return the list of protocols available for the specified device."""
    builder = self._reg.device_builder(device, self._rv)
    return builder.protocols
Returns a list of available protocols for the specified device.
def by_player(self):
    """:class:`bool`: Whether the kill involves other characters."""
    return any(k.player and k.name != self.name for k in self.killers)
:class:`bool`: Whether the kill involves other characters.
def download_metadata_cli(master_token, output_csv, verbose=False, debug=False):
    """Command line wrapper for downloading metadata.

    For more information visit
    :func:`download_metadata<ohapi.command_line.download_metadata>`.
    """
    return download_metadata(master_token, output_csv, verbose, debug)
Command line function for downloading metadata. For more information visit :func:`download_metadata<ohapi.command_line.download_metadata>`.
def guess_service_info_from_path(spec_path):
    """Guess Python Autorest options based on the spec path.

    Expected path:
    specification/compute/resource-manager/readme.md
    """
    normalized = spec_path.lower()
    # Might raise ValueError if "specification" is absent, and that's ok
    relative = normalized[normalized.index("specification"):]
    parts = relative.split("/")
    return {
        "rp_name": parts[1],
        "is_arm": parts[2] == "resource-manager",
    }
Guess Python Autorest options based on the spec path. Expected path: specification/compute/resource-manager/readme.md
def get_bounds(tune_params):
    """Create a list of (min, max) bounds, one per tunable parameter."""
    bounds = []
    for values in tune_params.values():
        ordered = numpy.sort(values)
        bounds.append((ordered[0], ordered[-1]))
    return bounds
create a bounds array from the tunable parameters
def itemat(iterable, index):
    """Get the item at *index* in *iterable*, falling back to iteration.

    :param iterable: object which provides __getitem__ or __iter__.
    :param int index: item position to get.
    :raises IndexError: if the position is out of range.
    """
    # Fast path: direct indexing.  Dicts are excluded because their
    # __getitem__ is a key lookup, not a positional one.
    if not isinstance(iterable, dict):
        try:
            return iterable[index]
        except TypeError:
            pass
    # Slow path: walk the iterator to the requested position.
    if index < 0:
        # ensure index is positive
        index += len(iterable)
    iterator = iter(iterable)
    result = None
    while index >= 0:
        try:
            value = next(iterator)
        except StopIteration:
            raise IndexError(
                "{0} index {1} out of range".format(
                    iterable.__class__, index
                )
            )
        if index == 0:
            result = value
            break
        index -= 1
    return result
Try to get the item at index position in iterable after iterate on iterable items. :param iterable: object which provides the method __getitem__ or __iter__. :param int index: item position to get.
def get_mode(path, follow_symlinks=True):
    '''
    Return the mode of a file

    path
        file or directory of which to get the mode

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_mode /etc/passwd

    .. versionchanged:: 2014.1.0
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    return stats(expanded, follow_symlinks=follow_symlinks).get('mode', '')
Return the mode of a file path file or directory of which to get the mode follow_symlinks indicated if symlinks should be followed CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added
def get_node_type(self, node, parent=None):
    """If node is a document, the type is page. If node is a binder with no
    parent, the type is book. If node is a translucent binder, the type is
    either chapter (only contains pages) or unit (contains at least one
    translucent binder).
    """
    if isinstance(node, CompositeDocument):
        return 'composite-page'
    if isinstance(node, (Document, DocumentPointer)):
        return 'page'
    if isinstance(node, Binder) and parent is None:
        return 'book'
    # A translucent binder holding another translucent binder is a unit;
    # otherwise it is a chapter.
    holds_translucent = any(isinstance(child, TranslucentBinder) for child in node)
    return 'unit' if holds_translucent else 'chapter'
If node is a document, the type is page. If node is a binder with no parent, the type is book. If node is a translucent binder, the type is either chapters (only contain pages) or unit (contains at least one translucent binder).
def _post_filter(search, urlkwargs, definitions):
    """Ingest post filter in query."""
    filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)
    # apply each generated filter as an Elasticsearch post_filter
    for dsl_filter in filters:
        search = search.post_filter(dsl_filter)
    return (search, urlkwargs)
Ingest post filter in query.
def as_dict(self):
    """Return the URI object's attributes as a (shallow-copied) dictionary."""
    return dict(self.__dict__)
Return the URI object as a dictionary
def get_random_mass(numPoints, massRangeParams):
    """
    This function will generate a large set of points within the chosen mass
    and spin space, and with the desired minimum remnant disk mass (this
    applies to NS-BH systems only). It will also return the corresponding PN
    spin coefficients for ease of use later (though these may be removed at
    some future point).

    Parameters
    ----------
    numPoints : int
        Number of systems to simulate
    massRangeParams : massRangeParameters instance
        Instance holding all the details of mass ranges and spin ranges.

    Returns
    --------
    mass1 : float
        Mass of heavier body.
    mass2 : float
        Mass of lighter body.
    spin1z : float
        Spin of body 1.
    spin2z : float
        Spin of body 2.
    """
    # WARNING: We expect mass1 > mass2 ALWAYS
    # Check if EM contraints are required, i.e. if the systems must produce
    # a minimum remnant disk mass.  If this is not the case, proceed treating
    # the systems as point particle binaries
    if massRangeParams.remnant_mass_threshold is None:
        mass1, mass2, spin1z, spin2z = \
            get_random_mass_point_particles(numPoints, massRangeParams)
    # otherwise, load EOS dependent data, generate the EM constraint
    # (i.e. compute the minimum symmetric mass ratio needed to
    # generate a given remnant disk mass as a function of the NS
    # mass and the BH spin along z) and then proceed by accepting
    # only systems that can yield (at least) the desired remnant
    # disk mass and that pass the mass and spin range cuts.
    else:
        _, max_ns_g_mass = load_ns_sequence(massRangeParams.ns_eos)

        # Generate EM constraint surface: minimum eta as a function of BH
        # spin and NS mass required to produce an EM counterpart.  The
        # surface is cached on disk and only generated on first use.
        if not os.path.isfile('constraint_em_bright.npz'):
            logging.info("""constraint_em_bright.npz not found.
Generating the constraint surface for EM bright binaries in the physical parameter space. One day, this will be made faster, for now be patient and wait a few minutes!""")
            generate_em_constraint_data(massRangeParams.minMass2, massRangeParams.maxMass2, massRangeParams.delta_ns_mass, \
                                        -1.0, massRangeParams.maxBHSpinMag, massRangeParams.delta_bh_spin, \
                                        massRangeParams.ns_eos, massRangeParams.remnant_mass_threshold, 0.0)
        constraint_datafile = numpy.load('constraint_em_bright.npz')
        mNS_pts = constraint_datafile['mNS_pts']
        bh_spin_z_pts = constraint_datafile['sBH_pts']
        eta_mins = constraint_datafile['eta_mins']

        # Empty arrays to store points that pass all cuts
        mass1_out = []
        mass2_out = []
        spin1z_out = []
        spin2z_out = []

        # As the EM cut can remove several randomly generated
        # binaries, track the number of accepted points that pass
        # all cuts and stop only once enough of them are generated
        numPointsFound = 0
        while numPointsFound < numPoints:
            # Generate the random points within the required mass
            # and spin cuts; only draw as many as are still missing
            mass1, mass2, spin1z, spin2z = \
                get_random_mass_point_particles(numPoints-numPointsFound,
                                                massRangeParams)
            _, eta = pnutils.mass1_mass2_to_mtotal_eta(mass1, mass2)

            # Now proceed with cutting out EM dim systems
            # Logical mask to clean up points by removing EM dim binaries
            mask = numpy.ones(len(mass1), dtype=bool)
            # Compute the minimum eta to generate a counterpart
            min_eta_em = min_eta_for_em_bright(spin1z, mass2, mNS_pts,
                                               bh_spin_z_pts, eta_mins)
            # Remove a point if:
            # 1) eta is smaller than the eta threshold required to have a
            #    counterpart;
            # 2) the primary is a BH (mass1 >= ns_bh_boundary_mass);
            # 3) the secondary mass does not exceed the maximum NS mass
            #    allowed by the EOS (if the user runs with
            #    --use-eos-max-ns-mass this last condition will always be
            #    true, otherwise the user is implicitly asking to keep
            #    binaries in which the secondary may be a BH).
            mask[(mass1 >= massRangeParams.ns_bh_boundary_mass) & (mass2 <= max_ns_g_mass) & (eta < min_eta_em)] = False

            # Keep only binaries that can produce an EM counterpart and add
            # them to the pile of accepted points to output
            mass1_out = numpy.concatenate((mass1_out, mass1[mask]))
            mass2_out = numpy.concatenate((mass2_out, mass2[mask]))
            spin1z_out = numpy.concatenate((spin1z_out,spin1z[mask]))
            spin2z_out = numpy.concatenate((spin2z_out,spin2z[mask]))

            # Number of points that survived all cuts
            numPointsFound = len(mass1_out)

        # Ready to go
        mass1 = mass1_out
        mass2 = mass2_out
        spin1z = spin1z_out
        spin2z = spin2z_out

    return mass1, mass2, spin1z, spin2z
This function will generate a large set of points within the chosen mass and spin space, and with the desired minimum remnant disk mass (this applies to NS-BH systems only). It will also return the corresponding PN spin coefficients for ease of use later (though these may be removed at some future point). Parameters ---------- numPoints : int Number of systems to simulate massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. Returns -------- mass1 : float Mass of heavier body. mass2 : float Mass of lighter body. spin1z : float Spin of body 1. spin2z : float Spin of body 2.
def log_pdf(self, y, mu, weights=None):
    """Evaluate the binomial log probability mass of *y*.

    Parameters
    ----------
    y : array-like of length n
        target values
    mu : array-like of length n
        expected values
    weights : array-like shape (n,) or None, default: None
        sample weights; if None, defaults to an array of ones

    Returns
    -------
    pdf/pmf : np.array of length n
    """
    if weights is None:
        weights = np.ones_like(mu)
    trials = self.levels
    # Expected value mu = n * p, so p = mu / levels.
    success_prob = mu / self.levels
    return sp.stats.binom.logpmf(y, trials, success_prob)
computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n
def is_finalized(self):
    """Return True when the bundle state is FINALIZED or INSTALLED."""
    return self.state in (self.STATES.FINALIZED, self.STATES.INSTALLED)
Return True if the bundle is installed.
def stream_command_dicts(commands, parallel=False):
    """
    Takes a list of dictionaries with keys corresponding to ``stream_command``
    arguments, and runs all of them.

    :param commands: A list of dictionaries, the keys of which should line up
        with the arguments to ``stream_command`` function.
    :type commands: ``list`` of ``dict``
    :param parallel: If true, commands will be run in parallel.
    :type parallel: ``bool``
    """
    if parallel is True:
        threads = []
        for command in commands:
            # Bind the current dict as a default argument: a plain
            # ``lambda: stream_command(**command)`` closes over the loop
            # variable, so every thread could end up running the *last*
            # command instead of its own.
            thread = Thread(target=lambda cmd=command: stream_command(**cmd))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
    else:
        for command in commands:
            stream_command(**command)
Takes a list of dictionaries with keys corresponding to ``stream_command`` arguments, and runs all concurrently. :param commands: A list of dictionaries, the keys of which should line up with the arguments to ``stream_command`` function. :type commands: ``list`` of ``dict`` :param parallel: If true, commands will be run in parallel. :type parallel: ``bool``
def _machine_actions(self, action):
    """
    Actions for the machine (e.g. stop, start etc)

    :param action: Can be "reboot", "start", "stop", "destroy"
    :returns: An updated list of the added machines
    """
    data = json.dumps({'action': action})
    url = "%s/clouds/%s/machines/%s" % (
        self.mist_client.uri, self.cloud.id, self.id)
    req = self.request(url, data=data)
    req.post()
    # Refresh the cloud's machine list after the action is issued.
    self.cloud.update_machines()
Actions for the machine (e.g. stop, start etc) :param action: Can be "reboot", "start", "stop", "destroy" :returns: An updated list of the added machines
def set_options(self, **kwargs):
    """
    Set options.
    @param kwargs: keyword arguments.
    @see: L{Options}
    """
    # Unskin exposes the raw options mapping for in-place update.
    Unskin(self.options).update(kwargs)
Set options. @param kwargs: keyword arguments. @see: L{Options}
def set_public_transport_route(self, public_transport_route):
    """
    Set the public transport route.
    :param public_transport_route: TransportRoute
    """
    param = "%s%s" % (QueryParam.ROUTE_ID, public_transport_route)
    self._query_params += param
Set the public transport route. :param public_transport_route: TransportRoute
def get_app_instance(request):
    """
    Returns a tuple containing the current namespace and the AppHookConfig
    instance

    :param request: request object
    :return: namespace, config
    """
    page = getattr(request, 'current_page', None)
    app = None
    if page and page.application_urls:
        app = apphook_pool.get_apphook(page.application_urls)
    if app and app.app_config:
        try:
            # Resolve the namespace under the request's language so the
            # config lookup matches the localized URL.
            with override(get_language_from_request(request, check_path=True)):
                namespace = resolve(request.path_info).namespace
                return namespace, app.get_config(namespace)
        except Resolver404:
            pass
    return '', None
Returns a tuple containing the current namespace and the AppHookConfig instance :param request: request object :return: namespace, config
def process_increase_expression_amount(self):
    """Looks for Positive_Regulation events with a specified Cause and a
    Gene_expression theme, and processes them into INDRA statements.
    """
    statements = []
    pairs = self.find_event_parent_with_event_child(
        'Positive_regulation', 'Gene_expression')
    for pos_reg, expression in pairs:
        cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
        target = self.get_entity_text_for_relation(expression, 'Theme')
        # Skip events where either endpoint has no extractable text.
        if cause is None or target is None:
            continue
        theme_node = self.get_related_node(expression, 'Theme')
        assert(theme_node is not None)
        evidence = self.node_to_evidence(theme_node, is_direct=False)
        statements.append(
            IncreaseAmount(s2a(cause), s2a(target), evidence=evidence))
    return statements
Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme, and processes them into INDRA statements.
def id_pools_vsn_ranges(self):
    """
    Gets the IdPoolsRanges API Client for VSN Ranges.

    Returns:
        IdPoolsRanges:
    """
    # Lazily build the client on first access and cache it afterwards.
    if not self.__id_pools_vsn_ranges:
        self.__id_pools_vsn_ranges = IdPoolsRanges('vsn', self.__connection)
    return self.__id_pools_vsn_ranges
Gets the IdPoolsRanges API Client for VSN Ranges. Returns: IdPoolsRanges:
def plot_pdf(self, names=None, Nbest=5, lw=2):
    """Plots Probability density functions of the distributions

    :param str,list names: names can be a single distribution name, or a
        list of distribution names, or kept as None, in which case, the
        first Nbest distribution will be taken (default to best 5)
    """
    assert Nbest > 0
    if Nbest > len(self.distributions):
        Nbest = len(self.distributions)

    if isinstance(names, list):
        for name in names:
            pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
    elif names:
        pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names)
    else:
        try:
            names = self.df_errors.sort_values(
                by="sumsquare_error").index[0:Nbest]
        except AttributeError:
            # Older pandas releases only provide ``sort``; the previous
            # bare ``except:`` silently masked every other error as well.
            names = self.df_errors.sort("sumsquare_error").index[0:Nbest]
        for name in names:
            if name in self.fitted_pdf.keys():
                pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
            else:
                print("%s was not fitted. no parameters available" % name)
    pylab.grid(True)
    pylab.legend()
Plots Probability density functions of the distributions :param str,list names: names can be a single distribution name, or a list of distribution names, or kept as None, in which case, the first Nbest distribution will be taken (default to best 5)
def update(self, dtrain, iteration, fobj=None):
    """Update for one iteration, with objective function calculated
    internally.

    Parameters
    ----------
    dtrain : DMatrix
        Training data.
    iteration : int
        Current iteration number.
    fobj : function
        Customized objective function.
    """
    if not isinstance(dtrain, DMatrix):
        raise TypeError('invalid training matrix: {}'.format(
            type(dtrain).__name__))
    self._validate_features(dtrain)

    if fobj is not None:
        # Custom objective: compute gradients in Python and boost manually.
        grad, hess = fobj(self.predict(dtrain), dtrain)
        self.boost(dtrain, grad, hess)
    else:
        _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration,
                                                dtrain.handle))
Update for one iteration, with objective function calculated internally. Parameters ---------- dtrain : DMatrix Training data. iteration : int Current iteration number. fobj : function Customized objective function.
def get_gene_count_tab(infile, bc_getter=None):
    ''' Yields the counts per umi for each gene

    bc_getter: method to get umi (plus optionally, cell barcode) from
    read, e.g get_umi_read_id or get_umi_tag

    TODO: ADD FOLLOWING OPTION

    skip_regex: skip genes matching this regex. Useful to ignore
    unassigned reads (as per get_bundles class above)
    '''

    gene = None
    # Per-cell Counter of UMIs for the gene currently being read; using a
    # defaultdict from the start keeps the type consistent with the loop
    # (the old code initialized a plain Counter that was never used).
    counts = collections.defaultdict(collections.Counter)

    for line in infile:
        values = line.strip().split("\t")
        assert len(values) == 2, "line: %s does not contain 2 columns" % line
        read_id, assigned_gene = values

        if assigned_gene != gene:
            if gene:
                yield gene, counts
            gene = assigned_gene
            counts = collections.defaultdict(collections.Counter)

        cell, umi = bc_getter(read_id)
        counts[cell][umi] += 1

    # yield the final gene's counts; guard against empty input, which
    # previously yielded a spurious (None, Counter()) record
    if gene:
        yield gene, counts
Yields the counts per umi for each gene bc_getter: method to get umi (plus optionally, cell barcode) from read, e.g. get_umi_read_id or get_umi_tag TODO: ADD FOLLOWING OPTION skip_regex: skip genes matching this regex. Useful to ignore unassigned reads (as per the get_bundles class above)
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Write the data encoding the UsernamePasswordCredential struct to a
    stream.

    Args:
        output_stream (stream): A data stream in which to encode object
            data, supporting a write method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the username is not defined.
    """
    # Encode the payload into a scratch stream first so the total length
    # is known before the header is written.
    payload = BytearrayStream()

    if not self._username:
        raise ValueError(
            "Username/password credential struct missing the username."
        )
    self._username.write(payload, kmip_version=kmip_version)

    # The password is optional and only encoded when present.
    if self._password:
        self._password.write(payload, kmip_version=kmip_version)

    self.length = payload.length()
    super(UsernamePasswordCredential, self).write(
        output_stream,
        kmip_version=kmip_version
    )
    output_stream.write(payload.buffer)
Write the data encoding the UsernamePasswordCredential struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the username is not defined.
def _stage_input_files(self, file_mapping, dry_run=True): """Stage the input files to the scratch area and adjust the arguments accordingly""" # print ("Staging input ", file_mapping) if self._file_stage is None: return self._file_stage.copy_to_scratch(file_mapping, dry_run)
Stage the input files to the scratch area and adjust the arguments accordingly
def geometricmean(inlist):
    """
    Calculates the geometric mean of the values in the passed list.
    That is:  n-th root of (x1 * x2 * ... * xn).  Assumes a '1D' list.

    Usage:   lgeometricmean(inlist)
    """
    exponent = 1.0 / len(inlist)
    # Multiply the n-th roots together rather than taking the root of the
    # full product; this avoids overflow for long lists of large values.
    product = 1.0
    for value in inlist:
        product *= pow(value, exponent)
    return product
Calculates the geometric mean of the values in the passed list. That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list. Usage: lgeometricmean(inlist)
def validate_id(request):
    """Validate request id.

    The JSON-RPC ``id`` member, when present, must be a string, an
    integer, or ``None``.

    :param request: decoded request mapping
    :raises AssertionError: if the identifier has an unsupported type
    """
    if 'id' in request:
        # ``None`` is not a type, so the original
        # ``isinstance(x, (string_types, int, None))`` raised TypeError for
        # every request that carried an id; ``type(None)`` is the correct
        # classinfo entry.
        correct_id = isinstance(
            request['id'], (string_types, int, type(None)),
        )
        error = 'Incorrect identifier'
        assert correct_id, error
Validate request id.
def tagExplicitly(self, superTag):
    """Return explicitly tagged *TagSet*

    Create a new *TagSet* representing callee *TagSet* explicitly tagged
    with passed tag(s). With explicit tagging mode, new tags are appended
    to existing tag(s).

    Parameters
    ----------
    superTag: :class:`~pyasn1.type.tag.Tag`
        *Tag* object to tag this *TagSet*

    Returns
    -------
    : :class:`~pyasn1.type.tag.TagSet`
        New *TagSet* object
    """
    if superTag.tagClass == tagClassUniversal:
        raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")

    # Explicit tagging always wraps the value, so the outer tag must be
    # encoded as constructed.
    if superTag.tagFormat != tagFormatConstructed:
        superTag = Tag(
            superTag.tagClass, tagFormatConstructed, superTag.tagId)

    return self + superTag
Return explicitly tagged *TagSet* Create a new *TagSet* representing callee *TagSet* explicitly tagged with passed tag(s). With explicit tagging mode, new tags are appended to existing tag(s). Parameters ---------- superTag: :class:`~pyasn1.type.tag.Tag` *Tag* object to tag this *TagSet* Returns ------- : :class:`~pyasn1.type.tag.TagSet` New *TagSet* object
def open_las(source, closefd=True):
    """ Opens and reads the header of the las content in the source

    >>> with open_las('pylastests/simple.las') as f:
    ...     print(f.header.point_format_id)
    3

    >>> f = open('pylastests/simple.las', mode='rb')
    >>> with open_las(f, closefd=False) as flas:
    ...     print(flas.header)
    <LasHeader(1.2)>
    >>> f.closed
    False

    >>> f = open('pylastests/simple.las', mode='rb')
    >>> with open_las(f) as flas:
    ...    las = flas.read()
    >>> f.closed
    True

    Parameters
    ----------
    source : str or bytes or io.BytesIO
        if source is a str it must be a filename;
        if source is bytes it is wrapped in an in-memory stream;
        otherwise it must be a file object with read, seek and tell methods
    closefd: bool
        Whether the stream/file object shall be closed, this only work
        when using open_las in a with statement. An exception is raised if
        closefd is specified and the source is a filename

    Raises
    ------
    ValueError
        if ``closefd`` is False while ``source`` is a filename

    Returns
    -------
    pylas.lasreader.LasReader
    """
    if isinstance(source, str):
        # Validate *before* opening: the previous order opened the file
        # first, so the ValueError path leaked an open file handle.
        if not closefd:
            raise ValueError("Cannot use closefd with filename")
        stream = open(source, mode="rb")
    elif isinstance(source, bytes):
        stream = io.BytesIO(source)
    else:
        stream = source
    return LasReader(stream, closefd=closefd)
Opens and reads the header of the las content in the source >>> with open_las('pylastests/simple.las') as f: ... print(f.header.point_format_id) 3 >>> f = open('pylastests/simple.las', mode='rb') >>> with open_las(f, closefd=False) as flas: ... print(flas.header) <LasHeader(1.2)> >>> f.closed False >>> f = open('pylastests/simple.las', mode='rb') >>> with open_las(f) as flas: ... las = flas.read() >>> f.closed True Parameters ---------- source : str or io.BytesIO if source is a str it must be a filename a stream if a file object with the methods read, seek, tell closefd: bool Whether the stream/file object shall be closed, this only work when using open_las in a with statement. An exception is raised if closefd is specified and the source is a filename Returns ------- pylas.lasreader.LasReader
def SingleModeCombine(pupils, modeDiameter=None):
    """
    Return the instantaneous coherent fluxes and photometric fluxes for a
    multiway single-mode fibre combiner
    """
    if modeDiameter is None:
        # Default mode size: 90% of the pupil's last-axis extent.
        modeDiameter = 0.9 * pupils.shape[-1]
    amplitudes = FibreCouple(pupils, modeDiameter)
    conj_amps = np.conj(amplitudes)
    fluxes = (amplitudes * conj_amps).real
    # One coherent flux per unordered pair of inputs (i > j).
    coherentFluxes = [
        amplitudes[i] * conj_amps[j]
        for i in range(1, len(amplitudes))
        for j in range(i)
    ]
    return fluxes, coherentFluxes
Return the instantaneous coherent fluxes and photometric fluxes for a multiway single-mode fibre combiner
def p_paramlist(self, p):
    # NOTE: the string below is the PLY/yacc grammar production for this
    # rule -- it is read from __doc__ at parser-build time and must not
    # be reworded.
    'paramlist : DELAY LPAREN params RPAREN'
    # p[3] holds the parsed ``params``; attach the DELAY token's line.
    p[0] = Paramlist(params=p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
paramlist : DELAY LPAREN params RPAREN
def get_value_of_splits_for_account(self, account_id: str) -> Decimal:
    """ Returns the sum of values for all splits for the given account """
    splits = self.get_split_for_account(account_id)
    # Start from Decimal(0) so an empty split list still yields a Decimal.
    return sum((split.value for split in splits), Decimal(0))
Returns the sum of values for all splits for the given account
def viewbox_mouse_event(self, event):
    """
    The viewbox received a mouse event; update transform accordingly.

    Parameters
    ----------
    event : instance of Event
        The event.
    """
    if event.handled or not self.interactive:
        return

    PerspectiveCamera.viewbox_mouse_event(self, event)

    if event.type == 'mouse_release':
        self._event_value = None  # Reset
    elif event.type == 'mouse_press':
        # NOTE(review): marking the press handled presumably ensures the
        # subsequent drag events are routed to this camera -- confirm.
        event.handled = True
    elif event.type == 'mouse_move':
        if event.press_event is None:
            return

        modifiers = event.mouse_event.modifiers
        p1 = event.mouse_event.press_event.pos
        p2 = event.mouse_event.pos
        d = p2 - p1  # drag delta in screen pixels

        if 1 in event.buttons and not modifiers:
            # Rotate
            self._update_rotation(event)

        elif 2 in event.buttons and not modifiers:
            # Zoom
            if self._event_value is None:
                self._event_value = (self._scale_factor, self._distance)
            # Exponential zoom based on vertical drag distance.
            zoomy = (1 + self.zoom_factor) ** d[1]

            self.scale_factor = self._event_value[0] * zoomy
            # Modify distance if its given
            if self._distance is not None:
                self._distance = self._event_value[1] * zoomy
            self.view_changed()

        elif 1 in event.buttons and keys.SHIFT in modifiers:
            # Translate
            norm = np.mean(self._viewbox.size)
            if self._event_value is None or len(self._event_value) == 2:
                self._event_value = self.center
            dist = (p1 - p2) / norm * self._scale_factor
            dist[1] *= -1
            # Black magic part 1: turn 2D into 3D translations
            dx, dy, dz = self._dist_to_trans(dist)
            # Black magic part 2: take up-vector and flipping into account
            ff = self._flip_factors
            up, forward, right = self._get_dim_vectors()
            dx, dy, dz = right * dx + forward * dy + up * dz
            dx, dy, dz = ff[0] * dx, ff[1] * dy, dz * ff[2]
            c = self._event_value
            self.center = c[0] + dx, c[1] + dy, c[2] + dz

        elif 2 in event.buttons and keys.SHIFT in modifiers:
            # Change fov, clamped to [0, 180] degrees.
            if self._event_value is None:
                self._event_value = self._fov
            fov = self._event_value - d[1] / 5.0
            self.fov = min(180.0, max(0.0, fov))
The viewbox received a mouse event; update transform accordingly. Parameters ---------- event : instance of Event The event.
def derelativise_url(url):
    '''
    Normalizes URLs, gets rid of .. and .
    '''
    parsed = six.moves.urllib.parse.urlparse(url)
    segments = []
    for chunk in parsed.path[1:].split('/'):
        if chunk == '.':
            # Current directory: contributes nothing to the path.
            continue
        if chunk == '..' or _fullmatch(r'\.{3,}', chunk) is not None:
            # Parent dir (runs of 3+ dots are treated the same way).
            # TODO: Verify this behaviour.
            segments = segments[:-1]
            continue
        segments.append(chunk)
    new_path = '/' + '/'.join(segments)
    return six.moves.urllib.parse.urlunparse(
        parsed[:2] + (new_path,) + parsed[3:])
Normalizes URLs, gets rid of .. and .
def read_actions():
    """Yields actions for pressed keys."""
    # Handle arrows, j/k (qwerty), and n/e (colemak).
    previous_keys = (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e')
    next_keys = (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n')
    abort_keys = (const.KEY_CTRL_C, 'q')
    select_keys = ('\n', '\r')

    while True:
        key = get_key()
        if key in previous_keys:
            yield const.ACTION_PREVIOUS
        elif key in next_keys:
            yield const.ACTION_NEXT
        elif key in abort_keys:
            yield const.ACTION_ABORT
        elif key in select_keys:
            yield const.ACTION_SELECT
Yields actions for pressed keys.
def get_branches(self, project=None, include_parent=None, include_children=None, include_deleted=None, include_links=None):
    """GetBranches.
    Get a collection of branch roots -- first-level children, branches with no parents.
    :param str project: Project ID or project name
    :param bool include_parent: Return the parent branch, if there is one. Default: False
    :param bool include_children: Return the child branches for each root branch. Default: False
    :param bool include_deleted: Return deleted branches. Default: False
    :param bool include_links: Return links. Default: False
    :rtype: [TfvcBranch]
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')

    # Table of (wire name, python name, value); only non-None entries are
    # serialized into the query string.
    optional_queries = (
        ('includeParent', 'include_parent', include_parent),
        ('includeChildren', 'include_children', include_children),
        ('includeDeleted', 'include_deleted', include_deleted),
        ('includeLinks', 'include_links', include_links),
    )
    query_parameters = {}
    for wire_name, py_name, value in optional_queries:
        if value is not None:
            query_parameters[wire_name] = self._serialize.query(py_name, value, 'bool')

    response = self._send(http_method='GET',
                          location_id='bc1f417e-239d-42e7-85e1-76e80cb2d6eb',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('[TfvcBranch]', self._unwrap_collection(response))
GetBranches. Get a collection of branch roots -- first-level children, branches with no parents. :param str project: Project ID or project name :param bool include_parent: Return the parent branch, if there is one. Default: False :param bool include_children: Return the child branches for each root branch. Default: False :param bool include_deleted: Return deleted branches. Default: False :param bool include_links: Return links. Default: False :rtype: [TfvcBranch]
def get_last_block(working_dir):
    """
    Get the last block processed.

    Return the integer on success
    Return None on error
    """
    # Pass this module as the implementation so the call works even if
    # virtualchain was never explicitly configured.
    return BlockstackDB.get_lastblock(sys.modules[__name__], working_dir)
Get the last block processed Return the integer on success Return None on error
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               bucket,
               key=None,
               keyid=None,
               verify_ssl=True,
               location=None,
               multiple_env=False,
               environment='base',
               prefix='',
               service_url=None,
               kms_keyid=None,
               s3_cache_expire=30,  # cache for 30 seconds
               s3_sync_on_update=True,  # sync cache on update rather than jit
               path_style=False,
               https_enable=True):
    '''
    Compile pillar data from files stored in an S3 bucket.

    The bucket contents are synced into a local cache directory and then
    compiled with the regular Pillar machinery.

    NOTE(review): the previous docstring ("Execute a command and read the
    output as YAML") described a different ext_pillar module and did not
    match this code.
    '''
    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location, path_style, https_enable)

    # normpath is needed to remove appended '/' if root is empty string.
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment,
                                               bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))

    # If this cache dir is already a configured pillar root, bail out to
    # avoid compiling the same data twice.
    if __opts__['pillar_roots'].get(environment, []) == [pillar_dir]:
        return {}

    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix,
                     s3_cache_expire)

    if s3_sync_on_update:
        # sync the buckets to the local cache
        log.info('Syncing local pillar cache from S3...')
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv,
                                                             file_path)
                    log.info('%s - %s : %s', bucket, saltenv, file_path)
                    # load the file from S3 if not in the cache or too old
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)
        log.info('Sync local pillar cache from S3 completed.')

    opts = deepcopy(__opts__)
    opts['pillar_roots'][environment] = [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]

    # Avoid recursively re-adding this same pillar
    opts['ext_pillar'] = [x for x in opts['ext_pillar'] if 's3' not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar(ext=False)
    return compiled_pillar
Compile pillar data from files stored in an S3 bucket, syncing the bucket contents to a local cache directory before compilation.
def map(self, mapper): """ Map categories using input correspondence (dict, Series, or function). Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.SparseArray([0, 1, 2]) >>> arr.apply(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) """ # this is used in apply. # We get hit since we're an "is_extension_type" but regular extension # types are not hit. This may be worth adding to the interface. if isinstance(mapper, ABCSeries): mapper = mapper.to_dict() if isinstance(mapper, abc.Mapping): fill_value = mapper.get(self.fill_value, self.fill_value) sp_values = [mapper.get(x, None) for x in self.sp_values] else: fill_value = mapper(self.fill_value) sp_values = [mapper(x) for x in self.sp_values] return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value)
Map categories using input correspondence (dict, Series, or function). Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.SparseArray([0, 1, 2]) >>> arr.apply(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.apply(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32)
def listDF(option='mostactive', token='', version=''):
    '''Returns an array of quotes for the top 10 symbols in a specified list.

    https://iexcloud.io/docs/api/#list
    Updated intraday

    Args:
        option (string); Option to query
        token (string); Access token
        version (string); API version

    Returns:
        DataFrame: result
    '''
    # NOTE: ``list`` here is the module-level helper of the same name
    # (shadowing the builtin), which fetches the quotes for ``option``.
    df = pd.DataFrame(list(option, token, version))
    _toDatetime(df)
    # Index the frame by ticker symbol.
    _reindex(df, 'symbol')
    return df
Returns an array of quotes for the top 10 symbols in a specified list. https://iexcloud.io/docs/api/#list Updated intraday Args: option (string); Option to query token (string); Access token version (string); API version Returns: DataFrame: result
def build_effects_to_residuals_matrix(num_seasons, dtype): """Build change-of-basis matrices for constrained seasonal effects. This method builds the matrix that transforms seasonal effects into effect residuals (differences from the mean effect), and additionally projects these residuals onto the subspace where the mean effect is zero. See `ConstrainedSeasonalStateSpaceModel` for mathematical details. Args: num_seasons: scalar `int` number of seasons. dtype: TensorFlow `dtype` for the returned values. Returns: effects_to_residuals: `Tensor` of shape `[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect = matmul(effects_to_residuals, seasonal_effects)`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `effects_to_residuals = P * R`. residuals_to_effects: the (pseudo)-inverse of the above; a `Tensor` of shape `[num_seasons, num_seasons-1]`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `residuals_to_effects = R^{-1} * P'`. """ # Build the matrix that converts effects `e_i` into differences from the mean # effect `(e_i - sum(e_i)) / num_seasons`, with the mean effect in the last # row so that the transformation is invertible. effects_to_residuals_fullrank = np.eye(num_seasons) - 1./num_seasons effects_to_residuals_fullrank[-1, :] = 1./num_seasons # compute mean effect residuals_to_effects_fullrank = np.linalg.inv(effects_to_residuals_fullrank) # Drop the final dimension, effectively setting the mean effect to zero. effects_to_residuals = effects_to_residuals_fullrank[:-1, :] residuals_to_effects = residuals_to_effects_fullrank[:, :-1] # Return Tensor values of the specified dtype. effects_to_residuals = tf.cast( effects_to_residuals, dtype=dtype, name='effects_to_residuals') residuals_to_effects = tf.cast( residuals_to_effects, dtype=dtype, name='residuals_to_effects') return effects_to_residuals, residuals_to_effects
Build change-of-basis matrices for constrained seasonal effects. This method builds the matrix that transforms seasonal effects into effect residuals (differences from the mean effect), and additionally projects these residuals onto the subspace where the mean effect is zero. See `ConstrainedSeasonalStateSpaceModel` for mathematical details. Args: num_seasons: scalar `int` number of seasons. dtype: TensorFlow `dtype` for the returned values. Returns: effects_to_residuals: `Tensor` of shape `[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect = matmul(effects_to_residuals, seasonal_effects)`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `effects_to_residuals = P * R`. residuals_to_effects: the (pseudo)-inverse of the above; a `Tensor` of shape `[num_seasons, num_seasons-1]`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `residuals_to_effects = R^{-1} * P'`.
def remove_filtered_edges(graph, edge_predicates=None):
    """Remove edges passing the given edge predicates.

    :param pybel.BELGraph graph: A BEL graph
    :param edge_predicates: A predicate or list of predicates
    :type edge_predicates: None or ((pybel.BELGraph, tuple, tuple, int) -> bool) or iter[(pybel.BELGraph, tuple, tuple, int) -> bool]
    :return: None; the graph is modified in place.
    """
    # Materialize the matching edges into a list first, so the graph is not
    # mutated while the filter generator is still iterating over it.
    edges = list(filter_edges(graph, edge_predicates=edge_predicates))
    graph.remove_edges_from(edges)
Remove edges passing the given edge predicates. :param pybel.BELGraph graph: A BEL graph :param edge_predicates: A predicate or list of predicates :type edge_predicates: None or ((pybel.BELGraph, tuple, tuple, int) -> bool) or iter[(pybel.BELGraph, tuple, tuple, int) -> bool]] :return:
def _check_timeouts(self):
    """Check if any operations in progress need to be timed out

    Adds the corresponding finish action that fails the request
    due to a timeout.
    """
    for conn_id, data in self._connections.items():
        # Only connections that carry an expired timeout object need action.
        if 'timeout' in data and data['timeout'].expired:
            # Fail the pending action that matches the connection's current
            # top-level state.
            if data['state'] == self.Connecting:
                self.finish_connection(conn_id, False, 'Connection attempt timed out')
            elif data['state'] == self.Disconnecting:
                self.finish_disconnection(conn_id, False, 'Disconnection attempt timed out')
            elif data['state'] == self.InProgress:
                # In-progress connections carry a microstate naming the
                # specific outstanding operation.
                if data['microstate'] == 'rpc':
                    # NOTE(review): this call passes two extra trailing
                    # arguments (None, None) that the 'open_interface'
                    # branch below does not -- confirm finish_operation's
                    # intended signature.
                    self.finish_operation(conn_id, False, 'RPC timed out without response', None, None)
                elif data['microstate'] == 'open_interface':
                    self.finish_operation(conn_id, False, 'Open interface request timed out')
Check if any operations in progress need to be timed out Adds the corresponding finish action that fails the request due to a timeout.
def EnumerateClasses(self, namespace=None, ClassName=None,
                     DeepInheritance=None, LocalOnly=None,
                     IncludeQualifiers=None, IncludeClassOrigin=None,
                     **extra):
    # pylint: disable=invalid-name,line-too-long
    """
    Enumerate the subclasses of a class, or the top-level classes in a
    namespace.

    This method performs the EnumerateClasses operation
    (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all
    methods performing such operations.

    If the operation succeeds, this method returns.
    Otherwise, this method raises an exception.

    Parameters:

      namespace (:term:`string`):
        Name of the namespace in which the classes are to be enumerated
        (case independent).

        Leading and trailing slash characters will be stripped. The lexical
        case will be preserved.

        If `None`, the namespace of the `ClassName` parameter will be used,
        if specified as a :class:`~pywbem.CIMClassName` object. If that is
        also `None`, the default namespace of the connection will be used.

      ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
        Name of the class whose subclasses are to be retrieved
        (case independent).

        If specified as a :class:`~pywbem.CIMClassName` object, its host
        attribute will be ignored.

        If `None`, the top-level classes in the namespace will be
        retrieved.

      DeepInheritance (:class:`py:bool`):
        Indicates that all (direct and indirect) subclasses of the
        specified class or of the top-level classes are to be included in
        the result, as follows:

        * If `False`, only direct subclasses of the specified class or
          only top-level classes are included in the result.
        * If `True`, all direct and indirect subclasses of the specified
          class or the top-level classes and all of their direct and
          indirect subclasses are included in the result.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default to be used. :term:`DSP0200`
          defines that the server-implemented default is `False`.

        Note, the semantics of the `DeepInheritance` parameter in
        :meth:`~pywbem.WBEMConnection.EnumerateInstances` is different.

      LocalOnly (:class:`py:bool`):
        Indicates that inherited properties, methods, and qualifiers are to
        be excluded from the returned classes, as follows.

        * If `False`, inherited elements are not excluded.
        * If `True`, inherited elements are excluded.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default to be used. :term:`DSP0200`
          defines that the server-implemented default is `True`.

      IncludeQualifiers (:class:`py:bool`):
        Indicates that qualifiers are to be included in the returned
        classes, as follows:

        * If `False`, qualifiers are not included.
        * If `True`, qualifiers are included.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default to be used. :term:`DSP0200`
          defines that the server-implemented default is `True`.

      IncludeClassOrigin (:class:`py:bool`):
        Indicates that class origin information is to be included on each
        property and method in the returned classes, as follows:

        * If `False`, class origin information is not included.
        * If `True`, class origin information is included.
        * If `None`, this parameter is not passed to the WBEM server, and
          causes the server-implemented default to be used. :term:`DSP0200`
          defines that the server-implemented default is `False`.

      **extra :
        Additional keyword arguments are passed as additional operation
        parameters to the WBEM server.
        Note that :term:`DSP0200` does not define any additional parameters
        for this operation.

    Returns:

        A list of :class:`~pywbem.CIMClass` objects that are
        representations of the enumerated classes, with their `path`
        attributes set.

    Raises:

        Exceptions described in :class:`~pywbem.WBEMConnection`.
    """
    exc = None
    classes = None
    method_name = 'EnumerateClasses'

    # Stage the call arguments with any configured operation recorders
    # before issuing the request.
    if self._operation_recorders:
        self.operation_recorder_reset()
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            namespace=namespace,
            ClassName=ClassName,
            DeepInheritance=DeepInheritance,
            LocalOnly=LocalOnly,
            IncludeQualifiers=IncludeQualifiers,
            IncludeClassOrigin=IncludeClassOrigin,
            **extra)

    try:
        # NOTE(review): if start_timer itself raised, `stats` would be
        # unbound when the finally clause runs -- confirm that cannot
        # happen.
        stats = self.statistics.start_timer(method_name)
        # A CIMClassName may carry the namespace when none was given.
        if namespace is None and isinstance(ClassName, CIMClassName):
            namespace = ClassName.namespace
        namespace = self._iparam_namespace_from_namespace(namespace)
        classname = self._iparam_classname(ClassName, 'ClassName')

        result = self._imethodcall(
            method_name,
            namespace,
            ClassName=classname,
            DeepInheritance=DeepInheritance,
            LocalOnly=LocalOnly,
            IncludeQualifiers=IncludeQualifiers,
            IncludeClassOrigin=IncludeClassOrigin,
            **extra)

        if result is None:
            classes = []
        else:
            classes = result[0][2]

        for klass in classes:
            if not isinstance(klass, CIMClass):
                raise CIMXMLParseError(
                    _format("Expecting CIMClass object in result list, "
                            "got {0} object", klass.__class__.__name__),
                    conn_id=self.conn_id)
            # The EnumerateClasses CIM-XML operation returns classes
            # as CLASS elements, which do not contain a class path.
            # We want to return classes with a path (that has a namespace),
            # so we create the class path and set its namespace to the
            # effective target namespace.
            klass.path = CIMClassName(
                classname=klass.classname, host=self.host,
                namespace=namespace)

        return classes

    except (CIMXMLParseError, XMLParseError) as exce:
        # Attach the raw request/reply to parse errors for debuggability.
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        exc = exce
        raise
    finally:
        # Always record timing/statistics and stage the result (or the
        # exception) with the recorders, success or failure.
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(classes, exc)
Enumerate the subclasses of a class, or the top-level classes in a namespace. This method performs the EnumerateClasses operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. If the operation succeeds, this method returns. Otherwise, this method raises an exception. Parameters: namespace (:term:`string`): Name of the namespace in which the classes are to be enumerated (case independent). Leading and trailing slash characters will be stripped. The lexical case will be preserved. If `None`, the namespace of the `ClassName` parameter will be used, if specified as a :class:`~pywbem.CIMClassName` object. If that is also `None`, the default namespace of the connection will be used. ClassName (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class whose subclasses are to be retrieved (case independent). If specified as a :class:`~pywbem.CIMClassName` object, its host attribute will be ignored. If `None`, the top-level classes in the namespace will be retrieved. DeepInheritance (:class:`py:bool`): Indicates that all (direct and indirect) subclasses of the specified class or of the top-level classes are to be included in the result, as follows: * If `False`, only direct subclasses of the specified class or only top-level classes are included in the result. * If `True`, all direct and indirect subclasses of the specified class or the top-level classes and all of their direct and indirect subclasses are included in the result. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `False`. Note, the semantics of the `DeepInheritance` parameter in :meth:`~pywbem.WBEMConnection.EnumerateInstances` is different. LocalOnly (:class:`py:bool`): Indicates that inherited properties, methods, and qualifiers are to be excluded from the returned classes, as follows. 
* If `False`, inherited elements are not excluded. * If `True`, inherited elements are excluded. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `True`. IncludeQualifiers (:class:`py:bool`): Indicates that qualifiers are to be included in the returned classes, as follows: * If `False`, qualifiers are not included. * If `True`, qualifiers are included. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `True`. IncludeClassOrigin (:class:`py:bool`): Indicates that class origin information is to be included on each property and method in the returned classes, as follows: * If `False`, class origin information is not included. * If `True`, class origin information is included. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `False`. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: A list of :class:`~pywbem.CIMClass` objects that are representations of the enumerated classes, with their `path` attributes set. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`.
def make_timing_logger(logger, precision=3, level=logging.DEBUG):
    """Return a context-manager factory that logs elapsed wall-clock time.

    Usage::

        >>> logger = logging.getLogger('foobar')
        >>> log_time = make_timing_logger(
        ...     logger, level=logging.INFO, precision=2)
        >>>
        >>> with log_time("hello %s", "world"):
        ...     time.sleep(1)
        INFO:foobar:hello world in 1.00s
    """
    # The precision is fixed at factory time, so the format suffix can be
    # built once up front.
    suffix = " in %0.{}fs".format(precision)

    @contextmanager
    def log_time(msg, *args):
        """Log `msg` and `*args`, appending naive wall-clock timing
        information, when the context block exits (even on error)."""
        began = time.time()
        try:
            yield
        finally:
            elapsed = time.time() - began
            logger.log(level, msg + suffix, *(args + (elapsed,)))

    return log_time
Return a timing logger. Usage:: >>> logger = logging.getLogger('foobar') >>> log_time = make_timing_logger( ... logger, level=logging.INFO, precision=2) >>> >>> with log_time("hello %s", "world"): ... time.sleep(1) INFO:foobar:hello world in 1.00s
def p_scope(self, p):
    'scope : identifier DOT'  # NOTE: this docstring IS the PLY grammar production; do not reword it.
    # Flatten any scope already attached to the identifier, then append the
    # identifier itself as the innermost scope label.
    scope = () if p[1].scope is None else p[1].scope.labellist
    p[0] = IdentifierScope(
        scope + (IdentifierScopeLabel(p[1].name, lineno=p.lineno(1)),),
        lineno=p.lineno(1))
    # Propagate the source line number to the production result.
    p.set_lineno(0, p.lineno(1))
scope : identifier DOT
def _unlock_temporarily(self):
    """
    Allow tokens to modify the world for the duration of a with-block.

    It's important that tokens only modify the world at appropriate times,
    otherwise the changes they make may not be communicated across the
    network to other clients.  To help catch and prevent these kinds of
    errors, the game engine keeps the world locked most of the time and
    only briefly unlocks it (using this method) when tokens are allowed to
    make changes.  When the world is locked, token methods that aren't
    marked as being read-only can't be called.  When the world is unlocked,
    any token method can be called.  These checks can be disabled by
    running python with optimization enabled.

    You should never call this method manually from within your own game.
    This method is intended to be used by the game engine, which was
    carefully designed to allow the world to be modified only when safe.
    Calling this method yourself disables an important safety check.
    """
    # Already unlocked (e.g. a nested call): yield without toggling the
    # flag so the outermost caller stays responsible for re-locking.
    if not self._is_locked:
        yield
    else:
        try:
            self._is_locked = False
            yield
        finally:
            # Re-lock even if the with-block raised.
            self._is_locked = True
Allow tokens to modify the world for the duration of a with-block. It's important that tokens only modify the world at appropriate times, otherwise the changes they make may not be communicated across the network to other clients. To help catch and prevent these kinds of errors, the game engine keeps the world locked most of the time and only briefly unlocks it (using this method) when tokens are allowed to make changes. When the world is locked, token methods that aren't marked as being read-only can't be called. When the world is unlocked, any token method can be called. These checks can be disabled by running python with optimization enabled. You should never call this method manually from within your own game. This method is intended to be used by the game engine, which was carefully designed to allow the world to be modified only when safe. Calling this method yourself disables an important safety check.
def _catch_nonjson_streamresponse(rawresponse): """ Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary """ # attempt to load response for return. try: response = json.loads(rawresponse) except (ValueError, TypeError): if rawresponse: response = { '_error': [ { 'message': 'Response not in JSON format.', 'data': rawresponse, } ] } else: # in case of null response, return empty dict. response = {} return response
Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary
def rename(self, from_name, to_name):
    """Renames an existing database.

    :param from_name: current database name
    :param to_name: new database name
    """
    log.info('renaming database from %s to %s' % (from_name, to_name))
    # The names are interpolated directly into the SQL statement, so callers
    # are expected to pass trusted identifiers.
    self._run_stmt('alter database %s rename to %s' % (from_name, to_name))
Renames an existing database.
def _edit_name(name, code, add_code=None, delete_end=False): """ Helping function for creating file names in .SAFE format :param name: initial string :type name: str :param code: :type code: str :param add_code: :type add_code: str or None :param delete_end: :type delete_end: bool :return: edited string :rtype: str """ info = name.split('_') info[2] = code if add_code is not None: info[3] = add_code if delete_end: info.pop() return '_'.join(info)
Helping function for creating file names in .SAFE format :param name: initial string :type name: str :param code: :type code: str :param add_code: :type add_code: str or None :param delete_end: :type delete_end: bool :return: edited string :rtype: str
def filter(self, all_records=False, **filters):
    """
    Applies given query filters.
    If wanted result is more than specified size,
    exception is raised about using all() method instead of filter.

    Args:
        all_records (bool): if True, skip the result-size guard entirely.
        **filters: Query filters as keyword arguments.

    Returns:
        Self. Queryset object.

    Examples:
        >>> Person.objects.filter(name='John') # same as .filter(name__exact='John')
        >>> Person.objects.filter(age__gte=16, name__startswith='jo')
        >>> # Assume u1 and u2 as related model instances.
        >>> Person.objects.filter(work_unit__in=[u1, u2], name__startswith='jo')
    """
    # Deep-copy so the original queryset remains unmodified; querysets are
    # chainable and may be reused by the caller.
    clone = copy.deepcopy(self)
    clone.adapter.add_query(filters.items())
    clone_length = clone.count()
    # Guard against accidentally pulling a huge result set; callers must
    # opt in with all_records=True (or use .all()).
    if clone_length > self._cfg['row_size'] and not all_records:
        raise Exception("""Your query result count(%s) is more than specified
        result value(%s). You can narrow your filters, you can apply
        your own pagination or you can use all() method
        for getting all filter results.

        Example Usage: Unit.objects.all()

        Filters: %s
        Model Class: %s
        """ % (clone_length, self._cfg['row_size'], filters, self._cfg['model_class']))
    return clone
Applies given query filters. If wanted result is more than specified size, exception is raised about using all() method instead of filter. Args: all_records (bool): **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Examples: >>> Person.objects.filter(name='John') # same as .filter(name__exact='John') >>> Person.objects.filter(age__gte=16, name__startswith='jo') >>> # Assume u1 and u2 as related model instances. >>> Person.objects.filter(work_unit__in=[u1, u2], name__startswith='jo')
def create_certificate_id(vault, name, version=None):
    """Create an identifier for a Key Vault certificate.

    :param vault: The vault uri.
    :type vault: str
    :param name: The certificate name.
    :type name: str
    :param version: The certificate version.
    :type version: str
    :rtype: KeyVaultId
    """
    # Thin factory wrapper around CertificateId.
    return CertificateId(vault=vault, name=name, version=version)
:param vault: The vault uri. :type vault: str :param name: The certificate name. :type name: str :param version: The certificate version. :type version: str :rtype: KeyVaultId
def get_qseq_dir(fc_dir):
    """Retrieve the qseq directory within Solexa flowcell output.

    Returns ``Data/Intensities/BaseCalls`` under ``fc_dir`` when that
    directory exists; otherwise assumes ``fc_dir`` is already the qseq
    directory and returns it unchanged.
    """
    qseq_dir = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if not os.path.exists(qseq_dir):
        # XXX What other cases can we end up with here?
        qseq_dir = fc_dir
    return qseq_dir
Retrieve the qseq directory within Solexa flowcell output.
def spectrogram_from_file(filename, step=10, window=20, max_freq=None, eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
        overwrite (bool): Recompute the features even when a cached .csv
            file already exists
        save_feature_as_csvfile (bool): Persist the computed features to a
            sibling .csv file for reuse on later calls
    """
    # Features are cached next to the audio file; reuse the cache unless the
    # caller asks to overwrite it.
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            if audio.ndim >= 2:
                # Mix multi-channel audio down to mono.
                audio = np.mean(audio, 1)
            if max_freq is None:
                # Default to the Nyquist frequency.
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            # Convert millisecond parameters to sample counts.
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            # Keep only the bins at or below max_freq, then take the log
            # (eps guards against log(0)).
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
Calculate the log of linear spectrogram from FFT energy Params: filename (str): Path to the audio file step (int): Step size in milliseconds between windows window (int): FFT window size in milliseconds max_freq (int): Only FFT bins corresponding to frequencies between [0, max_freq] are returned eps (float): Small value to ensure numerical stability (for ln(x))
def parse_repeating_time_interval_to_str(date_str):
    """Return a human-readable description of a repetition interval.

    TODO: For now this only interprets a fixed list of intervals
    (read from the accrualPeriodicity schema file). It should be able to
    parse any case.
    """
    # Map of interval id -> description, loaded from the schema directory.
    with open(os.path.join(ABSOLUTE_SCHEMA_DIR, "accrualPeriodicity.json"), "r") as f:
        freqs_map = {freq["id"]: freq["description"] for freq in json.load(f)}
    # Raises KeyError for intervals not in the fixed list.
    return freqs_map[date_str]
Devuelve descripción humana de un intervalo de repetición. TODO: Por ahora sólo interpreta una lista fija de intervalos. Debería poder parsear cualquier caso.
def get_instance(self):
    """Get the Streams instance that owns this view.

    Returns:
        Instance: Streams instance owning this view.
    """
    # self.instance is passed to make_request -- presumably the REST
    # resource URL of the owning instance (TODO confirm against caller).
    return Instance(self.rest_client.make_request(self.instance), self.rest_client)
Get the Streams instance that owns this view. Returns: Instance: Streams instance owning this view.
def ldap_server_definitions(self): """ :class:`~zhmcclient.LdapServerDefinitionManager`: Access to the :term:`LDAP Server Definitions <LDAP Server Definition>` in this Console. """ # We do here some lazy loading. if not self._ldap_server_definitions: self._ldap_server_definitions = LdapServerDefinitionManager(self) return self._ldap_server_definitions
:class:`~zhmcclient.LdapServerDefinitionManager`: Access to the :term:`LDAP Server Definitions <LDAP Server Definition>` in this Console.
def forms(self, req, tag):
    """
    Make and return some forms, using L{self.parameter.getInitialLiveForms}.

    @return: some subforms.
    @rtype: C{list} of L{LiveForm}
    """
    liveForms = self.parameter.getInitialLiveForms()
    # Parent each subform to this fragment so it can render within it.
    for liveForm in liveForms:
        liveForm.setFragmentParent(self)
    return liveForms
Make and return some forms, using L{self.parameter.getInitialLiveForms}. @return: some subforms. @rtype: C{list} of L{LiveForm}
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False, iface=None, replay_args=None,  # noqa: E501
              parse_results=False):
    """Send packets at layer 2 using tcpreplay for performance
    pps: packets per second
    mbps: MBits per second
    realtime: use packet's timestamp, bending time with real-time value
    loop: number of times to process the packet list
    file_cache: cache packets in RAM instead of reading from disk at each iteration  # noqa: E501
    iface: output interface
    replay_args: List of additional tcpreplay args (List[str])
    parse_results: Return a dictionary of information outputted by tcpreplay (default=False)  # noqa: E501
    :returns stdout, stderr, command used"""
    if iface is None:
        iface = conf.iface
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface]
    # Exactly one rate-control option is used; the default is top speed.
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%f" % realtime)
    else:
        argv.append("--topspeed")

    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--preload-pcap")

    # Check for any additional args we didn't cover.
    if replay_args is not None:
        argv.extend(replay_args)

    # Write the packets to a temporary pcap that tcpreplay will read.
    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    results = None
    with ContextManagerSubprocess("sendpfast()", conf.prog.tcpreplay):
        try:
            cmd = subprocess.Popen(argv, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception:
            # Clean up the temp pcap before propagating the failure.
            os.unlink(f)
            raise
        else:
            stdout, stderr = cmd.communicate()
            if stderr:
                log_runtime.warning(stderr.decode())
            if parse_results:
                results = _parse_tcpreplay_result(stdout, stderr, argv)
            elif conf.verb > 2:
                log_runtime.info(stdout.decode())
    os.unlink(f)
    return results
Send packets at layer 2 using tcpreplay for performance pps: packets per second mbps: MBits per second realtime: use packet's timestamp, bending time with real-time value loop: number of times to process the packet list file_cache: cache packets in RAM instead of reading from disk at each iteration # noqa: E501 iface: output interface replay_args: List of additional tcpreplay args (List[str]) parse_results: Return a dictionary of information outputted by tcpreplay (default=False) # noqa: E501 :returns stdout, stderr, command used
def train(self, training_set, iterations=500):
    """Trains itself using the sequence data."""
    # Two input layouts are supported (distinguished by length -- TODO
    # confirm this heuristic against callers):
    #   * a list of (features, label) examples when len > 2, or
    #   * a (features, labels) pair of parallel sequences otherwise.
    if len(training_set) > 2:
        self.__X = np.matrix([example[0] for example in training_set])

        if self.__num_labels == 1:
            self.__y = np.matrix([example[1] for example in training_set]).reshape((-1, 1))
        else:
            # One-hot encode multi-class labels via rows of the identity.
            eye = np.eye(self.__num_labels)
            self.__y = np.matrix([eye[example[1]] for example in training_set])
    else:
        self.__X = np.matrix(training_set[0])

        if self.__num_labels == 1:
            self.__y = np.matrix(training_set[1]).reshape((-1, 1))
        else:
            eye = np.eye(self.__num_labels)
            self.__y = np.matrix([eye[index] for sublist in training_set[1] for index in sublist])

    self.__m = self.__X.shape[0]
    self.__input_layer_size = self.__X.shape[1]
    # Layer sizes: input, hidden layers, output.
    self.__sizes = [self.__input_layer_size]
    self.__sizes.extend(self.__hidden_layers)
    self.__sizes.append(self.__num_labels)

    initial_theta = []

    for count in range(len(self.__sizes) - 1):
        # Random weight initialization in [-epsilon, epsilon], scaled by
        # layer fan-in/fan-out; +1 column accounts for the bias unit.
        epsilon = np.sqrt(6) / np.sqrt(self.__sizes[count]+self.__sizes[count+1])
        initial_theta.append(np.random.rand(self.__sizes[count+1],self.__sizes[count]+1)*2*epsilon-epsilon)

    initial_theta = self.__unroll(initial_theta)

    # Minimize the cost with BFGS, then fold the flat parameter vector back
    # into per-layer weight matrices.
    self.__thetas = self.__roll(fmin_bfgs(self.__cost_function, initial_theta, fprime=self.__cost_grad_function, maxiter=iterations))
Trains itself using the sequence data.
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL with a rpm package

    @param key: A key to be added to the system's keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if source.startswith('http'):
        directory = '/etc/yum.repos.d/'
        for filename in os.listdir(directory):
            # Skip adding the repo if any existing .repo file already
            # references this URL.
            with open(directory + filename, 'r') as rpm_file:
                if source in rpm_file.read():
                    break
        else:
            log("Add source: {!r}".format(source))
            # write in the charms.repo
            # NOTE(review): source[7:] assumes a 7-character 'http://'
            # prefix; an 'https://' URL would be sliced one character
            # short -- confirm intended behavior.
            with open(directory + 'Charms.repo', 'a') as rpm_file:
                rpm_file.write('[%s]\n' % source[7:].replace('/', '_'))
                rpm_file.write('name=%s\n' % source[7:])
                rpm_file.write('baseurl=%s\n\n' % source)
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armored key: import it from a temporary file.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['rpm', '--import', key_file.name])
        else:
            # Bare key id: let rpm resolve it.
            subprocess.check_call(['rpm', '--import', key])
Add a package source to this system. @param source: a URL with a rpm package @param key: A key to be added to the system's keyring and used to verify the signatures on packages. Ideally, this should be an ASCII format GPG public key including the block headers. A GPG key id may also be used, but be aware that only insecure protocols are available to retrieve the actual public key from a public keyserver placing your Juju environment at risk.
def p_scalar__folded(self, p):
    """
    scalar : B_FOLD_START scalar_group B_FOLD_END
    """
    # NOTE: the docstring above IS the PLY grammar production; do not
    # reword it.
    # Join the scalar pieces, strip the common indentation, re-fold the
    # text, and normalize to exactly one trailing newline.
    scalar_group = ''.join(p[2])
    folded_scalar = fold(dedent(scalar_group)).rstrip()
    p[0] = ScalarDispatch('%s\n' % folded_scalar, cast='str')
scalar : B_FOLD_START scalar_group B_FOLD_END
def _from_float(cls, xmin, xmax, ymin, ymax): """ Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12) """ ixmin = int(np.floor(xmin + 0.5)) ixmax = int(np.ceil(xmax + 0.5)) iymin = int(np.floor(ymin + 0.5)) iymax = int(np.ceil(ymax + 0.5)) return cls(ixmin, ixmax, iymin, iymax)
Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12)
def find_referenced_subfiles(self, directory):
    """
    Return list of files below specified `directory` in job inputs. Could
    use more sophisticated logic (match quotes to handle spaces, handle
    subdirectories, etc...).

    **Parameters**

    directory : str
        Full path to directory to search.

    """
    if directory is None:
        return []
    # Match an (optionally quoted) path that starts with `directory` plus
    # the path separator and runs to the next whitespace or quote.
    pattern = r'''[\'\"]?(%s%s[^\s\'\"]+)[\'\"]?''' % (escape(directory), escape(sep))
    return self.find_pattern_references(pattern)
Return list of files below specified `directory` in job inputs. Could use more sophisticated logic (match quotes to handle spaces, handle subdirectories, etc...). **Parameters** directory : str Full path to directory to search.
def build(self, builder):
    """Build XML by appending to builder"""
    # Attributes for the EditCheckDef element; booleans are converted by
    # bool_to_true_false (presumably to "TRUE"/"FALSE" strings -- confirm).
    params = dict(
        OID=self.oid,
        Active=bool_to_true_false(self.active),
        BypassDuringMigration=bool_to_true_false(self.bypass_during_migration),
        NeedsRetesting=bool_to_true_false(self.needs_retesting),
    )
    builder.start("mdsol:EditCheckDef", params)
    # Emit the check steps first, then the check actions, as child elements.
    for step in self.check_steps:
        step.build(builder)
    for action in self.check_actions:
        action.build(builder)
    builder.end("mdsol:EditCheckDef")
Build XML by appending to builder
def federation(self):
    """returns the class that controls federation"""
    url = self._url + "/federation"
    # Reuse this object's security handler and proxy settings for the
    # federation sub-endpoint.
    return _Federation(url=url,
                       securityHandler=self._securityHandler,
                       proxy_url=self._proxy_url,
                       proxy_port=self._proxy_port)
returns the class that controls federation
def get_submission_ids(self, tournament=1):
    """Get dict with username->submission_id mapping.

    Args:
        tournament (int): ID of the tournament (optional, defaults to 1)

    Returns:
        dict: username->submission_id mapping, string->string

    Example:
        >>> NumerAPI().get_submission_ids()
        {'1337ai': '93c46857-fed9-4594-981e-82db2b358daf',
         '1x0r': '108c7601-822c-4910-835d-241da93e2e24',
         ...
         }
    """
    query = """
      query($tournament: Int!) {
        rounds(tournament: $tournament
               number: 0) {
          leaderboard {
            username
            submissionId
          }
        }
      }
    """
    response = self.raw_query(query, {'tournament': tournament})
    # Only the current round (number 0) is requested.
    current_round = response['data']['rounds'][0]
    if current_round is None:
        return None
    return {entry['username']: entry['submissionId']
            for entry in current_round['leaderboard']}
Get dict with username->submission_id mapping. Args: tournament (int): ID of the tournament (optional, defaults to 1) Returns: dict: username->submission_id mapping, string->string Example: >>> NumerAPI().get_submission_ids() {'1337ai': '93c46857-fed9-4594-981e-82db2b358daf', '1x0r': '108c7601-822c-4910-835d-241da93e2e24', ... }
def inv(self):
    """The inverse transformation"""
    # The inverse of (r, t) is (r^T, r^T . -t); hoist the transpose so it
    # is computed once.
    r_inv = self.r.transpose()
    inverse = Complete(r_inv, np.dot(r_inv, -self.t))
    # Link back so inverting the inverse returns this object.
    inverse._cache_inv = self
    return inverse
The inverse transformation
def GetRootFileEntry(self):
    """Retrieves the root file entry.

    Returns:
      FakeFileEntry: a file entry or None if not available.
    """
    return self.GetFileEntryByPathSpec(
        fake_path_spec.FakePathSpec(location=self.LOCATION_ROOT))
Retrieves the root file entry. Returns: FakeFileEntry: a file entry or None if not available.
def update_index(self, project_name, logstore_name, index_detail):
    """ update index for a logstore
    An unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type index_detail: IndexConfig
    :param index_detail: the index config detail used to update index

    :return: UpdateIndexResponse

    :raise: LogException
    """
    resource = "/logstores/" + logstore_name + "/index"
    body = six.b(json.dumps(index_detail.to_json()))
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(body)),
    }
    (resp, header) = self._send("PUT", project_name, body, resource, {}, headers)
    return UpdateIndexResponse(header, resp)
update index for a logstore Unsuccessful operation will cause a LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type index_detail: IndexConfig :param index_detail: the index config detail used to update index :return: UpdateIndexResponse :raise: LogException
def run_bang(alt_args=None):
    """
    Runs bang with optional list of strings as command line options.

    If ``alt_args`` is not specified, defaults to parsing ``sys.argv``
    for command line options.
    """
    parser = get_parser()
    args = parser.parse_args(alt_args)
    source = args.config_specs or get_env_configs()
    if not source:
        # Nothing to do without any config specs.
        return
    config = Config.from_config_specs(source)
    if args.playbooks:
        config[A.PLAYBOOKS] = args.playbooks
    if args.dump_config:
        # Dump the merged config in the requested format, then exit.
        # NOTE: the originals were Python 2 ``print`` statements; the
        # single-argument ``print(...)`` form below behaves identically
        # on both Python 2 and Python 3.
        if args.dump_config in ('yaml', 'yml'):
            import yaml
            print(yaml.safe_dump(dict(config)))
        elif args.dump_config == 'json':
            import json
            print(json.dumps(config))
        else:
            print(config)
        sys.exit()
    set_ssh_creds(config, args)
    annoy(config)
    stack = Stack(config)
    if args.ansible_list:
        # Emit the inventory; pretty-print only when stdout is a TTY.
        stack.show_inventory(os.isatty(sys.stdout.fileno()))
        return
    initialize_logging(config)
    # TODO: config.validate()
    if args.deploy:
        stack.deploy()
    if args.configure:
        stack.configure()
    config.autoinc()
Runs bang with optional list of strings as command line options. If ``alt_args`` is not specified, defaults to parsing ``sys.argv`` for command line options.
def OnTableChanged(self, event):
    """Table changed event handler

    Updates the displayed value when the event carries a ``table``
    attribute, restores the saved cursor position, and lets the event
    propagate.
    """
    _missing = object()
    table = getattr(event, 'table', _missing)
    if table is not _missing:
        self.SetValue(table)
    wx.TextCtrl.SetInsertionPoint(self, self.cursor_pos)
    event.Skip()
Table changed event handler