code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def getchild(self, name, parent):
    """Look up a child of *parent* by name.

    Names prefixed with ``@`` are resolved as attributes (the prefix is
    stripped before lookup); any other name is resolved as a child node.
    """
    #log.debug('searching parent (%s) for (%s)', Repr(parent), name)
    is_attribute = name.startswith('@')
    if is_attribute:
        return parent.get_attribute(name[1:])
    return parent.get_child(name)
get a child by name
def tgread_date(self):
    """Read a Telegram-encoded Unix timestamp and return it as an aware
    UTC ``datetime``.

    A stored value of ``0`` denotes "no date" and yields ``None``.
    """
    timestamp = self.read_int()
    if timestamp == 0:
        return None
    return datetime.fromtimestamp(timestamp, tz=timezone.utc)
Reads and converts Unix time (used by Telegram) into a Python datetime object.
def gev_expval(xi, mu=0, sigma=1):
    """Expected value of the generalized extreme value distribution.

    Computed as ``mu - sigma/xi + (sigma/xi) * Gamma(1 - xi)``, where the
    gamma function is evaluated by the Fortran helper ``flib.gamfun``.
    """
    scale_ratio = sigma / xi
    return mu - scale_ratio + scale_ratio * flib.gamfun(1 - xi)
Expected value of generalized extreme value distribution.
def transformer_from_metric(metric, tol=None):
    """Returns the transformation matrix from the Mahalanobis matrix.

    Returns the transformation matrix from the Mahalanobis matrix, i.e. the
    matrix L such that metric=L.T.dot(L).

    Parameters
    ----------
    metric : symmetric `np.ndarray`, shape=(d x d)
        The input metric, from which we want to extract a transformation matrix.

    tol : positive `float`, optional
        Eigenvalues of `metric` between 0 and - tol are considered zero. If tol is
        None, and w_max is `metric`'s largest eigenvalue, and eps is the epsilon
        value for datatype of w, then tol is set to w_max * metric.shape[0] * eps.

    Returns
    -------
    L : np.ndarray, shape=(d x d)
        The transformation matrix, such that L.T.dot(L) == metric.
    """
    if not np.allclose(metric, metric.T):
        raise ValueError("The input metric should be symmetric.")
    # If M is diagonal, we will just return the elementwise square root:
    if np.array_equal(metric, np.diag(np.diag(metric))):
        # Validate PSD-ness of the diagonal (the eigenvalues of a diagonal
        # matrix are its diagonal entries); negatives beyond tol raise here.
        _check_sdp_from_eigen(np.diag(metric), tol)
        return np.diag(np.sqrt(np.maximum(0, np.diag(metric))))
    else:
        try:
            # if `M` is positive semi-definite, it will admit a Cholesky
            # decomposition: L = cholesky(M).T
            return np.linalg.cholesky(metric).T
        except LinAlgError:
            # However, currently np.linalg.cholesky does not support indefinite
            # matrices. So if the latter does not work we will return L = V.T w^(
            # -1/2), with M = V*w*V.T being the eigenvector decomposition of M with
            # the eigenvalues in the diagonal matrix w and the columns of V being the
            # eigenvectors.
            # NOTE(review): LinAlgError is assumed to be imported at module
            # level from numpy.linalg -- confirm against the file header.
            w, V = np.linalg.eigh(metric)
            _check_sdp_from_eigen(w, tol)
            return V.T * np.sqrt(np.maximum(0, w[:, None]))
Returns the transformation matrix from the Mahalanobis matrix. Returns the transformation matrix from the Mahalanobis matrix, i.e. the matrix L such that metric=L.T.dot(L). Parameters ---------- metric : symmetric `np.ndarray`, shape=(d x d) The input metric, from which we want to extract a transformation matrix. tol : positive `float`, optional Eigenvalues of `metric` between 0 and - tol are considered zero. If tol is None, and w_max is `metric`'s largest eigenvalue, and eps is the epsilon value for datatype of w, then tol is set to w_max * metric.shape[0] * eps. Returns ------- L : np.ndarray, shape=(d x d) The transformation matrix, such that L.T.dot(L) == metric.
def add_arguments(self, parser):
    """Register this command's options on *parser*.

    Uses argparse syntax; see
    https://docs.python.org/3/library/argparse.html.
    """
    option_specs = (
        (('--start', '-s'), 0,
         u"The Submission.id at which to begin updating rows. 0 by default."),
        (('--chunk', '-c'), 1000,
         u"Batch size, how many rows to update in a given transaction. Default 1000."),
        (('--wait', '-w'), 2,
         u"Wait time between transactions, in seconds. Default 2."),
    )
    for flags, default, help_text in option_specs:
        parser.add_argument(*flags, default=default, type=int, help=help_text)
Add arguments to the command parser. Uses argparse syntax. See documentation at https://docs.python.org/3/library/argparse.html.
def make_router(*routings):
    """Return a WSGI application that dispatches requests to controllers.

    Each routing is a tuple ``(methods, regex, app[, vars])``:
    ``methods`` is an HTTP method name (or sequence of names, or ``None``
    to accept any method), ``regex`` is matched against ``PATH_INFO``,
    ``app`` is the WSGI controller delegated to, and the optional ``vars``
    dict is merged into ``req.urlvars`` on a match.
    """
    routes = []
    for routing in routings:
        methods, regex, app = routing[:3]
        if isinstance(methods, basestring):
            # A single method name is shorthand for a one-element tuple.
            methods = (methods,)
        vars = routing[3] if len(routing) >= 4 else {}
        routes.append((methods, re.compile(unicode(regex)), app, vars))

    def router(environ, start_response):
        """Dispatch request to controllers."""
        req = webob.Request(environ)
        split_path_info = req.path_info.split('/')
        if split_path_info[0]:
            # When path_info doesn't start with a "/" this is an error or a attack => Reject request.
            # An example of an URL with such a invalid path_info: http://127.0.0.1http%3A//127.0.0.1%3A80/result?...
            ctx = contexts.Ctx(req)
            headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
            return wsgihelpers.respond_json(ctx,
                dict(
                    apiVersion = 1,
                    error = dict(
                        code = 400,  # Bad Request
                        message = ctx._(u"Invalid path: {0}").format(req.path_info),
                        ),
                    ),
                headers = headers,
                )(environ, start_response)
        for methods, regex, app, vars in routes:
            match = regex.match(req.path_info)
            if match is not None:
                if methods is not None and req.method not in methods:
                    ctx = contexts.Ctx(req)
                    headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
                    return wsgihelpers.respond_json(ctx,
                        dict(
                            apiVersion = 1,
                            error = dict(
                                code = 405,  # Method Not Allowed
                                message = ctx._(u"You cannot use HTTP {} to access this URL. \nUse one of {}.").format(
                                    req.method, methods),
                                ),
                            ),
                        headers = headers,
                        )(environ, start_response)
                if getattr(req, 'urlvars', None) is None:
                    req.urlvars = {}
                req.urlvars.update(match.groupdict())
                req.urlvars.update(vars)
                # Shift the matched prefix from path_info into script_name so
                # the delegated app only sees the remaining path.
                req.script_name += req.path_info[:match.end()]
                req.path_info = req.path_info[match.end():]
                return app(req.environ, start_response)
        ctx = contexts.Ctx(req)
        headers = wsgihelpers.handle_cross_origin_resource_sharing(ctx)
        return wsgihelpers.respond_json(ctx,
            dict(
                apiVersion = 1,
                error = dict(
                    code = 404,  # Not Found
                    message = ctx._(u"Path not found: {0}").format(req.path_info),
                    ),
                ),
            headers = headers,
            )(environ, start_response)
    return router
Return a WSGI application that dispatches requests to controllers
def get_grade_entries_for_gradebook_column_on_date(self, gradebook_column_id, from_, to):
    """Gets a ``GradeEntryList`` for the given gradebook column whose
    effective period overlaps the given date range (inclusive, but not
    confined to the range).

    arg:    gradebook_column_id (osid.id.Id): a gradebook column ``Id``
    arg:    from (osid.calendaring.DateTime): start of date range
    arg:    to (osid.calendaring.DateTime): end of date range
    return: (osid.grading.GradeEntryList) - the returned ``GradeEntry`` list
    raise:  InvalidArgument - ``from`` is greater than ``to``
    raise:  NullArgument - ``gradebook_column_id, from, or to`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date
    overlapping = [
        entry
        for entry in self.get_grade_entries_for_gradebook_column(gradebook_column_id)
        if overlap(from_, to, entry.start_date, entry.end_date)
    ]
    return objects.GradeEntryList(overlapping, runtime=self._runtime)
Gets a ``GradeEntryList`` for the given gradebook column and effective during the entire given date range inclusive but not confined to the date range. arg: gradebook_column_id (osid.id.Id): a gradebook column ``Id`` arg: from (osid.calendaring.DateTime): start of date range arg: to (osid.calendaring.DateTime): end of date range return: (osid.grading.GradeEntryList) - the returned ``GradeEntry`` list raise: InvalidArgument - ``from`` is greater than ``to`` raise: NullArgument - ``gradebook_column_id, from, or to`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def substr(requestContext, seriesList, start=0, stop=0):
    """
    Takes one metric or a wildcard seriesList followed by 1 or 2 integers.

    Treats each metric name as a dot-separated array and rewrites the name
    to elements ``start..end`` (or ``start..stop`` when both are given).
    Element numbering starts at 0.

    Example::

        &target=substr(carbon.agents.hostname.avgUpdateTime,2,4)

    The label would be printed as "hostname.avgUpdateTime".
    """
    first, last = int(start), int(stop)
    for series in seriesList:
        # Strip a wrapping function call: keep only the text between the
        # last '(' and the first ')'.
        open_idx = series.name.rfind('(') + 1
        close_idx = series.name.find(')')
        if close_idx < 0:
            close_idx = len(series.name) + 1
        parts = series.name[open_idx:close_idx].split('.')
        if last == 0:
            series.name = '.'.join(parts[first:])
        else:
            series.name = '.'.join(parts[first:last])
        # substr(func(a.b,'c'),1) becomes b instead of b,'c'
        series.name = re.sub(',.*$', '', series.name)
    return seriesList
Takes one metric or a wildcard seriesList followed by 1 or 2 integers. Assume that the metric name is a list or array, with each element separated by dots. Prints n - length elements of the array (if only one integer n is passed) or n - m elements of the array (if two integers n and m are passed). The list starts with element 0 and ends with element (length - 1). Example:: &target=substr(carbon.agents.hostname.avgUpdateTime,2,4) The label would be printed as "hostname.avgUpdateTime".
def raw_data(self, event):
    """Handles incoming raw sensor data.

    :param event: Raw sentences incoming data; ``event.data[0]`` is the
        NMEA timestamp and ``event.data[1]`` the raw sentence.
    """
    self.log('Received raw data from bus', lvl=events)
    if not parse:
        # The NMEA parser dependency is unavailable; nothing can be done.
        return
    nmea_time = event.data[0]
    try:
        parsed_data = parse(event.data[1])
    except Exception as e:
        self.log('Unparseable sentence:', event.data[1], e, type(e), exc=True, lvl=warn)
        # NOTE(review): this adds the event object via ``+=``; if
        # ``self.unparsable`` is a counter this should likely be ``+= 1`` --
        # confirm the attribute's type.
        self.unparsable += event
        return
    bus = event.bus
    sensor_data_package = self._handle(parsed_data)
    self.log("Sensor data:", sensor_data_package, lvl=verbose)
    if sensor_data_package:
        # pprint(sensor_data_package)
        # Forward the decoded package to the navdata channel.
        self.fireEvent(sensordata(sensor_data_package, nmea_time, bus), "navdata")
Handles incoming raw sensor data :param event: Raw sentences incoming data
def _prepare_request(self, url, method, headers, data):
    """Build an ``httpclient.HTTPRequest`` from the call arguments plus the
    client's stored timeouts, credentials and TLS settings.

    :param str url: request URL.
    :param str method: request method.
    :param dict headers: request headers.
    :param object data: JSON-encodable object.
    :rtype: httpclient.HTTPRequest
    """
    settings = dict(
        url=url,
        method=method,
        headers=headers,
        body=data,
        connect_timeout=self._connect_timeout,
        request_timeout=self._request_timeout,
        auth_username=self._username,
        auth_password=self._password,
        client_cert=self._client_cert,
        client_key=self._client_key,
        ca_certs=self._ca_certs,
        validate_cert=self._verify_cert,
    )
    return httpclient.HTTPRequest(**settings)
Prepare HTTP request. :param str url: request URL. :param str method: request method. :param dict headers: request headers. :param object data: JSON-encodable object. :rtype: httpclient.HTTPRequest
def create(cls, pid_type=None, pid_value=None, object_type=None,
           object_uuid=None, status=None, **kwargs):
    """Create a provider instance wrapping a new persistent identifier.

    :param pid_type: Persistent identifier type. Falls back to
        ``cls.pid_type`` when not given. (Default: None).
    :param pid_value: Persistent identifier value; required. (Default: None).
    :param status: Current PID status. Falls back to ``cls.default_status``.
    :param object_type: The object type is a string that identify its type.
        (Default: None).
    :param object_uuid: The object UUID. (Default: None).
    :returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
        instance.
    """
    assert pid_value
    assert pid_type or cls.pid_type
    identifier = PersistentIdentifier.create(
        pid_type or cls.pid_type,
        pid_value,
        pid_provider=cls.pid_provider,
        object_type=object_type,
        object_uuid=object_uuid,
        status=status or cls.default_status,
    )
    return cls(identifier, **kwargs)
Create a new instance for the given type and pid. :param pid_type: Persistent identifier type. (Default: None). :param pid_value: Persistent identifier value. (Default: None). :param status: Current PID status. (Default: :attr:`invenio_pidstore.models.PIDStatus.NEW`) :param object_type: The object type is a string that identify its type. (Default: None). :param object_uuid: The object UUID. (Default: None). :returns: A :class:`invenio_pidstore.providers.base.BaseProvider` instance.
def _read_country_names(self, countries_file=None): """Read list of countries from specified country file or default file.""" if not countries_file: countries_file = os.path.join(os.path.dirname(__file__), self.COUNTRY_FILE_DEFAULT) with open(countries_file) as f: countries_json = f.read() return json.loads(countries_json)
Read list of countries from specified country file or default file.
def _read_layers(self, layers, image_id):
    """Collect the layer IDs making up *image_id* into *layers*.

    Walks ``self.docker.history(image_id)`` and appends each entry's
    ``Id`` to the supplied ``layers`` list (mutated in place); returns
    ``None``.
    """
    for layer in self.docker.history(image_id):
        layers.append(layer['Id'])
Reads the JSON metadata for specified layer / image id
def get_behave_args(self, argv=sys.argv):
    """Extract the command-line arguments meant for behave.

    Arguments the management-command parser does not recognize are passed
    through; any ``--behave-`` prefixed option is rewritten to its plain
    behave form (``--behave-x`` -> ``-x``, ``--behave-xyz`` -> ``--xyz``).
    """
    parser = BehaveArgsHelper().create_parser('manage.py', 'behave')
    _, unknown = parser.parse_known_args(argv[2:])

    behave_args = []
    for raw_option in unknown:
        if raw_option.startswith('--behave-'):
            # Remove behave prefix and restore single/double dash form.
            stripped = raw_option.replace('--behave-', '', 1)
            dashes = '-' if len(stripped) == 1 else '--'
            behave_args.append(dashes + stripped)
        else:
            behave_args.append(raw_option)
    return behave_args
Get a list of those command line arguments specified with the management command that are meant as arguments for running behave.
def _config_params(base_config, assoc_files, region, out_file, items):
    """Add parameters based on configuration variables, associated files
    and genomic regions.

    Assembles the MuTect command-line option list from the associated
    reference files (dbSNP, COSMIC), the population variant regions
    restricted to ``region``, the configured minimum allele fraction and
    any extra options in the ``mutect`` resources section.
    """
    params = []
    dbsnp = assoc_files.get("dbsnp")
    if dbsnp:
        params += ["--dbsnp", dbsnp]
    cosmic = assoc_files.get("cosmic")
    if cosmic:
        params += ["--cosmic", cosmic]
    variant_regions = bedutils.population_variant_regions(items)
    region = subset_variant_regions(variant_regions, region, out_file, items)
    if region:
        params += ["-L", bamprep.region_to_gatk(region), "--interval_set_rule",
                   "INTERSECTION"]
    # set low frequency calling parameter if adjusted
    # to set other MuTect parameters on contamination, pass options to resources for mutect
    # --fraction_contamination --minimum_normal_allele_fraction
    min_af = tz.get_in(["algorithm", "min_allele_fraction"], base_config)
    if min_af:
        # min_allele_fraction is configured as a percentage; MuTect expects
        # a fraction, hence the division by 100.
        params += ["--minimum_mutation_cell_fraction", "%.2f" % (min_af / 100.0)]
    resources = config_utils.get_resources("mutect", base_config)
    if resources.get("options") is not None:
        params += [str(x) for x in resources.get("options", [])]
    # Output quality scores
    if "--enable_qscore_output" not in params:
        params.append("--enable_qscore_output")
    # drf not currently supported in MuTect to turn off duplicateread filter
    # params += gatk.standard_cl_params(items)
    return params
Add parameters based on configuration variables, associated files and genomic regions.
def get_foreign_key_base_declaration_sql(self, foreign_key):
    """Build the FOREIGN KEY constraint fragment of a field declaration,
    as used in statements like CREATE TABLE.

    :param foreign_key: The foreign key
    :type foreign_key: ForeignKeyConstraint

    :rtype: str
    """
    pieces = []
    if foreign_key.get_name():
        pieces.append("CONSTRAINT %s " % foreign_key.get_quoted_name(self))
    pieces.append("FOREIGN KEY (")

    # A usable definition needs local columns, foreign columns and the
    # referenced table -- reject anything incomplete.
    if not foreign_key.get_local_columns():
        raise DBALException('Incomplete definition. "local" required.')
    if not foreign_key.get_foreign_columns():
        raise DBALException('Incomplete definition. "foreign" required.')
    if not foreign_key.get_foreign_table_name():
        raise DBALException('Incomplete definition. "foreign_table" required.')

    pieces.append("%s) REFERENCES %s (%s)" % (
        ", ".join(foreign_key.get_quoted_local_columns(self)),
        foreign_key.get_quoted_foreign_table_name(self),
        ", ".join(foreign_key.get_quoted_foreign_columns(self)),
    ))
    return "".join(pieces)
Obtains DBMS specific SQL code portion needed to set the FOREIGN KEY constraint of a field declaration to be used in statements like CREATE TABLE. :param foreign_key: The foreign key :type foreign_key: ForeignKeyConstraint :rtype: str
def blksize(self):
    """The test blksize.

    Queries the underlying iperf C library for the current test's block
    size, caches the value in ``self._blksize`` and returns it.
    """
    self._blksize = self.lib.iperf_get_test_blksize(self._test)
    return self._blksize
The test blksize.
def enable_messaging(message_viewer, sender=dispatcher.Any):
    """Set up the dispatcher for messaging.

    Dynamic messages are appended to the existing user messages, while
    static and error messages clear the message queue first (the display
    is 'reset').

    :param message_viewer: A message viewer to show the message.
    :type message_viewer: MessageViewer

    :param sender: Sender of the message signal. Default to Any object.
    :type sender: object
    """
    LOGGER.debug('enable_messaging %s %s' % (message_viewer, sender))
    # Map each signal to the viewer handler that should receive it.
    bindings = (
        (DYNAMIC_MESSAGE_SIGNAL, message_viewer.dynamic_message_event),
        (STATIC_MESSAGE_SIGNAL, message_viewer.static_message_event),
        (ERROR_MESSAGE_SIGNAL, message_viewer.static_message_event),
    )
    for signal_name, handler in bindings:
        # noinspection PyArgumentEqualDefault
        dispatcher.connect(handler, signal=signal_name, sender=sender)
Set up the dispatcher for messaging. :param message_viewer: A message viewer to show the message. :type message_viewer: MessageViewer :param sender: Sender of the message signal. Default to Any object. :type sender: object
def disconnect(self, callback=None, sender=None, senders=None, key=None,
               keys=None, weak=True):
    '''
    *This method is a coroutine.*

    Disconnects the callback from the signal.

    If no arguments are supplied for ``sender``, ``senders``, ``key``, or
    ``keys`` -- the callback is completely disconnected. Otherwise, only the
    supplied ``senders`` and ``keys`` are disconnected for the callback.

    .. Note:: the argument ``weak`` must be the same as when the callback was
        connected to the signal.
    '''
    weak_callback = yield from self._get_ref(callback, weak)
    if (sender is None) and (senders is None) and (key is None) and (keys is None):
        # removing from _all signals
        # need a lock because we are changing the size of the dict
        if weak_callback in self._all:
            with (yield from self._lock_all):
                self._all.remove(weak_callback)
        with (yield from self._lock_by_senders):
            # Snapshot the keys first: _disconnect_from_sender mutates the
            # mapping while we iterate.
            sender_keys = list(self._by_senders.keys())
            for sender in sender_keys:
                yield from self._disconnect_from_sender(weak_callback, sender,
                                                        is_id=True)
        with (yield from self._lock_by_keys):
            # Same snapshot trick for the key mapping.
            key_keys = list(self._by_keys.keys())
            for key in key_keys:
                yield from self._disconnect_from_key(weak_callback, key)
    else:
        # only disconnect from specific senders/keys
        if sender is not None:
            yield from self._disconnect_from_sender(weak_callback, sender)
        if senders is not None:
            for sender in senders:
                yield from self._disconnect_from_sender(weak_callback, sender)
        if key is not None:
            yield from self._disconnect_from_key(weak_callback, key)
        if keys is not None:
            for key in keys:
                yield from self._disconnect_from_key(weak_callback, key)
*This method is a coroutine.* Disconnects the callback from the signal. If no arguments are supplied for ``sender``, ``senders``, ``key``, or ``keys`` -- the callback is completely disconnected. Otherwise, only the supplied ``senders`` and ``keys`` are disconnected for the callback. .. Note:: the argument ``weak`` must be the same as when the callback was connected to the signal.
def v_unique_name_defintions(ctx, stmt):
    """Make sure that all top-level definitions in a module are unique.

    Walks the statements of every included submodule and reports any
    typedef or grouping whose name is already registered on the
    including module (``stmt.i_typedefs`` / ``stmt.i_groupings``).
    """
    defs = [('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
            ('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings)]

    def f(s):
        # Flag s if its name collides with a definition already known to
        # the including module.
        for (keyword, errcode, dict) in defs:
            if s.keyword == keyword and s.arg in dict:
                err_add(ctx.errors, dict[s.arg].pos, errcode,
                        (s.arg, s.pos))

    for i in stmt.search('include'):
        submodulename = i.arg
        subm = ctx.get_module(submodulename)
        if subm is not None:
            for s in subm.substmts:
                for ss in s.substmts:
                    # NOTE(review): only grandchildren of the submodule are
                    # iterated here; the submodule's direct substatements are
                    # never passed to f -- confirm this skip is intentional.
                    iterate_stmt(ss, f)
Make sure that all top-level definitions in a module are unique
def _contextualise_connection(self, connection):
    """Track *connection* on the application context.

    Registered connections can be freed/unbound later if an exception
    occurred and the connection was never explicitly released.

    Args:
        connection (ldap3.Connection): Connection to add to the appcontext
    """
    ctx = stack.top
    if ctx is None:
        return
    if hasattr(ctx, 'ldap3_manager_connections'):
        ctx.ldap3_manager_connections.append(connection)
    else:
        # First connection on this context: create the tracking list.
        ctx.ldap3_manager_connections = [connection]
Add a connection to the appcontext so it can be freed/unbound at a later time if an exception occured and it was not freed. Args: connection (ldap3.Connection): Connection to add to the appcontext
def list_row_sparse_data(self, row_id):
    """Return copies of the 'row_sparse' parameter on all contexts, in the
    same order as creation, keeping only the rows listed in *row_id*.

    The parameter must have been initialized before.

    Parameters
    ----------
    row_id: NDArray
        Row ids to retain for the 'row_sparse' parameter.

    Returns
    -------
    list of NDArrays
    """
    if self._stype != 'row_sparse':
        message = ("Cannot return copies of Parameter '%s' on all contexts via "
                   "list_row_sparse_data() because its storage type is %s. Please "
                   "use data() instead." % (self.name, self._stype))
        raise RuntimeError(message)
    return self._get_row_sparse(self._data, list, row_id)
Returns copies of the 'row_sparse' parameter on all contexts, in the same order as creation. The copy only retains rows whose ids occur in provided row ids. The parameter must have been initialized before. Parameters ---------- row_id: NDArray Row ids to retain for the 'row_sparse' parameter. Returns ------- list of NDArrays
def load_itasser_folder(self, ident, itasser_folder, organize=False, outdir=None,
                        organize_name=None, set_as_representative=False,
                        representative_chain='X', force_rerun=False):
    """Load the results folder from an I-TASSER run (local, not from the
    website) and copy relevant files over to the protein structures directory.

    Args:
        ident (str): I-TASSER ID
        itasser_folder (str): Path to results folder
        organize (bool): If select files from modeling should be copied to
            the Protein directory
        outdir (str): Path to directory where files will be copied and
            organized to
        organize_name (str): Basename of files to rename results to. If not
            provided, will use id attribute.
        set_as_representative: If this structure should be set as the
            representative structure
        representative_chain (str): If ``set_as_representative`` is ``True``,
            provide the representative chain ID
        force_rerun (bool): If the PDB should be reloaded if it is already
            in the list of structures

    Returns:
        ITASSERProp: The object that is now contained in the structures
        attribute
    """
    if organize:
        if not outdir:
            outdir = self.structure_dir
        if not outdir:
            raise ValueError('Directory to copy results to must be specified')
    if self.structures.has_id(ident):
        if force_rerun:
            # Drop the stale entry so it gets re-created below.
            existing = self.structures.get_by_id(ident)
            self.structures.remove(existing)
        else:
            log.debug('{}: already present in list of structures'.format(ident))
            itasser = self.structures.get_by_id(ident)
    if not self.structures.has_id(ident):
        itasser = ITASSERProp(ident, itasser_folder)
        self.structures.append(itasser)
    if set_as_representative:
        self._representative_structure_setter(structprop=itasser,
                                              keep_chain=representative_chain,
                                              force_rerun=force_rerun)
    if organize:
        if itasser.structure_file:
            # The name of the actual pdb file will be $GENEID_model1.pdb
            if not organize_name:
                new_itasser_name = self.id + '_model1'
            else:
                new_itasser_name = organize_name
            # Additional results will be stored in a subdirectory
            dest_itasser_extra_dir = op.join(outdir,
                                             '{}_itasser'.format(new_itasser_name))
            ssbio.utils.make_dir(dest_itasser_extra_dir)
            # Copy the model1.pdb and also create summary dataframes
            itasser.copy_results(copy_to_dir=outdir,
                                 rename_model_to=new_itasser_name,
                                 force_rerun=force_rerun)
    return self.structures.get_by_id(ident)
Load the results folder from an I-TASSER run (local, not from the website) and copy relevant files over to the protein structures directory. Args: ident (str): I-TASSER ID itasser_folder (str): Path to results folder organize (bool): If select files from modeling should be copied to the Protein directory outdir (str): Path to directory where files will be copied and organized to organize_name (str): Basename of files to rename results to. If not provided, will use id attribute. set_as_representative: If this structure should be set as the representative structure representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures Returns: ITASSERProp: The object that is now contained in the structures attribute
def parse_doc_str(text=None, is_untabbed=True, is_stripped=True, tab=None,
                  split_character="::"):
    """
    Returns a str of the parsed doc
    for example the following would return 'a:A\nb:B' ::
        a:A
        b:B
    :param text: str of the text to parse, by default uses calling
        function doc
    :param is_untabbed: bool if True will untab the text
    :param is_stripped: bool if True will strip the text
    :param tab: str of the tab to use when untabbing, by default it will
        self determine tab size
    :param split_character: str of the character to split the text on
    :return: str of the parsed doc
    """
    text = text or function_doc(2)
    # Keep only the part after the split character, before any param/return
    # field documentation.
    text = text.split(split_character, 1)[-1]
    text = text.split(':param')[0].split(':return')[0]
    if is_untabbed:
        # Default tab: the indentation of the first non-whitespace character
        # (last line of the leading-whitespace prefix).
        tab = tab or text[:-1 * len(text.lstrip())].split('\n')[-1]
    else:
        tab = ''
    if is_stripped:
        # Plain conditional instead of `cond and a or b`: the old idiom
        # returned the *unstripped* text whenever the stripped result was
        # empty (a falsy-value pitfall).
        text = text.strip()
    return text.replace('\n%s' % tab, '\n')
Returns a str of the parsed doc for example the following would return 'a:A\nb:B' :: a:A b:B :param text: str of the text to parse, by default uses calling function doc :param is_untabbed: bool if True will untab the text :param is_stripped: bool if True will strip the text :param tab: str of the tab to use when untabbing, by default it will self determine tab size :param split_character: str of the character to split the text on :return: dict
def _autorestart_components(self, bundle):
    # type: (Bundle) -> None
    """
    Restart the components of the given bundle

    Re-instantiates every (factory, name, properties) triple remembered
    for the bundle in the auto-restart registry; instantiation errors are
    logged and do not stop the remaining restarts.

    :param bundle: A Bundle object
    """
    with self.__instances_lock:
        instances = self.__auto_restart.get(bundle)
        if not instances:
            # Nothing to do
            return
        for factory, name, properties in instances:
            try:
                # Instantiate the given component
                self.instantiate(factory, name, properties)
            except Exception as ex:
                # Log error, but continue to work
                _logger.exception(
                    "Error restarting component '%s' ('%s') "
                    "from bundle %s (%d): %s",
                    name,
                    factory,
                    bundle.get_symbolic_name(),
                    bundle.get_bundle_id(),
                    ex,
                )
Restart the components of the given bundle :param bundle: A Bundle object
def _parse_current_member(self, previous_rank, values):
    """
    Parses the column texts of a member row into a member dictionary.

    Parameters
    ----------
    previous_rank: :class:`dict`[int, str]
        The last rank present in the rows.
    values: tuple[:class:`str`]
        A list of row contents.
    """
    rank, name, vocation, level, joined, status = values
    # A blank rank cell means the member shares the previous row's rank.
    rank = previous_rank[1] if rank == " " else rank
    title = None
    previous_rank[1] = rank
    # The name cell may embed a title, e.g. "Name (Title)"; split it out.
    m = title_regex.match(name)
    if m:
        name = m.group(1)
        title = m.group(2)
    self.members.append(GuildMember(name, rank, title, int(level), vocation,
                                    joined=joined,
                                    online=status == "online"))
Parses the column texts of a member row into a member dictionary. Parameters ---------- previous_rank: :class:`dict`[int, str] The last rank present in the rows. values: tuple[:class:`str`] A list of row contents.
def snapshot(self):
    """Return a fresh dict mapping each slot name to its current value.

    Values are obtained via each slot's ``get()``; the returned dict is a
    new object, so callers may mutate it without affecting the registry.
    """
    # Dict comprehension over items() instead of dict(generator) that
    # re-indexed self._slots by key for every name.
    return {name: slot.get() for name, slot in self._slots.items()}
Return a dictionary of current slots by reference.
def get_historical_data(self, uuid, start, end, average_by=0):
    """
    Get the data from one device for a specified time range.

    .. note::

        Can fetch a maximum of 42 days of data. To speed up query
        processing, you can use a combination of average factor multiple
        of 1H in seconds (e.g. 3600), and o'clock start and end times

    :param uuid: Id of the device
    :type uuid: str
    :param start: start of the range
    :type start: datetime
    :param end: end of the range
    :type end: datetime
    :param average_by: amount of seconds to average data over. 0 or 300
        for no average. Use 3600 (average hourly) or a multiple for long
        range requests (e.g. more than 1 day)
    :type average_by: integer
    :returns: list of datapoints
    :raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess,
        TooManyRequests, InternalError

    .. seealso:: :func:`parse_data` for return data syntax
    """
    # NOTE(review): ``replace(tzinfo=timezone.utc)`` attaches UTC without
    # converting, i.e. start/end are assumed to already be UTC wall-clock
    # times -- confirm callers pass UTC datetimes.
    return self.parse_data((yield from self._get(
        HISTORICAL_DATA_URL.format(uuid= uuid,
                                   start = trunc(start.replace(tzinfo=timezone.utc).timestamp()),
                                   end = trunc(end.replace(tzinfo=timezone.utc).timestamp()),
                                   average_by= trunc(average_by)))))
Get the data from one device for a specified time range. .. note:: Can fetch a maximum of 42 days of data. To speed up query processing, you can use a combination of average factor multiple of 1H in seconds (e.g. 3600), and o'clock start and end times :param uuid: Id of the device :type uuid: str :param start: start of the range :type start: datetime :param end: end of the range :type end: datetime :param average_by: amount of seconds to average data over. 0 or 300 for no average. Use 3600 (average hourly) or a multiple for long range requests (e.g. more than 1 day) :type average_by: integer :returns: list of datapoints :raises: ClientError, AuthFailure, BadFormat, ForbiddenAccess, TooManyRequests, InternalError .. seealso:: :func:`parse_data` for return data syntax
def calc_glmelt_in_v1(self):
    """Calculate melting from glaciers which are actually not covered by
    a snow layer and add it to the water release of the snow module.

    Required control parameters:
      |NmbZones|
      |ZoneType|
      |GMelt|

    Required state sequence:
      |SP|

    Required flux sequence:
      |TC|

    Calculated fluxes sequence:
      |GlMelt|

    Updated flux sequence:
      |In_|

    Basic equation:

      :math:`GlMelt = \\Bigl \\lbrace
      {
      {max(GMelt \\cdot (TC-TTM), 0) \\ | \\ SP = 0}
      \\atop
      {0 \\ | \\ SP > 0}
      }`

    Examples:

        Seven zones are prepared, but glacier melting occurs only
        in the fourth one, as the first three zones are no glaciers,
        the fifth zone is covered by a snow layer and the actual
        temperature of the last two zones is not above the threshold
        temperature:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> simulationstep('12h')
        >>> nmbzones(7)
        >>> zonetype(FIELD, FOREST, ILAKE, GLACIER, GLACIER, GLACIER, GLACIER)
        >>> gmelt(4.)
        >>> derived.ttm(2.)
        >>> states.sp = 0., 0., 0., 0., .1, 0., 0.
        >>> fluxes.tc = 3., 3., 3., 3., 3., 2., 1.
        >>> fluxes.in_ = 3.
        >>> model.calc_glmelt_in_v1()
        >>> fluxes.glmelt
        glmelt(0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0)
        >>> fluxes.in_
        in_(3.0, 3.0, 3.0, 5.0, 3.0, 3.0, 3.0)

        Note that the assumed length of the simulation step is only
        a half day.  Hence the effective value of the degree day
        factor is not 4 but 2:

        >>> gmelt
        gmelt(4.0)
        >>> gmelt.values
        array([ 2.,  2.,  2.,  2.,  2.,  2.,  2.])
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    for k in range(con.nmbzones):
        # Melt only for glacier zones that are snow-free (SP <= 0) and
        # warmer than the threshold melt temperature.
        if ((con.zonetype[k] == GLACIER) and
                (sta.sp[k] <= 0.) and (flu.tc[k] > der.ttm[k])):
            flu.glmelt[k] = con.gmelt[k]*(flu.tc[k]-der.ttm[k])
            # Glacier melt feeds directly into the snow module's release.
            flu.in_[k] += flu.glmelt[k]
        else:
            flu.glmelt[k] = 0.
Calculate melting from glaciers which are actually not covered by a snow layer and add it to the water release of the snow module. Required control parameters: |NmbZones| |ZoneType| |GMelt| Required state sequence: |SP| Required flux sequence: |TC| Calculated fluxes sequence: |GlMelt| Updated flux sequence: |In_| Basic equation: :math:`GlMelt = \\Bigl \\lbrace { {max(GMelt \\cdot (TC-TTM), 0) \\ | \\ SP = 0} \\atop {0 \\ | \\ SP > 0} }` Examples: Seven zones are prepared, but glacier melting occurs only in the fourth one, as the first three zones are no glaciers, the fifth zone is covered by a snow layer and the actual temperature of the last two zones is not above the threshold temperature: >>> from hydpy.models.hland import * >>> parameterstep('1d') >>> simulationstep('12h') >>> nmbzones(7) >>> zonetype(FIELD, FOREST, ILAKE, GLACIER, GLACIER, GLACIER, GLACIER) >>> gmelt(4.) >>> derived.ttm(2.) >>> states.sp = 0., 0., 0., 0., .1, 0., 0. >>> fluxes.tc = 3., 3., 3., 3., 3., 2., 1. >>> fluxes.in_ = 3. >>> model.calc_glmelt_in_v1() >>> fluxes.glmelt glmelt(0.0, 0.0, 0.0, 2.0, 0.0, 0.0, 0.0) >>> fluxes.in_ in_(3.0, 3.0, 3.0, 5.0, 3.0, 3.0, 3.0) Note that the assumed length of the simulation step is only a half day. Hence the effective value of the degree day factor is not 4 but 2: >>> gmelt gmelt(4.0) >>> gmelt.values array([ 2., 2., 2., 2., 2., 2., 2.])
def split_in_blocks(sequence, hint, weight=lambda item: 1, key=nokey):
    """
    Split the `sequence` in a number of WeightedSequences close to `hint`.

    :param sequence: a finite sequence of items
    :param hint: an integer suggesting the number of subsequences to generate
    :param weight: a function returning the weight of a given item
    :param key: a function returning the key of a given item

    The WeightedSequences are of homogeneous key and they try to be
    balanced in weight. For instance

     >>> items = 'ABCDE'
     >>> list(split_in_blocks(items, 3))
     [<WeightedSequence ['A', 'B'], weight=2>, <WeightedSequence ['C', 'D'], weight=2>, <WeightedSequence ['E'], weight=1>]
    """
    if isinstance(sequence, int):
        # an integer stands for "that many items": slice by count
        return split_in_slices(sequence, hint)
    if hint in (0, 1):
        if key is nokey:
            # nothing to split and no key to group by
            return [sequence]
        # one block per distinct key
        return list(groupby(sequence, key).values())
    ordered = sorted(sequence, key=lambda item: (key(item), weight(item)))
    assert hint > 0, hint
    assert len(ordered) > 0, len(ordered)
    total_weight = float(sum(weight(item) for item in ordered))
    return block_splitter(ordered, math.ceil(total_weight / hint), weight, key)
Split the `sequence` in a number of WeightedSequences close to `hint`. :param sequence: a finite sequence of items :param hint: an integer suggesting the number of subsequences to generate :param weight: a function returning the weight of a given item :param key: a function returning the key of a given item The WeightedSequences are of homogeneous key and they try to be balanced in weight. For instance >>> items = 'ABCDE' >>> list(split_in_blocks(items, 3)) [<WeightedSequence ['A', 'B'], weight=2>, <WeightedSequence ['C', 'D'], weight=2>, <WeightedSequence ['E'], weight=1>]
def check_time(value):
    """Validate a clock time string such as ``03:45`` or ``1:1``.

    Raises TimeDefinitionError unless *value* is an ``H:M`` string with
    hours in 0..23 and minutes in 0..59; returns None otherwise.
    """
    try:
        hours, minutes = [int(part) for part in value.split(':')]
        if not (0 <= hours <= 23 and 0 <= minutes <= 59):
            raise ValueError
    except ValueError:
        raise TimeDefinitionError("Invalid definition of time %r" % value)
check that it's a value like 03:45 or 1:1
def _post(self, url_suffix, data, content_type=ContentType.json):
    """
    Send POST request to API at url_suffix with the payload in data.
    Raises error if x-total-pages is contained in the response.
    :param url_suffix: str URL path we are sending a POST to
    :param data: object payload we are sending
    :param content_type: str from ContentType that determines how we format the data
    :return: requests.Response containing the result
    """
    url, body, headers = self._url_parts(
        url_suffix, data, content_type=content_type)
    response = self.http.post(url, body, headers=headers)
    # pagination is not allowed for POST responses
    return self._check_err(response, url_suffix, data, allow_pagination=False)
Send POST request to API at url_suffix with post_data. Raises error if x-total-pages is contained in the response. :param url_suffix: str URL path we are sending a POST to :param data: object data we are sending :param content_type: str from ContentType that determines how we format the data :return: requests.Response containing the result
def keystroke_toggled(self, settings, key, user_data):
    """Propagate a change of the scroll_keystroke gconf var.

    Applies the new scroll-on-keystroke setting to every open terminal.
    """
    for terminal in self.guake.notebook_manager.iter_terminals():
        terminal.set_scroll_on_keystroke(settings.get_boolean(key))
If the gconf var scroll_keystroke be changed, this method will be called and will set the scroll_on_keystroke in all terminals open.
def mol(self):
    """Return the record in MOL format.

    The result is fetched from the ChemSpider MassSpec API on first
    access and cached in ``self._mol``, so the web service is queried
    at most once per record.
    """
    if self._mol is None:
        apiurl = 'http://www.chemspider.com/MassSpecAPI.asmx/GetRecordMol?csid=%s&calc3d=false&token=%s' % (self.csid, TOKEN)
        response = urlopen(apiurl)
        try:
            # parse while the connection is open, then always close it
            # (the original leaked the HTTP response object)
            tree = ET.parse(response)
        finally:
            response.close()
        self._mol = tree.getroot().text
    return self._mol
Return record in MOL format
def resample(self, rate):
    """Resample this `StateVector` to a new rate

    Because of the nature of a state-vector, downsampling is done
    by taking the logical 'and' of all original samples in each new
    sampling interval, while upsampling is achieved by repeating
    samples.

    Parameters
    ----------
    rate : `float`
        rate to which to resample this `StateVector`, must be a
        divisor of the original sample rate (when downsampling)
        or a multiple of the original (when upsampling).

    Returns
    -------
    vector : `StateVector`
        resampled version of the input `StateVector`
    """
    rate1 = self.sample_rate.value
    if isinstance(rate, units.Quantity):
        rate2 = rate.value
    else:
        rate2 = float(rate)
    # upsample
    # NOTE(review): rate2 == rate1 also satisfies this check (1.0 is an
    # integer ratio) and raises NotImplementedError -- confirm intended
    if (rate2 / rate1).is_integer():
        raise NotImplementedError("StateVector upsampling has not "
                                  "been implemented yet, sorry.")
    # downsample
    elif (rate1 / rate2).is_integer():
        factor = int(rate1 / rate2)
        # reshape incoming data to one column per new sample
        newsize = int(self.size / factor)
        old = self.value.reshape((newsize, self.size // newsize))
        # work out number of bits
        if self.bits:
            nbits = len(self.bits)
        else:
            max_ = self.value.max()
            nbits = int(ceil(log(max_, 2))) if max_ else 1
        bits = range(nbits)
        # construct an iterator over the columns of the old array;
        # the second operand is allocated by nditer to hold the output
        itr = numpy.nditer(
            [old, None],
            flags=['external_loop', 'reduce_ok'],
            op_axes=[None, [0, -1]],
            op_flags=[['readonly'], ['readwrite', 'allocate']])
        dtype = self.dtype
        type_ = self.dtype.type
        # for each new sample, each output bit is ON only if that bit is
        # ON in every old sample of the column (logical AND per bit);
        # the per-bit results are recombined by summing powers of two
        for x, y in itr:
            y[...] = numpy.sum([type_((x >> bit & 1).all() * (2 ** bit)) for
                                bit in bits], dtype=self.dtype)
        new = StateVector(itr.operands[1], dtype=dtype)
        # carry over metadata (name, epoch, ...) and fix unit/rate
        new.__metadata_finalize__(self)
        new._unit = self.unit
        new.sample_rate = rate2
        return new
    # error for non-integer resampling factors
    elif rate1 < rate2:
        raise ValueError("New sample rate must be multiple of input "
                         "series rate if upsampling a StateVector")
    else:
        raise ValueError("New sample rate must be divisor of input "
                         "series rate if downsampling a StateVector")
Resample this `StateVector` to a new rate Because of the nature of a state-vector, downsampling is done by taking the logical 'and' of all original samples in each new sampling interval, while upsampling is achieved by repeating samples. Parameters ---------- rate : `float` rate to which to resample this `StateVector`, must be a divisor of the original sample rate (when downsampling) or a multiple of the original (when upsampling). Returns ------- vector : `StateVector` resampled version of the input `StateVector`
def ExpandArg(self, **_):
    """Expand the args as a section.parameter from the config."""
    # Called when we see a closing ')'; the stack depth has to exactly
    # match the number of opening '('.
    if len(self.stack) <= 1:
        raise lexer.ParseError(
            "Unbalanced parenthesis: Can not expand '%s'" %
            self.processed_buffer)

    # The full parameter name, e.g. Logging.path; qualify bare names
    # with the default section.
    param = self.stack.pop(-1)
    if "." not in param:
        param = "%s.%s" % (self.default_section, param)

    value = self.config.Get(param, context=self.context)
    value = "" if value is None else value

    # Encode the interpolated string according to its declared type.
    type_info_obj = self.config.FindTypeInfo(param) or type_info.String()
    self.stack[-1] += type_info_obj.ToString(value)
Expand the args as a section.parameter from the config.
def popular(category=None, sortOption="title"):
    """Return a search result with torrents from the KAT home page.

    The result may be restricted to a *category*; *sortOption* is
    forwarded to the underlying search.  The result cannot contain
    multiple pages.
    """
    search = Search()
    search.popular(category, sortOption)
    return search
Return a search result containing torrents appearing on the KAT home page. Can be categorized; the sortOption argument is forwarded to the underlying search. Cannot contain multiple pages
def subscribe(self, topic, callback, qos):
    """Subscribe to an MQTT topic, wiring *callback* to its messages."""
    if topic in self.topics:
        # already subscribed: keep the existing registration untouched
        return

    def _relay(mqttc, userdata, msg):
        # decode the raw payload before handing it to the user callback
        callback(msg.topic, msg.payload.decode('utf-8'), msg.qos)

    self._mqttc.subscribe(topic, qos)
    self._mqttc.message_callback_add(topic, _relay)
    self.topics[topic] = callback
Subscribe to an MQTT topic.
def maskedConvolve(arr, kernel, mask, mode='reflect'):
    '''
    Same as scipy.ndimage.convolve, but only executed where mask==True
    ... which should speed up everything.

    :param arr: 2d input array
    :param kernel: convolution kernel
    :param mask: boolean array; the convolution is only evaluated where True
    :param mode: boundary mode used when extending the array (default 'reflect')
    :return: array of the same shape as *arr* with the convolution result
    '''
    # pad the input so the kernel can be applied at the borders
    # (fix: removed a leftover debug print of the padded shape)
    arr2 = extendArrayForConvolution(arr, kernel.shape, modex=mode, modey=mode)
    out = np.zeros_like(arr)
    return _calc(arr2, kernel, mask, out)
same as scipy.ndimage.convolve but is only executed on mask==True ... which should speed up everything
def remove_none_value(data):
    """Return a new dict with every ``None``-valued item removed.

    :param data: mapping to filter; it is not modified
    :return: new dict containing only the items whose value is not None
    """
    # dict comprehension instead of dict(<generator>) -- same result,
    # clearer and slightly faster
    return {k: v for k, v in data.items() if v is not None}
remove item from dict if value is None. return new dict.
def get_id(page):
    """Extract the id from a page.

    Args:
      page: a string containing an ``<id>...</id>`` element

    Returns:
      an integer
    """
    open_tag = "<id>"
    close_tag = "</id>"
    begin = page.find(open_tag)
    end = page.find(close_tag)
    # both tags must be present (AssertionError otherwise)
    assert begin != -1
    assert end != -1
    return int(page[begin + len(open_tag):end])
Extract the id from a page. Args: page: a string Returns: an integer
def scale_transform(t, scale):
    """
    Apply a scale to a transform

    :param t: The transform [[x, y, z], [x, y, z, w]] to be scaled OR the
        position [x, y, z] to be scaled
    :param scale: the scale of type float OR the scale [scale_x, scale_y, scale_z]
    :return: The scaled transform or position
    """
    if isinstance(scale, (float, int)):
        # promote a uniform scalar scale to a per-axis triple
        scale = [scale] * 3
    sx, sy, sz = scale[0], scale[1], scale[2]
    if _is_indexable(t[0]):
        # full transform: scale only the position part, keep orientation
        return [[t[0][0] * sx, t[0][1] * sy, t[0][2] * sz], t[1]]
    # bare position
    return [t[0] * sx, t[1] * sy, t[2] * sz]
Apply a scale to a transform :param t: The transform [[x, y, z], [x, y, z, w]] to be scaled OR the position [x, y, z] to be scaled :param scale: the scale of type float OR the scale [scale_x, scale_y, scale_z] :return: The scaled
def delete(self, table):
    """Start building a DELETE statement for *table*.

    Resets any previous limit and returns ``self`` so further clauses
    can be chained.

    >>> yql.delete('yql.storage').where(['name','=','store://YEl70PraLLMSMuYAauqNc7'])
    """
    self._table = table
    self._limit = None
    self._query = "DELETE FROM " + self._table
    return self
Deletes record in table >>> yql.delete('yql.storage').where(['name','=','store://YEl70PraLLMSMuYAauqNc7'])
def document_iter(self, context):
    """
    Iterates over all the elements in an iterparse context
    (here: <document> elements) and yields an URMLDocumentGraph instance
    for each of them. For efficiency, the elements are removed from the
    DOM / main memory after processing them.

    If ``self.debug`` is set to ``True`` (in the ``__init__`` method),
    this method will yield <documents> elements, which can be used to
    construct ``URMLDocumentGraph``s manually.
    """
    for _event, elem in context:
        if not self.debug:
            yield URMLDocumentGraph(elem, tokenize=self.tokenize,
                                    precedence=self.precedence)
        else:
            yield elem
        # removes element (and references to it) from memory after processing it
        elem.clear()
        # also drop already-processed preceding siblings still referenced
        # by the parent, so memory stays bounded during iterparse
        while elem.getprevious() is not None:
            del elem.getparent()[0]
    del context
Iterates over all the elements in an iterparse context (here: <document> elements) and yields an URMLDocumentGraph instance for each of them. For efficiency, the elements are removed from the DOM / main memory after processing them. If ``self.debug`` is set to ``True`` (in the ``__init__`` method), this method will yield <documents> elements, which can be used to construct ``URMLDocumentGraph``s manually.
def create_from_yamlfile(cls, yamlfile):
    """Create a Castro data object from a yaml file that contains the
    likelihood data."""
    data = load_yaml(yamlfile)
    nebins = len(data)
    bins = range(nebins)

    emin = np.array([data[i]['emin'] for i in bins])
    emax = np.array([data[i]['emax'] for i in bins])
    ref_flux = np.array([data[i]['flux'][1] for i in bins])
    ref_eflux = np.array([data[i]['eflux'][1] for i in bins])
    conv = np.array([data[i]['eflux2npred'] for i in bins])
    ref_npred = conv * ref_eflux

    ref_spec = ReferenceSpec(emin, emax, np.ones(ref_flux.shape),
                             ref_flux, ref_eflux, ref_npred)

    norm_data = np.array([data[i]['eflux'] for i in bins])
    ll_data = np.array([data[i]['logLike'] for i in bins])
    # shift each bin so the best-fit point has zero negative log-likelihood
    nll_data = (ll_data.max(1) - ll_data.T).T
    return cls(norm_data, nll_data, ref_spec, 'eflux')
Create a Castro data object from a yaml file that contains the likelihood data.
def daily(target_coll, source_coll, interp_days=32, interp_method='linear'):
    """Generate daily ETa collection from ETo and ETf collections

    Parameters
    ----------
    target_coll : ee.ImageCollection
        Source images will be interpolated to each target image time_start.
        Target images should have a daily time step.  This will
        typically be the reference ET (ETr) collection.
    source_coll : ee.ImageCollection
        Images that will be interpolated to the target image collection.
        This will typically be the fraction of reference ET (ETrF)
        collection.
    interp_days : int, optional
        Number of days before and after each image date to include in
        the interpolation (the default is 32).
    interp_method : {'linear'}, optional
        Interpolation method (the default is 'linear').

    Returns
    -------
    ee.ImageCollection() of daily interpolated images

    Raises
    ------
    ValueError
        If `interp_method` is not a supported method.
    """
    # # DEADBEEF - This module is assuming that the time band is already in
    # #   the source collection.
    # # Uncomment the following to add a time band here instead.
    # def add_utc0_time_band(image):
    #     date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start')))
    #     return image.addBands([
    #         image.select([0]).double().multiply(0).add(date_0utc.millis())\
    #             .rename(['time'])])
    # source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band))

    if interp_method.lower() == 'linear':
        def _linear(image):
            """Linearly interpolate source images to target image time_start(s)

            Parameters
            ----------
            image : ee.Image.
                The first band in the image will be used as the "target"
                image and will be returned with the output image.

            Returns
            -------
            ee.Image of interpolated values with band name 'src'

            Notes
            -----
            The source collection images must have a time band.
            This function is intended to be mapped over an image
            collection and can only take one input parameter.
            """
            target_image = ee.Image(image).select(0).double()
            target_date = ee.Date(image.get('system:time_start'))
            # All filtering will be done based on 0 UTC dates
            utc0_date = utils.date_0utc(target_date)
            # utc0_time = target_date.update(hour=0, minute=0, second=0)\
            #     .millis().divide(1000).floor().multiply(1000)
            time_image = ee.Image.constant(utc0_date.millis()).double()

            # Build nodata images/masks that can be placed at the front/back of
            # of the qm image collections in case the collections are empty.
            bands = source_coll.first().bandNames()
            prev_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        -interp_days - 1, 'day').millis()})
            next_qm_mask = ee.Image.constant(ee.List.repeat(1, bands.length()))\
                .double().rename(bands).updateMask(0)\
                .set({
                    'system:time_start': utc0_date.advance(
                        interp_days + 2, 'day').millis()})

            # Build separate collections for before and after the target date
            prev_qm_coll = source_coll.filterDate(
                utc0_date.advance(-interp_days, 'day'), utc0_date)\
                .merge(ee.ImageCollection(prev_qm_mask))
            next_qm_coll = source_coll.filterDate(
                utc0_date, utc0_date.advance(interp_days + 1, 'day'))\
                .merge(ee.ImageCollection(next_qm_mask))

            # Flatten the previous/next collections to single images
            # The closest image in time should be on "top"
            # CGM - Is the previous collection already sorted?
            # prev_qm_image = prev_qm_coll.mosaic()
            prev_qm_image = prev_qm_coll.sort('system:time_start', True).mosaic()
            next_qm_image = next_qm_coll.sort('system:time_start', False).mosaic()

            # DEADBEEF - It might be easier to interpolate all bands instead of
            #   separating the value and time bands
            # prev_value_image = ee.Image(prev_qm_image).double()
            # next_value_image = ee.Image(next_qm_image).double()

            # Interpolate all bands except the "time" band
            prev_bands = prev_qm_image.bandNames()\
                .filter(ee.Filter.notEquals('item', 'time'))
            next_bands = next_qm_image.bandNames() \
                .filter(ee.Filter.notEquals('item', 'time'))
            prev_value_image = ee.Image(prev_qm_image.select(prev_bands)).double()
            next_value_image = ee.Image(next_qm_image.select(next_bands)).double()
            prev_time_image = ee.Image(prev_qm_image.select('time')).double()
            next_time_image = ee.Image(next_qm_image.select('time')).double()

            # Fill masked values with values from the opposite image
            # Something like this is needed to ensure there are always two
            #   values to interpolate between
            # For data gaps, this will cause a flat line instead of a ramp
            prev_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_time_image, prev_time_image]).mosaic())
            next_time_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_time_image, next_time_image]).mosaic())
            prev_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                next_value_image, prev_value_image]).mosaic())
            next_value_mosaic = ee.Image(ee.ImageCollection.fromImages([
                prev_value_image, next_value_image]).mosaic())

            # Calculate time ratio of the current image between other
            #   cloud free images
            time_ratio_image = time_image.subtract(prev_time_mosaic) \
                .divide(next_time_mosaic.subtract(prev_time_mosaic))

            # Interpolate values to the current image time
            interp_value_image = next_value_mosaic.subtract(prev_value_mosaic) \
                .multiply(time_ratio_image).add(prev_value_mosaic)

            # CGM
            # Should/can the target image be mapped to the interpolated image?
            # Is there a clean way of computing ET here?
            return interp_value_image \
                .addBands(target_image) \
                .set({
                    'system:index': image.get('system:index'),
                    'system:time_start': image.get('system:time_start'),
                    # 'system:time_start': utc0_time,
                })

        interp_coll = ee.ImageCollection(target_coll.map(_linear))
    # elif interp_method.lower() == 'nearest':
    #     interp_coll = ee.ImageCollection(target_coll.map(_nearest))
    else:
        raise ValueError('invalid interpolation method: {}'.format(interp_method))
    return interp_coll
Generate daily ETa collection from ETo and ETf collections Parameters ---------- target_coll : ee.ImageCollection Source images will be interpolated to each target image time_start. Target images should have a daily time step. This will typically be the reference ET (ETr) collection. source_coll : ee.ImageCollection Images that will be interpolated to the target image collection. This will typically be the fraction of reference ET (ETrF) collection. interp_days : int, optional Number of days before and after each image date to include in the interpolation (the default is 32). interp_method : {'linear'}, optional Interpolation method (the default is 'linear'). Returns ------- ee.ImageCollection() of daily interpolated images Raises ------ ValueError If `interp_method` is not a supported method.
def array2tabledef(data, table_type='binary', write_bitcols=False):
    """
    Similar to descr2tabledef but if there are object columns a type
    and max length will be extracted and used for the tabledef.

    Parameters
    ----------
    data: numpy array with fields
        a ValueError is raised when ``data.dtype.fields`` is None
    table_type: str
        'binary' or 'ascii'; ascii tables reject 1-byte and unsigned
        2-byte integer columns
    write_bitcols: bool
        passed through to the per-column conversion

    Returns
    -------
    (names, formats, dims): lists describing the FITS table columns
    """
    is_ascii = (table_type == 'ascii')

    if data.dtype.fields is None:
        raise ValueError("data must have fields")

    names = []
    names_nocase = set()  # upper-cased names seen so far (FITS names
                          # are not case sensitive)

    formats = []
    dims = []

    descr = data.dtype.descr
    for d in descr:
        # these have the form '<f4' or '|S25', etc.  Extract the pure type
        npy_dtype = d[1][1:]
        if is_ascii:
            if npy_dtype in ['u1', 'i1']:
                raise ValueError(
                    "1-byte integers are not supported for "
                    "ascii tables: '%s'" % npy_dtype)
            if npy_dtype in ['u2']:
                raise ValueError(
                    "unsigned 2-byte integers are not supported for "
                    "ascii tables: '%s'" % npy_dtype)

        if npy_dtype[0] == 'O':
            # this will be a variable length column 1Pt(len) where t is the
            # type and len is max length.  Each element must be convertible to
            # the same type as the first
            name = d[0]
            form, dim = npy_obj2fits(data, name)
        elif npy_dtype[0] == "V":
            # sub-structured fields are skipped entirely
            continue
        else:
            name, form, dim = _npy2fits(
                d, table_type=table_type, write_bitcols=write_bitcols)

        if name == '':
            raise ValueError("field name is an empty string")

        name_nocase = name.upper()
        if name_nocase in names_nocase:
            raise ValueError(
                "duplicate column name found: '%s'. Note "
                "FITS column names are not case sensitive" % name_nocase)

        names.append(name)
        names_nocase.add(name_nocase)

        formats.append(form)
        dims.append(dim)

    return names, formats, dims
Similar to descr2tabledef but if there are object columns a type and max length will be extracted and used for the tabledef
def get_breadcrumbs(url, request=None):
    """
    Given a url returns a list of breadcrumbs, which are each a
    tuple of (name, url).
    """
    # imports are deferred to call time, presumably to avoid import
    # cycles with the view modules -- TODO confirm
    from wave.reverse import preserve_builtin_query_params
    from wave.settings import api_settings
    from wave.views import APIView

    view_name_func = api_settings.VIEW_NAME_FUNCTION

    def breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen):
        """
        Add tuples of (name, url) to the breadcrumbs list,
        progressively chomping off parts of the url.
        """
        try:
            (view, unused_args, unused_kwargs) = resolve(url)
        except Exception:
            # this prefix of the url does not resolve to a view;
            # keep chomping below
            pass
        else:
            # Check if this is a REST framework view,
            # and if so add it to the breadcrumbs
            cls = getattr(view, 'cls', None)
            if cls is not None and issubclass(cls, APIView):
                # Don't list the same view twice in a row.
                # Probably an optional trailing slash.
                if not seen or seen[-1] != view:
                    suffix = getattr(view, 'suffix', None)
                    name = view_name_func(cls, suffix)
                    insert_url = preserve_builtin_query_params(prefix + url, request)
                    # insert at the front so breadcrumbs end up in
                    # root-to-leaf order
                    breadcrumbs_list.insert(0, (name, insert_url))
                    seen.append(view)

        if url == '':
            # All done
            return breadcrumbs_list

        elif url.endswith('/'):
            # Drop trailing slash off the end and continue to try to
            # resolve more breadcrumbs
            url = url.rstrip('/')
            return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)

        # Drop trailing non-slash off the end and continue to try to
        # resolve more breadcrumbs
        url = url[:url.rfind('/') + 1]
        return breadcrumbs_recursive(url, breadcrumbs_list, prefix, seen)

    # strip the script prefix so resolve() sees an app-relative url
    prefix = get_script_prefix().rstrip('/')
    url = url[len(prefix):]
    return breadcrumbs_recursive(url, [], prefix, [])
Given a url returns a list of breadcrumbs, which are each a tuple of (name, url).
def kill(self, sig):
    '''Sends a Unix signal to the subprocess.

    Use constants from the :mod:`signal` module to specify which signal.
    On Windows only CTRL_C_EVENT and CTRL_BREAK_EVENT can be delivered;
    any other signal is translated to SIGTERM.
    '''
    if sys.platform == 'win32':
        # map POSIX-style signals onto the two console events Windows
        # supports, falling back to SIGTERM (terminate) otherwise
        if sig in (signal.SIGINT, signal.CTRL_C_EVENT):
            sig = signal.CTRL_C_EVENT
        elif sig in (signal.SIGBREAK, signal.CTRL_BREAK_EVENT):
            sig = signal.CTRL_BREAK_EVENT
        else:
            sig = signal.SIGTERM
    os.kill(self.proc.pid, sig)
Sends a Unix signal to the subprocess. Use constants from the :mod:`signal` module to specify which signal.
def angle2vecs(vec1, vec2): """angle between two vectors""" # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b) dot = np.dot(vec1, vec2) vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum()) vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum()) if (vec1_modulus * vec2_modulus) == 0: cos_angle = 1 else: cos_angle = dot / (vec1_modulus * vec2_modulus) return math.degrees(acos(cos_angle))
angle between two vectors
def getUniqueFilename(dir=None, base=None):
    """
    DESCR: Generate a filename in the directory <dir> which is unique
           (i.e. not in use at the moment)
    PARAMS: dir -- the directory to look in. If None, use CWD
            base -- use this as the prefix for the generated filename
    RETURN: string -- the filename generated (joined with <dir> when
            one is given, bare otherwise)

    NOTE: this is inherently racy -- the file is not created here, so
    another process may claim the name; prefer tempfile.mkstemp when
    the caller can use it.
    """
    # fixes: the original ignored both parameters and never returned
    # the generated name
    directory = dir if dir is not None else os.getcwd()
    prefix = base if base is not None else ""
    while True:
        fn = "%s%d.tmp" % (prefix, random.randint(0, 100000))
        path = os.path.join(directory, fn)
        if not os.path.exists(path):
            # keep the historical bare-name return when no dir was given
            return path if dir is not None else fn
DESCRP: Generate a filename in the directory <dir> which is unique (i.e. not in use at the moment) PARAMS: dir -- the directory to look in. If None, use CWD base -- use this as the base name for the filename RETURN: string -- the filename generated
def to_buffer(f):
    """
    Decorator converting all strings and iterators/iterables into Buffers.
    """
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        # the iterator is taken from the 'iterator' keyword if present,
        # otherwise from the first positional argument
        iterator = kwargs.get('iterator', args[0])
        if not isinstance(iterator, Buffer):
            iterator = Buffer(iterator)
        # NOTE(review): when 'iterator' arrives via kwargs it is still
        # present in **kwargs below, so f() would receive it twice
        # (positionally and by keyword) -- confirm callers only ever
        # pass it positionally
        return f(iterator, *args[1:], **kwargs)
    return wrap
Decorator converting all strings and iterators/iterables into Buffers.
def makedirs(directory):
    """
    Recursively create a named directory.

    All missing intermediate directories are created as well.  Like the
    original hand-rolled recursion, this raises ``OSError``
    (``FileExistsError``) when *directory* already exists.
    """
    # delegate to the standard library instead of recursing manually;
    # same observable behavior, without a check-then-create race at
    # every level of the path
    os.makedirs(directory)
Recursively create a named directory.
def debug(msg: str, resource: Optional['Resource'] = None,
          stream_id: Optional[int] = None) -> None:
    """
    Logs a message to the Pulumi CLI's debug channel, associating it with a
    resource and stream_id if provided.

    :param str msg: The message to send to the Pulumi CLI.
    :param Optional[Resource] resource: If provided, associate this message
           with the given resource in the Pulumi CLI.
    :param Optional[int] stream_id: If provided, associate this message with
           a stream of other messages.
    """
    engine = get_engine()
    if engine is None:
        # no engine attached (e.g. running outside the CLI): stderr fallback
        print("debug: " + msg, file=sys.stderr)
        return
    _log(engine, engine_pb2.DEBUG, msg, resource, stream_id)
Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
def plot_pseudosection_type2(self, mid, **kwargs):
    """Create a pseudosection plot of type 2.

    For a given measurement data set, create plots that graphically show
    the data in a 2D color plot.  Hereby, x and y coordinates in the
    plot are determined by the current dipole (x-axis) and voltage
    dipole (y-axis) of the corresponding measurement configurations.

    This type of rawdata plot can plot any type of measurement
    configurations, i.e., it is not restricted to certain types of
    configurations such as Dipole-dipole or Wenner configurations.
    However, spatial inferences cannot be made from the plots for all
    configuration types.

    Coordinates are generated by separately sorting the dipoles
    (current/voltage) along the first electrode, and then subsequently
    sorting by the difference (skip) between both electrodes.

    Note that this type of raw data plot does not take into account the
    real electrode spacing of the measurement setup.

    Type 2 plots can show normal and reciprocal data at the same time.
    Hereby the upper left triangle of the plot area usually contains
    normal data, and the lower right triangle contains the
    corresponding reciprocal data.  Therefore a quick assessment of
    normal-reciprocal differences can be made by visually comparing the
    symmetry on the 1:1 line going from the lower left corner to the
    upper right corner.

    Note that this interpretation usually only holds for Dipole-Dipole
    data (and the related Wenner configurations).

    Parameters
    ----------
    mid: integer or numpy.ndarray
        Measurement ID of stored measurement data, or a numpy array
        (size N) with data that will be plotted.  Must have the same
        length as number of configurations.
    ax: matplotlib.Axes object, optional
        axes object to plot to.  If not provided, a new figure and axes
        object will be created and returned
    nocb: bool, optional
        if set to False, don't plot the colorbar
    cblabel: string, optional
        label for the colorbar
    cbmin: float, optional
        colorbar minimum
    cbmax: float, optional
        colorbar maximum
    xlabel: string, optional
        xlabel for the plot
    ylabel: string, optional
        ylabel for the plot
    do_not_saturate: bool, optional
        if set to True, then values outside the colorbar range will not
        saturate with the respective limit colors.  Instead, values
        lower than the CB are colored "cyan" and values above the CB
        limit are colored "red"
    log10: bool, optional
        if set to True, plot the log10 values of the provided data

    Returns
    -------
    fig:
        figure object
    ax:
        axes object

    Examples
    --------

    You can just supply a data vector to the plot function:

    .. plot::
        :include-source:

        import numpy as np
        import crtomo.configManager as CRConfig
        configs = CRConfig.ConfigManager(nr_of_electrodes=48)
        configs.gen_dipole_dipole(skipc=1, stepc=2)
        measurements = np.random.random(configs.nr_of_configs)
        configs.plot_pseudosection_type2(
            mid=measurements,
        )

    Generate a simple type 2 plot:

    .. plot::
        :include-source:

        import numpy as np
        import crtomo.configManager as CRConfig
        configs = CRConfig.ConfigManager(nr_of_electrodes=48)
        configs.gen_dipole_dipole(skipc=1, stepc=2)
        measurements = np.random.random(configs.nr_of_configs)
        mid = configs.add_measurements(measurements)
        configs.plot_pseudosection_type2(
            mid,
            cblabel='this label',
            xlabel='xlabel',
            ylabel='ylabel',
        )

    You can also supply axes to plot to:

    .. plot::
        :include-source:

        import numpy as np
        from crtomo.mpl_setup import *
        import crtomo.configManager as CRConfig
        configs = CRConfig.ConfigManager(nr_of_electrodes=48)
        configs.gen_dipole_dipole(skipc=1, stepc=2)
        K = configs.compute_K_factors(spacing=1)
        measurements = np.random.random(configs.nr_of_configs)
        mid = configs.add_measurements(measurements)

        fig, axes = plt.subplots(1, 2)

        configs.plot_pseudosection_type2(
            mid,
            ax=axes[0],
            cblabel='this label',
            xlabel='xlabel',
            ylabel='ylabel',
        )
        configs.plot_pseudosection_type2(
            K,
            ax=axes[1],
            cblabel='K factor',
            xlabel='xlabel',
            ylabel='ylabel',
        )
        fig.tight_layout()
    """
    c = self.configs

    # look-up tables mapping each (sorted) dipole to a plot coordinate
    AB_ids = self._get_unique_identifiers(c[:, 0:2])
    MN_ids = self._get_unique_identifiers(c[:, 2:4])

    ab_sorted = np.sort(c[:, 0:2], axis=1)
    mn_sorted = np.sort(c[:, 2:4], axis=1)

    # encode each dipole pair as a single integer key (e1 * 1e5 + e2)
    # before looking it up in the identifier dicts
    AB_coords = [
        AB_ids[x] for x in
        (ab_sorted[:, 0] * 1e5 + ab_sorted[:, 1]).astype(int)
    ]
    MN_coords = [
        MN_ids[x] for x in
        (mn_sorted[:, 0] * 1e5 + mn_sorted[:, 1]).astype(int)
    ]

    # check for duplicate positions
    ABMN_coords = np.vstack((AB_coords, MN_coords)).T.copy()
    _, counts = np.unique(
        ABMN_coords.view(
            ABMN_coords.dtype.descr * 2
        ),
        return_counts=True,
    )
    if np.any(counts > 1):
        # duplicates would silently overwrite each other in the matrix
        # assignment below; only warn, do not abort
        print('found duplicate coordinates!')
        duplicate_configs = np.where(counts > 1)[0]
        print('duplicate configs:')
        print('A B M N')
        for i in duplicate_configs:
            print(c[i, :])

    # prepare matrix
    if isinstance(mid, int):
        plot_values = self.measurements[mid]
    elif isinstance(mid, np.ndarray):
        plot_values = np.squeeze(mid)
    else:
        raise Exception('Data in parameter "mid" not understood')

    if kwargs.get('log10', False):
        plot_values = np.log10(plot_values)

    # NaN-filled matrix; cells without a configuration stay NaN
    C = np.zeros((len(MN_ids.items()), len(AB_ids))) * np.nan
    C[MN_coords, AB_coords] = plot_values
    # for display purposes, reverse the first dimension
    C = C[::-1, :]

    ax = kwargs.get('ax', None)
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(15 / 2.54, 10 / 2.54))
    fig = ax.get_figure()

    cmap = mpl.cm.get_cmap('viridis')
    if kwargs.get('do_not_saturate', False):
        cmap.set_over(
            color='r'
        )
        cmap.set_under(
            color='c'
        )
    im = ax.matshow(
        C,
        interpolation='none',
        cmap=cmap,
        aspect='auto',
        vmin=kwargs.get('cbmin', None),
        vmax=kwargs.get('cbmax', None),
        extent=[
            0, max(AB_coords),
            0, max(MN_coords),
        ],
    )

    # 1:1 line for the visual normal/reciprocal symmetry check
    max_xy = max((max(AB_coords), max(MN_coords)))
    ax.plot(
        (0, max_xy),
        (0, max_xy),
        '-',
        color='k',
        linewidth=1.0,
    )

    if not kwargs.get('nocb', False):
        cb = fig.colorbar(im, ax=ax)
        cb.set_label(
            kwargs.get('cblabel', '')
        )

    ax.set_xlabel(
        kwargs.get('xlabel', 'current dipoles')
    )
    ax.set_ylabel(
        kwargs.get('ylabel', 'voltage dipoles')
    )

    return fig, ax
Create a pseudosection plot of type 2. For a given measurement data set, create plots that graphically show the data in a 2D color plot. Hereby, x and y coordinates in the plot are determined by the current dipole (x-axis) and voltage dipole (y-axis) of the corresponding measurement configurations. This type of rawdata plot can plot any type of measurement configurations, i.e., it is not restricted to certain types of configurations such as Dipole-dipole or Wenner configurations. However, spatial inferences cannot be made from the plots for all configuration types. Coordinates are generated by separately sorting the dipoles (current/voltage) along the first electrode, and then subsequently sorting by the difference (skip) between both electrodes. Note that this type of raw data plot does not take into account the real electrode spacing of the measurement setup. Type 2 plots can show normal and reciprocal data at the same time. Hereby the upper left triangle of the plot area usually contains normal data, and the lower right triangle contains the corresponding reciprocal data. Therefore a quick assessment of normal-reciprocal differences can be made by visually comparing the symmetry on the 1:1 line going from the lower left corner to the upper right corner. Note that this interpretation usually only holds for Dipole-Dipole data (and the related Wenner configurations). Parameters ---------- mid: integer or numpy.ndarray Measurement ID of stored measurement data, or a numpy array (size N) with data that will be plotted. Must have the same length as number of configurations. ax: matplotlib.Axes object, optional axes object to plot to. 
If not provided, a new figure and axes object will be created and returned nocb: bool, optional if set to False, don't plot the colorbar cblabel: string, optional label for the colorbar cbmin: float, optional colorbar minimum cbmax: float, optional colorbar maximum xlabel: string, optional xlabel for the plot ylabel: string, optional ylabel for the plot do_not_saturate: bool, optional if set to True, then values outside the colorbar range will not saturate with the respective limit colors. Instead, values lower than the CB are colored "cyan" and vaues above the CB limit are colored "red" log10: bool, optional if set to True, plot the log10 values of the provided data Returns ------- fig: figure object ax: axes object Examples -------- You can just supply a data vector to the plot function: .. plot:: :include-source: import numpy as np import crtomo.configManager as CRConfig configs = CRConfig.ConfigManager(nr_of_electrodes=48) configs.gen_dipole_dipole(skipc=1, stepc=2) measurements = np.random.random(configs.nr_of_configs) configs.plot_pseudosection_type2( mid=measurements, ) Generate a simple type 2 plot: .. plot:: :include-source: import numpy as np import crtomo.configManager as CRConfig configs = CRConfig.ConfigManager(nr_of_electrodes=48) configs.gen_dipole_dipole(skipc=1, stepc=2) measurements = np.random.random(configs.nr_of_configs) mid = configs.add_measurements(measurements) configs.plot_pseudosection_type2( mid, cblabel='this label', xlabel='xlabel', ylabel='ylabel', ) You can also supply axes to plot to: .. 
plot:: :include-source: import numpy as np from crtomo.mpl_setup import * import crtomo.configManager as CRConfig configs = CRConfig.ConfigManager(nr_of_electrodes=48) configs.gen_dipole_dipole(skipc=1, stepc=2) K = configs.compute_K_factors(spacing=1) measurements = np.random.random(configs.nr_of_configs) mid = configs.add_measurements(measurements) fig, axes = plt.subplots(1, 2) configs.plot_pseudosection_type2( mid, ax=axes[0], cblabel='this label', xlabel='xlabel', ylabel='ylabel', ) configs.plot_pseudosection_type2( K, ax=axes[1], cblabel='K factor', xlabel='xlabel', ylabel='ylabel', ) fig.tight_layout()
def StartFlowAndWait(client_id, token=None, timeout=DEFAULT_TIMEOUT, **flow_args):
  """Starts a flow on the given client and blocks until it finishes.

  Args:
    client_id: The client id of the client to run on.
    token: The datastore access token.
    timeout: How long to wait for a flow to complete, maximum.
    **flow_args: Pass through to flow.

  Returns:
    The urn of the flow that was run.
  """
  # Launch synchronously, then poll the flow until it terminates.
  session_urn = flow.StartAFF4Flow(
      client_id=client_id, token=token, sync=True, **flow_args)
  WaitForFlow(session_urn, token=token, timeout=timeout)
  return session_urn
Runs a flow and waits for it to finish. Args: client_id: The client id of the client to run on. token: The datastore access token. timeout: How long to wait for a flow to complete, maximum. **flow_args: Pass through to flow. Returns: The urn of the flow that was run.
def configure_health(graph):
    """
    Configure the health endpoint.

    :returns: a handle to the `Health` object, allowing other components
              to manipulate health state.

    """
    namespace = Namespace(subject=Health)
    # Whether build info is exposed is driven by string config ("true"/"false").
    with_build_info = strtobool(graph.config.health_convention.include_build_info)
    health_convention = HealthConvention(graph, with_build_info)
    health_convention.configure(namespace, retrieve=tuple())
    return health_convention.health
Configure the health endpoint. :returns: a handle to the `Health` object, allowing other components to manipulate health state.
def ovsdb_server_port(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ovsdb_server = ET.SubElement(config, "ovsdb-server", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(ovsdb_server, "name") name_key.text = kwargs.pop('name') port = ET.SubElement(ovsdb_server, "port") port.text = kwargs.pop('port') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def verify_fft_options(opt, parser):
    """Parses the FFT options and verifies that they are reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes.
    parser : object
        OptionParser instance; its ``error`` method is invoked on failure.
    """
    if opt.fftw_measure_level not in (0, 1, 2, 3):
        parser.error(
            "{0} is not a valid FFTW measure level.".format(opt.fftw_measure_level))

    # System wisdom and explicit wisdom files are mutually exclusive.
    wisdom_file_given = (opt.fftw_input_float_wisdom_file is not None or
                         opt.fftw_input_double_wisdom_file is not None)
    if opt.fftw_import_system_wisdom and wisdom_file_given:
        parser.error("If --fftw-import-system-wisdom is given, then you cannot give"
                     " either of --fftw-input-float-wisdom-file or --fftw-input-double-wisdom-file")

    if opt.fftw_threads_backend is not None:
        if opt.fftw_threads_backend not in ('openmp', 'pthreads', 'unthreaded'):
            parser.error("Invalid threads backend; must be 'openmp', 'pthreads' or 'unthreaded'")
Parses the FFT options and verifies that they are reasonable. Parameters ---------- opt : object Result of parsing the CLI with OptionParser, or any object with the required attributes. parser : object OptionParser instance.
def get_huc8(prefix):
    """
    Return all HUC8s matching the given prefix (e.g. 1801) or basin name
    (e.g. Klamath)
    """
    if not prefix.isdigit():
        # Resolve a basin name to a huc prefix; prefer the most general
        # (shortest) huc when several rows share the same name.
        wanted = prefix.lower()
        prefix = None
        for entry in hucs:
            if entry.basin.lower() == wanted:
                if prefix is None or len(entry.huc) < len(prefix):
                    prefix = entry.huc
        if prefix is None:
            return []

    # All 8-digit hucs that share the resolved prefix.
    return [entry.huc for entry in hucs
            if len(entry.huc) == 8 and entry.huc.startswith(prefix)]
Return all HUC8s matching the given prefix (e.g. 1801) or basin name (e.g. Klamath)
def dcdict2rdfpy(dc_dict):
    """Convert a DC dictionary into an RDF Python object.

    :param dc_dict: mapping of Dublin Core element names to lists of
        ``{'content': ...}`` value dicts; must contain an 'identifier' list.
    :returns: an rdflib ``ConjunctiveGraph`` with one triple per DC value.
    """
    ark_prefix = 'ark: ark:'
    # Fallback subject when no ark identifier is found below.
    uri = URIRef('')
    # Create the RDF Python object.
    rdf_py = ConjunctiveGraph()
    # Set DC namespace definition.
    DC = Namespace('http://purl.org/dc/elements/1.1/')
    # Get the ark for the subject URI from the ark identifier.
    for element_value in dc_dict['identifier']:
        if element_value['content'].startswith(ark_prefix):
            uri = URIRef(
                element_value['content'].replace(
                    ark_prefix,
                    'info:ark'
                )
            )
    # Bind the prefix/namespace pair.
    rdf_py.bind('dc', DC)
    # Get the values for each element in the ordered DC elements.
    for element_name in DC_ORDER:
        element_value_list = dc_dict.get(element_name, [])
        # Add the values to the RDF object.
        for element_value in element_value_list:
            # Handle URL values differently: space-free values mentioning
            # 'http' become URIRefs, everything else a plain Literal.
            if ('http' in element_value['content'] and
                    ' ' not in element_value['content']):
                rdf_py.add((
                    uri,
                    DC[element_name],
                    URIRef(element_value['content'])
                ))
            else:
                rdf_py.add((
                    uri,
                    DC[element_name],
                    Literal(element_value['content'])
                ))
    return rdf_py
Convert a DC dictionary into an RDF Python object.
def aggregate_data(self, start, end, aggregation, keys=[], tags=[], attrs={},
                   rollup=None, period=None, interpolationf=None,
                   interpolation_period=None, tz=None, limit=1000):
    """Read data from multiple series selected by a filter and fold the
    returned series into one aggregate series of datapoints.

    See the :meth:`list_series` method for a description of how the filter
    criteria are applied, and the :meth:`read_data` method for how to work
    with the start, end, and tz parameters.  Valid aggregation functions
    are the same as valid rollup functions.

    :param string aggregation: the aggregation to perform
    :param keys: (optional) filter by one or more series keys
    :type keys: list or string
    :param tags: (optional) filter by one or more tags
    :type tags: list or string
    :param dict attrs: (optional) filter by one or more key-value attributes
    :param start: the start time for the data points
    :type start: string or Datetime
    :param end: the end time for the data points
    :type end: string or Datetime
    :param string rollup: (optional) the name of a rollup function to use
    :param string period: (optional) downsampling rate for the data
    :param string interpolationf: (optional) an interpolation function to run
        over the series
    :param string interpolation_period: (optional) the period to interpolate
        data into
    :param string tz: (optional) the timezone to place the data into
    :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an iterator
        over :class:`tempodb.protocol.objects.DataPoint` objects"""
    # Validate/normalize the time bounds before building the query string.
    params = {
        'start': check_time_param(start),
        'end': check_time_param(end),
        'key': keys,
        'tag': tags,
        'attr': attrs,
        'aggregation.fold': aggregation,
        'rollup.fold': rollup,
        'rollup.period': period,
        'interpolation.function': interpolationf,
        'interpolation.period': interpolation_period,
        'tz': tz,
        'limit': limit
    }
    query = endpoint.make_url_args(params)
    return self.session.get('?'.join(['segment', query]))
Read data from multiple series according to a filter and apply a function across all the returned series to put the datapoints together into one aggregrate series. See the :meth:`list_series` method for a description of how the filter criteria are applied, and the :meth:`read_data` method for how to work with the start, end, and tz parameters. Valid aggregation functions are the same as valid rollup functions. :param string aggregation: the aggregation to perform :param keys: (optional) filter by one or more series keys :type keys: list or string :param tags: (optional) filter by one or more tags :type tags: list or string :param dict attrs: (optional) filter by one or more key-value attributes :param start: the start time for the data points :type start: string or Datetime :param end: the end time for the data points :type end: string or Datetime :param string rollup: (optional) the name of a rollup function to use :param string period: (optional) downsampling rate for the data :param string interpolationf: (optional) an interpolation function to run over the series :param string interpolation_period: (optional) the period to interpolate data into :param string tz: (optional) the timezone to place the data into :rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an iterator over :class:`tempodb.protocol.objects.DataPoint` objects
def create_dbinstance_read_replica(self, id, source_id, instance_class=None,
                                   port=3306, availability_zone=None,
                                   auto_minor_version_upgrade=None):
    """
    Create a new DBInstance Read Replica.

    :type id: str
    :param id: Unique identifier for the new instance.
        Must contain 1-63 alphanumeric characters.
        First character must be a letter.
        May not end with a hyphen or contain two consecutive hyphens

    :type source_id: str
    :param source_id: Unique identifier for the DB Instance for which this
        DB Instance will act as a Read Replica.

    :type instance_class: str
    :param instance_class: The compute and memory capacity of the DBInstance.
        Default is to inherit from the source DB Instance.  Valid values are:
        db.m1.small, db.m1.large, db.m1.xlarge, db.m2.xlarge, db.m2.2xlarge,
        db.m2.4xlarge

    :type port: int
    :param port: Port number on which database accepts connections.
        Default is to inherit from source DB Instance.
        Valid values [1115-65535].  Defaults to 3306.

    :type availability_zone: str
    :param availability_zone: Name of the availability zone to place
        DBInstance into.

    :type auto_minor_version_upgrade: bool
    :param auto_minor_version_upgrade: Indicates that minor engine upgrades
        will be applied automatically to the Read Replica during the
        maintenance window.  Default is to inherit this value from the
        source DB Instance.

    :rtype: :class:`boto.rds.dbinstance.DBInstance`
    :return: The new db instance.
    """
    params = {
        'DBInstanceIdentifier': id,
        'SourceDBInstanceIdentifier': source_id,
    }
    # Only send optional arguments the caller actually provided; the API
    # then inherits the remaining settings from the source instance.
    if instance_class:
        params['DBInstanceClass'] = instance_class
    if port:
        params['Port'] = port
    if availability_zone:
        params['AvailabilityZone'] = availability_zone
    if auto_minor_version_upgrade is not None:
        params['AutoMinorVersionUpgrade'] = (
            'true' if auto_minor_version_upgrade is True else 'false')
    return self.get_object('CreateDBInstanceReadReplica', params, DBInstance)
Create a new DBInstance Read Replica. :type id: str :param id: Unique identifier for the new instance. Must contain 1-63 alphanumeric characters. First character must be a letter. May not end with a hyphen or contain two consecutive hyphens :type source_id: str :param source_id: Unique identifier for the DB Instance for which this DB Instance will act as a Read Replica. :type instance_class: str :param instance_class: The compute and memory capacity of the DBInstance. Default is to inherit from the source DB Instance. Valid values are: * db.m1.small * db.m1.large * db.m1.xlarge * db.m2.xlarge * db.m2.2xlarge * db.m2.4xlarge :type port: int :param port: Port number on which database accepts connections. Default is to inherit from source DB Instance. Valid values [1115-65535]. Defaults to 3306. :type availability_zone: str :param availability_zone: Name of the availability zone to place DBInstance into. :type auto_minor_version_upgrade: bool :param auto_minor_version_upgrade: Indicates that minor engine upgrades will be applied automatically to the Read Replica during the maintenance window. Default is to inherit this value from the source DB Instance. :rtype: :class:`boto.rds.dbinstance.DBInstance` :return: The new db instance.
def del_role(self, role):
    """ deletes a group """
    # Only groups owned by this client may be removed.
    group = AuthGroup.objects(role=role, creator=self.client).first()
    if not group:
        return False
    group.delete()
    return True
deletes a group
def propagate_averages(self, n, tv, bv, var, outgroup=False):
    """
    This function implements the propagation of the means, variance, and covariances
    along a branch. It operates both towards the root and tips.

    Parameters
    ----------
     n : (node)
        the branch connecting this node to its parent is used for propagation
     tv : (float)
        tip value. Only required if the node is terminal
     bv : (float)
        branch value. The increment of the tree associated quantity
     var : (float)
        the variance increment along the branch
     outgroup : (bool)
        if True, use the outgroup vector ``n.O`` instead of ``n.Q``

    Returns
    -------
     Q : (np.array)
        a vector of length 6 containing the updated quantities
    """
    if n.is_terminal() and outgroup==False:
        if tv is None or np.isinf(tv) or np.isnan(tv):
            # Missing or invalid tip value: contributes nothing to the sums.
            res = np.array([0, 0, 0, 0, 0, 0])
        elif var==0:
            # Zero variance would divide by zero; treat as infinite weight.
            res = np.array([np.inf, np.inf, np.inf, np.inf, np.inf, np.inf])
        else:
            # Inverse-variance weighted contributions of a single tip.
            res = np.array([
                tv/var,
                bv/var,
                tv**2/var,
                bv*tv/var,
                bv**2/var,
                1.0/var], dtype=float)
    else:
        # Internal node (or outgroup direction): shift the accumulated
        # quantities by the branch value bv and inflate by the variance.
        # Indices sii, tavgii, davgii, tsqii, dtavgii, dsqii are
        # module-level constants naming the six slots of the Q vector.
        tmpQ = n.O if outgroup else n.Q
        denom = 1.0/(1+var*tmpQ[sii])
        res = np.array([
            tmpQ[tavgii]*denom,
            (tmpQ[davgii] + bv*tmpQ[sii])*denom,
            tmpQ[tsqii] - var*tmpQ[tavgii]**2*denom,
            tmpQ[dtavgii] + tmpQ[tavgii]*bv - var*tmpQ[tavgii]*(tmpQ[davgii] + bv*tmpQ[sii])*denom,
            tmpQ[dsqii] + 2*bv*tmpQ[davgii] + bv**2*tmpQ[sii] - var*(tmpQ[davgii]**2 + 2*bv*tmpQ[davgii]*tmpQ[sii] + bv**2*tmpQ[sii]**2)*denom,
            tmpQ[sii]*denom]
        )

    return res
This function implements the propagation of the means, variance, and covariances along a branch. It operates both towards the root and tips. Parameters ---------- n : (node) the branch connecting this node to its parent is used for propagation tv : (float) tip value. Only required if the node is terminal bv : (float) branch value. The increment of the tree associated quantity var : (float) the variance increment along the branch Returns ------- Q : (np.array) a vector of length 6 containing the updated quantities
def bits_to_bytes(bits):
    """Convert the bit list into bytes.

    (Assumes bits is a list whose length is a multiple of 8)

    :param bits: list of 0/1 values (or anything whose str() is '0'/'1'),
        most significant bit of each byte first.
    :raises Exception: if the number of bits is not a multiple of 8.
    :return: the packed value, passed through ``utils.binary``.
    """
    if len(bits) % 8 != 0:
        raise Exception("num bits must be multiple of 8")
    # Build the string with a single join instead of repeated += string
    # concatenation, which is quadratic in the number of bytes.
    res = "".join(
        chr(int(''.join(map(str, bits[x:x + 8])), 2))
        for x in six.moves.range(0, len(bits), 8))
    return utils.binary(res)
Convert the bit list into bytes. (Assumes bits is a list whose length is a multiple of 8)
def hook_inform(self, inform_name, callback):
    """Hookup a function to be called when an inform is received.

    Useful for interface-changed and sensor-status informs.

    Parameters
    ----------
    inform_name : str
        The name of the inform.
    callback : function
        The function to be called.
    """
    hooks = self._inform_hooks[inform_name]
    # Do not hook the same callback multiple times
    if callback not in hooks:
        hooks.append(callback)
Hookup a function to be called when an inform is received. Useful for interface-changed and sensor-status informs. Parameters ---------- inform_name : str The name of the inform. callback : function The function to be called.
def _call(self, method, **kwargs):
    """Invoke an API method and return ``(parsed_response, raw_json)``.

    :param method: API method name; merged into the query string together
        with the partner credentials and any extra keyword arguments.
    :raises InvalidCredentialsException: when the API answers with code 4.
    :raises APIRequestException: on transport errors (HTTP/URL/timeout).
    :raises APIDataException: when the response is not valid JSON.
    """
    kwargs.update(dict(
        partnerid=self.partner_id,
        partnerkey=self.partner_key,
        method=method,
    ))

    url = options.ENDPOINT_URL + '?' + parse.urlencode(kwargs)

    try:
        raw_json = request.urlopen(url, timeout=self.timeout).read().decode('utf-8')
        res = json.loads(raw_json)
        if 'code' in res:
            # Code 4 marks invalid partner credentials.
            # NOTE(review): if InvalidCredentialsException subclasses
            # ValueError it would be swallowed and re-wrapped below -- confirm.
            if res['code'] == 4:
                raise InvalidCredentialsException(res['msg'])
    except (HTTPError, URLError, timeout) as e:
        # request error (transport level)
        raise APIRequestException(e.__class__.__name__, e)
    except (ValueError, ) as e:
        # data error (malformed JSON)
        raise APIDataException(e.__class__.__name__, e)

    return res, raw_json
Вызов метода API
def get_root_directory(self, timestamp=None):
    """
    A helper method that supplies the root directory name given a
    timestamp; falls back to ``self.timestamp`` when none is given.
    """
    if timestamp is None:
        timestamp = self.timestamp

    # Prepend the formatted timestamp only when a format is configured.
    if self.timestamp_format is not None:
        root_name = '-'.join([time.strftime(self.timestamp_format, timestamp),
                              self.batch_name])
    else:
        root_name = self.batch_name

    return os.path.abspath(
        os.path.join(self.output_directory, *(self.subdir + [root_name])))
A helper method that supplies the root directory name given a timestamp.
def default(self):
    """Return default contents: the packaged notebook template rendered
    with this bundle's title and identity, as a JSON string."""
    import json
    import ambry.bundle.default_files as df
    import os

    # Template notebook shipped alongside the default_files package.
    path = os.path.join(os.path.dirname(df.__file__), 'notebook.ipynb')

    if six.PY2:
        with open(path, 'rb') as f:
            content_str = f.read()
    else:  # py3
        with open(path, 'rt', encoding='utf-8') as f:
            content_str = f.read()

    c = json.loads(content_str)

    # Values substituted into the notebook cells via str.format.
    # NOTE(review): 'summary' is filled from about.title, not about.summary
    # -- confirm this is intentional.
    context = {
        'title': self._bundle.metadata.about.title,
        'summary': self._bundle.metadata.about.title,
        'bundle_vname': self._bundle.identity.vname
    }

    # Render every source line of every cell with the context above.
    for cell in c['cells']:
        for i in range(len(cell['source'])):
            cell['source'][i] = cell['source'][i].format(**context)

    # Stamp the bundle identity into the notebook metadata.
    c['metadata']['ambry'] = {
        'identity': self._bundle.identity.dict
    }

    return json.dumps(c, indent=4)
Return default contents
def getUpdatedFields(self, cascadeObjects=False):
    '''
        getUpdatedFields - See changed fields.

        @param cascadeObjects <bool> Default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively).
            Otherwise, will just check if the pk has changed.

        @return - a dictionary of fieldName : tuple(old, new).

        fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
    '''
    updatedFields = {}
    for thisField in self.FIELDS:
        # Bypass any __getattribute__ overrides to read the raw field value.
        thisVal = object.__getattribute__(self, thisField)
        # Changed compared to the value loaded from the datastore?
        if self._origData.get(thisField, '') != thisVal:
            updatedFields[thisField] = (self._origData[thisField], thisVal)

        # For foreign links, optionally treat unsaved changes on the linked
        # object itself as a change of this field.
        if cascadeObjects is True and issubclass(thisField.__class__, IRForeignLinkFieldBase) and thisVal.objHasUnsavedChanges():
            updatedFields[thisField] = (self._origData[thisField], thisVal)

    return updatedFields
getUpdatedFields - See changed fields. @param cascadeObjects <bool> default False, if True will check if any foreign linked objects themselves have unsaved changes (recursively). Otherwise, will just check if the pk has changed. @return - a dictionary of fieldName : tuple(old, new). fieldName may be a string or may implement IRField (which implements string, and can be used just like a string)
def _add_stats_box(h1: Histogram1D, ax: Axes, stats: Union[str, bool] = "all"):
    """Insert a small legend-like box with statistical information.

    Parameters
    ----------
    stats : "all" | "total" | True
        What info to display

    Note
    ----
    Very basic implementation.
    """
    if stats == "total":
        text = "Total: {0}".format(h1.total)
    elif stats in ("all", True):
        text = "Total: {0}\nMean: {1:.2f}\nStd.dev: {2:.2f}".format(
            h1.total, h1.mean(), h1.std())
    else:
        raise ValueError("Invalid stats specification")

    # Anchor the text box in the upper left corner, in axes coordinates.
    ax.text(0.05, 0.95, text, transform=ax.transAxes,
            verticalalignment='top', horizontalalignment='left')
Insert a small legend-like box with statistical information. Parameters ---------- stats : "all" | "total" | True What info to display Note ---- Very basic implementation.
def extract(self, node, condition, skip=0):
    """
    Extract a single node that matches the provided condition, otherwise a
    TypeError is raised. An optional skip parameter can be provided to
    specify how many matching nodes are to be skipped over.
    """
    remaining = skip
    for match in self.filter(node, condition):
        if not remaining:
            return match
        remaining -= 1
    raise TypeError('no match found')
Extract a single node that matches the provided condition, otherwise a TypeError is raised. An optional skip parameter can be provided to specify how many matching nodes are to be skipped over.
def _user_mdata(mdata_list=None, mdata_get=None):
    '''
    Collect user metadata grains via the SmartOS mdata tools.

    mdata_list -- path to the ``mdata-list`` binary; looked up on PATH if falsy
    mdata_get -- path to the ``mdata-get`` binary; looked up on PATH if falsy

    Returns a dict that contains an ``mdata`` sub-dict of sanitized keys,
    or an empty dict when the tools are unavailable.
    '''
    grains = {}

    if not mdata_list:
        mdata_list = salt.utils.path.which('mdata-list')

    if not mdata_get:
        mdata_get = salt.utils.path.which('mdata-get')

    if not mdata_list or not mdata_get:
        # Not running in a SmartOS zone; nothing to collect.
        return grains

    for mdata_grain in __salt__['cmd.run'](mdata_list, ignore_retcode=True).splitlines():
        mdata_value = __salt__['cmd.run']('{0} {1}'.format(mdata_get, mdata_grain), ignore_retcode=True)

        # Keys in the 'sdc:' namespace are system metadata, not user metadata.
        if not mdata_grain.startswith('sdc:'):
            if 'mdata' not in grains:
                grains['mdata'] = {}

            log.debug('found mdata entry %s with value %s', mdata_grain, mdata_value)
            # Sanitize the key so it is a valid grain name.
            mdata_grain = mdata_grain.replace('-', '_')
            mdata_grain = mdata_grain.replace(':', '_')
            grains['mdata'][mdata_grain] = mdata_value

    return grains
User Metadata
def get(self, tag, nth=1):
    """Return n-th value for tag.

    :param tag: FIX field tag number.
    :param nth: Index of tag if repeating, first is 1.
    :return: None if nothing found, otherwise value matching tag.

    Defaults to returning the first matching value of 'tag', but if
    the 'nth' parameter is overridden, can get repeated fields."""
    wanted = fix_tag(tag)
    remaining = int(nth)
    # Scan the ordered field pairs, counting down matches until the
    # requested occurrence is reached.
    for field_tag, field_value in self.pairs:
        if field_tag == wanted:
            remaining -= 1
            if remaining == 0:
                return field_value
    return None
Return n-th value for tag. :param tag: FIX field tag number. :param nth: Index of tag if repeating, first is 1. :return: None if nothing found, otherwise value matching tag. Defaults to returning the first matching value of 'tag', but if the 'nth' parameter is overridden, can get repeated fields.
def retry_with_delay(f, delay=60):
    """
    Retry the wrapped requests.request function in case of ConnectionError.
    Optionally limit the number of retries or set the delay between retries.

    :param f: the requests function to wrap.
    :param delay: seconds to sleep (via gevent) between attempts.
    """
    @wraps(f)
    def inner(*args, **kwargs):
        # NOTE(review): this unconditionally overrides any caller-supplied
        # timeout with 5 seconds -- confirm that is intentional.
        kwargs['timeout'] = 5
        # get_retries() extra attempts on top of the initial call.
        remaining = get_retries() + 1
        while remaining:
            remaining -= 1
            try:
                return f(*args, **kwargs)
            except (requests.ConnectionError, requests.Timeout):
                # Out of attempts: propagate the last exception.
                if not remaining:
                    raise
                gevent.sleep(delay)
    return inner
Retry the wrapped requests.request function in case of ConnectionError. Optionally limit the number of retries or set the delay between retries.
def institutes():
    """Display a list of all user institutes."""
    overview = []
    for ins_obj in user_institutes(store, current_user):
        # Resolve recipient e-mails to display names, dropping unknown users.
        recipients = []
        for user_mail in ins_obj.get('sanger_recipients', []):
            user_obj = store.user(user_mail)
            if user_obj:
                recipients.append(user_obj['name'])
        overview.append({
            'display_name': ins_obj['display_name'],
            'internal_id': ins_obj['_id'],
            'coverage_cutoff': ins_obj.get('coverage_cutoff', 'None'),
            'sanger_recipients': recipients,
            'frequency_cutoff': ins_obj.get('frequency_cutoff', 'None'),
            'phenotype_groups': ins_obj.get('phenotype_groups', PHENOTYPE_GROUPS),
        })
    return render_template('overview/institutes.html', institutes=overview)
Display a list of all user institutes.
def legacy(**kwargs):
    """
    Compute options for using the PHOEBE 1.0 legacy backend (must be
    installed).

    Generally, this will be used as an input to the kind argument in
    :meth:`phoebe.frontend.bundle.Bundle.add_compute`

    Please see :func:`phoebe.backend.backends.legacy` for a list of sources to
    cite when using this backend.

    :parameter **kwargs: defaults for the values of any of the parameters
    :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
        created :class:`phoebe.parameters.parameters.Parameter`s
    """
    params = []

    # Per-dataset switch controlling whether synthetics are computed.
    params += [BoolParameter(qualifier='enabled', copy_for={'context': 'dataset', 'kind': ['lc', 'rv', 'mesh'], 'dataset': '*'}, dataset='_default', value=kwargs.get('enabled', True), description='Whether to create synthetics in compute/fitting run')]

    # TODO: the kwargs need to match the qualifier names!
    # TODO: include MORE meshing options

    # Atmosphere table, copied for every star component.
    params += [ChoiceParameter(copy_for = {'kind': ['star'], 'component': '*'}, component='_default', qualifier='atm', value=kwargs.get('atm', 'extern_atmx'), choices=['extern_atmx', 'extern_planckint'], description='Atmosphere table')]
    # params += [ChoiceParameter(copy_for = {'kind': ['star'], 'component': '*'}, component='_default', qualifier='atm', value=kwargs.get('atm', 'kurucz'), choices=['kurucz', 'blackbody'], description='Atmosphere table')]

    # params += [ChoiceParameter(qualifier='morphology', value=kwargs.get('morphology','Detached binary'), choices=['Unconstrained binary system', 'Detached binary', 'Overcontact binary of the W UMa type', 'Overcontact binary not in thermal contact'], description='System type constraint')]
    # params += [BoolParameter(qualifier='cindex', value=kwargs.get('cindex', False), description='Color index constraint')]
    # params += [IntParameter(visible_if='cindex_switch:True', qualifier='cindex', value=kwargs.get('cindex', np.array([1.0])), description='Number of reflections')]
    # params += [BoolParameter(qualifier='heating', value=kwargs.get('heating', True), description='Allow irradiators to heat other components')]

    # WD-style meshing resolution, copied for every star component.
    params += [IntParameter(copy_for={'kind': ['star'], 'component': '*'}, component='_default', qualifier='gridsize', value=kwargs.get('gridsize', 60), limits=(10,None), description='Number of meshpoints for WD')]

    # Irradiation/reflection handling; refl_num only applies to 'wilson'.
    params += [ChoiceParameter(qualifier='irrad_method', value=kwargs.get('irrad_method', 'wilson'), choices=['none', 'wilson'], description='Which method to use to handle irradiation/reflection effects')]
    params += [IntParameter(visible_if='irrad_method:wilson', qualifier='refl_num', value=kwargs.get('refl_num', 1), limits=(0,None), description='Number of reflections')]

    # params += [BoolParameter(qualifier='msc1', value=kwargs.get('msc1', False), description='Mainsequence Constraint for star 1')]
    # params += [BoolParameter(qualifier='msc2', value=kwargs.get('msc2', False), description='Mainsequence Constraint for star 2')]

    # TODO: can we come up with a better qualifier for reddening (and be consistent when we enable in phoebe2)
    params += [BoolParameter(qualifier='ie', value=kwargs.get('ie', False), description='Should data be de-reddened')]

    # TODO: can we change this to rv_method = ['flux_weighted', 'dynamical'] to be consistent with phoebe2?
    # TODO: can proximity_rv (rv_method) be copied for each dataset (see how this is done for phoebe2)?  This would probably mean that the wrapper would need to loop and make separate calls since PHOEBE1 can't handle different settings per-RV dataset
    params += [ChoiceParameter(qualifier='rv_method', copy_for = {'kind': ['rv'], 'component': '*', 'dataset': '*'}, component='_default', dataset='_default', value=kwargs.get('rv_method', 'flux-weighted'), choices=['flux-weighted', 'dynamical'], description='Method to use for computing RVs (must be flux-weighted for Rossiter-McLaughlin)')]

    return ParameterSet(params)
Compute options for using the PHOEBE 1.0 legacy backend (must be installed). Generally, this will be used as an input to the kind argument in :meth:`phoebe.frontend.bundle.Bundle.add_compute` Please see :func:`phoebe.backend.backends.legacy` for a list of sources to cite when using this backend. :parameter **kwargs: defaults for the values of any of the parameters :return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly created :class:`phoebe.parameters.parameters.Parameter`s
def read(fname, merge_duplicate_shots=False, encoding='windows-1252'):
    """Read a PocketTopo .TXT file and produce a `TxtFile` object which
    represents it"""
    parser = PocketTopoTxtParser(fname, merge_duplicate_shots, encoding)
    return parser.parse()
Read a PocketTopo .TXT file and produce a `TxtFile` object which represents it
def __extend_uri(prefixes, short):
    """
    Extend a prefixed uri with the help of a specific dictionary of prefixes

    :param prefixes: Dictionary of prefixes
    :param short: Prefixed uri to be extended
    :return: the expanded uri, or ``short`` unchanged when no prefix matches
    """
    for candidate, base in prefixes.items():
        if short.startswith(candidate):
            return short.replace(candidate + ':', base)
    return short
Extend a prefixed uri with the help of a specific dictionary of prefixes :param prefixes: Dictionary of prefixes :param short: Prefixed uri to be extended :return:
def as_tuple(self):
    """
    :return: Rows of the table as named tuples (one ``Row`` per
        non-empty row of ``value_dp_matrix``).
    :rtype: list of |namedtuple|

    :Sample Code:
        .. code:: python

            from tabledata import TableData

            records = TableData(
                "sample", ["a", "b"], [[1, 2], [3.3, 4.4]]
            ).as_tuple()
            for record in records:
                print(record)

    :Output:
        .. code-block:: none

            Row(a=1, b=2)
            Row(a=Decimal('3.3'), b=Decimal('4.4'))
    """
    Row = namedtuple("Row", self.headers)

    for dp_row in self.value_dp_matrix:
        # Skip empty rows entirely.
        if typepy.is_empty_sequence(dp_row):
            continue
        yield Row(*(value_dp.data for value_dp in dp_row))
:return: Rows of the table. :rtype: list of |namedtuple| :Sample Code: .. code:: python from tabledata import TableData records = TableData( "sample", ["a", "b"], [[1, 2], [3.3, 4.4]] ).as_tuple() for record in records: print(record) :Output: .. code-block:: none Row(a=1, b=2) Row(a=Decimal('3.3'), b=Decimal('4.4'))
def get_weakref(func):
    """Get a weak reference to bound or unbound `func`.

    If `func` is unbound (i.e. has no __self__ attr) get a weakref.ref,
    otherwise get a wrapper that simulates weakref.ref.
    """
    if func is None:
        raise ValueError
    if hasattr(func, '__self__'):
        # Bound method: weakref.ref would die with the transient bound
        # object, so use the WeakMethod wrapper instead.
        return WeakMethod(func)
    return weakref.ref(func)
Get a weak reference to bound or unbound `func`. If `func` is unbound (i.e. has no __self__ attr) get a weakref.ref, otherwise get a wrapper that simulates weakref.ref.
def export(self, metadata, **kwargs):
    """
    Export metadata as YAML.

    This uses yaml.SafeDumper by default.
    """
    # Defaults that the caller may override via kwargs.
    defaults = {
        'Dumper': SafeDumper,
        'default_flow_style': False,
        'allow_unicode': True,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    dumped = yaml.dump(metadata, **kwargs).strip()
    return u(dumped)
Export metadata as YAML. This uses yaml.SafeDumper by default.
def start(self):
    """Starts the `PrawOAuth2Server` server. It will open the default
    web browser and it will take you to Reddit's authorization page,
    asking you to authorize your Reddit account(or account of the bot's)
    with your app(or bot script). Once authorized successfully, it will
    show `successful` message in web browser.
    """
    global CODE
    url = self._get_auth_url()
    webbrowser.open(url)
    # Block until the request handler stops the IOLoop after Reddit
    # redirects back; the handler stores the auth code in global CODE.
    tornado.ioloop.IOLoop.current().start()
    self.code = CODE
Starts the `PrawOAuth2Server` server. It will open the default web browser and it will take you to Reddit's authorization page, asking you to authorize your Reddit account(or account of the bot's) with your app(or bot script). Once authorized successfully, it will show `successful` message in web browser.
def check_message(keywords, message):
    """Checks an exception for given keywords and raises a new ``ActionError``
    with the desired message if the keywords are found. This allows selective
    control over API error messages.

    Must be called from inside an ``except`` block; when every keyword
    appears in the exception text, the exception's message is replaced
    and it is re-raised, otherwise control returns to the caller.
    """
    _, exc_value, _ = sys.exc_info()
    exc_words = set(str(exc_value).split(" "))
    if exc_words.issuperset(set(keywords)):
        exc_value.message = message
        raise
Checks an exception for given keywords and raises a new ``ActionError`` with the desired message if the keywords are found. This allows selective control over API error messages.
def init(scope):
    """
    Copy all values of scope into the class SinonGlobals

    Args:
        scope (eg. locals() or globals())
    Return:
        SinonGlobals instance
    """
    class SinonGlobals(object):  # pylint: disable=too-few-public-methods
        """Empty container; init() pushes the given scope's functions here."""
        pass

    global CPSCOPE  # pylint: disable=global-statement
    CPSCOPE = SinonGlobals()
    # Only plain functions are copied over; other values are ignored.
    for obj in scope.values():
        if isinstance(obj, FunctionType):
            setattr(CPSCOPE, obj.__name__, obj)
    return CPSCOPE
Copy all values of scope into the class SinonGlobals Args: scope (eg. locals() or globals()) Return: SinonGlobals instance
def post(cond):
    """
    Add a postcondition check to the annotated method. The condition is
    passed the return value of the annotated method.

    Relies on the module-level ``enabled`` flag and ``check`` wrapper.
    """
    # Capture the condition's source text so assertion failures are readable.
    source = inspect.getsource(cond).strip()

    def inner(f):
        if enabled:
            # deal with the real function, not a wrapper
            f = getattr(f, 'wrapped_fn', f)

            def check_condition(result):
                if not cond(result):
                    raise AssertionError('Postcondition failure, %s' % source)

            # append to the rest of the postconditions attached to this method
            if not hasattr(f, 'postconditions'):
                f.postconditions = []
            f.postconditions.append(check_condition)
            return check(f)
        else:
            # Contracts disabled: return the function untouched.
            return f
    return inner
Add a postcondition check to the annotated method. The condition is passed the return value of the annotated method.
def initialize_page(title, style, script, header=None):
    """
    A function that returns a markup.py page object with the required html
    header.
    """
    html_page = markup.page(mode="strict_html")
    # Disable escaping so raw HTML snippets can be embedded verbatim.
    html_page._escape = False
    html_page.init(title=title, css=style, script=script, header=header)
    return html_page
A function that returns a markup.py page object with the required html header.
def end_position(variant_obj):
    """Calculate end position for a variant."""
    ref_len = len(variant_obj['reference'])
    alt_len = len(variant_obj['alternative'])
    # The variant spans as many bases as its longer allele.
    span = max(ref_len, alt_len)
    return variant_obj['position'] + span - 1
Calculate end position for a variant.
def quantile(arg, quantile, interpolation='linear'):
    """
    Return value at the given quantile, a la numpy.percentile.

    Parameters
    ----------
    quantile : float/int or array-like
        0 <= quantile <= 1, the quantile(s) to compute
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
        This optional parameter specifies the interpolation method to use,
        when the desired quantile lies between two data points `i` and `j`:

        * linear: `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * lower: `i`.
        * higher: `j`.
        * nearest: `i` or `j` whichever is nearest.
        * midpoint: (`i` + `j`) / 2.

    Returns
    -------
    quantile
        if scalar input, scalar type, same as input
        if array input, list of scalar type
    """
    # A sequence of quantiles maps to the multi-valued op, a scalar to the
    # single-valued one; both are built with identical arguments.
    is_multi = isinstance(quantile, collections.abc.Sequence)
    op_class = ops.MultiQuantile if is_multi else ops.Quantile
    return op_class(arg, quantile, interpolation).to_expr()
Return value at the given quantile, a la numpy.percentile. Parameters ---------- quantile : float/int or array-like 0 <= quantile <= 1, the quantile(s) to compute interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use, when the desired quantile lies between two data points `i` and `j`: * linear: `i + (j - i) * fraction`, where `fraction` is the fractional part of the index surrounded by `i` and `j`. * lower: `i`. * higher: `j`. * nearest: `i` or `j` whichever is nearest. * midpoint: (`i` + `j`) / 2. Returns ------- quantile if scalar input, scalar type, same as input if array input, list of scalar type
def hash(pii_csv, keys, schema, clk_json, quiet, no_header, check_header, validate):
    """Process data to create CLKs

    Given a file containing CSV data as PII_CSV, and a JSON document defining the
    expected schema, verify the schema, then hash the data to create CLKs writing
    them as JSON to CLK_JSON. Note the CSV file should contain a header row - however
    this row is not used by this tool.

    It is important that the keys are only known by the two data providers. Two words should be provided.
    For example:

    $clkutil hash pii.csv horse staple pii-schema.json clk.json

    Use "-" for CLK_JSON to write JSON to stdout.
    """
    schema_object = clkhash.schema.from_json_file(schema_file=schema)
    # Resolve how the CSV header row should be treated; ``no_header``
    # takes precedence over ``check_header``.
    if no_header:
        header = False
    elif not check_header:
        header = 'ignore'
    else:
        header = True
    try:
        clk_data = clk.generate_clk_from_csv(
            pii_csv, keys, schema_object,
            validate=validate,
            header=header,
            progress_bar=not quiet)
    except (validate_data.EntryError, validate_data.FormatError) as e:
        msg, = e.args
        log(msg)
        log('Hashing failed.')
    else:
        json.dump({'clks': clk_data}, clk_json)
        if hasattr(clk_json, 'name'):
            log("CLK data written to {}".format(clk_json.name))
Process data to create CLKs Given a file containing CSV data as PII_CSV, and a JSON document defining the expected schema, verify the schema, then hash the data to create CLKs writing them as JSON to CLK_JSON. Note the CSV file should contain a header row - however this row is not used by this tool. It is important that the keys are only known by the two data providers. Two words should be provided. For example: $clkutil hash pii.csv horse staple pii-schema.json clk.json Use "-" for CLK_JSON to write JSON to stdout.
def get_rich_menu(self, rich_menu_id, timeout=None):
    """Call get rich menu API.

    https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu

    :param str rich_menu_id: ID of the rich menu
    :param timeout: (optional) How long to wait for the server
        to send data before giving up, as a float,
        or a (connect timeout, read timeout) float tuple.
        Default is self.http_client.timeout
    :type timeout: float | tuple(float, float)
    :rtype: :py:class:`linebot.models.responses.RichMenuResponse`
    :return: RichMenuResponse instance
    """
    endpoint = '/v2/bot/richmenu/{rich_menu_id}'.format(rich_menu_id=rich_menu_id)
    response = self._get(endpoint, timeout=timeout)
    return RichMenuResponse.new_from_json_dict(response.json)
Call get rich menu API. https://developers.line.me/en/docs/messaging-api/reference/#get-rich-menu :param str rich_menu_id: ID of the rich menu :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.RichMenuResponse` :return: RichMenuResponse instance
def set_level(self, level):
    """ Sets :attr:loglevel to @level

        @level: #str one or several :attr:levels
    """
    # A falsy level is a no-op: existing state is left untouched.
    if not level:
        return None
    # Rebuild the level map from scratch as the union of the sets
    # registered for each requested level character.
    self.levelmap = set()
    for char in level:
        self.levelmap |= self.levels[char]
    self.loglevel = level
    return self.loglevel
Sets :attr:loglevel to @level @level: #str one or several :attr:levels
def get_range_response(self, range_start, range_end,
                       sort_order=None, sort_target='key',
                       **kwargs):
    """Fetch the raw response for a range of keys.

    Builds a Range request from ``range_start``/``range_end`` (plus any
    extra request options in ``kwargs``) and issues it through the KV stub
    using this client's timeout, credentials and metadata.
    """
    request = self._build_get_range_request(
        key=range_start,
        range_end=range_end,
        sort_order=sort_order,
        sort_target=sort_target,
        **kwargs
    )
    return self.kvstub.Range(
        request,
        self.timeout,
        credentials=self.call_credentials,
        metadata=self.metadata
    )
Get a range of keys.
def get_tensors_by_names(names):
    """
    Get a list of tensors in the default graph by a list of names.

    Args:
        names (list):
    """
    graph = tfv1.get_default_graph()
    tensors = []
    for name in names:
        # get_op_tensor_name normalizes an op name into its tensor name.
        _, tensor_name = get_op_tensor_name(name)
        tensors.append(graph.get_tensor_by_name(tensor_name))
    return tensors
Get a list of tensors in the default graph by a list of names. Args: names (list):
def get_catalog(mid):
    """Return catalog entry for the specified ID.

    `mid` should be either a UUID or a 32 digit hex number.
    """
    # Normalize a UUID object down to its 32-char hex form before lookup.
    key = mid.hex if isinstance(mid, _uuid.UUID) else mid
    return _get_catalog(key)
Return catalog entry for the specified ID. `mid` should be either a UUID or a 32 digit hex number.
def get_file(self, name, save_to, add_to_cache=True, force_refresh=False, _lock_exclusive=False):
    """Retrieves file identified by ``name``.

    The file is saved as ``save_to``. If ``add_to_cache`` is ``True``,
    the file is added to the local store. If ``force_refresh`` is
    ``True``, local cache is not examined if a remote store is
    configured.

    If a remote store is configured, but ``name`` does not contain a
    version, the local data store is not used, as we cannot guarantee
    that the version there is fresh.

    Local data store implemented in :class:`LocalDataStore` tries to not
    copy the entire file to ``save_to`` if possible, but instead uses
    hardlinking. Therefore you should not modify the file if you don't
    want to totally blow something.

    ``_lock_exclusive`` is internal: the method re-enters itself with an
    exclusive lock when it needs to populate the local cache.

    This method returns the full versioned name of the retrieved file.
    """
    uname, version = split_name(name)

    lock = None
    if self.local_store:
        # Lock per unversioned name: shared for plain reads, exclusive
        # when the caller intends to write into the local cache.
        lock = self.lock_manager.lock_for(uname)
        if _lock_exclusive:
            lock.lock_exclusive()
        else:
            lock.lock_shared()
    else:
        # No local store means there is nothing to cache into.
        add_to_cache = False

    t = time.time()
    logger.debug('    downloading %s', name)
    try:
        if not self.remote_store or (version is not None
                                     and not force_refresh):
            # Local-first path: only trusted when there is no remote, or
            # the name is pinned to an explicit version (and no refresh
            # was forced).
            try:
                if self.local_store and self.local_store.exists(name):
                    return self.local_store.get_file(name, save_to)
            except Exception:
                # With a remote available, a local failure is recoverable:
                # log it and fall through to the remote fetch below.
                if self.remote_store:
                    logger.warning(
                            "Error getting '%s' from local store",
                            name, exc_info=True)
                else:
                    raise
        if self.remote_store:
            if not _lock_exclusive and add_to_cache:
                # Caching requires the exclusive lock; release the shared
                # one and restart the whole operation exclusively.
                if lock:
                    lock.unlock()
                return self.get_file(name, save_to, add_to_cache,
                                     _lock_exclusive=True)
            vname = self.remote_store.get_file(name, save_to)
            if add_to_cache:
                self._add_to_cache(vname, save_to)
            return vname

        raise FiletrackerError("File not available: %s" % name)
    finally:
        if lock:
            lock.close()
        logger.debug('    processed %s in %.2fs', name, time.time() - t)
Retrieves file identified by ``name``. The file is saved as ``save_to``. If ``add_to_cache`` is ``True``, the file is added to the local store. If ``force_refresh`` is ``True``, local cache is not examined if a remote store is configured. If a remote store is configured, but ``name`` does not contain a version, the local data store is not used, as we cannot guarantee that the version there is fresh. Local data store implemented in :class:`LocalDataStore` tries to not copy the entire file to ``save_to`` if possible, but instead uses hardlinking. Therefore you should not modify the file if you don't want to totally blow something. This method returns the full versioned name of the retrieved file.
def download_artist_by_search(self, artist_name):
    """Download an artist's top 50 songs, looked up by name.

    :param artist_name: the artist's name.
    """
    try:
        match = self.crawler.search_artist(artist_name, self.quiet)
    except RequestException as err:
        # Network/search failure: report it and bail out.
        click.echo(err)
        return
    self.download_artist_by_id(match.artist_id, match.artist_name)
Download an artist's top 50 songs by his/her name. :param artist_name: artist name.
def _is_cache_dir_appropriate(cache_dir, cache_file): """ Determine if a directory is acceptable for building. A directory is suitable if any of the following are true: - it doesn't exist - it is empty - it contains an existing build cache """ if os.path.exists(cache_dir): files = os.listdir(cache_dir) if cache_file in files: return True return not bool(files) return True
Determine if a directory is acceptable for building. A directory is suitable if any of the following are true: - it doesn't exist - it is empty - it contains an existing build cache
def build_from_entities(self, tax_benefit_system, input_dict):
    """
    Build a simulation from a Python dict ``input_dict`` fully specifying entities.

    Raises :exc:`SituationParsingError` when the situation references
    unknown entities, defines no persons, or mixes variable periods.

    Examples:

    >>> simulation_builder.build_from_entities({
        'persons': {'Javier': { 'salary': {'2018-11': 2000}}},
        'households': {'household': {'parents': ['Javier']}}
        })
    """
    # Work on a copy so the caller's dict is never mutated (axes are popped below).
    input_dict = deepcopy(input_dict)
    simulation = Simulation(tax_benefit_system, tax_benefit_system.instantiate_entities())
    # Register variables so get_variable_entity can find them
    for (variable_name, _variable) in tax_benefit_system.variables.items():
        self.register_variable(variable_name, simulation.get_variable_population(variable_name).entity)
    check_type(input_dict, dict, ['error'])
    # 'axes' is builder metadata, not an entity; remove it before validation.
    axes = input_dict.pop('axes', None)
    unexpected_entities = [entity for entity in input_dict if entity not in tax_benefit_system.entities_plural()]
    if unexpected_entities:
        unexpected_entity = unexpected_entities[0]
        raise SituationParsingError([unexpected_entity], ''.join([
            "Some entities in the situation are not defined in the loaded tax and benefit system.",
            "These entities are not found: {0}.",
            "The defined entities are: {1}."]
        )
            .format(
            ', '.join(unexpected_entities),
            ', '.join(tax_benefit_system.entities_plural())
        )
        )
    persons_json = input_dict.get(tax_benefit_system.person_entity.plural, None)
    if not persons_json:
        raise SituationParsingError([tax_benefit_system.person_entity.plural], 'No {0} found. 
At least one {0} must be defined to run a simulation.'.format(tax_benefit_system.person_entity.key))
    persons_ids = self.add_person_entity(simulation.persons.entity, persons_json)
    # Group entities missing from the input get a default instance.
    for entity_class in tax_benefit_system.group_entities:
        instances_json = input_dict.get(entity_class.plural)
        if instances_json is not None:
            self.add_group_entity(self.persons_plural, persons_ids, entity_class, instances_json)
        else:
            self.add_default_group_entity(persons_ids, entity_class)
    if axes:
        self.axes = axes
        self.expand_axes()
    try:
        self.finalize_variables_init(simulation.persons)
    except PeriodMismatchError as e:
        self.raise_period_mismatch(simulation.persons.entity, persons_json, e)
    for entity_class in tax_benefit_system.group_entities:
        try:
            population = simulation.populations[entity_class.key]
            self.finalize_variables_init(population)
        except PeriodMismatchError as e:
            # NOTE(review): ``instances_json`` here still holds the value from
            # the *previous* loop over group entities, which may not correspond
            # to this ``entity_class`` — confirm this is intended before relying
            # on the error's payload.
            self.raise_period_mismatch(population.entity, instances_json, e)
    return simulation
Build a simulation from a Python dict ``input_dict`` fully specifying entities. Examples: >>> simulation_builder.build_from_entities({ 'persons': {'Javier': { 'salary': {'2018-11': 2000}}}, 'households': {'household': {'parents': ['Javier']}} })