code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def offset(img, offset, fill_value=0):
    """Shift the contents of an image without changing the image size.

    Pixels shifted out of the frame are discarded; vacated pixels are set
    to ``fill_value``.

    Parameters
    ----------
    img : array
        2-D input image.
    offset : tuple of int
        ``(vertical_offset, horizontal_offset)``, the shift along the two axes.
    fill_value : scalar, optional
        Value used for the vacated region. Defaults to 0.

    Returns
    -------
    array
        Shifted image with the same shape and dtype as ``img``.
    """
    sh = img.shape
    if sh == (0, 0):
        return img
    # Allocate with the input's dtype: np.empty(sh) would default to
    # float64 and silently promote integer images.
    x = np.empty(sh, dtype=img.dtype)
    x[:] = fill_value
    x[max(offset[0], 0):min(sh[0] + offset[0], sh[0]),
      max(offset[1], 0):min(sh[1] + offset[1], sh[1])] = \
        img[max(-offset[0], 0):min(sh[0] - offset[0], sh[0]),
            max(-offset[1], 0):min(sh[1] - offset[1], sh[1])]
    return x
Moves the contents of image without changing the image size. The missing values are given a specified fill value. Parameters ---------- img : array Image. offset : (vertical_offset, horizontal_offset) Tuple of length 2, specifying the offset along the two axes. fill_value : dtype of img Fill value. Defaults to 0.
def CheckHost(host_data, os_name=None, cpe=None, labels=None, exclude_checks=None, restrict_checks=None):
    """Perform all checks on a host using acquired artifacts.

    Checks are selected based on the artifacts available and the host
    attributes (e.g. os_name/cpe/labels) provided as either parameters, or in
    the knowledgebase artifact. A KnowledgeBase artifact should be provided
    that contains, at a minimum:
    - OS
    - Hostname or IP
    Other knowledgebase attributes may be required for specific checks.

    CPE is currently unused, pending addition of a CPE module in the GRR
    client. Labels are arbitrary string labels attached to a client.

    Args:
      host_data: A dictionary with artifact names as keys, and rdf data as
          values.
      os_name: An OS name (optional).
      cpe: A CPE string (optional).
      labels: An iterable of labels (optional).
      exclude_checks: A list of check ids not to run. A check id in this list
          will not get run even if included in restrict_checks.
      restrict_checks: A list of check ids that may be run, if appropriate.

    Returns:
      A CheckResults object that contains results for all checks that were
      performed on the host.
    """
    # Fill in host attributes from the knowledgebase artifact when the
    # caller did not supply them explicitly.
    knowledge_base = host_data.get("KnowledgeBase")
    if os_name is None:
        os_name = knowledge_base.os
    # TODO(user): Get CPE when not provided (requires new artifact/parser).
    # TODO(user): Get labels when not provided (see grr/lib/export.py for
    # acquisition from client).
    return CheckRegistry.Process(
        host_data,
        os_name=os_name,
        cpe=cpe,
        labels=labels,
        restrict_checks=restrict_checks,
        exclude_checks=exclude_checks)
Perform all checks on a host using acquired artifacts. Checks are selected based on the artifacts available and the host attributes (e.g. os_name/cpe/labels) provided as either parameters, or in the knowledgebase artifact. A KnowledgeBase artifact should be provided that contains, at a minimum: - OS - Hostname or IP Other knowledgebase attributes may be required for specific checks. CPE is currently unused, pending addition of a CPE module in the GRR client. Labels are arbitrary string labels attached to a client. Args: host_data: A dictionary with artifact names as keys, and rdf data as values. os_name: An OS name (optional). cpe: A CPE string (optional). labels: An iterable of labels (optional). exclude_checks: A list of check ids not to run. A check id in this list will not get run even if included in restrict_checks. restrict_checks: A list of check ids that may be run, if appropriate. Returns: A CheckResults object that contains results for all checks that were performed on the host.
def get_platform_node_selector(self, platform):
    """
    Search the configuration for entries of the form node_selector.platform.

    :param platform: str, platform to search for, can be null
    :return dict
    """
    # No platform means no selector to look up.
    if not platform:
        return {}
    key = "node_selector." + platform
    raw_selector = self._get_value(key, self.conf_section, key)
    return self.generate_nodeselector_dict(raw_selector)
search the configuration for entries of the form node_selector.platform :param platform: str, platform to search for, can be null :return dict
def host_to_ips(host):
    '''
    Returns a list of IP addresses of a given hostname or None if not found.
    '''
    try:
        addrinfo = socket.getaddrinfo(
            host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
    except Exception:
        # Resolution failed (bad hostname, no resolver, ...).
        return None
    ips = []
    for family, _socktype, _proto, _canonname, sockaddr in addrinfo:
        # Only append addresses from families we understand. The original
        # appended unconditionally, so an unexpected family could append a
        # stale value from a previous iteration or raise NameError.
        if family in (socket.AF_INET, socket.AF_INET6):
            ips.append(sockaddr[0])
    return ips or None
Returns a list of IP addresses of a given hostname or None if not found.
def get_idxs(data, eid2idx):
    """
    Convert from event IDs to event indices.

    :param data: an array with a field eid
    :param eid2idx: a dictionary eid -> idx
    :returns: the array of event indices
    """
    # Map only the unique event IDs, then scatter back with the inverse index.
    unique_eids, inverse = numpy.unique(data['eid'], return_inverse=True)
    unique_idxs = numpy.array([eid2idx[eid] for eid in unique_eids])
    return unique_idxs[inverse]
Convert from event IDs to event indices. :param data: an array with a field eid :param eid2idx: a dictionary eid -> idx :returns: the array of event indices
def init_config(self, config):
    '''
    Configures this extension with a given configuration dictionary.
    This allows use of this extension without a flask app.

    Args:
        config (dict): A dictionary with configuration keys
    '''
    self.config.update(config)

    # Apply defaults for any key the caller did not set.
    defaults = {
        'LDAP_PORT': 389,
        'LDAP_HOST': None,
        'LDAP_USE_SSL': False,
        'LDAP_READONLY': True,
        'LDAP_CHECK_NAMES': True,
        'LDAP_BIND_DIRECT_CREDENTIALS': False,
        'LDAP_BIND_DIRECT_PREFIX': '',
        'LDAP_BIND_DIRECT_SUFFIX': '',
        'LDAP_BIND_DIRECT_GET_USER_INFO': True,
        'LDAP_ALWAYS_SEARCH_BIND': False,
        'LDAP_BASE_DN': '',
        'LDAP_BIND_USER_DN': None,
        'LDAP_BIND_USER_PASSWORD': None,
        'LDAP_SEARCH_FOR_GROUPS': True,
        'LDAP_FAIL_AUTH_ON_MULTIPLE_FOUND': False,
        # Prepended to the Base DN to limit scope when searching for
        # Users/Groups.
        'LDAP_USER_DN': '',
        'LDAP_GROUP_DN': '',
        'LDAP_BIND_AUTHENTICATION_TYPE': 'SIMPLE',
        # Ldap Filters
        'LDAP_USER_SEARCH_SCOPE': 'LEVEL',
        'LDAP_USER_OBJECT_FILTER': '(objectclass=person)',
        'LDAP_USER_LOGIN_ATTR': 'uid',
        'LDAP_USER_RDN_ATTR': 'uid',
        'LDAP_GET_USER_ATTRIBUTES': ldap3.ALL_ATTRIBUTES,
        'LDAP_GROUP_SEARCH_SCOPE': 'LEVEL',
        'LDAP_GROUP_OBJECT_FILTER': '(objectclass=group)',
        'LDAP_GROUP_MEMBERS_ATTR': 'uniqueMember',
        'LDAP_GET_GROUP_ATTRIBUTES': ldap3.ALL_ATTRIBUTES,
        'LDAP_ADD_SERVER': True,
    }
    for key, value in defaults.items():
        self.config.setdefault(key, value)

    if self.config['LDAP_ADD_SERVER']:
        self.add_server(
            hostname=self.config['LDAP_HOST'],
            port=self.config['LDAP_PORT'],
            use_ssl=self.config['LDAP_USE_SSL'],
        )
Configures this extension with a given configuration dictionary. This allows use of this extension without a flask app. Args: config (dict): A dictionary with configuration keys
def search(cls, query, search_opts=None):
    """ Search tags.

        For more information, see the backend function
        :py:func:`nipap.backend.Nipap.search_tag`.
    """
    opts = {} if search_opts is None else search_opts

    xmlrpc = XMLRPCConnection()
    try:
        search_result = xmlrpc.connection.search_tag(
            {
                'query': query,
                'search_options': opts,
                'auth': AuthOptions().options,
            })
    except xmlrpclib.Fault as xml_fault:
        raise _fault_to_exception(xml_fault)

    # Rehydrate each raw tag dict into a Tag object.
    return {
        'result': [Tag.from_dict(xml_tag) for xml_tag in search_result['result']],
        'search_options': search_result['search_options'],
    }
Search tags. For more information, see the backend function :py:func:`nipap.backend.Nipap.search_tag`.
def delete(self):
    """Remove the backing .sqlite file from disk.

    **CAREFUL** — needless to say, this is irreversible. The connection is
    closed first (if open) so the file can be removed safely.

    Raises:
        OSError: if the file cannot be removed.
    """
    self._ensure_filename()
    self._close_if_open()
    os.remove(self.filename)
Removes the .sqlite file. **CAREFUL** — needless to say, this is irreversible.
def units2pint(value):
    """Return the pint Unit for the DataArray units.

    Parameters
    ----------
    value : xr.DataArray or string
        Input data array or expression.

    Returns
    -------
    pint.Unit
        Units of the data array.
    """

    def _cf_to_pint(expr):
        """Convert a CF-unit string to a pint expression."""
        return re.subn(r'\^?(-?\d)', r'**\g<1>', expr)[0]

    if isinstance(value, str):
        unit = value
    elif isinstance(value, xr.DataArray):
        unit = value.attrs['units']
    elif isinstance(value, units.Quantity):
        # Already a pint quantity; nothing to parse.
        return value.units
    else:
        raise NotImplementedError("Value of type {} not supported.".format(type(value)))

    try:
        # Pint compatible
        return units.parse_expression(unit).units
    except (pint.UndefinedUnitError, pint.DimensionalityError):
        # Convert from CF-units to pint-compatible
        return units.parse_expression(_cf_to_pint(unit)).units
Return the pint Unit for the DataArray units. Parameters ---------- value : xr.DataArray or string Input data array or expression. Returns ------- pint.Unit Units of the data array.
def delete_endpoint_config(self, endpoint_config_name):
    """Delete an Amazon SageMaker endpoint configuration.

    Args:
        endpoint_config_name (str): Name of the Amazon SageMaker endpoint
            configuration to delete.
    """
    # Lazy %-style args: the message is only formatted if INFO is enabled.
    LOGGER.info('Deleting endpoint configuration with name: %s', endpoint_config_name)
    self.sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
Delete an Amazon SageMaker endpoint configuration. Args: endpoint_config_name (str): Name of the Amazon SageMaker endpoint configuration to delete.
def _ns_var(
    py_ns_var: str = _NS_VAR, lisp_ns_var: str = LISP_NS_VAR, lisp_ns_ns: str = CORE_NS
) -> ast.Assign:
    """Assign a Python variable named `ns_var` to the value of the current namespace."""
    # Build `new_sym(lisp_ns_var, ns=lisp_ns_ns)` ...
    ns_symbol = ast.Call(
        func=_NEW_SYM_FN_NAME,
        args=[ast.Str(lisp_ns_var)],
        keywords=[ast.keyword(arg="ns", value=ast.Str(lisp_ns_ns))],
    )
    # ... wrap it in `find_var(...)` ...
    find_var_call = ast.Call(func=_FIND_VAR_FN_NAME, args=[ns_symbol], keywords=[])
    # ... and assign the result to the Python-level namespace variable.
    return ast.Assign(
        targets=[ast.Name(id=py_ns_var, ctx=ast.Store())],
        value=find_var_call,
    )
Assign a Python variable named `ns_var` to the value of the current namespace.
def find_le(self, dt):
    '''Find the index corresponding to the rightmost value less than
    or equal to *dt*.

    If *dt* is less than :func:`dynts.TimeSeries.end` a
    :class:`dynts.exceptions.LeftOutOfBound` exception will raise.
    *dt* must be a python datetime.date instance.'''
    index = bisect_right(self.dates, dt)
    # index == 0 means every date is strictly greater than dt.
    if not index:
        raise LeftOutOfBound
    return index - 1
Find the index corresponding to the rightmost value less than or equal to *dt*. If *dt* is less than :func:`dynts.TimeSeries.end` a :class:`dynts.exceptions.LeftOutOfBound` exception will raise. *dt* must be a python datetime.date instance.
def _api_all(self):
    """Glances API RESTful implementation.

    Return the JSON representation of all the plugins
    HTTP/200 if OK
    HTTP/400 if plugin is not found
    HTTP/404 if others error
    """
    response.content_type = 'application/json; charset=utf-8'

    if self.args.debug:
        # In debug mode, serve a canned stats file when it exists.
        fname = os.path.join(tempfile.gettempdir(), 'glances-debug.json')
        try:
            with open(fname) as debug_file:
                return debug_file.read()
        except IOError:
            logger.debug("Debug file (%s) not found" % fname)

    # Refresh the stats before serializing them.
    self.__update__()

    try:
        payload = json.dumps(self.stats.getAllAsDict())
    except Exception as e:
        abort(404, "Cannot get stats (%s)" % str(e))
    return payload
Glances API RESTful implementation. Return the JSON representation of all the plugins HTTP/200 if OK HTTP/400 if plugin is not found HTTP/404 if others error
def get_current_context_id():
    """Identifies which context it is (greenlet, stackless, or thread).

    On first call this rebinds the module-level name
    ``get_current_context_id`` to the applicable primitive, so later calls
    skip the availability checks (memoization by self-replacement). When
    both greenlet and stackless are available the identifier is the pair
    of both "current" objects and no rebinding occurs.

    :returns: the identifier of the current context.
    """
    global get_current_context_id
    if greenlet is not None:
        if stackless is None:
            # Only greenlet available: its getcurrent() is the identifier.
            get_current_context_id = greenlet.getcurrent
            return greenlet.getcurrent()
        # Both present: a tuple is needed, so the function cannot be
        # replaced by a single primitive.
        return greenlet.getcurrent(), stackless.getcurrent()
    elif stackless is not None:
        get_current_context_id = stackless.getcurrent
        return stackless.getcurrent()
    # Plain threads: the thread ident identifies the context.
    get_current_context_id = _thread.get_ident
    return _thread.get_ident()
Identifies which context it is (greenlet, stackless, or thread). :returns: the identifier of the current context.
def children(self):
    """Retrieve tags associated to the current node.

    NOTE(review): despite the name ``children``, this returns a set of tag
    names — confirm the name against callers.

    Starts from the wildcard tag ``'*'`` and adds every network tag whose
    configuration matches this node, via one of three modes:
    - 'match': a pattern that must match the node name (or LOCALHOST),
    - 'nodes': an explicit collection containing the node name (or LOCALHOST),
    - 'constraint': always applies.
    """
    tags = {'*'}
    # Restrict the search to this node's own tag when it has one;
    # otherwise consider every tag defined on the network.
    if self.tag:
        network_tags = {self.tag: self.campaign.network.tags[self.tag]}
    else:
        network_tags = self.campaign.network.tags
    for tag, configs in network_tags.items():
        for config in configs:
            for mode, kconfig in config.items():
                if mode == 'match':
                    if kconfig.match(self.name) or kconfig.match(LOCALHOST):
                        tags.add(tag)
                        break
                elif mode == 'nodes':
                    if self.name in kconfig or LOCALHOST in kconfig:
                        tags.add(tag)
                        break
                elif mode == 'constraint':
                    tags.add(tag)
                    break
            # A tag only needs to match once; stop scanning its configs.
            if tag in tags:
                break
    return tags
Retrieve tags associated to the current node
def blocking(self):
    """
    Display queries holding locks other queries are waiting to be released.

    Record(
        pid=40821,
        source='',
        running_for=datetime.timedelta(0, 0, 2857),
        waiting=False,
        query='SELECT pg_sleep(10);'
    )

    :returns: list of Records
    """
    # Substitute the server-version-dependent column names into the query.
    blocking_sql = sql.BLOCKING.format(
        query_column=self.query_column,
        pid_column=self.pid_column,
    )
    return self.execute(blocking_sql)
Display queries holding locks other queries are waiting to be released. Record( pid=40821, source='', running_for=datetime.timedelta(0, 0, 2857), waiting=False, query='SELECT pg_sleep(10);' ) :returns: list of Records
def delete_database(self, instance_id, database_id, project_id=None):
    """
    Drops a database in Cloud Spanner.

    :type project_id: str
    :param instance_id: The ID of the Cloud Spanner instance.
    :type instance_id: str
    :param database_id: The ID of the database in Cloud Spanner.
    :type database_id: str
    :param project_id: Optional, the ID of the  GCP project that owns the Cloud Spanner
        database.  If set to None or missing, the default project_id from the GCP connection is
        used.
    :return: True if everything succeeded
    :rtype: bool
    """
    instance = self._get_client(project_id=project_id).\
        instance(instance_id=instance_id)
    if not instance.exists():
        raise AirflowException("The instance {} does not exist in project {} !".
                               format(instance_id, project_id))
    database = instance.database(database_id=database_id)
    if not database.exists():
        # Idempotent: nothing to do when the database is already gone.
        # Lazy %-args so the message is built only if INFO is enabled.
        self.log.info("The database %s is already deleted from instance %s. Exiting.",
                      database_id, instance_id)
        return
    try:
        operation = database.drop()  # type: Operation
    except GoogleAPICallError as e:
        self.log.error('An error occurred: %s. Exiting.', e.message)
        # Bare raise preserves the original traceback.
        raise
    if operation:
        result = operation.result()
        self.log.info(result)
    return
Drops a database in Cloud Spanner. :type project_id: str :param instance_id: The ID of the Cloud Spanner instance. :type instance_id: str :param database_id: The ID of the database in Cloud Spanner. :type database_id: str :param project_id: Optional, the ID of the GCP project that owns the Cloud Spanner database. If set to None or missing, the default project_id from the GCP connection is used. :return: True if everything succeeded :rtype: bool
def from_api_repr(cls, api_repr):
    """Factory: create a FieldPath from the string formatted per the API.

    Args:
        api_repr (str): a string path, with non-identifier elements quoted
        It cannot exceed 1500 characters, and cannot be empty.

    Returns:
        (:class:`FieldPath`) An instance parsed from ``api_repr``.

    Raises:
        ValueError if the parsing fails
    """
    stripped = api_repr.strip()
    # An empty (or whitespace-only) path is invalid.
    if not stripped:
        raise ValueError("Field path API representation cannot be empty.")
    return cls(*parse_field_path(stripped))
Factory: create a FieldPath from the string formatted per the API. Args: api_repr (str): a string path, with non-identifier elements quoted It cannot exceed 1500 characters, and cannot be empty. Returns: (:class:`FieldPath`) An instance parsed from ``api_repr``. Raises: ValueError if the parsing fails
def commit_new_version(version: str):
    """
    Commits the file containing the version number variable
    with the version number as the commit message.

    :param version: The version number to be used in the commit message.
    """
    check_repo()
    # The commit message is the version followed by the configured template.
    commit_body = config.get('semantic_release', 'commit_message')
    full_message = '{0}\n\n{1}'.format(version, commit_body)
    # 'version_variable' is '<file>:<variable>'; stage only the file part.
    version_file = config.get('semantic_release', 'version_variable').split(':')[0]
    repo.git.add(version_file)
    return repo.git.commit(m=full_message, author="semantic-release <semantic-release>")
Commits the file containing the version number variable with the version number as the commit message. :param version: The version number to be used in the commit message.
def _init_from_csr(self, csr):
    """
    Initialize data from a CSR matrix.
    """
    if len(csr.indices) != len(csr.data):
        raise ValueError('length mismatch: {} vs {}'.format(len(csr.indices), len(csr.data)))
    handle = ctypes.c_void_p()
    _check_call(_LIB.XGDMatrixCreateFromCSR(
        c_array(ctypes.c_ulong, csr.indptr),
        c_array(ctypes.c_uint, csr.indices),
        c_array(ctypes.c_float, csr.data),
        len(csr.indptr),
        len(csr.data),
        ctypes.byref(handle)))
    self.handle = handle
Initialize data from a CSR matrix.
def try_disk(self, path, gpg=True):
    """Try to load JSON off disk.

    Args:
        path: filesystem path of the JSON file.
        gpg: when True, require self.validate_gpg_sig(path) to pass.

    Returns:
        The parsed JSON object on success, ``False`` when the file contains
        invalid JSON, or ``None`` when the file is missing, empty, or the
        signature check fails.
    """
    if not os.path.isfile(path):
        return
    if gpg and not self.validate_gpg_sig(path):
        return
    # Context manager guarantees the handle is closed (the original
    # leaked the open file object).
    with open(path, 'r') as stream:
        json_stream = stream.read()
    if not json_stream:
        logger.warn("WARNING: %s was an empty file", path)
        return
    try:
        return json.loads(json_stream)
    except ValueError:
        logger.error("ERROR: Invalid JSON in %s", path)
        return False
Try to load json off disk
def keys(self):
    """
    Return a list of the content types this set supports.

    This is not a complete list: serializers can accept more than one
    content type. However, it is a good representation of the class of
    content types supported.
    """
    supported = []
    for serializer in self.serializers:
        supported.extend(serializer.content_type)
    return supported
return a list of the content types this set supports. this is not a complete list: serializers can accept more than one content type. However, it is a good representation of the class of content types supported.
def package(self):
    """Build the App package for deployment to ThreatConnect Exchange.

    Copies the project into a template directory (minus excluded files),
    builds one App directory + zip per install.json found, and finally
    bundles the zips together when more than one App was packaged.
    """
    # create build directory
    tmp_path = os.path.join(self.app_path, self.args.outdir, 'build')
    if not os.path.isdir(tmp_path):
        os.makedirs(tmp_path)

    # temp path and cleanup
    template_app_path = os.path.join(tmp_path, 'template')
    if os.access(template_app_path, os.W_OK):
        # cleanup any previous failed builds
        shutil.rmtree(template_app_path)

    # update package data
    self.package_data['package'].append(
        {'action': 'Template Directory:', 'output': template_app_path}
    )

    # build exclude file/directory list
    excludes = [
        'tcex.json',
        self.args.outdir,
        '__pycache__',
        '.c9',  # C9 IDE
        '.git',  # git directory
        '.gitmodules',  # git modules
        '.idea',  # PyCharm
        '*.iml',  # PyCharm files
        '*.pyc',  # any pyc file
        '.python-version',  # pyenv
        '.vscode',  # Visual Studio Code
        'log',  # log directory
    ]
    # merge CLI and tcex.json configured excludes
    excludes.extend(self.args.exclude)
    excludes.extend(self.tcex_json.get('package', {}).get('excludes', []))

    # update package data
    self.package_data['package'].append({'action': 'Excluded Files:', 'output': excludes})

    # copy project directory to temp location to use as template for multiple builds
    ignore_patterns = shutil.ignore_patterns(*excludes)
    shutil.copytree(self.app_path, template_app_path, False, ignore_patterns)

    # build list of app json files
    if self.args.install_json is not None:
        contents = [self.args.install_json]
    else:
        contents = os.listdir(self.app_path)

    # package app
    for install_json in sorted(contents):
        # skip files that are not install.json files
        if 'install.json' not in install_json:
            continue

        # get App Name from config, install.json prefix or directory name.
        if install_json == 'install.json':
            app_name = self.tcex_json.get('package', {}).get(
                'app_name', os.path.basename(self.app_path)
            )
        else:
            app_name = install_json.split('.')[0]

        # update package data
        self.package_data['package'].append({'action': 'App Name:', 'output': app_name})

        # load install json
        ij = self.load_install_json(install_json)

        # automatically update install.json for feature sets supported by the SDK
        ij, ij_modified = self._update_install_json(ij)

        # write update install.json
        if ij_modified:
            self._write_install_json(install_json, ij)

        # find a usable app version
        program_version = ij.get('programVersion', '1.0.0').split('.')
        major_version = program_version[0]
        try:
            minor_version = program_version[1]
        except IndexError:
            # no minor component in programVersion; default to 0
            minor_version = 0
        app_version = '{}'.format(
            self.tcex_json.get('package', {}).get(
                'app_version', 'v{}.{}'.format(major_version, minor_version)
            )
        )

        # update package data
        self.package_data['package'].append(
            {'action': 'App Version:', 'output': 'v{}.{}'.format(major_version, minor_version)}
        )

        # !!! The name of the folder in the zip is the *key* for an App. This value must
        # !!! remain consistent for the App to upgrade successfully.
        app_name_version = '{}_{}'.format(app_name, app_version)

        # build app directory
        tmp_app_path = os.path.join(tmp_path, app_name_version)
        if os.access(tmp_app_path, os.W_OK):
            # cleanup any previous failed builds
            shutil.rmtree(tmp_app_path)
        shutil.copytree(template_app_path, tmp_app_path)

        # Copy install.json
        # TODO: do we need copy if writing the data in the next step?
        shutil.copy(install_json, os.path.join(tmp_app_path, 'install.json'))

        # Update commit hash after install.json has been copied.
        if self.commit_hash is not None:
            ij.setdefault('commitHash', self.commit_hash)
            self._write_install_json(os.path.join(tmp_app_path, 'install.json'), ij)

            # update package data
            self.package_data['package'].append(
                {'action': 'Commit Hash:', 'output': self.commit_hash}
            )

        # zip file
        self.zip_file(self.app_path, app_name_version, tmp_path)

        # cleanup build directory
        shutil.rmtree(tmp_app_path)

    # bundle zips (must have more than 1 app)
    if len(self._app_packages) > 1:
        self.bundle(self.tcex_json.get('package', {}).get('bundle_name', app_name))
Build the App package for deployment to ThreatConnect Exchange.
def _compute_confidence_bounds_of_transform(self, transform, alpha, ci_labels):
    """
    This computes the confidence intervals of a transform of the parameters.
    Ex: take the fitted parameters, a function/transform and the variance matrix
    and give me back confidence intervals of the transform.

    Parameters
    -----------
    transform: function
        must a function of two parameters:
            ``params``, an iterable that stores the parameters
            ``times``, a numpy vector representing some timeline
        the function must use autograd imports (scipy and numpy)
    alpha: float
        confidence level
    ci_labels: tuple
        optional pair of column labels for the upper/lower bound columns;
        defaults to "<label>_upper_<1-alpha>" / "<label>_lower_<1-alpha>"

    Returns
    --------
    pd.DataFrame indexed by ``self.timeline`` with the two bound columns.
    """
    alpha2 = 1 - alpha / 2.0
    z = inv_normal_cdf(alpha2)
    df = pd.DataFrame(index=self.timeline)

    # Build the Jacobian-vector-product of the transform at the MLE, then
    # evaluate it against each standard basis vector to recover the full
    # gradient of the transform w.r.t. each fitted parameter, per time point.
    # pylint: disable=no-value-for-parameter
    gradient_of_cum_hazard_at_mle = make_jvp_reversemode(transform)(
        self._fitted_parameters_, self.timeline.astype(float)
    )

    gradient_at_times = np.vstack(
        [gradient_of_cum_hazard_at_mle(basis) for basis in np.eye(len(self._fitted_parameters_), dtype=float)]
    )

    # Per-time standard error: sqrt(g^T V g) with V the parameter variance
    # matrix (delta-method style propagation of parameter uncertainty).
    std_cumulative_hazard = np.sqrt(
        np.einsum("nj,jk,nk->n", gradient_at_times.T, self.variance_matrix_, gradient_at_times.T)
    )

    if ci_labels is None:
        ci_labels = ["%s_upper_%g" % (self._label, 1 - alpha), "%s_lower_%g" % (self._label, 1 - alpha)]
    assert len(ci_labels) == 2, "ci_labels should be a length 2 array."

    # Symmetric normal-approximation interval around the point estimate.
    df[ci_labels[0]] = transform(self._fitted_parameters_, self.timeline) + z * std_cumulative_hazard
    df[ci_labels[1]] = transform(self._fitted_parameters_, self.timeline) - z * std_cumulative_hazard
    return df
This computes the confidence intervals of a transform of the parameters. Ex: take the fitted parameters, a function/transform and the variance matrix and give me back confidence intervals of the transform. Parameters ----------- transform: function must a function of two parameters: ``params``, an iterable that stores the parameters ``times``, a numpy vector representing some timeline the function must use autograd imports (scipy and numpy) alpha: float confidence level ci_labels: tuple
def _is_device_active(device):
    """Checks dmsetup to see if a device is already active."""
    dmsetup_info = util.subp(['dmsetup', 'info', device])
    for info_line in dmsetup_info.stdout.split("\n"):
        # Lines look like "Key: Value"; we want the "State" row.
        fields = info_line.split(':')
        if ('State' in fields[0].strip()) and ('ACTIVE' in fields[1].strip()):
            return True
    return False
Checks dmsetup to see if a device is already active
def success_response(self, method_resp, **kw):
    """
    Make a standard "success" response, which contains
    some ancillary data (indexing status and last block).

    Also, detect if this node has not reindexed recently, and if so,
    flag the response as stale with a warning message.
    """
    # Later keys override earlier ones: kw overrides the base fields,
    # and the method response overrides both.
    resp = {
        'status': True,
        'indexing': config.is_indexing(self.working_dir),
        'lastblock': virtualchain_hooks.get_last_block(self.working_dir),
        **kw,
        **method_resp,
    }

    if self.is_stale():
        # our state is stale
        resp['stale'] = True
        resp['warning'] = 'Daemon has not reindexed since {}'.format(self.last_indexing_time)

    return resp
Make a standard "success" response, which contains some ancilliary data. Also, detect if this node is too far behind the Bitcoin blockchain, and if so, convert this into an error message.
def decode():
    """Given a Geobuf byte string on stdin, write a GeoJSON feature
    collection to stdout."""
    logger = logging.getLogger('geobuf')
    stdin = click.get_binary_stream('stdin')
    sink = click.get_text_stream('stdout')
    try:
        data = geobuf.decode(stdin.read())
        json.dump(data, sink)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
    # sys.exit raises SystemExit, which the except above does not catch.
    sys.exit(0)
Given a Geobuf byte string on stdin, write a GeoJSON feature collection to stdout.
def linkRecord(self, existing_domain, new_domain, rtype, callback=None, errback=None, **kwargs):
    """
    Create a new linked record in this zone. These records use the
    configuration (answers, ttl, filters, etc) from an existing record
    in the NS1 platform.

    :param str existing_domain: FQDN of the target record whose config \
        should be used. Does not have to be in the same zone.
    :param str new_domain: Name of the new (linked) record. Zone name is\
        appended automatically.
    :param str rtype: DNS record type, which must match the target record.

    :rtype: ns1.records.Record
    :return: new Record
    """
    # Qualify a bare name with this zone so the link target is an FQDN.
    target = existing_domain
    if '.' not in target:
        target = target + '.' + self.zone
    record = Record(self, new_domain, rtype)
    return record.create(answers=[], link=target,
                         callback=callback, errback=errback, **kwargs)
Create a new linked record in this zone. These records use the configuration (answers, ttl, filters, etc) from an existing record in the NS1 platform. :param str existing_domain: FQDN of the target record whose config \ should be used. Does not have to be in the same zone. :param str new_domain: Name of the new (linked) record. Zone name is\ appended automatically. :param str rtype: DNS record type, which must match the target record. :rtype: ns1.records.Record :return: new Record
def pull(directory: str) -> Commit:
    """
    Pulls the subrepo that has been cloned into the given directory.

    :param directory: the directory containing the subrepo
    :return: the commit the subrepo is on
    """
    if not os.path.exists(directory):
        raise ValueError(f"No subrepo found in \"{directory}\"")
    command = [GIT_COMMAND, _GIT_SUBREPO_COMMAND, _GIT_SUBREPO_PULL_COMMAND,
               _GIT_SUBREPO_VERBOSE_FLAG, get_directory_relative_to_git_root(directory)]
    try:
        run(command, execution_directory=get_git_root_directory(directory))
    except RunException as e:
        if "Can't pull subrepo. Working tree has changes" in e.stderr:
            raise UnstagedChangeException() from e
        # NOTE(review): other pull failures are deliberately swallowed here
        # (matching the original behavior) and we fall through to status().
    return status(directory)[2]
Pulls the subrepo that has been cloned into the given directory. :param directory: the directory containing the subrepo :return: the commit the subrepo is on
def _populate_cmd_lists(self): """ Populate self.lists and hashes: self.commands, and self.aliases, self.category """ self.commands = {} self.aliases = {} self.category = {} # self.short_help = {} for cmd_instance in self.cmd_instances: if not hasattr(cmd_instance, 'aliases'): continue alias_names = cmd_instance.aliases cmd_name = cmd_instance.name self.commands[cmd_name] = cmd_instance for alias_name in alias_names: self.aliases[alias_name] = cmd_name pass cat = getattr(cmd_instance, 'category') if cat and self.category.get(cat): self.category[cat].append(cmd_name) else: self.category[cat] = [cmd_name] pass # sh = getattr(cmd_instance, 'short_help') # if sh: # self.short_help[cmd_name] = getattr(c, 'short_help') # pass pass for k in list(self.category.keys()): self.category[k].sort() pass return
Populate self.lists and hashes: self.commands, and self.aliases, self.category
def _put(self, route, data, headers=None, failure_message=None):
    """
    Execute a put request and return the result

    :param data:
    :param headers:
    :return:
    """
    merged_headers = self._get_headers(headers)

    def do_put():
        return requests.put(
            self._get_qualified_route(route),
            headers=merged_headers,
            data=data,
            verify=False,
            proxies=self.proxies,
        )

    # First attempt is made eagerly; the callable allows a retry after
    # rate limiting.
    response = check_for_rate_limiting(do_put(), do_put)
    return self._handle_response(response, failure_message)
Execute a put request and return the result :param data: :param headers: :return:
def send_topic_message(self, topic_name, message=None):
    '''
    Enqueues a message into the specified topic. The limit to the number
    of messages which may be present in the topic is governed by the
    message size in MaxTopicSizeInBytes. If this message causes the topic
    to exceed its quota, a quota exceeded error is returned and the
    message will be rejected.

    topic_name:
        Name of the topic.
    message:
        Message object containing message body and properties.
    '''
    _validate_not_none('topic_name', topic_name)
    _validate_not_none('message', message)
    # Build the POST request; ordering matters below — the URI and
    # service-bus headers are finalized after the message headers/body.
    request = HTTPRequest()
    request.method = 'POST'
    request.host = self._get_host()
    request.path = '/' + _str(topic_name) + '/messages'
    # Message-level headers (broker properties etc.) come from the message.
    request.headers = message.add_headers(request)
    request.body = _get_request_body(message.body)
    request.path, request.query = self._httpclient._update_request_uri_query(request)  # pylint: disable=protected-access
    request.headers = self._update_service_bus_header(request)
    self._perform_request(request)
Enqueues a message into the specified topic. The limit to the number of messages which may be present in the topic is governed by the message size in MaxTopicSizeInBytes. If this message causes the topic to exceed its quota, a quota exceeded error is returned and the message will be rejected. topic_name: Name of the topic. message: Message object containing message body and properties.
def add_title_widget(self, ref, text="Title"):
    """ Add Title Widget

    Creates a TitleWidget under *ref* if one is not already registered,
    and returns the widget stored for *ref*.
    """
    if ref not in self.widgets:
        self.widgets[ref] = widgets.TitleWidget(screen=self, ref=ref, text=text)
    return self.widgets[ref]
Add Title Widget
def add_why(voevent, importance=None, expires=None, inferences=None):
    """Add Inferences, or set importance / expires attributes of the Why
    section.

    .. note::

        ``importance`` / ``expires`` are 'Why' attributes, therefore setting
        them will overwrite previous values. ``inferences``, on the other
        hand, are appended to the list.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        importance(float): Value from 0.0 to 1.0
        expires(datetime.datetime): Expiration date given inferred reason
            (See voevent spec).
        inferences(:class:`voeventparse.misc.Inference`): Inference or list
            of inferences, denoting probable identifications or associations,
            etc.
    """
    # Create the Why element on first use.
    if not voevent.xpath('Why'):
        etree.SubElement(voevent, 'Why')
    if importance is not None:
        voevent.Why.attrib['importance'] = str(importance)
    if expires is not None:
        # Truncate to whole seconds before serializing to ISO-8601.
        voevent.Why.attrib['expires'] = expires.replace(
            microsecond=0).isoformat()
    if inferences is not None:
        # _listify lets callers pass a single Inference or a list.
        voevent.Why.extend(_listify(inferences))
Add Inferences, or set importance / expires attributes of the Why section. .. note:: ``importance`` / ``expires`` are 'Why' attributes, therefore setting them will overwrite previous values. ``inferences``, on the other hand, are appended to the list. Args: voevent(:class:`Voevent`): Root node of a VOEvent etree. importance(float): Value from 0.0 to 1.0 expires(datetime.datetime): Expiration date given inferred reason (See voevent spec). inferences(:class:`voeventparse.misc.Inference`): Inference or list of inferences, denoting probable identifications or associations, etc.
def options(self, parser, env):
    """Register this plugin's extra command line options."""
    Plugin.options(self, parser, env)
    # Default output path may be overridden via the NOSE_HTML_FILE env var.
    default_path = env.get('NOSE_HTML_FILE', 'nosetests.html')
    help_text = ("Path to html file to store the report in. "
                 "Default is nosetests.html in the working directory "
                 "[NOSE_HTML_FILE]")
    parser.add_option(
        '--html-file',
        action='store',
        dest='html_file',
        metavar="FILE",
        default=default_path,
        help=help_text)
Sets additional command line options.
def export_coreml(self, path, image_shape=(256, 256), include_flexible_shape=True):
    """
    Save the model in Core ML format. The Core ML model takes an image of
    fixed size, and a style index inputs and produces an output
    of an image of fixed size

    Parameters
    ----------
    path : string
        A string to the path for saving the Core ML model.

    image_shape: tuple
        A tuple (defaults to (256, 256)) will bind the coreml model to a fixed shape.

    include_flexible_shape: bool
        A boolean value indicating whether flexible_shape should be included or not.

    See Also
    --------
    save

    Examples
    --------
    >>> model.export_coreml('StyleTransfer.mlmodel')
    """
    import mxnet as _mx
    from .._mxnet._mxnet_to_coreml import _mxnet_converter
    import coremltools

    transformer = self._model
    # Symbolic input selecting which of the trained styles to apply.
    index = _mx.sym.Variable("index", shape=(1,), dtype=_np.int32)

    # append batch size and channels
    image_shape = (1, 3) + image_shape
    c_image = _mx.sym.Variable(self.content_feature, shape=image_shape,
                               dtype=_np.float32)

    # signal that we want the transformer to prepare for coreml export
    # using a zero batch size
    transformer.batch_size = 0
    transformer.scale255 = True
    sym_out = transformer(c_image, index)

    mod = _mx.mod.Module(symbol=sym_out,
                         data_names=[self.content_feature, "index"],
                         label_names=None)
    # NOTE(review): `zip` returns an iterator on Python 3 — confirm
    # mod.bind accepts a non-list data_shapes argument.
    mod.bind(data_shapes=zip([self.content_feature, "index"],
                             [image_shape, (1,)]),
             for_training=False, inputs_need_grad=False)

    # Copy the Gluon parameters into the symbolic module, relying on the
    # two parameter lists enumerating layers in the same order.
    gluon_weights = transformer.collect_params()
    gluon_layers = []
    for layer in transformer.collect_params()._params:
        gluon_layers.append(layer)
    sym_layers = mod._param_names
    sym_weight_dict = {}
    for gluon_layer, sym_layer in zip(gluon_layers, sym_layers):
        sym_weight_dict[sym_layer] = gluon_weights[gluon_layer]._data[0]
    mod.set_params(sym_weight_dict, sym_weight_dict)

    # One-hot style selector: index input has one slot per trained style.
    index_dim = (1, self.num_styles)
    coreml_model = _mxnet_converter.convert(
        mod,
        input_shape=[(self.content_feature, image_shape), ('index', index_dim)],
        mode=None, preprocessor_args=None, builder=None, verbose=False)

    # Restore the flag so the in-memory model keeps its training behavior.
    transformer.scale255 = False

    spec = coreml_model.get_spec()
    image_input = spec.description.input[0]
    image_output = spec.description.output[0]
    input_array_shape = tuple(image_input.type.multiArrayType.shape)
    output_array_shape = tuple(image_output.type.multiArrayType.shape)
    # Convert the multi-array input/output features into image features.
    self._export_coreml_image(image_input, input_array_shape)
    self._export_coreml_image(image_output, output_array_shape)

    # Rename the converter's internal output name to something user-facing.
    stylized_image = 'stylized%s' % self.content_feature.capitalize()
    coremltools.utils.rename_feature(
        spec, 'transformer__mulscalar0_output', stylized_image, True, True)

    if include_flexible_shape:
        # Support flexible shape
        flexible_shape_utils = _mxnet_converter._coremltools.models.neural_network.flexible_shape_utils
        img_size_ranges = flexible_shape_utils.NeuralNetworkImageSizeRange()
        # -1 upper bound means unbounded height/width (minimum 64 px).
        img_size_ranges.add_height_range((64, -1))
        img_size_ranges.add_width_range((64, -1))
        flexible_shape_utils.update_image_size_range(
            spec, feature_name=self.content_feature, size_range=img_size_ranges)
        flexible_shape_utils.update_image_size_range(
            spec, feature_name=stylized_image, size_range=img_size_ranges)

    # Fill in human-readable metadata for the saved model.
    model_type = 'style transfer (%s)' % self.model
    spec.description.metadata.shortDescription = _coreml_utils._mlmodel_short_description(
        model_type)
    spec.description.input[0].shortDescription = 'Input image'
    spec.description.input[1].shortDescription = u'Style index array (set index I to 1.0 to enable Ith style)'
    spec.description.output[0].shortDescription = 'Stylized image'
    user_defined_metadata = _coreml_utils._get_model_metadata(
        self.__class__.__name__, {
            'model': self.model,
            'num_styles': str(self.num_styles),
            'content_feature': self.content_feature,
            'style_feature': self.style_feature,
            'max_iterations': str(self.max_iterations),
            'training_iterations': str(self.training_iterations),
        }, version=StyleTransfer._PYTHON_STYLE_TRANSFER_VERSION)
    spec.description.metadata.userDefined.update(user_defined_metadata)

    from coremltools.models.utils import save_spec as _save_spec
    _save_spec(spec, path)
Save the model in Core ML format. The Core ML model takes an image of fixed size, and a style index inputs and produces an output of an image of fixed size Parameters ---------- path : string A string to the path for saving the Core ML model. image_shape: tuple A tuple (defaults to (256, 256)) will bind the coreml model to a fixed shape. include_flexible_shape: bool A boolean value indicating whether flexible_shape should be included or not. See Also -------- save Examples -------- >>> model.export_coreml('StyleTransfer.mlmodel')
def attrgetter_atom_handle(loc, tokens):
    """Compile an attrgetter literal into its Coconut runtime call."""
    name, args = attrgetter_atom_split(tokens)
    # No call parentheses at all -> plain attribute access.
    if args is None:
        return '_coconut.operator.attrgetter("' + name + '")'
    # methodcaller cannot express dotted attribute access.
    if "." in name:
        raise CoconutDeferredSyntaxError("cannot have attribute access in implicit methodcaller partial", loc)
    # Zero-argument call.
    if args == "":
        return '_coconut.operator.methodcaller("' + tokens[0] + '")'
    # Call with arguments forwarded verbatim.
    return '_coconut.operator.methodcaller("' + tokens[0] + '", ' + tokens[2] + ")"
Process attrgetter literals.
def get_binary_stream(name):
    """Return a system stream opened for byte processing.

    This essentially returns the stream from the sys module with the given
    name, while smoothing over compatibility differences between Python
    versions — primarily it is needed for getting binary streams on
    Python 3.

    :param name: the name of the stream to open.  Valid names are
                 ``'stdin'``, ``'stdout'`` and ``'stderr'``
    """
    opener = binary_streams.get(name)
    if opener is not None:
        return opener()
    raise TypeError('Unknown standard stream %r' % name)
Returns a system stream for byte processing. This essentially returns the stream from the sys module with the given name but it solves some compatibility issues between different Python versions. Primarily this function is necessary for getting binary streams on Python 3. :param name: the name of the stream to open. Valid names are ``'stdin'``, ``'stdout'`` and ``'stderr'``
def set_vm_status(self, device='FLOPPY', boot_option='BOOT_ONCE', write_protect='YES'):
    """Set the Virtual Media drive status and configure the boot options
    used when booting from the virtual media.
    """
    call_args = ('set_vm_status', device, boot_option, write_protect)
    return self._call_method(*call_args)
Sets the Virtual Media drive status and allows the boot options for booting from the virtual media.
def execCmdThruIUCV(rh, userid, strCmd, hideInLog=[]):
    """ Send a command to a virtual machine using IUCV.

    Input:
       Request Handle
       Userid of the target virtual machine
       Command string to send
       (Optional) List of strCmd words (by index) to hide in
       sysLog by replacing the word with "<hidden>".

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, 2: failure
          rc        - RC returned from iucvclnt if overallRC != 0.
          rs        - RS returned from iucvclnt if overallRC != 0.
          errno     - Errno returned from iucvclnt if overallRC != 0.
          response  - Output of the iucvclnt command or this routine.

    Notes:
       1) This routine does not use the Request Handle printLn function.
          This is because an error might be expected and we might desire
          to suppress it.  Instead, any error messages are put in the
          response dictionary element that is returned.
    """
    if len(hideInLog) == 0:
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + strCmd)
    else:
        # Mask the sensitive words before the command reaches the syslog.
        logCmd = strCmd.split(' ')
        for i in hideInLog:
            logCmd[i] = '<hidden>'
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + ' '.join(logCmd))

    iucvpath = '/opt/zthin/bin/IUCV/'
    results = {
        'overallRC': 0,
        'rc': 0,
        'rs': 0,
        'errno': 0,
        'response': [],
    }

    cmd = ['sudo', iucvpath + "iucvclnt", userid, strCmd]
    try:
        results['response'] = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            close_fds=True)
        if isinstance(results['response'], bytes):
            results['response'] = bytes.decode(results['response'])
    except CalledProcessError as e:
        msg = []
        results['overallRC'] = 2
        results['rc'] = e.returncode
        output = bytes.decode(e.output)

        # Prefer the return code embedded in the command output over the
        # process exit code, when present.
        match = re.search('Return code (.+?),', output)
        if match:
            try:
                results['rc'] = int(match.group(1))
            except ValueError:
                # Return code in response from IUCVCLNT is not an int.
                msg = msgs.msg['0311'][1] % (modId, userid, strCmd,
                                             results['rc'], match.group(1),
                                             output)

        if not msg:
            # We got the rc. Now, get the rs.
            # Raw string avoids the invalid "\." escape warning.
            match = re.search(r'Reason code (.+?)\.', output)
            if match:
                try:
                    results['rs'] = int(match.group(1))
                except ValueError:
                    # Reason code in response from IUCVCLNT is not an int.
                    msg = msgs.msg['0312'][1] % (modId, userid, strCmd,
                                                 results['rc'],
                                                 match.group(1), output)

        if msg:
            # Already produced an error message.
            pass
        elif results['rc'] == 1:
            # Command was not authorized or a generic Linux error.
            msg = msgs.msg['0313'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        elif results['rc'] == 2:
            # IUCV client parameter error.
            msg = msgs.msg['0314'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        elif results['rc'] == 4:
            # IUCV socket error
            msg = msgs.msg['0315'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        elif results['rc'] == 8:
            # Executed command failed
            msg = msgs.msg['0316'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        elif results['rc'] == 16:
            # File Transport failed
            msg = msgs.msg['0317'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        elif results['rc'] == 32:
            # IUCV server file was not found on this system.
            # BUGFIX: was "msg +=", which — with msg still bound to the
            # initial empty list — extended the list with the individual
            # characters of the message string instead of assigning it.
            msg = msgs.msg['0318'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        else:
            # Unrecognized IUCV client error
            msg = msgs.msg['0319'][1] % (modId, userid, strCmd,
                                         results['rc'], results['rs'],
                                         output)
        results['response'] = msg
    except Exception as e:
        # Other exceptions from this system (i.e. not the managed system).
        results = msgs.msg['0421'][0]
        msg = msgs.msg['0421'][1] % (modId, strCmd,
                                     type(e).__name__, str(e))
        results['response'] = msg

    rh.printSysLog("Exit vmUtils.execCmdThruIUCV, rc: " +
                   str(results['rc']))
    return results
Send a command to a virtual machine using IUCV. Input: Request Handle Userid of the target virtual machine Command string to send (Optional) List of strCmd words (by index) to hide in sysLog by replacing the word with "<hidden>". Output: Dictionary containing the following: overallRC - overall return code, 0: success, 2: failure rc - RC returned from iucvclnt if overallRC != 0. rs - RS returned from iucvclnt if overallRC != 0. errno - Errno returned from iucvclnt if overallRC != 0. response - Output of the iucvclnt command or this routine. Notes: 1) This routine does not use the Request Handle printLn function. This is because an error might be expected and we might desire to suppress it. Instead, any error messages are put in the response dictionary element that is returned.
def extract_labels(self) -> np.ndarray:
    """Extract the condition label of every epoch.

    Returns
    -------
    np.ndarray
        The condition label of each epoch, ordered by epoch index.
    """
    cond_idx, ep_idx, _ = np.where(self)
    # np.unique returns epoch indices sorted; return_index gives the first
    # occurrence of each epoch, whose condition index is its label.
    _, first_hits = np.unique(ep_idx, return_index=True)
    return cond_idx[first_hits]
Extract condition labels. Returns ------- np.ndarray The condition label of each epoch.
def stub(base_class=None, **attributes):
    """Create a Python class on-the-fly with the given keyword arguments as
    class attributes (accessible with ``.attrname``) and return an instance.

    The new class inherits from *base_class* (``object`` by default).
    Use this to mock rather than stub.

    Args:
        base_class: optional base class for the generated ``<Name>Stub``.
        **attributes: names/values installed as class attributes.

    Returns:
        An instance of the freshly created stub class.
    """
    if base_class is None:
        base_class = object
    members = {
        "__init__": lambda self: None,
        # BUGFIX: forward keyword arguments with "**kw" — the original
        # "*kw" unpacked only the dict's keys as positional arguments.
        "__new__": lambda *args, **kw: object.__new__(
            *args, **kw
        ),  # remove __new__ and metaclass behavior from object
        "__metaclass__": None,
    }
    members.update(attributes)
    # let's create a python class on-the-fly :)
    return type(f"{base_class.__name__}Stub", (base_class,), members)()
Creates a Python class on-the-fly with the given keyword arguments as class attributes, accessible with .attrname. The new class inherits from the given base class (object by default). Use this to mock rather than stub.
def _log_start_transaction(self, endpoint, data, json, files, params):
    """Log the beginning of an API request."""
    # TODO: add information about the caller, i.e. which module + line of code called the .request() method
    # This can be done by fetching current traceback and then traversing it until we find the request function
    self._requests_counter += 1
    if not self._is_logging:
        return
    parts = ["\n---- %d --------------------------------------------------------\n" % self._requests_counter,
             "[%s] %s\n" % (time.strftime("%H:%M:%S"), endpoint)]
    if params is not None:
        parts.append("   params: {%s}\n" % ", ".join("%s:%s" % item for item in viewitems(params)))
    if data is not None:
        parts.append("   body: {%s}\n" % ", ".join("%s:%s" % item for item in viewitems(data)))
    if json is not None:
        import json as j
        parts.append("   json: %s\n" % j.dumps(json))
    if files is not None:
        parts.append("   file: %s\n" % ", ".join(f.name for f in viewvalues(files)))
    self._log_message("".join(parts) + "\n")
Log the beginning of an API request.
def factory_profiles(self):
    '''The factory profiles of all loaded modules.'''
    with self._mutex:
        # Build the cache lazily on first access.
        if not self._factory_profiles:
            self._factory_profiles = [
                utils.nvlist_to_dict(fp.properties)
                for fp in self._obj.get_factory_profiles()
            ]
        return self._factory_profiles
The factory profiles of all loaded modules.
def get_cov_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
                   lambda1=None, lambda2=None, quadparam1=None,
                   quadparam2=None):
    """
    Function to convert between masses and spins and locations in the xi
    parameter space. Xi = Cartesian metric and rotated to principal
    components.

    Parameters
    -----------
    mass1 : float
        Mass of heavier body.
    mass2 : float
        Mass of lighter body.
    spin1z : float
        Spin of body 1.
    spin2z : float
        Spin of body 2.
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric
        and the eigenvalues, eigenvectors and covariance matrix
        needed to manipulate the space.
    fUpper : float
        The value of fUpper to use when getting the mu coordinates from the
        lambda coordinates. This must be a key in metricParams.evals,
        metricParams.evecs and metricParams.evecsCV
        (ie. we must know how to do the transformation for
        the given value of fUpper)

    Returns
    --------
    xis : list of floats or numpy.arrays
        Position of the system(s) in the xi coordinate system
    """
    # Step 1: masses/spins -> lambdas -> mu coordinates.
    mus = get_conv_params(mass1, mass2, spin1z, spin2z, metricParams, fUpper,
                          lambda1=lambda1, lambda2=lambda2,
                          quadparam1=quadparam1, quadparam2=quadparam2)
    # Step 2: mus -> xis via the covariance eigenvectors for this fUpper.
    return get_covaried_params(mus, metricParams.evecsCV[fUpper])
Function to convert between masses and spins and locations in the xi parameter space. Xi = Cartesian metric and rotated to principal components. Parameters ----------- mass1 : float Mass of heavier body. mass2 : float Mass of lighter body. spin1z : float Spin of body 1. spin2z : float Spin of body 2. metricParams : metricParameters instance Structure holding all the options for construction of the metric and the eigenvalues, eigenvectors and covariance matrix needed to manipulate the space. fUpper : float The value of fUpper to use when getting the mu coordinates from the lambda coordinates. This must be a key in metricParams.evals, metricParams.evecs and metricParams.evecsCV (ie. we must know how to do the transformation for the given value of fUpper) Returns -------- xis : list of floats or numpy.arrays Position of the system(s) in the xi coordinate system
def user(self, id, expand=None):
    """Get a user Resource from the server.

    :param id: ID of the user to get
    :param id: str
    :param expand: Extra information to fetch inside each resource
    :type expand: Optional[Any]

    :rtype: User
    """
    params = {} if expand is None else {'expand': expand}
    resource = User(self._options, self._session)
    resource.find(id, params=params)
    return resource
Get a user Resource from the server. :param id: ID of the user to get :param id: str :param expand: Extra information to fetch inside each resource :type expand: Optional[Any] :rtype: User
def Debugger_setBlackboxedRanges(self, scriptId, positions):
    """
    Function path: Debugger.setBlackboxedRanges
        Domain: Debugger
        Method name: setBlackboxedRanges

        WARNING: This function is marked 'Experimental'!

        Parameters:
            Required arguments:
                'scriptId' (type: Runtime.ScriptId) -> Id of the script.
                'positions' (type: array) -> No description
        No return value.

        Description: Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted.
    """
    # Client-side type check before forwarding the command to the backend.
    assert isinstance(positions, (list, tuple)
        ), "Argument 'positions' must be of type '['list', 'tuple']'. Received type: '%s'" % type(
        positions)
    # Issue the CDP command synchronously and return its result.
    subdom_funcs = self.synchronous_command('Debugger.setBlackboxedRanges',
        scriptId=scriptId, positions=positions)
    return subdom_funcs
Function path: Debugger.setBlackboxedRanges Domain: Debugger Method name: setBlackboxedRanges WARNING: This function is marked 'Experimental'! Parameters: Required arguments: 'scriptId' (type: Runtime.ScriptId) -> Id of the script. 'positions' (type: array) -> No description No return value. Description: Makes backend skip steps in the script in blackboxed ranges. VM will try leave blacklisted scripts by performing 'step in' several times, finally resorting to 'step out' if unsuccessful. Positions array contains positions where blackbox state is changed. First interval isn't blackboxed. Array should be sorted.
def node_save(self, astr_pathInTree, **kwargs):
    """
    Save the node specified by a path in the data tree to disk.

    Typically called by the explore()/recurse() methods and of form:

        f(pathInTree, **kwargs)

    and returns a dictionary of which one element is

        'status': True|False

    A recursion continuation flag is also returned:

        'continue': True|False

    to signal the calling parent whether or not to continue with the tree
    traversal.

    Given a "root" on the disk storage, create the path relative to that
    root, and in that location, save the contents of the internal node's
    d_data at that tree path location.

    Recognized kwargs:
        startPath       tree path of the node to save (default '')
        pathDiskRoot    disk directory used as the save root (default '/tmp')
        failOnDirExist  fail if the target directory already exists
        saveJSON        save each d_data entry as JSON text (default True)
        savePickle      save each d_data entry as a pickle (default False)

    NOTE(review): astr_pathInTree is unused — the tree path is taken from
    the 'startPath' kwarg instead; confirm against callers.

    :param kwargs:
    :return: dict with 'status' and 'continue' keys, plus 'message' (and
             possibly 'exception') on failure.
    """
    import pickle

    str_pathDiskRoot    = '/tmp'
    str_pathDiskOrig    = os.getcwd()
    str_pathDiskFull    = ''
    str_pathTree        = ''
    str_pathTreeOrig    = self.pwd()
    b_failOnDirExist    = True
    b_saveJSON          = True
    b_savePickle        = False
    for key, val in kwargs.items():
        if key == 'startPath':      str_pathTree        = val
        if key == 'pathDiskRoot':   str_pathDiskRoot    = val
        if key == 'failOnDirExist': b_failOnDirExist    = val
        if key == 'saveJSON':       b_saveJSON          = val
        if key == 'savePickle':     b_savePickle        = val

    str_pathDiskFull = str_pathDiskRoot + str_pathTree

    if not len(str_pathDiskRoot):
        return {'status':   False,
                'continue': False,
                'message':  'pathDisk not specified'}

    # Make sure the disk root exists.
    if not os.path.isdir(str_pathDiskRoot):
        try:
            os.makedirs(str_pathDiskRoot)
        except OSError as exception:
            return {'status':    False,
                    'continue':  False,
                    'message':   'unable to create pathDiskRoot: %s' % str_pathDiskRoot,
                    'exception': exception}
    os.chdir(str_pathDiskRoot)

    if not self.cd(str_pathTree)['status']:
        return {'status':   False,
                'continue': False,
                'message':  'pathTree invalid'}

    if str_pathTree != '/':
        try:
            os.makedirs(str_pathDiskFull)
        except OSError as exception:
            if b_failOnDirExist:
                # BUGFIX: report the path that actually failed
                # (previously reported str_pathDiskRoot).
                return {'status':    False,
                        'continue':  False,
                        'message':   'unable to create pathDiskFull: %s' % str_pathDiskFull,
                        'exception': exception}
    os.chdir(str_pathDiskFull)

    # Persist each entry of the node's payload as its own file.
    for str_filename, contents in self.snode_current.d_data.items():
        if b_saveJSON:
            with open(str_filename, 'w') as f:
                json.dump(contents, f)
        if b_savePickle:
            # BUGFIX: the original serialized with json.dump into a
            # binary-mode file, which fails on Python 3; use pickle.
            with open(str_filename, 'wb') as f:
                pickle.dump(contents, f)

    # Restore both the in-tree and on-disk working locations.
    self.cd(str_pathTreeOrig)
    os.chdir(str_pathDiskOrig)
    return {'status': True, 'continue': True}
Typically called by the explore()/recurse() methods and of the form: f(pathInTree, **kwargs). Returns a dictionary of which one element is 'status': True|False. A recursion continuation flag is also returned: 'continue': True|False, to signal the calling parent whether or not to continue with the tree traversal. Save the node specified by a path in the data tree to disk. Given a "root" on the disk storage, create the path relative to that root, and in that location, save the contents of the internal node's d_data at that tree path location. :param kwargs: :return:
def emit(self, signalName, *args, **kwargs):
    """Emit the named signal if it is registered.

    Any additional positional and keyword arguments are forwarded to the
    signal.

    :param signalName: the signal name to emit
    """
    assert signalName in self, "%s is not a registered signal" % signalName
    signal = self[signalName]
    signal.emit(*args, **kwargs)
Emits a signal by name if it exists. Any additional args or kwargs are passed to the signal :param signalName: the signal name to emit
def get_label(self, name):
    """
    :calls: `GET /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_
    :param name: string
    :rtype: :class:`github.Label.Label`
    """
    # NOTE: Python 2 code — ``unicode`` and ``urllib.quote`` do not exist
    # under Python 3.
    assert isinstance(name, (str, unicode)), name
    label_url = self.url + "/labels/" + urllib.quote(name)
    headers, data = self._requester.requestJsonAndCheck("GET", label_url)
    return github.Label.Label(self._requester, headers, data, completed=True)
:calls: `GET /repos/:owner/:repo/labels/:name <http://developer.github.com/v3/issues/labels>`_ :param name: string :rtype: :class:`github.Label.Label`
def read_igor_marginals_txt(marginals_file_name , dim_names=False):
    """Load raw IGoR model marginals.

    NOTE(review): Python 2 code (uses the ``print`` statement); the
    ``dim_names`` parameter is currently unused — confirm against callers.

    Parameters
    ----------
    marginals_file_name : str
        File name for a IGOR model marginals file.

    Returns
    -------
    model_dict : dict
        Dictionary with model marginals.
    dimension_names_dict : dict
        Dictionary that defines IGoR model dependecies.

    """
    with open(marginals_file_name,'r') as file:
        #Model parameters are stored inside a dictionary of ndarrays
        model_dict = {}
        dimension_names_dict = {}
        element_name=""
        first = True
        first_dim_line = False
        element_marginal_array = []
        indices_array = []

        for line in file:
            strip_line = line.rstrip('\n') #Remove end of line character
            # '@' lines name a new model element; flush the previous one.
            if strip_line[0]=='@':
                first_dim_line = True
                if not(first):
                    #Add the previous to the dictionnary
                    model_dict[element_name] = element_marginal_array
                else:
                    first = False
                element_name = strip_line[1:]
            # '$Dim[...]' lines declare the marginal array's dimensions.
            if strip_line[0]=='$':
                #define array dimensions
                coma_index = strip_line.find(',')
                dimensions = []
                #Get rid of $Dim[
                previous_coma_index = 4
                while coma_index != -1:
                    dimensions.append(int(strip_line[previous_coma_index+1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',',coma_index+1)
                #Add last dimension and get rid of the closing bracket
                dimensions.append(int(strip_line[previous_coma_index+1:-1]))
                element_marginal_array = np.ndarray(shape=dimensions)
            # '#' lines carry the index labels for the following '%' line.
            if strip_line[0]=='#':
                if first_dim_line:
                    # First '#' line after '@': record dependency names.
                    dimensions_names = []
                    if len(dimensions) > 1:
                        comma_index = strip_line.find(',')
                        opening_bracket_index = strip_line.find('[')
                        while opening_bracket_index != -1:
                            dimensions_names.append(strip_line[opening_bracket_index+1:comma_index])
                            opening_bracket_index = strip_line.find('[',comma_index)
                            comma_index = strip_line.find(',',opening_bracket_index)
                    first_dim_line = False
                    dimensions_names.append(element_name)
                    dimension_names_dict[element_name] = dimensions_names
                #update indices
                indices_array = []
                if len(dimensions) > 1:
                    comma_index = strip_line.find(',')
                    closing_brack_index = strip_line.find(']')
                    while closing_brack_index != -1:
                        indices_array.append(int(strip_line[comma_index+1:closing_brack_index]))
                        opening_bracket_index = strip_line.find('[',closing_brack_index)
                        comma_index = strip_line.find(',',opening_bracket_index)
                        closing_brack_index = strip_line.find(']',closing_brack_index+1)
            # '%' lines hold the comma-separated marginal values for the
            # index combination recorded from the preceding '#' line.
            if strip_line[0]=='%':
                #read doubles
                coma_index = strip_line.find(',')
                marginals_values = []
                #Get rid of the %
                previous_coma_index = 0
                while coma_index != -1:
                    marginals_values.append(float(strip_line[previous_coma_index+1:coma_index]))
                    previous_coma_index = coma_index
                    coma_index = strip_line.find(',',coma_index+1)
                #Add last dimension and get rid of the closing bracket
                marginals_values.append(float(strip_line[previous_coma_index+1:]))
                if len(marginals_values)!=dimensions[-1]:
                    print "problem"
                element_marginal_array[tuple(indices_array)] = marginals_values
        # Flush the final element once the file ends.
        model_dict[element_name] = element_marginal_array

    return [model_dict,dimension_names_dict]
Load raw IGoR model marginals. Parameters ---------- marginals_file_name : str File name for an IGoR model marginals file. Returns ------- model_dict : dict Dictionary with model marginals. dimension_names_dict : dict Dictionary that defines IGoR model dependencies.
def load_local_config(filename):
    """Loads the pylint.config.py file.

    Args:
        filename (str): The python file containing the local configuration.

    Returns:
        module: The loaded Python module (an empty ``local_pylint_config``
        module when *filename* is falsy).
    """
    # Local imports keep this self-contained; ``imp`` (used previously)
    # was deprecated since Python 3.4 and removed in 3.12.
    import importlib.util
    import types

    if not filename:
        return types.ModuleType('local_pylint_config')
    spec = importlib.util.spec_from_file_location('local_pylint_config',
                                                  filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
Loads the pylint.config.py file. Args: filename (str): The python file containing the local configuration. Returns: module: The loaded Python module.
def generator(self, Xgen, Xexc, Xgov, Vgen):
    """ Generator model.

    Computes the state derivative matrix F for all dynamic generators.

    Based on Generator.m from MatDyn by Stijn Cole, developed at
    Katholieke Universiteit Leuven. See
    U{http://www.esat.kuleuven.be/electa/teaching/matdyn/} for more
    information.
    """
    generators = self.dyn_generators

    # Synchronous reference speed [rad/s].
    omegas = 2 * pi * self.freq

    F = zeros(Xgen.shape)

    typ1 = [g._i for g in generators if g.model == CLASSICAL]
    typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]

    # Generator type 1: classical model
    omega = Xgen[typ1, 1]
    Pm0 = Xgov[typ1, 0]

    H = array([g.h for g in generators])[typ1]
    D = array([g.d for g in generators])[typ1]

    Pe = Vgen[typ1, 2]

    # BUGFIX: was "ddelta = omega = omegas" — an assignment chain that
    # both zeroed the damping term below and set ddelta to the reference
    # speed (cf. the identical line in the 4th-order model).
    ddelta = omega - omegas
    domega = pi * self.freq / H * (-D * (omega - omegas) + Pm0 - Pe)
    dEq = zeros(len(typ1))

    F[typ1, :] = c_[ddelta, domega, dEq]

    # Generator type 2: 4th order model
    omega = Xgen[typ2, 1]
    Eq_tr = Xgen[typ2, 2]
    Ed_tr = Xgen[typ2, 3]

    # BUGFIX: these parameter vectors were not restricted to the type-2
    # generators (unlike the type-1 block above), which breaks
    # broadcasting whenever the system mixes classical and 4th-order
    # machines.
    H = array([g.h for g in generators])[typ2]
    D = array([g.d for g in generators])[typ2]
    xd = array([g.xd for g in generators])[typ2]
    xq = array([g.xq for g in generators])[typ2]
    xd_tr = array([g.xd_tr for g in generators])[typ2]
    xq_tr = array([g.xq_tr for g in generators])[typ2]
    Td0_tr = array([g.td for g in generators])[typ2]
    Tq0_tr = array([g.tq for g in generators])[typ2]

    Id = Vgen[typ2, 0]
    Iq = Vgen[typ2, 1]
    Pe = Vgen[typ2, 2]

    Efd = Xexc[typ2, 0]
    Pm = Xgov[typ2, 0]

    ddelta = omega - omegas
    domega = pi * self.freq / H * (-D * (omega - omegas) + Pm - Pe)
    dEq = 1 / Td0_tr * (Efd - Eq_tr + (xd - xd_tr) * Id)
    dEd = 1 / Tq0_tr * (-Ed_tr - (xq - xq_tr) * Iq)

    F[typ2, :] = c_[ddelta, domega, dEq, dEd]

    # Generator type 3:

    # Generator type 4:

    return F
Generator model. Based on Generator.m from MatDyn by Stijn Cole, developed at Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/ matdyn/} for more information.
def argument_parser(args):
    """Argparse logic, command line options.

    Args:
        args: sys.argv[1:], everything passed to the program after its name

    Returns:
        A tuple of:
            a list of words/letters to search
            a boolean to declare if we want to use the sowpods words file
            a boolean to declare if we want to output anagrams by length
            a string of starting characters to find anagrams based on
            a string of ending characters to find anagrams based on

    Raises:
        SystemExit if the user passes invalid arguments, --version or --help
    """
    parser = argparse.ArgumentParser(
        prog="nagaram",
        # BUGFIX: user-facing typo, was "Scabble".
        description="Finds Scrabble anagrams.",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,
    )
    # Custom -h/--help so we can print the module docstring instead of
    # argparse's generated help text (see below).
    parser.add_argument(
        "-h",
        "--help",
        dest="help",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--sowpods",
        dest="sowpods",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--length",
        "-l",
        dest="length",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--starts-with",
        "-s",
        dest="starts_with",
        metavar="chars",
        default="",
        nargs=1,
        type=str,
    )
    parser.add_argument(
        "--ends-with",
        "-e",
        dest="ends_with",
        metavar="chars",
        default="",
        nargs=1,
        type=str,
    )
    parser.add_argument(
        "--version",
        "-v",
        action="version",
        version="Nagaram {0} (Released: {1})".format(
            nagaram.__version__,
            nagaram.__release_date__,
        )
    )
    # Everything left over is the letters/words to anagram.
    parser.add_argument(
        dest="wordlist",
        metavar="letters to find anagrams with (? for anything, _ for blanks)",
        nargs=argparse.REMAINDER,
    )

    settings = parser.parse_args(args)

    if settings.help:
        raise SystemExit(nagaram.__doc__.strip())

    if not settings.wordlist:
        raise SystemExit(parser.print_usage())

    # nargs=1 wraps the value in a single-element list; unwrap it.
    if settings.starts_with:
        settings.starts_with = settings.starts_with[0]
    if settings.ends_with:
        settings.ends_with = settings.ends_with[0]

    return (settings.wordlist, settings.sowpods, settings.length,
            settings.starts_with, settings.ends_with)
Argparse logic, command line options. Args: args: sys.argv[1:], everything passed to the program after its name Returns: A tuple of: a list of words/letters to search a boolean to declare if we want to use the sowpods words file a boolean to declare if we want to output anagrams by length a string of starting characters to find anagrams based on a string of ending characters to find anagrams based on Raises: SystemExit if the user passes invalid arguments, --version or --help
def _insert_file(cursor, file, media_type):
    """Upsert the ``file`` and ``media_type`` into the files table.

    Returns the ``fileid`` and ``sha1`` of the upserted file.
    """
    resource_hash = _get_file_sha1(file)
    cursor.execute("SELECT fileid FROM files WHERE sha1 = %s",
                   (resource_hash,))
    row = cursor.fetchone()
    if row:
        # Already present; reuse the existing row.
        fileid = row[0]
    else:
        cursor.execute("INSERT INTO files (file, media_type) "
                       "VALUES (%s, %s)"
                       "RETURNING fileid",
                       (psycopg2.Binary(file.read()), media_type,))
        fileid = cursor.fetchone()[0]
    return fileid, resource_hash
Upsert the ``file`` and ``media_type`` into the files table. Returns the ``fileid`` and ``sha1`` of the upserted file.
def job_listener(event):
    '''Listens to completed job'''
    job_id = event.job.args[0]

    if event.code == events.EVENT_JOB_MISSED:
        db.mark_job_as_missed(job_id)
    elif event.exception:
        # Known job errors carry structured details; anything else gets a
        # formatted traceback string.
        if isinstance(event.exception, util.JobError):
            error_object = event.exception.as_dict()
        else:
            trace_lines = traceback.format_tb(event.traceback)
            trace_lines.append(repr(event.exception))
            error_object = "\n".join(trace_lines)
        db.mark_job_as_errored(job_id, error_object)
    else:
        db.mark_job_as_completed(job_id, event.retval)

    api_key = db.get_job(job_id)["api_key"]
    if not send_result(job_id, api_key):
        db.mark_job_as_failed_to_post_result(job_id)

    # Optionally notify tests that job_listener() has finished.
    if "_TEST_CALLBACK_URL" in app.config:
        requests.get(app.config["_TEST_CALLBACK_URL"])
Listens to completed job
def install(name=None, sources=None, saltenv='base', **kwargs):
    '''
    Install the passed package. Can install packages from the following
    sources:

    * Locally (package already exists on the minion
    * HTTP/HTTPS server
    * FTP server
    * Salt master

    Returns a dict containing the new package names and versions:

    .. code-block:: python

        {'<package>': {'old': '<old-version>',
                       'new': '<new-version>'}}

    CLI Examples:

    .. code-block:: bash

        # Installing a data stream pkg that already exists on the minion

        salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]'

        # Installing a data stream pkg that exists on the salt master

        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]'

    CLI Example:

    .. code-block:: bash

        # Installing a data stream pkg that exists on a HTTP server
        salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]'
        salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]'

    If working with solaris zones and you want to install a package only in
    the global zone you can pass 'current_zone_only=True' to salt to have the
    package only installed in the global zone. (Behind the scenes this is
    passing '-G' to the pkgadd command.) Solaris default when installing a
    package in the global zone is to install it in all zones. This overrides
    that and installs the package only in the global.

    CLI Example:

    .. code-block:: bash

        # Installing a data stream package only in the global zone:
        salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True

    By default salt automatically provides an adminfile, to automate package
    installation, with these options set::

        email=
        instance=quit
        partial=nocheck
        runlevel=nocheck
        idepend=nocheck
        rdepend=nocheck
        space=nocheck
        setuid=nocheck
        conflict=nocheck
        action=nocheck
        basedir=default

    You can override any of these options in two ways. First you can
    optionally pass any of the options as a kwarg to the module/state to
    override the default value or you can optionally pass the 'admin_source'
    option providing your own adminfile to the minions.

    Note: You can find all of the possible options to provide to the adminfile
    by reading the admin man page:

    .. code-block:: bash

        man -s 4 admin

    CLI Example:

    .. code-block:: bash

        # Overriding the 'instance' adminfile option when calling the module directly
        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite"

    SLS Example:

    .. code-block:: yaml

        # Overriding the 'instance' adminfile option when used in a state

        SMClgcc346:
          pkg.installed:
            - sources:
              - SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg
            - instance: overwrite

    .. note::
        The ID declaration is ignored, as the package name is read from the
        ``sources`` parameter.

    CLI Example:

    .. code-block:: bash

        # Providing your own adminfile when calling the module directly
        salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>'

        # Providing your own adminfile when using states
        <pkg name>:
          pkg.installed:
            - sources:
              - <pkg name>: salt://pkgs/<pkg filename>
            - admin_source: salt://pkgs/<adminfile filename>

    .. note::
        The ID declaration is ignored, as the package name is read from the
        ``sources`` parameter.
    '''
    if salt.utils.data.is_true(kwargs.get('refresh')):
        log.warning('\'refresh\' argument not implemented for solarispkg '
                    'module')

    # pkgs is not supported, but must be passed here for API compatibility
    pkgs = kwargs.pop('pkgs', None)
    try:
        pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
            name, pkgs, sources, **kwargs
        )
    except MinionError as exc:
        raise CommandExecutionError(exc)

    if not pkg_params:
        return {}

    if not sources:
        log.error('"sources" param required for solaris pkg_add installs')
        return {}

    try:
        if 'admin_source' in kwargs:
            adminfile = __salt__['cp.cache_file'](kwargs['admin_source'],
                                                  saltenv)
        else:
            adminfile = _write_adminfile(kwargs)

        old = list_pkgs()
        cmd_prefix = ['/usr/sbin/pkgadd', '-n', '-a', adminfile]

        # Only makes sense in a global zone but works fine in non-globals.
        # BUGFIX: the original compared against the literal string 'True'
        # (missing boolean True from SLS data) and did cmd_prefix += '-G ',
        # which extends the *list* with the characters '-', 'G', ' '.
        # Use salt's truthiness helper and append a proper one-element list.
        if salt.utils.data.is_true(kwargs.get('current_zone_only')):
            cmd_prefix += ['-G']

        errors = []
        for pkg in pkg_params:
            cmd = cmd_prefix + ['-d', pkg, 'all']
            # Install the package{s}
            out = __salt__['cmd.run_all'](cmd,
                                          output_loglevel='trace',
                                          python_shell=False)

            if out['retcode'] != 0 and out['stderr']:
                errors.append(out['stderr'])

        __context__.pop('pkg.list_pkgs', None)
        new = list_pkgs()
        ret = salt.utils.data.compare_dicts(old, new)

        if errors:
            raise CommandExecutionError(
                'Problem encountered installing package(s)',
                info={'errors': errors, 'changes': ret}
            )
    finally:
        # Remove the temp adminfile; NameError covers the case where the
        # cache_file/_write_adminfile call above raised before assignment.
        if 'admin_source' not in kwargs:
            try:
                os.remove(adminfile)
            except (NameError, OSError):
                pass

    return ret
Install the passed package. Can install packages from the following sources: * Locally (package already exists on the minion * HTTP/HTTPS server * FTP server * Salt master Returns a dict containing the new package names and versions: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Examples: .. code-block:: bash # Installing a data stream pkg that already exists on the minion salt '*' pkg.install sources='[{"<pkg name>": "/dir/on/minion/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' # Installing a data stream pkg that exists on the salt master salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "salt://pkgs/gcc-3.4.6-sol10-sparc-local.pkg"}]' CLI Example: .. code-block:: bash # Installing a data stream pkg that exists on a HTTP server salt '*' pkg.install sources='[{"<pkg name>": "http://packages.server.com/<pkg filename>"}]' salt '*' pkg.install sources='[{"SMClgcc346": "http://packages.server.com/gcc-3.4.6-sol10-sparc-local.pkg"}]' If working with solaris zones and you want to install a package only in the global zone you can pass 'current_zone_only=True' to salt to have the package only installed in the global zone. (Behind the scenes this is passing '-G' to the pkgadd command.) Solaris default when installing a package in the global zone is to install it in all zones. This overrides that and installs the package only in the global. CLI Example: .. 
code-block:: bash # Installing a data stream package only in the global zone: salt 'global_zone' pkg.install sources='[{"SMClgcc346": "/var/spool/pkg/gcc-3.4.6-sol10-sparc-local.pkg"}]' current_zone_only=True By default salt automatically provides an adminfile, to automate package installation, with these options set:: email= instance=quit partial=nocheck runlevel=nocheck idepend=nocheck rdepend=nocheck space=nocheck setuid=nocheck conflict=nocheck action=nocheck basedir=default You can override any of these options in two ways. First you can optionally pass any of the options as a kwarg to the module/state to override the default value or you can optionally pass the 'admin_source' option providing your own adminfile to the minions. Note: You can find all of the possible options to provide to the adminfile by reading the admin man page: .. code-block:: bash man -s 4 admin CLI Example: .. code-block:: bash # Overriding the 'instance' adminfile option when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' instance="overwrite" SLS Example: .. code-block:: yaml # Overriding the 'instance' adminfile option when used in a state SMClgcc346: pkg.installed: - sources: - SMClgcc346: salt://srv/salt/pkgs/gcc-3.4.6-sol10-sparc-local.pkg - instance: overwrite .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter. CLI Example: .. code-block:: bash # Providing your own adminfile when calling the module directly salt '*' pkg.install sources='[{"<pkg name>": "salt://pkgs/<pkg filename>"}]' admin_source='salt://pkgs/<adminfile filename>' # Providing your own adminfile when using states <pkg name>: pkg.installed: - sources: - <pkg name>: salt://pkgs/<pkg filename> - admin_source: salt://pkgs/<adminfile filename> .. note:: The ID declaration is ignored, as the package name is read from the ``sources`` parameter.
def run(*awaitables, timeout: float = None):
    """
    By default run the event loop forever.

    When awaitables (like Tasks, Futures or coroutines) are given then
    run the event loop until each has completed and return their results.

    An optional timeout (in seconds) can be given that will raise
    asyncio.TimeoutError if the awaitables are not ready within the
    timeout period.
    """
    loop = asyncio.get_event_loop()
    if not awaitables:
        # No awaitables: block in the event loop until something stops it.
        if loop.is_running():
            return
        loop.run_forever()
        # Loop was stopped: cancel whatever tasks are still pending and
        # drain them so their cleanup code runs.
        # NOTE(review): asyncio.Task.all_tasks() was deprecated in Python
        # 3.7 and removed in 3.9; on modern interpreters this branch needs
        # asyncio.all_tasks() instead -- confirm the supported versions.
        f = asyncio.gather(*asyncio.Task.all_tasks())
        f.cancel()
        result = None
        try:
            loop.run_until_complete(f)
        except asyncio.CancelledError:
            pass
    else:
        # Single awaitable runs as-is; several are gathered into one future.
        if len(awaitables) == 1:
            future = awaitables[0]
        else:
            future = asyncio.gather(*awaitables)
        if timeout:
            future = asyncio.wait_for(future, timeout)
        task = asyncio.ensure_future(future)

        def onError(_):
            # A global error cancels the in-flight task...
            task.cancel()

        globalErrorEvent.connect(onError)
        try:
            result = loop.run_until_complete(task)
        except asyncio.CancelledError as e:
            # ...and the original global error (if any) is re-raised here
            # in preference to the bare CancelledError.
            raise globalErrorEvent.value() or e
        finally:
            globalErrorEvent.disconnect(onError)

    return result
By default run the event loop forever. When awaitables (like Tasks, Futures or coroutines) are given then run the event loop until each has completed and return their results. An optional timeout (in seconds) can be given that will raise asyncio.TimeoutError if the awaitables are not ready within the timeout period.
def set_dut_configuration(self, ident, config):
    """Set requirements for dut ident.

    :param ident: Identity of dut.
    :param config: If ResourceRequirements object, add object as requirements
        for resource ident. If dictionary, create new ResourceRequirements
        object from dictionary.
    :return: Nothing
    """
    # Duck-typed: anything exposing get_requirements() is stored as-is;
    # a plain dict is wrapped; any other type is silently ignored.
    if hasattr(config, "get_requirements"):
        requirements = config
    elif isinstance(config, dict):
        requirements = ResourceRequirements(config)
    else:
        return
    self._dut_requirements[ident] = requirements
Set requirements for dut ident. :param ident: Identity of dut. :param config: If ResourceRequirements object, add object as requirements for resource ident. If dictionary, create new ResourceRequirements object from dictionary. :return: Nothing
def _read_datasets(self, dataset_nodes, **kwargs):
    """Read the given datasets from file."""
    # Group the requested dataset IDs by the reader that provides them.
    ids_by_reader = {}
    for node in dataset_nodes:
        ds_id = node.name
        # Skip IDs that are already loaded, and nodes assigned by the
        # user (user-assigned nodes do not carry dict data).
        if ds_id in self.datasets or not isinstance(node.data, dict):
            continue
        reader_name = node.data.get('reader_name')
        if reader_name is None:
            # This shouldn't be possible
            raise RuntimeError("Dependency tree has a corrupt node.")
        ids_by_reader.setdefault(reader_name, set()).add(ds_id)

    # Ask each reader for all of its datasets in a single load() call.
    loaded_datasets = DatasetDict()
    for reader_name, ds_ids in ids_by_reader.items():
        reader_instance = self.readers[reader_name]
        loaded_datasets.update(reader_instance.load(ds_ids, **kwargs))

    self.datasets.update(loaded_datasets)
    return loaded_datasets
Read the given datasets from file.
def unzip(from_file, to_folder):
    """
    Convenience function.

    Extracts files from the zip file `fromFile` into the folder `toFolder`.

    """
    archive_path = os.path.abspath(from_file)
    target_path = os.path.abspath(to_folder)
    with ZipFile(archive_path, 'r') as archive:
        archive.extractall(target_path)
Convenience function. Extracts files from the zip file `fromFile` into the folder `toFolder`.
def map_reduce(self, map, reduce, out, full_response=False, **kwargs):
    """Perform a map/reduce operation on this collection.

    If `full_response` is ``False`` (default) returns a
    :class:`~pymongo.collection.Collection` instance containing
    the results of the operation. Otherwise, returns the full
    response from the server to the `map reduce command`_.

    :Parameters:
      - `map`: map function (as a JavaScript string)
      - `reduce`: reduce function (as a JavaScript string)
      - `out`: output collection name or `out object` (dict). See
        the `map reduce command`_ documentation for available options.
        Note: `out` options are order sensitive. :class:`~bson.son.SON`
        can be used to specify multiple options.
        e.g. SON([('replace', <collection name>), ('db', <database name>)])
      - `full_response` (optional): if ``True``, return full response to
        this command - otherwise just return the result collection
      - `**kwargs` (optional): additional arguments to the
        `map reduce command`_ may be passed as keyword arguments to this
        helper method, e.g.::

            >>> db.test.map_reduce(map, reduce, "myresults", limit=2)

    .. note:: The :meth:`map_reduce` method does **not** obey the
       :attr:`read_preference` of this :class:`Collection`. To run
       mapReduce on a secondary use the :meth:`inline_map_reduce`
       method instead.

    .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of
       this collection is automatically applied to this operation (if the
       output is not inline) when using MongoDB >= 3.4.

    .. versionchanged:: 3.4
       Apply this collection's write concern automatically to this operation
       when connected to MongoDB >= 3.4.

    .. seealso:: :doc:`/examples/aggregation`

    .. versionchanged:: 3.4
       Added the `collation` option.

    .. versionchanged:: 2.2
       Removed deprecated arguments: merge_output and reduce_output

    .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/

    .. mongodoc:: mapreduce

    """
    if not isinstance(out, (string_type, collections.Mapping)):
        raise TypeError("'out' must be an instance of "
                        "%s or a mapping" % (string_type.__name__,))

    # Build the command document; mapReduce key order matters to the
    # server, hence SON instead of a plain dict.
    cmd = SON([("mapreduce", self.__name),
               ("map", map),
               ("reduce", reduce),
               ("out", out)])
    # collation is popped out of kwargs so it is validated and passed to
    # _command separately rather than embedded in the command document.
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)

    # Inline output returns results directly instead of writing a
    # collection; several decisions below hinge on this.
    inline = 'inline' in cmd['out']

    with self._socket_for_primary_reads() as (sock_info, slave_ok):
        # MongoDB >= 3.4 (wire version 5): apply this collection's write
        # concern automatically for non-inline output.
        if (sock_info.max_wire_version >= 5 and self.write_concern and
                not inline):
            cmd['writeConcern'] = self.write_concern.document
        # NOTE(review): kwargs are re-applied here after writeConcern is
        # set, so a caller-supplied writeConcern wins over the
        # collection's -- confirm this double update is intentional.
        cmd.update(kwargs)
        if (sock_info.max_wire_version >= 4 and 'readConcern' not in cmd and
                inline):
            # No need to parse 'writeConcernError' here, since the command
            # is an inline map reduce.
            response = self._command(
                sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
                read_concern=self.read_concern,
                collation=collation)
        else:
            response = self._command(
                sock_info, cmd, slave_ok, ReadPreference.PRIMARY,
                parse_write_concern_error=not inline,
                collation=collation)

    if full_response or not response.get('result'):
        # Caller asked for the raw server reply, or there is no output
        # collection to wrap (e.g. inline output).
        return response
    elif isinstance(response['result'], dict):
        # Output went to a collection in another database.
        dbase = response['result']['db']
        coll = response['result']['collection']
        return self.__database.client[dbase][coll]
    else:
        # Output collection lives in this database.
        return self.__database[response["result"]]
Perform a map/reduce operation on this collection. If `full_response` is ``False`` (default) returns a :class:`~pymongo.collection.Collection` instance containing the results of the operation. Otherwise, returns the full response from the server to the `map reduce command`_. :Parameters: - `map`: map function (as a JavaScript string) - `reduce`: reduce function (as a JavaScript string) - `out`: output collection name or `out object` (dict). See the `map reduce command`_ documentation for available options. Note: `out` options are order sensitive. :class:`~bson.son.SON` can be used to specify multiple options. e.g. SON([('replace', <collection name>), ('db', <database name>)]) - `full_response` (optional): if ``True``, return full response to this command - otherwise just return the result collection - `**kwargs` (optional): additional arguments to the `map reduce command`_ may be passed as keyword arguments to this helper method, e.g.:: >>> db.test.map_reduce(map, reduce, "myresults", limit=2) .. note:: The :meth:`map_reduce` method does **not** obey the :attr:`read_preference` of this :class:`Collection`. To run mapReduce on a secondary use the :meth:`inline_map_reduce` method instead. .. note:: The :attr:`~pymongo.collection.Collection.write_concern` of this collection is automatically applied to this operation (if the output is not inline) when using MongoDB >= 3.4. .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4. .. seealso:: :doc:`/examples/aggregation` .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 2.2 Removed deprecated arguments: merge_output and reduce_output .. _map reduce command: http://docs.mongodb.org/manual/reference/command/mapReduce/ .. mongodoc:: mapreduce
def echo(root_resource, message):
    """Have the server echo our message back."""
    # The message rides along as a query parameter on the echo endpoint.
    return root_resource.get(ECHO_PATH, {"message": message})
Have the server echo our message back.
def parse_file_provider(uri):
    """Find the file provider for a URI."""
    # URI scheme detector uses a range up to 30 since none of the IANA
    # registered schemes are longer than this.
    match = re.match(r'^([A-Za-z][A-Za-z0-9+.-]{0,29})://', uri)
    if match:
        scheme = match.group(1).lower()
    else:
        # If no provider is specified in the URI, assume that the local
        # filesystem is being used. Availability and validity of the local
        # file/directory will be checked later.
        scheme = 'file'

    if scheme == 'gs':
        return job_model.P_GCS
    if scheme == 'file':
        return job_model.P_LOCAL
    raise ValueError('File prefix not supported: %s://' % scheme)
Find the file provider for a URI.
def _single_array_element(data_obj, xj_path, array_path, create_dict_path):
    """Retrieve a single array element for a '@' JSON path marker.

    :param list data_obj: The current data object.
    :param str xj_path: A json path.
    :param str array_path: A lookup key.
    :param bool create_dict_path: create a dict path.
    """
    expected_type, array_path = _clean_key_type(array_path)
    idx = _get_array_index(array_path)

    # Empty or non-sequence input: nothing to index into.
    if not data_obj or not isinstance(data_obj, (list, tuple)):
        if expected_type is not None:
            raise XJPathError('Expected the list element type, but "%s" found'
                              % type(data_obj).__name__)
        return None, False

    # The whole lookup (including the recursive descent) sits inside one
    # try so that any IndexError is reported as "not found".
    try:
        element = data_obj[idx]
        if expected_type is not None and not isinstance(element,
                                                        expected_type):
            raise XJPathError('Index array "%s" of "%s" type does not '
                              'match expected type "%s"'
                              % (idx, type(element).__name__,
                                 expected_type.__name__))
        if xj_path:
            return path_lookup(element, xj_path, create_dict_path)
        return element, True
    except IndexError:
        return None, False
Retrieves a single array for a '@' JSON path marker. :param list data_obj: The current data object. :param str xj_path: A json path. :param str array_path: A lookup key. :param bool create_dict_path create a dict path.
def _get_anon_bind(self):
    """Check anonymous bind

    Reads ``nsslapd-allow-anonymous-access`` from ``cn=config`` and caches
    the normalized state on ``self._anon_bind``.

    :return: 'on', 'off', 'rootdse' or None
    """
    r = self._search(
        'cn=config',
        '(objectClass=*)',
        ['nsslapd-allow-anonymous-access'],
        scope=ldap.SCOPE_BASE
    )
    dn, attrs = r[0]
    # BUGFIX: attrs.get() returns None when the attribute is absent, which
    # previously crashed with "TypeError: 'NoneType' object is not
    # subscriptable" on the [0] index.  Treat a missing/empty attribute as
    # an unknown state instead.
    values = attrs.get('nsslapd-allow-anonymous-access')
    if values:
        state = values[0].decode('utf-8', 'ignore')
    else:
        state = None
    # Anything outside the three known states is reported as None.
    if state in ['on', 'off', 'rootdse']:
        r = state
    else:
        r = None
    self._anon_bind = r
Check anonymous bind :return: 'on', 'off', 'rootdse' or None
def search(query, tld='com', lang='en', num=10, start=0, stop=None, pause=2.0, only_standard=False): """ Search the given query string using Google. @type query: str @param query: Query string. Must NOT be url-encoded. @type tld: str @param tld: Top level domain. @type lang: str @param lang: Languaje. @type num: int @param num: Number of results per page. @type start: int @param start: First result to retrieve. @type stop: int @param stop: Last result to retrieve. Use C{None} to keep searching forever. @type pause: float @param pause: Lapse to wait between HTTP requests. A lapse too long will make the search slow, but a lapse too short may cause Google to block your IP. Your mileage may vary! @type only_standard: bool @param only_standard: If C{True}, only returns the standard results from each page. If C{False}, it returns every possible link from each page, except for those that point back to Google itself. Defaults to C{False} for backwards compatibility with older versions of this module. @rtype: generator @return: Generator (iterator) that yields found URLs. If the C{stop} parameter is C{None} the iterator will loop forever. """ # Lazy import of BeautifulSoup. # Try to use BeautifulSoup 4 if available, fall back to 3 otherwise. global BeautifulSoup if BeautifulSoup is None: try: from bs4 import BeautifulSoup except ImportError: from BeautifulSoup import BeautifulSoup # Set of hashes for the results found. # This is used to avoid repeated results. hashes = set() # Prepare the search string. query = quote_plus(query) # Grab the cookie from the home page. get_page(url_home % vars()) # Prepare the URL of the first request. if start: if num == 10: url = url_next_page % vars() else: url = url_next_page_num % vars() else: if num == 10: url = url_search % vars() else: url = url_search_num % vars() # Loop until we reach the maximum result, if any (otherwise, loop forever). while not stop or start < stop: # Sleep between requests. 
time.sleep(pause) # Request the Google Search results page. html = get_page(url) # Parse the response and process every anchored URL. soup = BeautifulSoup(html) anchors = soup.find(id='search').findAll('a') for a in anchors: # Leave only the "standard" results if requested. # Otherwise grab all possible links. if only_standard and ( not a.parent or a.parent.name.lower() != "h3"): continue # Get the URL from the anchor tag. try: link = a['href'] except KeyError: continue # Filter invalid links and links pointing to Google itself. link = filter_result(link) if not link: continue # Discard repeated results. h = hash(link) if h in hashes: continue hashes.add(h) # Yield the result. yield link # End if there are no more results. if not soup.find(id='nav'): break # Prepare the URL for the next request. start += num if num == 10: url = url_next_page % vars() else: url = url_next_page_num % vars()
Search the given query string using Google. @type query: str @param query: Query string. Must NOT be url-encoded. @type tld: str @param tld: Top level domain. @type lang: str @param lang: Language. @type num: int @param num: Number of results per page. @type start: int @param start: First result to retrieve. @type stop: int @param stop: Last result to retrieve. Use C{None} to keep searching forever. @type pause: float @param pause: Lapse to wait between HTTP requests. A lapse too long will make the search slow, but a lapse too short may cause Google to block your IP. Your mileage may vary! @type only_standard: bool @param only_standard: If C{True}, only returns the standard results from each page. If C{False}, it returns every possible link from each page, except for those that point back to Google itself. Defaults to C{False} for backwards compatibility with older versions of this module. @rtype: generator @return: Generator (iterator) that yields found URLs. If the C{stop} parameter is C{None} the iterator will loop forever.
def get_last_traded_dt(self, asset, dt):
    """
    Get the latest day on or before ``dt`` in which ``asset`` traded.

    If there are no trades on or before ``dt``, returns ``pd.NaT``.

    Parameters
    ----------
    asset : zipline.asset.Asset
        The asset for which to get the last traded day.
    dt : pd.Timestamp
        The dt at which to start searching for the last traded day.

    Returns
    -------
    last_traded : pd.Timestamp
        The day of the last trade for the given asset, using the input
        dt as a vantage point.
    """
    sid_ix = self.sids.searchsorted(asset.sid)
    # Slice bound covering all stored dates up to and including ``dt``.
    dt_limit_ix = self.dates.searchsorted(dt.asm8, side='right')

    # Positions (within the sliced window) where the asset traded,
    # i.e. where its volume is nonzero.
    volumes = self._country_group[DATA][VOLUME][sid_ix, :dt_limit_ix]
    traded_ixs = np.flatnonzero(volumes)

    if not len(traded_ixs):
        return pd.NaT

    return pd.Timestamp(self.dates[traded_ixs][-1], tz='UTC')
Get the latest day on or before ``dt`` in which ``asset`` traded. If there are no trades on or before ``dt``, returns ``pd.NaT``. Parameters ---------- asset : zipline.asset.Asset The asset for which to get the last traded day. dt : pd.Timestamp The dt at which to start searching for the last traded day. Returns ------- last_traded : pd.Timestamp The day of the last trade for the given asset, using the input dt as a vantage point.
def list_container_instance_groups(access_token, subscription_id,
                                   resource_group):
    '''List the container groups in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON list of container groups and their properties.
    '''
    # Assemble the ARM REST endpoint for the ContainerInstance provider.
    endpoint = (
        get_rm_endpoint() +
        '/subscriptions/' + subscription_id +
        '/resourcegroups/' + resource_group +
        '/providers/Microsoft.ContainerInstance/ContainerGroups'
        '?api-version=' + CONTAINER_API
    )
    return do_get(endpoint, access_token)
List the container groups in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON list of container groups and their properties.
def pipeline(pipe=None, name=None, autoexec=False, exit_handler=None):
    """
    The foundational function for all of redpipe: every pipeline --
    top-level or nested, named or default -- is created here.

    A simple example:

    .. code:: python

        with pipeline() as pipe:
            pipe.set('foo', 'bar')
            foo = pipe.get('foo')
            pipe.execute()
        print(foo)

        > bar

    Nesting a pipeline inside another:

    .. code:: python

        def process(key, pipe=None):
            with pipeline(pipe, autoexec=True) as pipe:
                return pipe.incr(key)

        with pipeline() as pipe:
            key1 = process('key1', pipe)
            key2 = process('key2', pipe)
            pipe.execute()
        print([key1, key2])

        > [1, 1]

    :param pipe: a Pipeline() or NestedPipeline() object, or None
    :param name: str, optional. the name of the connection to use.
    :param autoexec: bool, if true, implicitly execute the pipe
    :param exit_handler: optional callable run when the pipe exits
    :return: Pipeline or NestedPipeline
    :raises InvalidPipeline: when ``pipe`` is not a redpipe pipeline
    """
    if pipe is not None:
        # A parent was supplied: nest inside it.  Any AttributeError here
        # (no supports_redpipe_pipeline method) means it is not one of
        # ours, and falls through to InvalidPipeline.
        try:
            if pipe.supports_redpipe_pipeline():
                return NestedPipeline(
                    parent=pipe,
                    name=name,
                    autoexec=autoexec,
                    exit_handler=exit_handler
                )
        except AttributeError:
            pass
        raise InvalidPipeline('check your configuration')

    return Pipeline(name=name, autoexec=autoexec, exit_handler=exit_handler)
This is the foundational function for all of redpipe. Everything goes through here. create pipelines, nest pipelines, get pipelines for a specific name. It all happens here. Here's a simple example: .. code:: python with pipeline() as pipe: pipe.set('foo', 'bar') foo = pipe.get('foo') pipe.execute() print(foo) > bar Now let's look at how we can nest a pipeline. .. code:: python def process(key, pipe=None): with pipeline(pipe, autoexec=True) as pipe: return pipe.incr(key) with pipeline() as pipe: key1 = process('key1', pipe) key2 = process('key2', pipe) pipe.execute() print([key1, key2]) > [1, 1] :param pipe: a Pipeline() or NestedPipeline() object, or None :param name: str, optional. the name of the connection to use. :param autoexec: bool, if true, implicitly execute the pipe :return: Pipeline or NestedPipeline
def lookup(instruction, instructions=None):
    """Looks up instruction, which can either be a function or a string.

    If it's a string, returns the corresponding method. If it's a
    function, returns the corresponding name.
    """
    table = default_instructions if instructions is None else instructions

    if isinstance(instruction, str):
        return table[instruction]
    if hasattr(instruction, "__call__"):
        # Invert the table so functions map back to their names.
        by_function = {func: name for name, func in table.items()}
        return by_function[instruction]
    raise errors.MachineError(
        KeyError("Unknown instruction: %s" % str(instruction)))
Looks up instruction, which can either be a function or a string. If it's a string, returns the corresponding method. If it's a function, returns the corresponding name.
def _compute_value(power, wg):
    """Return the weight corresponding to single power."""
    # Cached result: every computed power is memoized back into wg.
    if power in wg:
        return wg[power]

    px, py = power
    if px == 0:
        # Pure y power: draw on the stored y samples at key (0, -1).
        samples = wg[(0, -1)]
        exponent = py / 2
    else:
        # x power: draw on the stored x samples at key (-1, 0).
        samples = wg[(-1, 0)]
        exponent = px / 2

    wg[power] = numpy.power(samples, exponent).sum() / len(samples)
    return wg[power]
Return the weight corresponding to single power.
def __precision(y_true, y_pred):
    '''
    Precision metric tolerant to unlabeled data in y_true,
    NA values are ignored for the precision calculation
    '''
    # Work on copies so the caller's arrays are left untouched.
    y_true = np.copy(y_true)
    y_pred = np.copy(y_pred)

    # precision = tp / (tp + fp).  True negatives do not affect the
    # precision value, so every position where y_true is NaN is rewritten
    # to 0 in *both* arrays, turning each unlabeled sample into a true
    # negative that drops out of the score.
    unlabeled = np.isnan(y_true)
    y_true[unlabeled] = 0
    y_pred[unlabeled] = 0

    return precision_score(y_true, y_pred)
Precision metric tolerant to unlabeled data in y_true, NA values are ignored for the precision calculation
def resetaA(self, pot=None, type=None):
    """
    NAME:

       resetaA

    PURPOSE:

       re-set up an actionAngle module for this Orbit

    INPUT:

       (none)

    OUTPUT:

       True if reset happened, False otherwise

    HISTORY:

       2014-01-06 - Written - Bovy (IAS)

    """
    # EAFP: also covers a missing _orb attribute, not just a missing _aA.
    try:
        delattr(self._orb, '_aA')
    except AttributeError:
        return False
    return True
NAME: resetaA PURPOSE: re-set up an actionAngle module for this Orbit INPUT: (none) OUTPUT: True if reset happened, False otherwise HISTORY: 2014-01-06 - Written - Bovy (IAS)
def _add_nic_to_mapping(self, net, dom, nic):
    """
    Populates the given net spec mapping entry with the nics of the given
    domain, by the following rules:

    * If ``net`` is management, 'domain_name': nic_ip
    * For each interface: 'domain_name-eth#': nic_ip, where # is the index
      of the nic in the *domain* definition.
    * For each interface: 'domain_name-net_name-#': nic_ip, where # is a
      running number of interfaces from that network.
    * For each interface: 'domain_name-net_name', which has an identical IP
      to 'domain_name-net_name-0'

    Args:
        net (dict): Network spec to populate
        dom (dict): libvirt domain specification
        nic (str): Name of the interface to add to the net mapping from the
            domain

    Returns:
        None
    """
    dom_name = dom['name']
    mapping = net['mapping']
    nic_idx = dom['nics'].index(nic)

    # 'domain_name-eth#' always points at this nic.
    mapping['{0}-eth{1}'.format(dom_name, nic_idx)] = nic['ip']

    # On the management network the bare domain name is an alias too.
    if dom['nics'][nic_idx]['net'] == dom['mgmt_net']:
        mapping[dom_name] = nic['ip']

    # Running per-network counter: count the 'domain-net-#' keys that
    # already exist (the bare 'domain-net' alias is excluded).
    base_name = '{0}-{1}'.format(dom_name, nic['net'])
    numbered = sorted(
        key for key in mapping
        if key.startswith(base_name) and key != base_name
    )
    if numbered:
        running_idx = len(numbered)
    else:
        running_idx = 0
        # First nic on this network also claims the un-numbered alias.
        mapping[base_name] = nic['ip']

    mapping['{0}-{1}'.format(base_name, running_idx)] = nic['ip']
Populates the given net spec mapping entry with the nics of the given domain, by the following rules: * If ``net`` is management, 'domain_name': nic_ip * For each interface: 'domain_name-eth#': nic_ip, where # is the index of the nic in the *domain* definition. * For each interface: 'domain_name-net_name-#': nic_ip, where # is a running number of interfaces from that network. * For each interface: 'domain_name-net_name', which has an identical IP to 'domain_name-net_name-0' Args: net (dict): Network spec to populate dom (dict): libvirt domain specification nic (str): Name of the interface to add to the net mapping from the domain Returns: None
def default_absorbers(Tatm, ozone_file = 'apeozone_cam3_5_54.nc', verbose = True,):
    '''Initialize a dictionary of well-mixed radiatively active gases
    All values are volumetric mixing ratios.

    Ozone is set to a climatology. All other gases are assumed well-mixed:

    - CO2
    - CH4
    - N2O
    - O2
    - CFC11
    - CFC12
    - CFC22
    - CCL4

    Specific values are based on the AquaPlanet Experiment protocols,
    except for O2 which is set the realistic value 0.21
    (affects the RRTMG scheme).

    Parameters
    ----------
    Tatm :
        Atmospheric temperature field; only its grid (via ``to_xarray``)
        is used, as the target for interpolating the ozone climatology.
    ozone_file : str or None
        Filename of the ozone climatology; ``None`` leaves O3 at zero.
    verbose : bool
        Passed through to the data loader.

    Returns
    -------
    dict
        Gas name -> volume mixing ratio ('O3' is an array on Tatm's grid).
    '''
    absorber_vmr = {}
    absorber_vmr['CO2'] = 348. / 1E6
    absorber_vmr['CH4'] = 1650. / 1E9
    absorber_vmr['N2O'] = 306. / 1E9
    absorber_vmr['O2'] = 0.21
    absorber_vmr['CFC11'] = 0.
    absorber_vmr['CFC12'] = 0.
    absorber_vmr['CFC22'] = 0.
    absorber_vmr['CCL4'] = 0.
    # Ozone: start with all zeros, interpolate to data if we can
    xTatm = Tatm.to_xarray()
    O3 = 0. * xTatm
    if ozone_file is not None:
        ozonefilepath = os.path.join(os.path.dirname(__file__), 'data', 'ozone', ozone_file)
        remotepath_http = 'http://thredds.atmos.albany.edu:8080/thredds/fileServer/CLIMLAB/ozone/' + ozone_file
        remotepath_opendap = 'http://thredds.atmos.albany.edu:8080/thredds/dodsC/CLIMLAB/ozone/' + ozone_file
        # Prefer the local file; fall back to the remote THREDDS server.
        ozonedata, path = load_data_source(local_path=ozonefilepath,
                                           remote_source_list=[remotepath_http, remotepath_opendap],
                                           open_method=xr.open_dataset,
                                           remote_kwargs={'engine':'pydap'},
                                           verbose=verbose,)
        ## zonal and time average
        ozone_zon = ozonedata.OZONE.mean(dim=('time','lon')).transpose('lat','lev')
        if ('lat' in xTatm.dims):
            O3source = ozone_zon
        else:
            # Target grid has no latitude: collapse the climatology to an
            # area-weighted (cos(lat)) global mean profile.
            weight = np.cos(np.deg2rad(ozonedata.lat))
            ozone_global = (ozone_zon * weight).mean(dim='lat') / weight.mean(dim='lat')
            O3source = ozone_global
        try:
            O3 = O3source.interp_like(xTatm)
            # There will be NaNs for gridpoints outside the ozone file domain
            assert not np.any(np.isnan(O3))
        except:
            warnings.warn('Some grid points are beyond the bounds of the ozone file. Ozone values will be extrapolated.')
            try:
                # passing fill_value=None to the underlying scipy interpolator
                # will result in extrapolation instead of NaNs
                O3 = O3source.interp_like(xTatm, kwargs={'fill_value':None})
                assert not np.any(np.isnan(O3))
            except:
                warnings.warn('Interpolation of ozone data failed. Setting O3 to zero instead.')
                O3 = 0. * xTatm
    absorber_vmr['O3'] = O3.values
    return absorber_vmr
Initialize a dictionary of well-mixed radiatively active gases All values are volumetric mixing ratios. Ozone is set to a climatology. All other gases are assumed well-mixed: - CO2 - CH4 - N2O - O2 - CFC11 - CFC12 - CFC22 - CCL4 Specific values are based on the AquaPlanet Experiment protocols, except for O2 which is set the realistic value 0.21 (affects the RRTMG scheme).
def query_cast(value, answers, ignorecase = False):
    """Map a user response onto the canonical answer it belongs to.

    ``answers`` has the same structure as in ``query``: a list of dicts,
    each with a ``'values'`` list whose first element is the canonical
    form. Any element of the list is accepted as an alias.

    Raises ValueError when the response matches no answer.
    """
    if ignorecase:
        value = value.lower()
    for answer in answers:
        canonical = answer['values'][0]
        for alias in answer['values']:
            if ignorecase and (value == str(alias).lower()):
                return canonical
            elif value == alias:
                return canonical
    raise ValueError("Response '%s' not understood, please try again." % value)
A cast function for query Answers should look something like it does in query
def get_count(cls, date_field=None, start=None, end=None, filters=None):
    """
    Build the DSL query for counting the number of items.

    :param date_field: field with the date
    :param start: date from which to start counting, should be a datetime.datetime object
    :param end: date until which to count items, should be a datetime.datetime object
    :param filters: dict with the filters to be applied (defaults to no filters)
    :return: a DSL query with size parameter
    """
    # NOTE: `filters` previously defaulted to a mutable `{}`, which is a
    # single shared object across calls and can leak state between callers;
    # use None as the default and normalize inside instead.
    if filters is None:
        filters = {}
    query_basic = cls.__get_query_basic(date_field=date_field,
                                        start=start, end=end,
                                        filters=filters)
    # size=0 gives only the count and not the hits
    query = query_basic.extra(size=0)
    return query
Build the DSL query for counting the number of items. :param date_field: field with the date :param start: date from which to start counting, should be a datetime.datetime object :param end: date until which to count items, should be a datetime.datetime object :param filters: dict with the filters to be applied :return: a DSL query with size parameter
def async_(fn):
    """Wrap the given function into a coroutine function.

    ``fn`` must return an awaitable; the wrapper forwards all arguments,
    awaits the result, and preserves ``fn``'s metadata via ``wraps``.
    """
    @functools.wraps(fn)
    async def inner(*args, **kwargs):
        result = fn(*args, **kwargs)
        return await result
    return inner
Wrap the given function into a coroutine function.
def create_atomic_wrapper(cls, wrapped_func):
    """Return ``wrapped_func`` wrapped so it runs inside a DB transaction.

    :param wrapped_func: callable (typically a view method) to protect.
    :return: a wrapper that executes ``wrapped_func`` inside
        ``transaction.atomic()`` and returns its result.
    """
    def _create_atomic_wrapper(*args, **kwargs):
        """Actual wrapper: forwards all arguments inside atomic()."""
        # When a view call fails due to a permissions error, it raises an exception.
        # An uncaught exception breaks the DB transaction for any following DB operations
        # unless it's wrapped in a atomic() decorator or context manager.
        with transaction.atomic():
            return wrapped_func(*args, **kwargs)
    return _create_atomic_wrapper
Returns a wrapped function.
def get_dip(self):
    """
    Return the fault dip as the average dip over the mesh.

    The average dip is defined as the weighted mean inclination of all
    the mesh cells. See
    :meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth`

    The value is computed lazily and cached on ``self.dip`` (the strike
    is cached alongside it on ``self.strike``).

    :returns:
        The average dip, in decimal degrees.
    """
    # Same approach as for the simple fault surface: compute once from
    # the mesh, then serve the cached value on later calls.
    if self.dip is None:
        self.dip, self.strike = self.mesh.get_mean_inclination_and_azimuth()
    return self.dip
Return the fault dip as the average dip over the mesh. The average dip is defined as the weighted mean inclination of all the mesh cells. See :meth:`openquake.hazardlib.geo.mesh.RectangularMesh.get_mean_inclination_and_azimuth` :returns: The average dip, in decimal degrees.
def setup_matchedfltr_dax_generated(workflow, science_segs, datafind_outs, tmplt_banks, output_dir, injection_file=None, tags=None, link_to_tmpltbank=False, compatibility_mode=False):
    '''
    Setup matched-filter jobs that are generated as part of the workflow.
    This module can support any matched-filter code that is similar in
    principle to lalapps_inspiral, but for new codes some additions are
    needed to define Executable and Job sub-classes (see jobutils.py).

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the coincidence jobs will be added to.
    science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances
        The list of times that are being analysed in this workflow.
    datafind_outs : pycbc.workflow.core.FileList
        An FileList of the datafind files that are needed to obtain the
        data used in the analysis.
    tmplt_banks : pycbc.workflow.core.FileList
        An FileList of the template bank files that will serve as input
        in this stage.
    output_dir : path
        The directory in which output will be stored.
    injection_file : pycbc.workflow.core.File, optional (default=None)
        If given the file containing the simulation file to be sent to these
        jobs on the command line. If not given no file will be sent.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. An example might be ['BNSINJECTIONS'] or
        ['NOINJECTIONANALYSIS']. This will be used in output names.
    link_to_tmpltbank : boolean, optional (default=True)
        If this option is given, the job valid_times will be altered so that
        there will be one inspiral file for every template bank and they will
        cover the same time span. Note that this option must also be given
        during template bank generation to be meaningful.
    compatibility_mode : boolean, optional (default=False)
        Passed through to sngl_ifo_job_setup to control legacy job-tiling
        behaviour.

    Returns
    -------
    inspiral_outs : pycbc.workflow.core.FileList
        A list of output files written by this stage.
        This *will not* contain any intermediate products produced within this
        stage of the workflow. If you require access to any intermediate
        products produced at this stage you can call the various sub-functions
        directly.
    '''
    if tags is None:
        tags = []
    # Need to get the exe to figure out what sections are analysed, what is
    # discarded etc. This should *not* be hardcoded, so using a new executable
    # will require a bit of effort here ....
    cp = workflow.cp
    ifos = science_segs.keys()
    match_fltr_exe = os.path.basename(cp.get('executables','inspiral'))
    # Select the appropriate class
    exe_class = select_matchedfilter_class(match_fltr_exe)
    if link_to_tmpltbank:
        # Use this to ensure that inspiral and tmpltbank jobs overlap. This
        # means that there will be 1 inspiral job for every 1 tmpltbank and
        # the data read in by both will overlap as much as possible. (If you
        # ask the template bank jobs to use 2000s of data for PSD estimation
        # and the matched-filter jobs to use 4000s, you will end up with
        # twice as many matched-filter jobs that still use 4000s to estimate a
        # PSD but then only generate triggers in the 2000s of data that the
        # template bank jobs ran on.
        tmpltbank_exe = os.path.basename(cp.get('executables', 'tmpltbank'))
        link_exe_instance = select_tmpltbank_class(tmpltbank_exe)
    else:
        link_exe_instance = None
    # Set up class for holding the banks
    inspiral_outs = FileList([])
    # Matched-filtering is done independently for different ifos, but might not be!
    # If we want to use multi-detector matched-filtering or something similar to this
    # it would probably require a new module
    for ifo in ifos:
        logging.info("Setting up matched-filtering for %s."
                     %(ifo))
        # One job class instance per ifo; tags propagate into output names.
        job_instance = exe_class(workflow.cp, 'inspiral', ifo=ifo,
                                 out_dir=output_dir,
                                 injection_file=injection_file,
                                 tags=tags)
        if link_exe_instance:
            link_job_instance = link_exe_instance(cp, 'tmpltbank', ifo=ifo,
                                                  out_dir=output_dir, tags=tags)
        else:
            link_job_instance = None
        # Tile the science segments with inspiral jobs for this ifo.
        sngl_ifo_job_setup(workflow, ifo, inspiral_outs, job_instance,
                           science_segs[ifo], datafind_outs,
                           parents=tmplt_banks, allow_overlap=False,
                           link_job_instance=link_job_instance,
                           compatibility_mode=compatibility_mode)
    return inspiral_outs
Setup matched-filter jobs that are generated as part of the workflow. This module can support any matched-filter code that is similar in principle to lalapps_inspiral, but for new codes some additions are needed to define Executable and Job sub-classes (see jobutils.py). Parameters ----------- workflow : pycbc.workflow.core.Workflow The Workflow instance that the coincidence jobs will be added to. science_segs : ifo-keyed dictionary of ligo.segments.segmentlist instances The list of times that are being analysed in this workflow. datafind_outs : pycbc.workflow.core.FileList An FileList of the datafind files that are needed to obtain the data used in the analysis. tmplt_banks : pycbc.workflow.core.FileList An FileList of the template bank files that will serve as input in this stage. output_dir : path The directory in which output will be stored. injection_file : pycbc.workflow.core.File, optional (default=None) If given the file containing the simulation file to be sent to these jobs on the command line. If not given no file will be sent. tags : list of strings (optional, default = []) A list of the tagging strings that will be used for all jobs created by this call to the workflow. An example might be ['BNSINJECTIONS'] or ['NOINJECTIONANALYSIS']. This will be used in output names. link_to_tmpltbank : boolean, optional (default=True) If this option is given, the job valid_times will be altered so that there will be one inspiral file for every template bank and they will cover the same time span. Note that this option must also be given during template bank generation to be meaningful. Returns ------- inspiral_outs : pycbc.workflow.core.FileList A list of output files written by this stage. This *will not* contain any intermediate products produced within this stage of the workflow. If you require access to any intermediate products produced at this stage you can call the various sub-functions directly.
def calculate_statistics(self, start, stop):
    """Starts the statistics calculation.

    :param start: The left limit of the time window in percent.
    :param stop: The right limit of the time window in percent.

    .. note::

        The calculation takes some time. Check the status byte to see
        when the operation is done. A running scan will be paused until
        the operation is complete.

    .. warning::

        The SR850 will generate an error if the active display trace is
        not stored when the command is executed.
    """
    # SR850 'STAT' command takes the two window limits as integers.
    cmd = 'STAT', Integer, Integer
    self._write(cmd, start, stop)
Starts the statistics calculation. :param start: The left limit of the time window in percent. :param stop: The right limit of the time window in percent. .. note:: The calculation takes some time. Check the status byte to see when the operation is done. A running scan will be paused until the operation is complete. .. warning:: The SR850 will generate an error if the active display trace is not stored when the command is executed.
def get_console_info(kernel32, handle):
    """Get information about this current console window (Windows only).

    https://github.com/Robpol86/colorclass/blob/ab42da59/colorclass/windows.py#L111

    :raise OSError: When handle is invalid or GetConsoleScreenBufferInfo API call fails.

    :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance.
    :param int handle: stderr or stdout handle.

    :return: Width (number of characters) and height (number of lines) of the terminal.
    :rtype: tuple
    """
    if handle == INVALID_HANDLE_VALUE:
        raise OSError('Invalid handle.')

    # CONSOLE_SCREEN_BUFFER_INFO is 22 bytes; the API fills it in place.
    csbi = ctypes.create_string_buffer(22)
    if not kernel32.GetConsoleScreenBufferInfo(handle, csbi):
        raise ctypes.WinError()  # Subclass of OSError.

    # Fields 5..8 of the unpacked struct are srWindow: Left, Top, Right, Bottom.
    left, top, right, bottom = struct.unpack('hhhhHhhhhhh', csbi.raw)[5:-2]
    return right - left, bottom - top
Get information about this current console window (Windows only). https://github.com/Robpol86/colorclass/blob/ab42da59/colorclass/windows.py#L111 :raise OSError: When handle is invalid or GetConsoleScreenBufferInfo API call fails. :param ctypes.windll.kernel32 kernel32: Loaded kernel32 instance. :param int handle: stderr or stdout handle. :return: Width (number of characters) and height (number of lines) of the terminal. :rtype: tuple
def dns_name(self):
    """Get the DNS name for this machine.

    Best-effort guess based on the addresses in the current data:
    'public' scope addresses are preferred over 'local-cloud' ones.
    May return None if no suitable address is found.
    """
    all_addresses = self.safe_data['addresses'] or []
    for scope in ('public', 'local-cloud'):
        candidates = [a for a in all_addresses if a['scope'] == scope]
        if candidates:
            return candidates[0]['value']
    return None
Get the DNS name for this machine. This is a best guess based on the addresses available in current data. May return None if no suitable address is found.
def content_upload(self, key, model, contentid, data, mimetype):
    """Store ``data`` as the content blob named ``contentid`` under ``model``.

    This method maps to
    https://github.com/exosite/docs/tree/master/provision#post---upload-content

    Args:
        key: The CIK or Token for the device
        model: The model owning the content bucket
        contentid: The ID used to name the entity bucket
        data: The data blob to save
        mimetype: The Content-Type to use when serving the blob later
    """
    path = '{0}{1}/{2}'.format(PROVISION_MANAGE_CONTENT, model, contentid)
    return self._request(path,
                         key,
                         data,
                         'POST',
                         self._manage_by_cik,
                         {"Content-Type": mimetype})
Store the given data as a result of a query for content id given the model. This method maps to https://github.com/exosite/docs/tree/master/provision#post---upload-content Args: key: The CIK or Token for the device model: contentid: The ID used to name the entity bucket data: The data blob to save mimetype: The Content-Type to use when serving the blob later
def get_window_by_caption(caption):
    """ finds the window by caption and returns handle (int) """
    try:
        return win32gui.FindWindow(None, caption)
    except Exception as ex:
        # Best effort: report the failure and signal it with -1.
        print('error calling win32gui.FindWindow ' + str(ex))
        return -1
finds the window by caption and returns handle (int)
def _virial(self, T):
    """Virial coefficient

    Evaluates the second (B) and third (C) virial coefficients as the
    first and second density derivatives of the residual Helmholtz
    energy in the zero-density limit (delta -> 0, approximated here by
    delta = 1e-200), summing the contributions of every term family in
    the equation of state.

    Parameters
    ----------
    T : float
        Temperature [K]

    Returns
    -------
    prop : dict
        Dictionary with residual adimensional helmholtz energy:

            * B: ∂fir/∂δ|δ->0
            * C: ∂²fir/∂δ²|δ->0
    """
    Tc = self._constants.get("Tref", self.Tc)
    tau = Tc/T
    B = C = 0
    # Tiny but nonzero density avoids 0**negative singularities while
    # numerically representing the delta -> 0 limit.
    delta = 1e-200

    # Polinomial terms
    nr1 = self._constants.get("nr1", [])
    d1 = self._constants.get("d1", [])
    t1 = self._constants.get("t1", [])
    for n, d, t in zip(nr1, d1, t1):
        B += n*d*delta**(d-1)*tau**t
        C += n*d*(d-1)*delta**(d-2)*tau**t

    # Exponential terms
    nr2 = self._constants.get("nr2", [])
    d2 = self._constants.get("d2", [])
    g2 = self._constants.get("gamma2", [])
    t2 = self._constants.get("t2", [])
    c2 = self._constants.get("c2", [])
    for n, d, g, t, c in zip(nr2, d2, g2, t2, c2):
        B += n*exp(-g*delta**c)*delta**(d-1)*tau**t*(d-g*c*delta**c)
        C += n*exp(-g*delta**c)*(delta**(d-2)*tau**t*(
            (d-g*c*delta**c)*(d-1-g*c*delta**c)-g**2*c**2*delta**c))

    # Gaussian terms
    nr3 = self._constants.get("nr3", [])
    d3 = self._constants.get("d3", [])
    t3 = self._constants.get("t3", [])
    a3 = self._constants.get("alfa3", [])
    e3 = self._constants.get("epsilon3", [])
    b3 = self._constants.get("beta3", [])
    g3 = self._constants.get("gamma3", [])
    for n, d, t, a, e, b, g in zip(nr3, d3, t3, a3, e3, b3, g3):
        B += n*delta**d*tau**t*exp(-a*(delta-e)**2-b*(tau-g)**2)*(
            d/delta-2*a*(delta-e))
        C += n*tau**t*exp(-a*(delta-e)**2-b*(tau-g)**2)*(
            -2*a*delta**d+4*a**2*delta**d*(
                delta-e)**2-4*d*a*delta**2*(
                    delta-e)+d*2*delta)

    # Non analitic terms
    nr4 = self._constants.get("nr4", [])
    a4 = self._constants.get("a4", [])
    b4 = self._constants.get("b4", [])
    Ai = self._constants.get("A", [])
    Bi = self._constants.get("B", [])
    Ci = self._constants.get("C", [])
    Di = self._constants.get("D", [])
    bt4 = self._constants.get("beta4", [])
    for n, a, b, A, B, C, D, bt in zip(nr4, a4, b4, Ai, Bi, Ci, Di, bt4):
        # Distance function Delta and its first/second density derivatives
        Tita = (1-tau)+A*((delta-1)**2)**(0.5/bt)
        Delta = Tita**2+B*((delta-1)**2)**a
        Deltad = (delta-1)*(A*Tita*2/bt*((delta-1)**2)**(
            0.5/bt-1)+2*B*a*((delta-1)**2)**(a-1))
        Deltadd = Deltad/(delta-1) + (delta-1)**2*(
            4*B*a*(a-1)*((delta-1)**2)**(a-2) +
            2*A**2/bt**2*(((delta-1)**2)**(0.5/bt-1))**2 +
            A*Tita*4/bt*(0.5/bt-1)*((delta-1)**2)**(0.5/bt-2))
        DeltaBd = b*Delta**(b-1)*Deltad
        DeltaBdd = b*(Delta**(b-1)*Deltadd+(b-1)*Delta**(b-2)*Deltad**2)
        # Exponential damping factor F and its density derivatives
        F = exp(-C*(delta-1)**2-D*(tau-1)**2)
        Fd = -2*C*F*(delta-1)
        Fdd = 2*C*F*(2*C*(delta-1)**2-1)
        B += n*(Delta**b*(F+delta*Fd)+DeltaBd*delta*F)
        C += n*(Delta**b*(2*Fd+delta*Fdd)+2*DeltaBd*(F+delta*Fd) +
                DeltaBdd*delta*F)

    prop = {}
    prop["B"] = B
    prop["C"] = C
    return prop
Virial coefficient Parameters ---------- T : float Temperature [K] Returns ------- prop : dict Dictionary with residual adimensional helmholtz energy: * B: ∂fir/∂δ|δ->0 * C: ∂²fir/∂δ²|δ->0
def get_quant_NAs(quantdata, quantheader):
    """Takes quantdata in a dict and header with quantkeys (eg iTRAQ isotopes).
    Returns dict of quant intensities with missing keys set to NA."""
    return {qkey: quantdata.get(qkey, 'NA') for qkey in quantheader}
Takes quantdata in a dict and header with quantkeys (eg iTRAQ isotopes). Returns dict of quant intensities with missing keys set to NA.
def evict(self, key):
    """
    Evicts the specified key from this map.

    **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual
    implementations of __hash__ and __eq__ defined in key's class.**

    :param key: (object), key to evict.
    :return: (bool), ``true`` if the key is evicted, ``false`` otherwise.
    """
    check_not_none(key, "key can't be None")
    # Serialize the key once and evict by its binary (serialized) form.
    key_data = self._to_data(key)
    return self._evict_internal(key_data)
Evicts the specified key from this map. **Warning: This method uses __hash__ and __eq__ methods of binary form of the key, not the actual implementations of __hash__ and __eq__ defined in key's class.** :param key: (object), key to evict. :return: (bool), ``true`` if the key is evicted, ``false`` otherwise.
def method_delegate(**methods):
    """
    Construct a renderer that delegates based on the request's HTTP method.

    Requests whose method has no registered renderer get a 405 response.
    """
    table = {name.upper(): renderer for name, renderer in iteritems(methods)}
    if PY3:
        # Request methods arrive as bytes on Python 3; key the table to match.
        table = {name.encode("utf-8"): renderer
                 for name, renderer in iteritems(table)}

    def render(request):
        handler = table.get(request.method)
        if handler is None:
            return Response(code=405)
        return handler(request)

    return render
Construct a renderer that delegates based on the request's HTTP method.
def foreign_key(model_or_table_name_or_column_name: Union[str, Type[Model]],
                model_or_table_name: Optional[Union[str, Type[Model]]] = None,
                *,
                fk_col: str = 'id',
                primary_key: bool = False,
                **kwargs,
                ) -> Column:
    """Helper method to add a foreign key column to a model.

    For example::

        class Post(Model):
            category_id = foreign_key('Category')
            category = relationship('Category', back_populates='posts')

    Is equivalent to::

        class Post(Model):
            category_id = Column(BigInteger, ForeignKey('category.id'), nullable=False)
            category = relationship('Category', back_populates='posts')

    :param model_or_table_name_or_column_name: If two arguments are given, then
        this is treated as the column name. Otherwise, it's treated as the table
        name (see docs for model_or_table_name)
    :param model_or_table_name: the model or table name to link to

        If given a lowercase string, it's treated as an explicit table name.

        If there are any uppercase characters, it's assumed to be a model name,
        and will be converted to snake case using the same automatic conversion
        as Flask-SQLAlchemy does itself.

        If given a subclass of :class:`flask_sqlalchemy.Model`, use its
        :attr:`__tablename__` attribute.

    :param str fk_col: column name of the primary key (defaults to "id")
    :param bool primary_key: Whether or not this Column is a primary key
    :param dict kwargs: any other kwargs to pass the Column constructor
    """
    column_name = model_or_table_name_or_column_name
    if model_or_table_name is None:
        # Only one positional argument: it names the table/model, not a column.
        column_name = None
        model_or_table_name = model_or_table_name_or_column_name

    table_name = model_or_table_name
    if inspect.isclass(model_or_table_name):
        # Model class: its __tablename__ is authoritative.
        table_name = model_or_table_name.__tablename__
    elif table_name != table_name.lower():
        # Uppercase characters imply a model *name*: derive the table name.
        table_name = snake_case(model_or_table_name)

    # Only pass an explicit column name through when one was supplied.
    args = [column_name] if column_name else []
    args += [BigInteger, ForeignKey(f'{table_name}.{fk_col}')]
    return Column(*args, primary_key=primary_key, **kwargs)
Helper method to add a foreign key column to a model. For example:: class Post(Model): category_id = foreign_key('Category') category = relationship('Category', back_populates='posts') Is equivalent to:: class Post(Model): category_id = Column(BigInteger, ForeignKey('category.id'), nullable=False) category = relationship('Category', back_populates='posts') :param model_or_table_name_or_column_name: If two arguments are given, then this is treated as the column name. Otherwise, it's treated as the table name (see docs for model_or_table_name) :param model_or_table_name: the model or table name to link to If given a lowercase string, it's treated as an explicit table name. If there are any uppercase characters, it's assumed to be a model name, and will be converted to snake case using the same automatic conversion as Flask-SQLAlchemy does itself. If given a subclass of :class:`flask_sqlalchemy.Model`, use its :attr:`__tablename__` attribute. :param str fk_col: column name of the primary key (defaults to "id") :param bool primary_key: Whether or not this Column is a primary key :param dict kwargs: any other kwargs to pass the Column constructor
def new_code_cell(input=None, prompt_number=None, outputs=None, language=u'python', collapsed=False, metadata=None):
    """Create a new code cell with input and output.

    Omitted optional fields are simply not set on the cell, except for
    ``outputs`` (defaults to an empty list) and ``metadata`` (defaults
    to an empty NotebookNode).
    """
    cell = NotebookNode()
    cell.cell_type = u'code'
    if language is not None:
        cell.language = unicode(language)
    if input is not None:
        cell.input = unicode(input)
    if prompt_number is not None:
        cell.prompt_number = int(prompt_number)
    cell.outputs = [] if outputs is None else outputs
    if collapsed is not None:
        cell.collapsed = bool(collapsed)
    cell.metadata = NotebookNode(metadata or {})
    return cell
Create a new code cell with input and output
def archive_insert_data(self, data_dump):
    '''
    Bulk-insert rows into the archive table in a single transaction.

    :param data_dump: Archive table data, one dict of column values per row
    :type data_dump: list[dict]
    :raises: IOError if the database operation fails; the original error
        is printed and wrapped, and the transaction is rolled back
    '''
    with self.session as session:
        try:
            # Build one ORM object per row and commit them all at once.
            data = [self.tables.archive(**entry) for entry in data_dump]
            session.add_all(data)
            session.commit()
        except SQLAlchemyError as exc:
            # Undo the partial transaction, then surface as IOError.
            session.rollback()
            print_exc()
            raise IOError(exc)
:param data: Archive table data :type data: list[archive] :raises: IOError
def remove_log_action(portal):
    """Removes the old Log action from types
    """
    logger.info("Removing Log Tab ...")
    portal_types = api.get_tool("portal_types")
    for name in portal_types.listContentTypes():
        type_info = portal_types[name]
        # Delete only the first 'log' action found for this type.
        for index, action in enumerate(type_info._actions):
            if action.id == "log":
                logger.info("Removing Log Action for {}".format(name))
                type_info.deleteActions([index])
                break
    logger.info("Removing Log Tab [DONE]")
Removes the old Log action from types
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'pronunciation') and self.pronunciation is not None: _dict['pronunciation'] = self.pronunciation return _dict
Return a json dictionary representing this model.
def _matrix_input_from_dict2d(matrix): """makes input for running clearcut on a matrix from a dict2D object""" #clearcut truncates names to 10 char- need to rename before and #reassign after #make a dict of env_index:full name int_keys = dict([('env_' + str(i), k) for i,k in \ enumerate(sorted(matrix.keys()))]) #invert the dict int_map = {} for i in int_keys: int_map[int_keys[i]] = i #make a new dict2D object with the integer keys mapped to values instead of #the original names new_dists = [] for env1 in matrix: for env2 in matrix[env1]: new_dists.append((int_map[env1], int_map[env2], matrix[env1][env2])) int_map_dists = Dict2D(new_dists) #names will be fed into the phylipTable function - it is the int map names names = sorted(int_map_dists.keys()) rows = [] #populated rows with values based on the order of names #the following code will work for a square matrix only for index, key1 in enumerate(names): row = [] for key2 in names: row.append(str(int_map_dists[key1][key2])) rows.append(row) input_matrix = phylipMatrix(rows, names) #input needs a trailing whitespace or it will fail! input_matrix += '\n' return input_matrix, int_keys
makes input for running clearcut on a matrix from a dict2D object
def add_data_field(self, name, i1, i2, subfields_dict):
    """
    Add new datafield into :attr:`datafields` and take care of OAI MARC
    differencies.

    Args:
        name (str): Name of datafield (exactly 3 characters).
        i1 (char): Value of i1/ind1 parameter.
        i2 (char): Value of i2/ind2 parameter.
        subfields_dict (dict): Dictionary containing subfields (as list).

    `subfields_dict` is expected to be in this format::

        {
            "field_id": ["subfield data",],
            ...
            "z": ["X0456b"]
        }

    Raises:
        ValueError: If `i1`/`i2` is invalid, `name` is not 3 chars long,
            or `subfields_dict` is empty or not a dict.
        KeyError: If a key in `subfields_dict` is longer than one char.

    Warning:
        For your own good, use OrderedDict for `subfields_dict`, or
        constructor's `resort` parameter set to ``True`` (it is by
        default).

    Warning:
        ``field_id`` can be only one character long!
    """
    if i1 not in self.valid_i_chars:
        raise ValueError("Invalid i1 parameter '" + i1 + "'!")
    if i2 not in self.valid_i_chars:
        raise ValueError("Invalid i2 parameter '" + i2 + "'!")
    if len(name) != 3:
        raise ValueError(
            "`name` parameter have to be exactly 3 chars long!"
        )
    if not subfields_dict:
        raise ValueError(
            "`subfields_dict` have to contain something!"
        )
    if not isinstance(subfields_dict, dict):
        raise ValueError(
            "`subfields_dict` parameter has to be dict instance!"
        )

    # check local keys, convert strings to MARCSubrecord instances
    subrecords = []
    for key, val in subfields_dict.items():
        if len(key) > 1:
            raise KeyError(
                "`subfields_dict` can be only one character long!"
            )

        # convert other values to lists
        if not isinstance(val, list):
            val = [val]

        subfields = map(
            lambda x: MARCSubrecord(x, i1, i2, None),
            val
        )

        subfields_dict[key] = subfields
        subrecords.extend(subfields)

    # save i/ind values
    subfields_dict[self.i1_name] = i1
    subfields_dict[self.i2_name] = i2

    # append dict, or add new dict into self.datafields
    if name in self.datafields:
        self.datafields[name].append(subfields_dict)
    else:
        self.datafields[name] = [subfields_dict]

    # to each subrecord add reference to list of all subfields in this
    # datafield
    other_subfields = self.datafields[name]
    for record in subrecords:
        record.other_subfields = other_subfields
Add new datafield into :attr:`datafields` and take care of OAI MARC differencies. Args: name (str): Name of datafield. i1 (char): Value of i1/ind1 parameter. i2 (char): Value of i2/ind2 parameter. subfields_dict (dict): Dictionary containing subfields (as list). `subfields_dict` is expected to be in this format:: { "field_id": ["subfield data",], ... "z": ["X0456b"] } Warning: For your own good, use OrderedDict for `subfields_dict`, or constructor's `resort` parameter set to ``True`` (it is by default). Warning: ``field_id`` can be only one character long!
def commits(self, branch, since=0, to=None):
    """For given branch return a list of commits.

    Each commit contains basic information about itself.

    :param branch: git branch
    :type branch: [str]{}
    :param since: minimal timestamp for commit's commit date
    :type since: int
    :param to: maximal timestamp for commit's commit date; defaults to
        one day past the current time, evaluated per call
    :type to: int
    """
    # NOTE: `to` previously defaulted to `int(time.time()) + 86400` in the
    # signature, which is evaluated once at import time — so "now" was
    # frozen when the module loaded. Compute the default per call instead.
    if to is None:
        to = int(time.time()) + 86400
    since_str = datetime.datetime.fromtimestamp(since).strftime('%Y-%m-%d')
    to_str = datetime.datetime.fromtimestamp(to).strftime('%Y-%m-%d')
    commits = {}
    req_message = 'https://api.bitbucket.org/2.0/repositories/' + self.reponame + \
        '/commits/' + branch
    loop_continue = True
    while loop_continue:
        response_data = self._bitbucketAPIRequest(req_message)
        for commit in response_data['values']:
            # Commits arrive newest-first: anything older than `since`
            # ends the pagination entirely.
            if commit['date'] < since_str:
                loop_continue = False
                break
            elif commit['date'] > to_str:
                continue
            else:
                commits[commit['hash']] = self._commitData(commit)
        # Follow pagination until the API stops providing a 'next' link.
        if 'next' not in response_data:
            break
        req_message = response_data['next']
    return commits
For given branch return a list of commits. Each commit contains basic information about itself. :param branch: git branch :type branch: [str]{} :param since: minimal timestamp for commit's commit date :type since: int :param to: maximal timestamp for commit's commit date :type to: int
def get_absolute_url(self):
    """Return a path"""
    from django.urls import NoReverseMatch

    # An explicit alternate URL always wins.
    if self.alternate_url:
        return self.alternate_url

    try:
        prefix = reverse('categories_tree_list')
    except NoReverseMatch:
        prefix = '/'

    # Path is the slugs of all ancestors followed by this category's slug.
    lineage = list(self.get_ancestors())
    lineage.append(self)
    slugs = [force_text(node.slug) for node in lineage]
    return prefix + '/'.join(slugs) + '/'
Return a path