code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def isUserCert(self, name):
    """Return True when a certificate file exists for the named user keypair.

    Args:
        name (str): The name of the user keypair.

    Returns:
        bool: True if the certificate is present, False otherwise.
    """
    # User certificates live under the 'users' directory as <name>.crt.
    cert_path = self._getPathJoin('users', '%s.crt' % name)
    return os.path.isfile(cert_path)
Checks if a user certificate exists. Args: name (str): The name of the user keypair. Examples: Check if the user cert "myuser" exists: exists = cdir.isUserCert('myuser') Returns: bool: True if the certificate is present, False otherwise.
juraj-google-style
def _construct_context_for_args(args):
    """Construct a new Context for the parsed magic arguments.

    Args:
        args: the dictionary of magic arguments.

    Returns:
        A new Context based on the current default context, but with any
        explicitly specified arguments overriding the default's config.
    """
    default_context = google.datalab.Context.default()
    # Copy key-by-key so the global default's config object is never mutated.
    config = {key: default_context.config[key] for key in default_context.config}
    billing_tier = args.get('billing', None)
    if billing_tier:
        config['bigquery_billing_tier'] = billing_tier
    return google.datalab.Context(
        project_id=default_context.project_id,
        credentials=default_context.credentials,
        config=config)
Construct a new Context for the parsed arguments. Args: args: the dictionary of magic arguments. Returns: A new Context based on the current default context, but with any explicitly specified arguments overriding the default's config.
juraj-google-style
def disassemble(qobj):
    """Disassemble a qobj into its circuits, run config, and user header.

    Args:
        qobj (Qobj): The input qobj object to disassemble.

    Returns:
        tuple: (circuits, run_config, user_qobj_header) where circuits is a
        list of quantum circuits and the other two are plain dicts.
    """
    run_config = qobj.config.to_dict()
    user_qobj_header = qobj.header.to_dict()
    # Rebuild one QuantumCircuit per experiment in the qobj.
    return (_experiments_to_circuits(qobj), run_config, user_qobj_header)
Disassemble a qobj and return the circuits, run_config, and user header Args: qobj (Qobj): The input qobj object to disassemble Returns: circuits (list): A list of quantum circuits run_config (dict): The dict of the run config user_qobj_header (dict): The dict of any user headers in the qobj
codesearchnet
def create(self, resource, id=None, timeout=(-1)):
    """Adds the specified trap forwarding destination.

    The trap destination for the given id is created if it does not already
    exist. When no id is supplied, the first available integer id is used.

    Args:
        resource (dict): Object to create.
        id: Optional integer id (> 0) for the destination.
        timeout: Timeout in seconds. -1 waits for task completion.

    Returns:
        dict: Created resource.
    """
    # Fall back to the first free id when the caller did not supply one.
    target_id = self.__get_first_available_id() if not id else id
    uri = '%s/%s' % (self.URI, str(target_id))
    return self._client.create(resource, uri=uri, timeout=timeout)
Adds the specified trap forwarding destination. The trap destination associated with the specified id will be created if a trap destination with that id does not exist. The id can only be an integer greater than 0. Args: resource (dict): Object to create. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, it just stops waiting for its completion. Returns: dict: Created resource.
codesearchnet
def read(self, vals):
    """Read values, populating location fields from a list of strings.

    Empty strings are stored as None. Values are consumed positionally in
    the fixed header field order; a too-short list raises IndexError, as
    the original per-field indexing did.

    Args:
        vals (list): list of strings representing values
    """
    # Positional field order of the location header record. The original
    # code repeated the same read-or-None logic nine times; this is the
    # identical behavior expressed data-driven.
    fields = ('city', 'state_province_region', 'country', 'source', 'wmo',
              'latitude', 'longitude', 'timezone', 'elevation')
    for i, attr in enumerate(fields):
        value = vals[i]
        setattr(self, attr, value if len(value) != 0 else None)
Read values. Args: vals (list): list of strings representing values
juraj-google-style
def get_saved_issue_data(self, issue, namespace='open'):
    """Returns issue data from local data, inserting an empty entry if absent.

    Args:
        issue: `int`, string, or an issue object with a `number` attribute.
            Github issue number.
        namespace: `str`. Namespace for storing this issue.

    Returns:
        dict: The stored data for the issue (empty dict if none was saved).
    """
    # Normalize the issue argument to a string issue number.
    if isinstance(issue, int):
        issue_number = str(issue)
    elif isinstance(issue, basestring):
        # NOTE(review): `basestring` implies Python 2 only code — confirm
        # the target runtime before porting.
        issue_number = issue
    else:
        issue_number = issue.number
    issue_data_key = self._issue_data_key(namespace)
    issue_data = self.data.get(issue_data_key, {})
    _data = issue_data.get(str(issue_number), {})
    # Register the (possibly new, empty) entry under the namespace dict.
    # NOTE(review): if the namespace key is missing from self.data, this
    # writes into a throwaway dict — presumably self.data is pre-seeded;
    # verify against callers.
    issue_data[str(issue_number)] = _data
    return _data
Returns issue data from local data. Args: issue: `int`. Github issue number. namespace: `str`. Namespace for storing this issue.
codesearchnet
def get_nets_arin(self, response):
    """The function for parsing network blocks from ARIN whois data.

    Args:
        response (:obj:`str`): The response from the ARIN whois server.

    Returns:
        list of dict: Mapping of networks with 'cidr', 'range', 'start'
        and 'end' keys, where start/end are match positions in response.
    """
    nets = []
    # NetRange lines give the human-readable "first - last" address range.
    pattern = re.compile('^NetRange:[^\\S\\n]+(.+)$', re.MULTILINE)
    temp = pattern.search(response)
    net_range = None
    net_range_start = None
    if (temp is not None):
        net_range = temp.group(1).strip()
        net_range_start = temp.start()
    # Each CIDR line (possibly a comma-separated list) yields one network.
    for match in re.finditer('^CIDR:[^\\S\\n]+(.+?,[^\\S\\n].+|.+)$', response, re.MULTILINE):
        try:
            net = copy.deepcopy(BASE_NET)
            if (len(nets) > 0):
                # For networks after the first, re-search for the NetRange
                # line following the current CIDR match.
                temp = pattern.search(response, match.start())
                net_range = None
                net_range_start = None
                if (temp is not None):
                    net_range = temp.group(1).strip()
                    net_range_start = temp.start()
            if (net_range is not None):
                if ((net_range_start < match.start()) or (len(nets) > 0)):
                    try:
                        # If NetRange is CIDR notation, expand it to a
                        # "first - last" address string.
                        net['range'] = ('{0} - {1}'.format(ip_network(net_range)[0].__str__(), ip_network(net_range)[(- 1)].__str__()) if ('/' in net_range) else net_range)
                    except ValueError:
                        net['range'] = net_range
            net['cidr'] = ', '.join([ip_network(c.strip()).__str__() for c in match.group(1).split(', ')])
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)
        except ValueError:
            # Skip CIDR entries that fail to parse as networks.
            pass
    return nets
The function for parsing network blocks from ARIN whois data. Args: response (:obj:`str`): The response from the ARIN whois server. Returns: list of dict: Mapping of networks with start and end positions. :: [{ 'cidr' (str) - The network routing block 'start' (int) - The starting point of the network 'end' (int) - The end point of the network }]
codesearchnet
def get_tool_variants(self, tool_name):
    """Get the variant(s) that provide the named tool.

    If there are more than one variants, the tool is in conflict, and Rez
    does not know which variant's tool is actually exposed.

    Args:
        tool_name (str): Name of the tool to search for.

    Returns:
        set: Set of `Variant` objects. If no variant provides the tool, an
        empty set is returned.
    """
    variants = set()
    tools_dict = self.get_tools(request_only=False)
    # BUG FIX: the original iterated `tools_dict.itervalues()` while
    # unpacking (variant, tools) pairs — that needs the mapping's *items*.
    # `.items()` also works on both Python 2 and 3.
    for variant, tools in tools_dict.items():
        if tool_name in tools:
            variants.add(variant)
    return variants
Get the variant(s) that provide the named tool. If there are more than one variants, the tool is in conflict, and Rez does not know which variant's tool is actually exposed. Args: tool_name(str): Name of the tool to search for. Returns: Set of `Variant` objects. If no variant provides the tool, an empty set is returned.
juraj-google-style
def _build(self, inputs):
    """Connects the MergeDims module into the graph.

    Args:
        inputs: Tensor or a nested list of Tensors to merge.

    Returns:
        The merged Tensor or a nested list of merged Tensors.
    """
    if not nest.is_sequence(inputs):
        return self._merge(inputs)
    # Flatten, merge each leaf tensor, then restore the original structure.
    merged = [self._merge(tensor) for tensor in nest.flatten(inputs)]
    return nest.pack_sequence_as(inputs, merged)
Connects the MergeDims module into the graph. Args: inputs: Tensor or a nested list of Tensors to merge. Its rank must be greater than or equal to `start` + `size`. Returns: The merged Tensor or a nested list of merged Tensors. Raises: ValueError: If any of the `inputs` tensors has insufficient rank.
juraj-google-style
def to_timestamp(dt, timestamp):
    """Convert datetime to google.protobuf.Timestamp, populating in place.

    Args:
        dt: a timezone naive datetime.
        timestamp: a google.protobuf.Timestamp to populate.

    Raises:
        TypeError: if a timezone aware datetime was provided.
    """
    if dt.tzinfo:
        raise TypeError('Cannot store a timezone aware datetime. '
                        'Convert to UTC and store the naive datetime.')
    # Sub-second precision: microseconds scaled up to nanoseconds.
    timestamp.nanos = dt.microsecond * _NANOS_PER_MICRO
    # Whole seconds since the epoch, treating the naive datetime as UTC.
    timestamp.seconds = calendar.timegm(dt.timetuple())
Convert datetime to google.protobuf.Timestamp. Args: dt: a timezone naive datetime. timestamp: a google.protobuf.Timestamp to populate. Raises: TypeError: if a timezone aware datetime was provided.
juraj-google-style
def update_memo(self, task_id, task, r):
    """Updates the memoization lookup table with the result from a task.

    Args:
        - task_id (int): Integer task id
        - task (dict) : A task dict from dfk.tasks
        - r (Result future): Result future
    """
    # Memoization must be enabled both globally and for this task.
    if not self.memoize or not task['memoize']:
        return
    # FIX: both branches of the original if/else performed the identical
    # assignment; the only difference was logging when an existing entry
    # is replaced. Collapse to a conditional log plus a single assignment.
    if task['hashsum'] in self.memo_lookup_table:
        logger.info('Updating appCache entry with latest %s:%s call' %
                    (task['func_name'], task_id))
    self.memo_lookup_table[task['hashsum']] = r
Updates the memoization lookup table with the result from a task. Args: - task_id (int): Integer task id - task (dict) : A task dict from dfk.tasks - r (Result future): Result future A warning is issued when a hash collision occurs during the update. This is not likely.
codesearchnet
def get_matching_text_in_strs(a, b, match_min_size=30, ignore='', end_characters=''):
    """Returns a list of matching blocks of text in a and b.

    Args:
        a (str): First string to match
        b (str): Second string to match
        match_min_size (int): Minimum block size to match on. Defaults to 30.
        ignore (str): Any characters to ignore in matching. Defaults to ''.
        end_characters (str): End characters to look for. Defaults to ''.

    Returns:
        List[str]: List of matching blocks of text
    """
    matcher = difflib.SequenceMatcher(lambda x: x in ignore)
    matcher.set_seqs(a=a, b=b)
    matching_text = list()
    for block in matcher.get_matching_blocks():
        text = a[block.a:block.a + block.size]
        if end_characters:
            # Trim leading end characters, then trim the tail back to the
            # last end character. If that empties the block, keep it whole.
            candidate = text
            while candidate and candidate[0] in end_characters:
                candidate = candidate[1:]
            while candidate and candidate[-1] not in end_characters:
                candidate = candidate[:-1]
            text = candidate if candidate else text
        if len(text) >= match_min_size:
            matching_text.append(text)
    return matching_text
Returns a list of matching blocks of text in a and b Args: a (str): First string to match b (str): Second string to match match_min_size (int): Minimum block size to match on. Defaults to 30. ignore (str): Any characters to ignore in matching. Defaults to ''. end_characters (str): End characters to look for. Defaults to ''. Returns: List[str]: List of matching blocks of text
juraj-google-style
def forward(self, inference_args=None, input_tangents=None):
    """A forward function with only user-specified outputs.

    Args:
        inference_args: A flat list of Tensors. Unused, but taken for
            compatibility with _TapeGradientFunctions.
        input_tangents: A flat list of Tensors, jvps associated with
            `inference_args`. Unused; tape functions must be used instead.

    Returns:
        An atomic_function.AtomicFunction.
    """
    if input_tangents:
        raise errors.InternalError('unexpectedly got forwardprop information in a class that does not support forwardprop.')
    del inference_args  # Unused; accepted only for interface compatibility.
    return self._inference_function
A forward function with only user-specified outputs. The call operation for the returned inference function can be rewritten into a forward function. This only happens if the backward function (from the `backward` method) ends up being used to compute gradients. This approach avoids constructing unnecessary graphs, but it only works if we are calling this function when not executing eagerly. Args: inference_args: A flat list of Tensors, arguments to the inference function. Unused, but taken for compatibility with _TapeGradientFunctions. input_tangents: A flat list of Tensors, jvps associated with `inference_args`. Unused; if required, tape functions must be used instead. Returns: An atomic_function.AtomicFunction.
github-repos
def set_evaluation_parameter(self, parameter_name, parameter_value):
    """Store a value in the data_asset evaluation_parameters object.

    The stored values are used to evaluate parameterized expectations.

    Args:
        parameter_name (string): The name of the kwarg to be replaced at
            evaluation time
        parameter_value (any): The value to be used
    """
    # Lazily create the evaluation_parameters section on first use.
    if 'evaluation_parameters' not in self._expectations_config:
        self._expectations_config['evaluation_parameters'] = {}
    self._expectations_config['evaluation_parameters'][parameter_name] = parameter_value
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate parameterized expectations. Args: parameter_name (string): The name of the kwarg to be replaced at evaluation time parameter_value (any): The value to be used
juraj-google-style
def parse_auth(cls, entries, raise_on_error=False):
    """Parses authentication entries.

    Args:
        entries: Dict of authentication entries (registry -> entry dict).
        raise_on_error: If set to true, an invalid format will raise
            InvalidConfigFile.

    Returns:
        dict: Authentication registry mapping. Empty on the first invalid
        entry when raise_on_error is False.
    """
    conf = {}
    for (registry, entry) in six.iteritems(entries):
        if (not isinstance(entry, dict)):
            log.debug('Config entry for key {0} is not auth config'.format(registry))
            if raise_on_error:
                raise errors.InvalidConfigFile('Invalid configuration for registry {0}'.format(registry))
            # NOTE(review): this discards any entries parsed so far rather
            # than skipping the bad one — presumably intentional; verify.
            return {}
        if ('identitytoken' in entry):
            # Identity tokens take precedence over username/password auth.
            log.debug('Found an IdentityToken entry for registry {0}'.format(registry))
            conf[registry] = {'IdentityToken': entry['identitytoken']}
            continue
        if ('auth' not in entry):
            # No inline auth blob; credentials may live in an external store.
            log.debug('Auth data for {0} is absent. Client might be using a credentials store instead.'.format(registry))
            conf[registry] = {}
            continue
        # The 'auth' value is a base64-encoded "username:password" pair.
        (username, password) = decode_auth(entry['auth'])
        log.debug('Found entry (registry={0}, username={1})'.format(repr(registry), repr(username)))
        conf[registry] = {'username': username, 'password': password, 'email': entry.get('email'), 'serveraddress': registry}
    return conf
Parses authentication entries Args: entries: Dict of authentication entries. raise_on_error: If set to true, an invalid format will raise InvalidConfigFile Returns: Authentication registry.
codesearchnet
def add_arg_scope(func):
    """Decorates a function with args so it can be used within an arg_scope.

    Args:
        func: function to decorate.

    Returns:
        The decorated function func_with_args().
    """
    @functools.wraps(func)
    def func_with_args(*args, **kwargs):
        scope = _current_arg_scope()
        key = (func.__module__, func.__name__)
        if key in scope:
            # Start from the scoped defaults; explicit kwargs win.
            merged_kwargs = scope[key].copy()
            merged_kwargs.update(kwargs)
        else:
            merged_kwargs = kwargs
        return func(*args, **merged_kwargs)

    # Register the wrapped op so arg_scope can target it.
    _add_op(func)
    return func_with_args
Decorates a function with args so it can be used within an arg_scope. Args: func: function to decorate. Returns: A tuple with the decorated function func_with_args().
juraj-google-style
def remove(self, id):
    """Remove an object by id.

    Args:
        id (int): id of the object to delete

    Returns:
        int: number of affected rows
    """
    original_count = len(self.model.db)
    # Keep every entry whose id differs from the one being removed.
    self.model.db = [entry for entry in self.model.db if entry["id"] != id]
    if not self._batch.enable.is_set():
        # Persist immediately unless batch mode is enabled.
        self.model.save_db()
    return original_count - len(self.model.db)
Remove an object by id Args: id (int): Object's id that should be deleted Returns: len(int): affected rows
juraj-google-style
def compute_mask(self, inputs, mask=None):
    """Computes an output mask tensor.

    Args:
        inputs: Tensor or list of tensors.
        mask: Tensor or list of tensors.

    Returns:
        None or a tensor (or list of tensors, one per output tensor of
        the layer).
    """
    if self.supports_masking:
        # Masking layers simply propagate the mask by default.
        return mask
    if any(m is not None for m in nest.flatten(mask)):
        raise TypeError('Layer ' + self.name + ' does not support masking, but was passed an input_mask: ' + str(mask))
    return None
Computes an output mask tensor. Args: inputs: Tensor or list of tensors. mask: Tensor or list of tensors. Returns: None or a tensor (or list of tensors, one per output tensor of the layer).
github-repos
def extract_async(self, destination, format='csv', csv_delimiter=None, csv_header=True, compress=False):
    """Starts a job to export the table to GCS.

    Args:
        destination: the destination URI(s). Can be a single URI or a list.
        format: one of 'csv', 'json', or 'avro' (default 'csv').
        csv_delimiter: for CSV exports, the field delimiter (default ',').
        csv_header: for CSV exports, whether to include a header line.
        compress: whether to compress the data on export (not supported
            for AVRO).

    Returns:
        A Job object for the export Job if started successfully; else None.

    Raises:
        google.datalab.JobError: wrapping any failure from the API call.
    """
    export_format = format.upper()
    if export_format == 'JSON':
        # The BigQuery API name for the JSON export format.
        export_format = 'NEWLINE_DELIMITED_JSON'
    if export_format == 'CSV' and csv_delimiter is None:
        csv_delimiter = ','
    try:
        response = self._api.table_extract(self._name_parts, destination, export_format, compress, csv_delimiter, csv_header)
        return self._init_job_from_response(response)
    except Exception as e:
        raise google.datalab.JobError(location=traceback.format_exc(), message=str(e), reason=str(type(e)))
Starts a job to export the table to GCS. Args: destination: the destination URI(s). Can be a single URI or a list. format: the format to use for the exported data; one of 'csv', 'json', or 'avro' (default 'csv'). csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ',' csv_header: for CSV exports, whether to include an initial header line. Default true. compress: whether to compress the data on export. Compression is not supported for AVRO format. Defaults to False. Returns: A Job object for the export Job if it was started successfully; else None.
codesearchnet
def rouge_2_fscore(predictions, labels, **unused_kwargs):
    """ROUGE-2 F1 score computation between labels and predictions.

    This is an approximate ROUGE scoring method since we do not glue word
    pieces or decode the ids and tokenize the output.

    Args:
        predictions: tensor, model predictions
        labels: tensor, gold output.

    Returns:
        rouge2_fscore: approx rouge-2 f1 score, paired with a constant 1.0
        weight.
    """
    # Greedy decode: argmax over the final (vocab) axis, then drop the two
    # trailing size-1 axes to match the label layout.
    outputs = tf.squeeze(tf.to_int32(tf.argmax(predictions, axis=-1)), axis=[-1, -2])
    gold = tf.squeeze(labels, axis=[-1, -2])
    rouge_2_f_score = tf.py_func(rouge_n, (outputs, gold), tf.float32)
    return rouge_2_f_score, tf.constant(1.0)
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score.
juraj-google-style
def get_replacement_transform_for_applied_ptransform(self, applied_ptransform):
    """Provides a runner specific override for a given `AppliedPTransform`.

    Args:
        applied_ptransform: `AppliedPTransform` containing the `PTransform`
            to be replaced.

    Returns:
        A `PTransform` that will be the replacement for the `PTransform`
        inside the `AppliedPTransform` given as an argument.
    """
    # Delegate to the PTransform-level override hook.
    inner_transform = applied_ptransform.transform
    return self.get_replacement_transform(inner_transform)
Provides a runner specific override for a given `AppliedPTransform`. Args: applied_ptransform: `AppliedPTransform` containing the `PTransform` to be replaced. Returns: A `PTransform` that will be the replacement for the `PTransform` inside the `AppliedPTransform` given as an argument.
github-repos
def unused(node):
    """Find unused definitions that can be removed.

    Runs reaching definitions analysis followed by an AST walk to find all
    variable definitions that are not used later on.

    Args:
        node: The AST of e.g. a function body to find unused variable
            definitions.

    Returns:
        unused: a set of `(variable_name, node)` pairs which are unused in
        this AST.
    """
    # Annotate the AST with reaching-definitions dataflow facts first.
    cfg.forward(node, cfg.ReachingDefinitions())
    finder = Unused()
    finder.visit(node)
    return finder.unused
Find unused definitions that can be removed. This runs reaching definitions analysis followed by a walk over the AST to find all variable definitions that are not used later on. Args: node: The AST of e.g. a function body to find unused variable definitions. Returns: unused: After visiting all the nodes, this attribute contains a set of definitions in the form of `(variable_name, node)` pairs which are unused in this AST.
juraj-google-style
def get_port_map(self, id_or_uri):
    """Get the drive enclosure I/O adapter port to SAS interconnect port map.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.

    Returns:
        dict: Drive Enclosure Port Map
    """
    # Resolve id-or-uri to a full URI, then append the port-map suffix.
    base_uri = self._client.build_uri(id_or_uri)
    return self._client.get(id_or_uri=base_uri + self.PORT_MAP_PATH)
Use to get the drive enclosure I/O adapter port to SAS interconnect port connectivity. Args: id_or_uri: Can be either the resource ID or the resource URI. Returns: dict: Drive Enclosure Port Map
juraj-google-style
def converted_self(self):
    """The NodeDef to be converted, resolved lazily and cached.

    Returns:
        The NodeDef to be converted, which can come from either a graph or
        a function. The result is cached on first access.
    """
    if self._converted_self is None:
        # The node lives either in its function's converted copy or, for
        # top-level nodes, in the enclosing graph's converted copy.
        owner = self._function or self._enclosing_graph
        self._converted_self = owner.converted_self().nodes[self._node.name]
    return self._converted_self
The NodeDef to be converted. Returns: The NodeDef to be converted, which can come from either a graph for a function. Derived classes should call this (via 'super') to make sure the node is retrieved from the right place.
github-repos
def start(self, interval_s):
    """Starts executing the method at the specified interval.

    Args:
        interval_s: The amount of time between executions of the method.

    Returns:
        False if the interval was already running, True otherwise.
    """
    if self.running:
        return False
    self.stopped.clear()
    def _execute():
        # Run once immediately; bail out if the method reports failure and
        # stop_if_false is set.
        if ((not self.method()) and self.stop_if_false):
            return
        # wait() returns False on timeout (keep looping) and True once
        # `stopped` is set, which ends the loop.
        while (not self.stopped.wait(interval_s)):
            if ((not self.method()) and self.stop_if_false):
                return
    # Daemon thread so a running interval never blocks process exit.
    self.thread = threading.Thread(target=_execute)
    self.thread.daemon = True
    self.thread.start()
    return True
Starts executing the method at the specified interval. Args: interval_s: The amount of time between executions of the method. Returns: False if the interval was already running.
codesearchnet
def add_backend_policy(self, json_data):
    """Attaches backend server policies to an ELB.

    Args:
        json_data (json): return data from ELB upsert
    """
    env = boto3.session.Session(profile_name=self.env, region_name=self.region)
    elbclient = env.client('elb')
    # Walk every listener of every job in the upsert payload.
    for job in json.loads(json_data)['job']:
        for listener in job['listeners']:
            instance_port = listener['internalPort']
            backend_policy_list = listener['backendPolicies']
            if backend_policy_list:
                # Only call out to AWS when there are policies to attach.
                LOG.info('Adding backend server policies: %s', backend_policy_list)
                elbclient.set_load_balancer_policies_for_backend_server(LoadBalancerName=self.app, InstancePort=instance_port, PolicyNames=backend_policy_list)
Attaches backend server policies to an ELB Args: json_data (json): return data from ELB upsert
codesearchnet
def format_param_list(listed_params, output_name):
    """Format a list of parameters for api consumption.

    Useful for email address lists, etc. Each value is keyed as
    ``output_name[index]``.

    Args:
        listed_params (list of values): the list to format
        output_name (str): the parameter name to prepend to each key

    Returns:
        dict: mapping of indexed parameter names to values (empty when
        the input list is empty or falsy).
    """
    if not listed_params:
        return {}
    return {'%s[%d]' % (str(output_name), index): value
            for index, value in enumerate(listed_params)}
Utility method for formatting lists of parameters for api consumption Useful for email address lists, etc Args: listed_params (list of values) - the list to format output_name (str) - the parameter name to prepend to each key
juraj-google-style
def load_extension(self, path, name_filter=None, class_filter=None, unique=False, component=None):
    """Load a single python module extension.

    Similar to using the imp module directly to load a module and
    potentially inspecting the objects it declares to filter them by class.

    Args:
        path (str): The path to the python file to load.
        name_filter (str): If passed, the basename of the module must match
            or nothing is returned.
        class_filter (type): If passed, only instances of this class are
            returned.
        unique (bool): If True (default False), exactly one matching object
            must be found; it is returned directly rather than in a list.
        component (IOTile): The component this extension comes from, used
            to import it as a submodule of the component's support package.

    Returns:
        list of (name, type): objects found at the extension path, or a
        single (name, type) pair when unique is True.

    Raises:
        ArgumentError: when unique is True and the number of matches is
            not exactly one.
    """
    import_name = None
    if (component is not None):
        # Import as a submodule of the component's support package so
        # relative imports inside the extension resolve correctly.
        import_name = _ensure_package_loaded(path, component)
    (name, ext) = _try_load_module(path, import_name=import_name)
    if ((name_filter is not None) and (name != name_filter)):
        return []
    # Keep only matching subclasses, then drop anything explicitly marked
    # as a non-extension.
    found = [(name, x) for x in self._filter_subclasses(ext, class_filter)]
    found = [(name, x) for (name, x) in found if self._filter_nonextensions(x)]
    if (not unique):
        return found
    # unique=True: exactly one match is required; return it bare.
    if (len(found) > 1):
        raise ArgumentError(('Extension %s should have had exactly one instance of class %s, found %d' % (path, class_filter.__name__, len(found))), classes=found)
    elif (len(found) == 0):
        raise ArgumentError(('Extension %s had no instances of class %s' % (path, class_filter.__name__)))
    return found[0]
Load a single python module extension. This function is similar to using the imp module directly to load a module and potentially inspecting the objects it declares to filter them by class. Args: path (str): The path to the python file to load name_filter (str): If passed, the basename of the module must match name or nothing is returned. class_filter (type): If passed, only instance of this class are returned. unique (bool): If True (default is False), there must be exactly one object found inside this extension that matches all of the other criteria. component (IOTile): The component that this extension comes from if it is loaded from an installed component. This is used to properly import the extension as a submodule of the component's support package. Returns: list of (name, type): A list of the objects found at the extension path. If unique is True, then the list only contains a single entry and that entry will be directly returned.
codesearchnet
def save(self, config_loc=None):
    """Saves current user credentials to disk as JSON.

    Args:
        config_loc (str, optional): File path where credentials are to be
            stored. If no argument is provided, the default user config
            location is used (and its directory is created if needed).

    Example:

        .. code::

            from cartoframes import Credentials
            creds = Credentials(username='eschbacher', key='abcdefg')
            creds.save()  # save to default location
    """
    # BUG FIX: config_loc was previously accepted but silently ignored;
    # honor it when provided, falling back to the default path otherwise.
    if config_loc is None:
        # Create the user config directory if it does not exist yet.
        if not os.path.exists(_USER_CONFIG_DIR):
            os.makedirs(_USER_CONFIG_DIR)
        target_path = _DEFAULT_PATH
    else:
        target_path = config_loc
    with open(target_path, 'w') as f:
        json.dump({'key': self._key,
                   'base_url': self._base_url,
                   'username': self._username}, f)
Saves current user credentials to user directory. Args: config_loc (str, optional): Location where credentials are to be stored. If no argument is provided, they will be saved to the default location. Example: .. code:: from cartoframes import Credentials creds = Credentials(username='eschbacher', key='abcdefg') creds.save() # save to default location
codesearchnet
def checkPermissions(permissions=(), obj=None):
    """Checks if the current user has all given permissions on an object.

    Args:
        permissions: The permissions the current user must be compliant with
        obj: The object for which the permissions apply

    Returns:
        True if the user complies with all the permissions for the given
        object; False when no object is given; '' (falsy) when a
        permission check fails.
    """
    # BUG FIX: the default was a shared mutable list (`permissions=[]`);
    # an empty tuple avoids the mutable-default pitfall and is otherwise
    # interchangeable for iteration.
    if not obj:
        return False
    sm = getSecurityManager()
    for perm in permissions:
        if not sm.checkPermission(perm, obj):
            # Preserve the historical falsy return value of ''.
            return ''
    return True
Checks if a user has permissions for a given object. Args: permissions: The permissions the current user must be compliant with obj: The object for which the permissions apply Returns: 1 if the user complies with all the permissions for the given object. Otherwise, it returns empty.
juraj-google-style
def get_extra_vars():
    """Returns the variables captured by the function being defined.

    Returns:
        If the default graph is being used to define a function, the list
        of variables created inside the function body so far. Otherwise,
        an empty list.
    """
    graph = ops.get_default_graph()
    # Only a _FuncGraph tracks variables created inside a function body.
    return graph.extra_vars if isinstance(graph, _FuncGraph) else []
Returns the captured variables by the function. Returns: If the default graph is being used to define a function, the returned list of variables are those created inside the function body so far. Otherwise, returns an empty list.
github-repos
def model_fn(hparams, seed):
    """Create a Keras model with the given hyperparameters.

    Args:
        hparams: A dict mapping hyperparameters in `HPARAMS` to values.
        seed: A hashable object to be used as a random seed (e.g., to
            construct dropout layers in the model).

    Returns:
        A compiled Keras model.
    """
    rng = random.Random(seed)
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Input(INPUT_SHAPE))
    # Add a trailing channels dimension so the conv layers get rank-4 input.
    model.add(tf.keras.layers.Reshape((INPUT_SHAPE + (1,))))
    conv_filters = 8
    # NOTE(review): `xrange` is Python 2 only — confirm the target runtime
    # or that `xrange` is aliased (e.g. via six) elsewhere in the module.
    for _ in xrange(hparams[HP_CONV_LAYERS]):
        model.add(tf.keras.layers.Conv2D(filters=conv_filters, kernel_size=hparams[HP_CONV_KERNEL_SIZE], padding='same', activation='relu'))
        model.add(tf.keras.layers.MaxPool2D(pool_size=2, padding='same'))
        # Double the filter count with each additional conv block.
        conv_filters *= 2
    model.add(tf.keras.layers.Flatten())
    # Dropout seed derives deterministically from the function's seed.
    model.add(tf.keras.layers.Dropout(hparams[HP_DROPOUT], seed=rng.random()))
    dense_neurons = 32
    for _ in xrange(hparams[HP_DENSE_LAYERS]):
        model.add(tf.keras.layers.Dense(dense_neurons, activation='relu'))
        # Double the width with each additional dense layer.
        dense_neurons *= 2
    model.add(tf.keras.layers.Dense(OUTPUT_CLASSES, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer=hparams[HP_OPTIMIZER], metrics=['accuracy'])
    return model
Create a Keras model with the given hyperparameters. Args: hparams: A dict mapping hyperparameters in `HPARAMS` to values. seed: A hashable object to be used as a random seed (e.g., to construct dropout layers in the model). Returns: A compiled Keras model.
codesearchnet
def convert_elementwise_sub(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert elementwise subtraction to a Keras Subtract layer.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting elementwise_sub ...')
    minuend = layers[inputs[0]]
    subtrahend = layers[inputs[1]]
    # Pick a layer name according to the requested naming policy.
    if names == 'short':
        tf_name = 'S' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())
    subtract_layer = keras.layers.Subtract(name=tf_name)
    layers[scope_name] = subtract_layer([minuend, subtrahend])
Convert elementwise subtraction. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a text file-like object using a pyparsing definition.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    if not self._line_structures:
        raise errors.UnableToParseFile(
            'Line structure undeclared, unable to proceed.')
    # Prefer the parser-defined encoding; fall back to the mediator codepage.
    encoding = self._ENCODING or parser_mediator.codepage
    text_file_object = text_file.TextFile(file_object, encoding=encoding)
    try:
        line = self._ReadLine(text_file_object, max_len=self.MAX_LINE_LENGTH)
    except UnicodeDecodeError:
        raise errors.UnableToParseFile(
            'Not a text file or encoding not supported.')
    if not line:
        raise errors.UnableToParseFile('Not a text file.')
    # A line at (or one below) the maximum length was likely truncated.
    if len(line) == self.MAX_LINE_LENGTH or len(
            line) == self.MAX_LINE_LENGTH - 1:
        logger.debug((
            'Trying to read a line and reached the maximum allowed length of '
            '{0:d}. The last few bytes of the line are: {1:s} [parser '
            '{2:s}]').format(
                self.MAX_LINE_LENGTH, repr(line[-10:]), self.NAME))
    if not self._IsText(line):
        raise errors.UnableToParseFile('Not a text file, unable to proceed.')
    if not self.VerifyStructure(parser_mediator, line):
        raise errors.UnableToParseFile('Wrong file structure.')
    consecutive_line_failures = 0
    index = None
    self._current_offset = 0
    while line:
        if parser_mediator.abort:
            break
        parsed_structure = None
        use_key = None
        # Try each declared line structure until one parses the line.
        for index, (key, structure) in enumerate(self._line_structures):
            try:
                parsed_structure = structure.parseString(line)
            except pyparsing.ParseException:
                pass
            if parsed_structure:
                use_key = key
                break
        if parsed_structure:
            self.ParseRecord(parser_mediator, use_key, parsed_structure)
            consecutive_line_failures = 0
            # Heuristic: move the matching structure to the front, since
            # consecutive lines often share the same structure.
            if index is not None and index != 0:
                key_structure = self._line_structures.pop(index)
                self._line_structures.insert(0, key_structure)
        else:
            # Truncate very long lines for the warning message.
            if len(line) > 80:
                line = '{0:s}...'.format(line[:77])
            parser_mediator.ProduceExtractionWarning(
                'unable to parse log line: {0:s} at offset: {1:d}'.format(
                    repr(line), self._current_offset))
            consecutive_line_failures += 1
            if (consecutive_line_failures >
                    self.MAXIMUM_CONSECUTIVE_LINE_FAILURES):
                # NOTE(review): the source was garbled here (a newline inside
                # the string literal); the message is reconstructed as one
                # format string — confirm against upstream.
                raise errors.UnableToParseFile(
                    'more than {0:d} consecutive failures to parse '
                    'lines.'.format(self.MAXIMUM_CONSECUTIVE_LINE_FAILURES))
        self._current_offset = text_file_object.get_offset()
        try:
            line = self._ReadLine(
                text_file_object, max_len=self.MAX_LINE_LENGTH)
        except UnicodeDecodeError:
            parser_mediator.ProduceExtractionWarning(
                'unable to read and decode log line at offset {0:d}'.format(
                    self._current_offset))
            break
Parses a text file-like object using a pyparsing definition. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def Write2000256List(self, arr):
    """Write an array of 64 byte items to the stream, byte-reversed.

    Args:
        arr (list): a list of 2000 items of 64 bytes in size, each given
            as a hex string.
    """
    for hex_item in arr:
        raw = bytearray(binascii.unhexlify(hex_item))
        # Items are serialized in reversed (little-endian) byte order.
        raw.reverse()
        self.WriteBytes(raw)
Write an array of 64 byte items to the stream. Args: arr (list): a list of 2000 items of 64 bytes in size.
codesearchnet
def build_genotype(gt_call):
    """Build a genotype call dict from raw call data.

    Args:
        gt_call(dict): raw call with individual_id, display_name,
            genotype_call, ref_depth, alt_depth, read_depth and
            genotype_quality keys.

    Returns:
        gt_obj(dict): with keys sample_id, display_name, genotype_call,
            allele_depths ([ref_depth, alt_depth]), read_depth and
            genotype_quality.
    """
    return {
        'sample_id': gt_call['individual_id'],
        'display_name': gt_call['display_name'],
        'genotype_call': gt_call['genotype_call'],
        # Allele depths are always ordered [ref, alt].
        'allele_depths': [gt_call['ref_depth'], gt_call['alt_depth']],
        'read_depth': gt_call['read_depth'],
        'genotype_quality': gt_call['genotype_quality'],
    }
Build a genotype call Args: gt_call(dict) Returns: gt_obj(dict) gt_call = dict( sample_id = str, display_name = str, genotype_call = str, allele_depths = list, # int read_depth = int, genotype_quality = int, )
juraj-google-style
def get_supported_features_for_model_type(model_type: str, model_name: Optional[str]=None) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]:
    """Tries to retrieve the feature -> OnnxConfig constructor map for a model type.

    Args:
        model_type (`str`):
            The model type to retrieve the supported features for.
        model_name (`str`, *optional*):
            The name attribute of the model object, only used for the
            exception message.

    Returns:
        The dictionary mapping each feature to a corresponding OnnxConfig
        constructor.

    Raises:
        KeyError: when the model type is not supported.
    """
    model_type = model_type.lower()
    if model_type in FeaturesManager._SUPPORTED_MODEL_TYPE:
        return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type]
    # Include the model name in the error only when it was provided.
    label = f'{model_type} ({model_name})' if model_name else model_type
    raise KeyError(f'{label} is not supported yet. Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. If you want to support {model_type} please propose a PR or open up an issue.')
Tries to retrieve the feature -> OnnxConfig constructor map from the model type. Args: model_type (`str`): The model type to retrieve the supported features for. model_name (`str`, *optional*): The name attribute of the model object, only used for the exception message. Returns: The dictionary mapping each feature to a corresponding OnnxConfig constructor.
github-repos
def save_link(self, path_info):
    """Adds the specified path to the list of links created by dvc.

    This list is later used on `dvc checkout` to cleanup old links.

    Args:
        path_info (dict): path info to add to the list of links.
    """
    assert path_info['scheme'] == 'local'
    path = path_info['path']
    # A link that no longer exists on disk is nothing to record.
    if not os.path.exists(path):
        return
    mtime, _ = get_mtime_and_size(path)
    inode = get_inode(path)
    relpath = os.path.relpath(path, self.root_dir)
    # REPLACE keeps a single row per path in the link-state table.
    cmd = 'REPLACE INTO {}(path, inode, mtime) VALUES ("{}", {}, "{}")'.format(
        self.LINK_STATE_TABLE, relpath, self._to_sqlite(inode), mtime)
    self._execute(cmd)
Adds the specified path to the list of links created by dvc. This list is later used on `dvc checkout` to cleanup old links. Args: path_info (dict): path info to add to the list of links.
codesearchnet
def Sign(message, private_key):
    """Sign the message with the given private key (raw ECDSA r||s).

    Args:
        message (str): hex-encoded message to be signed.
        private_key (str): 32 byte key as a double digit hex string
            (e.g. having a length of 64).

    Returns:
        bytearray: the 64-byte signature (r then s, each 32 bytes
        big-endian).
    """
    # Hash the decoded message bytes, not the hex text itself.
    digest = hashlib.sha256(binascii.unhexlify(message)).hexdigest()
    v, r, s = bitcoin.ecdsa_raw_sign(digest, private_key)
    signature = bytearray(r.to_bytes(32, 'big')) + bytearray(s.to_bytes(32, 'big'))
    return signature
Sign the message with the given private key. Args: message (str): message to be signed private_key (str): 32 byte key as a double digit hex string (e.g. having a length of 64) Returns: bytearray: the signature of the message.
juraj-google-style
def __init__(self, path=None):
    """Initialize a new Vcs object for a repository located at `path`.

    If `path` is `None`, then `get_working_directory` is used to identify
    the path.

    Args:
        path (str) - optional. The path to the repo working directory.
    """
    self.path = None
    if path is not None:
        self.path = path
    else:
        self.path = self.get_working_directory()
    # Fail fast when the resolved path does not point at a repository.
    assert self.exists()
Initialize a new Vcs object for a repository located at `path`. If `path` is `None`, then `get_working_directory` is used to identify the path. Args: path (str) - optional. The path to the repo working directory.
juraj-google-style
def _ensure_tuple(item):
    """Ensure that the passed item is a tuple.

    Lists and numpy arrays are converted; tuples pass through unchanged.

    Args:
        item: the item that needs to become a tuple

    Returns:
        the item casted as a tuple

    Raises:
        NotImplementedError: if converting the given item to a tuple is
            not implemented.
    """
    if isinstance(item, tuple):
        return item
    if isinstance(item, list):
        return tuple(item)
    if isinstance(item, np.ndarray):
        # tolist() converts numpy scalars to native Python types first.
        return tuple(item.tolist())
    raise NotImplementedError
Simply ensure that the passed item is a tuple. If it is not, then convert it if possible, or raise a NotImplementedError Args: item: the item that needs to become a tuple Returns: the item casted as a tuple Raises: NotImplementedError: if converting the given item to a tuple is not implemented.
juraj-google-style
def google_api(config, task):
    """Task handler for recipe; delegates all JSON parameters to functions.

    Executes the following steps:
        1. Define the API call.
        2. Define the results destination.
        3. Define the error destination.

    The results table for BigQuery is created first as blank, which allows
    writes from multiple API calls to aggregate into a single table. The
    API call kwargs come from task['kwargs'] (hard coded) or
    task['kwargs_remote'] (loaded from a source such as BigQuery).

    Args:
        config: recipe configuration (verbosity, API key, auth).
        task: dict of task parameters from the recipe.

    Returns:
        None, all data is read and written as a side effect.

    Raises:
        ValueError: If a required key in the recipe is missing.
    """
    if config.verbose:
        print('GOOGLE_API', task['api'], task['version'], task['function'])
    api_call = {'auth': task['auth'], 'api': task['api'], 'version': task['version'], 'function': task['function'], 'iterate': task.get('iterate', False), 'limit': task.get('limit'), 'key': task.get('key', config.key), 'labels': task.get('labels'), 'headers': task.get('headers')}
    append = task.get('append')
    # Destinations are built up front so multiple calls can aggregate.
    results = google_api_build_results(config, task['auth'], api_call, task.get('results', {}))
    errors = google_api_build_errors(config, task['auth'], api_call, task.get('errors', {}))
    if 'kwargs' in task:
        # Accept a single kwargs dict or a list of them.
        kwargs_list = task['kwargs'] if isinstance(task['kwargs'], (list, tuple)) else [task['kwargs']]
    elif 'kwargs_remote' in task:
        # Load kwargs rows from a remote source such as BigQuery.
        kwargs_list = get_rows(config, task['auth'], task['kwargs_remote'], as_object=True)
    else:
        kwargs_list = [{}]

    def google_api_combine():
        # Execute the API call once per kwargs set, yielding all rows.
        for kwargs in kwargs_list:
            api_call['kwargs'] = kwargs
            google_api_initilaize(config, api_call, task.get('alias'))
            yield from google_api_execute(config, task['auth'], api_call, results, errors, append)

    if append:
        results['bigquery']['schema'].extend(append)
    return put_rows(config, task['auth'], results, google_api_combine())
Task handler for recipe, delegates all JSON parameters to functions. Executes the following steps: 1. Define the API call. 2. Define the results destination. 3. Define the error destination. The results table for BigQuery is created first as blank, this allows writes from multiple API calls to aggregate into a single table. The API call can be specified via kwargs or kwargs_remote. kwargs - hard coded values for the API call as a dictionary. kwargs_remote - values loaded from a source such as BigQuery. Args: None, all parameters are exposed via task. Returns: None, all data is read and written as a side effect. Raises: ValueError: If a required key in the recipe is missing.
github-repos
def gets(self, key, default=None, cas_default=None):
    """The memcached "gets" command for one key, as a convenience.

    Args:
        key: str, see class docs for details.
        default: value that will be returned if the key was not found.
        cas_default: same behaviour as default argument.

    Returns:
        A tuple of (value, cas) or (default, cas_default) if the key was
        not found.
    """
    fallback = (default, cas_default)
    # _fetch_cmd returns a mapping of key -> (value, cas) for found keys.
    results = self._fetch_cmd(b'gets', [key], True)
    return results.get(key, fallback)
The memcached "gets" command for one key, as a convenience. Args: key: str, see class docs for details. default: value that will be returned if the key was not found. cas_default: same behaviour as default argument. Returns: A tuple of (value, cas) or (default, cas_defaults) if the key was not found.
juraj-google-style
def _get_sql_args(parser, args=None):
    """Parse %%sql magic arguments (or fetch their defaults) into a dict.

    Args:
        parser: the argument parser to use; None yields an empty base dict.
        args: None, a string of flags, a list of tokens, or a dict of
            overrides applied on top of the parser defaults.

    Returns:
        dict of argument names to values, with None-valued entries dropped.
    """
    overrides = None
    if (args is None):
        tokens = []
    elif isinstance(args, basestring):  # NOTE: Python 2 only; `str` on Python 3
        # Collapse multi-line magic input onto one command line before tokenizing.
        command_line = ' '.join(args.split('\n'))
        tokens = shlex.split(command_line)
    elif isinstance(args, dict):
        # A dict bypasses parsing entirely and overrides the parser defaults.
        overrides = args
        tokens = []
    else:
        tokens = args
    args = ({} if (parser is None) else vars(parser.parse_args(tokens)))
    if overrides:
        args.update(overrides)
    # Drop unset (None) values so downstream defaults can apply.
    return {arg: value for (arg, value) in args.items() if (value is not None)}
Parse a set of %%sql arguments or get the default value of the arguments. Args: parser: the argument parser to use. args: the argument flags. May be a string or a list. If omitted the empty string is used so we can get the default values for the arguments. These are all used to override the arg parser. Alternatively args may be a dictionary, in which case it overrides the default values from the arg parser. Returns: A dictionary of argument names and values.
codesearchnet
def _get_resource_from_obj(self, resource):
    """Coerce an id string, a metadata dict, or a Resource into a Resource.

    Args:
        resource: resource id (str), resource metadata (dict), or a
            hdx.data.resource.Resource instance.

    Returns:
        hdx.data.resource.Resource: the resolved resource object.

    Raises:
        HDXError: if the id is not a valid UUID or the type is unsupported.
    """
    resource_cls = hdx.data.resource.Resource
    if isinstance(resource, str):
        # A plain string must be a resource id; look its metadata up in HDX.
        if is_valid_uuid(resource) is False:
            raise HDXError('%s is not a valid resource id!' % resource)
        obj = resource_cls.read_from_hdx(resource, configuration=self.configuration)
    elif isinstance(resource, dict):
        obj = resource_cls(resource, configuration=self.configuration)
    else:
        obj = resource
    if not isinstance(obj, resource_cls):
        raise HDXError('Type %s cannot be added as a resource!' % type(obj).__name__)
    return obj
Add new or update existing resource in dataset with new metadata Args: resource (Union[hdx.data.resource.Resource,Dict,str]): Either resource id or resource metadata from a Resource object or a dictionary Returns: hdx.data.resource.Resource: Resource object
juraj-google-style
def dense_to_sparse(x, ignore_value=None, name=None):
    """Convert a dense `Tensor` to a `SparseTensor`, dropping `ignore_value` cells.

    Args:
        x: A `Tensor`.
        ignore_value: Entries equal to this value are absent from the result.
            When None, the dtype's default is used ('' for strings, 0 otherwise).
        name: Python `str` prefix for the ops created here.

    Returns:
        A `tf.SparseTensor` with the same shape as `x`.
    """
    with tf.compat.v1.name_scope(name, 'dense_to_sparse', [x, ignore_value]):
        dense = tf.convert_to_tensor(value=x, name='x')
        if ignore_value is None:
            # Pick the natural "empty" value for the tensor's dtype.
            ignore_value = '' if dense.dtype.base_dtype == tf.string else dense.dtype.as_numpy_dtype(0)
        sentinel = tf.cast(ignore_value, dense.dtype, name='ignore_value')
        kept = tf.where(tf.not_equal(dense, sentinel), name='indices')
        return tf.SparseTensor(
            indices=kept,
            values=tf.gather_nd(dense, kept, name='values'),
            dense_shape=tf.shape(input=dense, out_type=tf.int64, name='dense_shape'))
Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells. Args: x: A `Tensor`. ignore_value: Entries in `x` equal to this value will be absent from the return `SparseTensor`. If `None`, default value of `x` dtype will be used (e.g. '' for `str`, 0 for `int`). name: Python `str` prefix for ops created by this function. Returns: sparse_x: A `tf.SparseTensor` with the same shape as `x`. Raises: ValueError: when `x`'s rank is `None`.
codesearchnet
def with_extrapolation(points, noise, n_points):
    """Smooth a set of points, priming the filter with extrapolated points.

    ``n_points`` extra points are extrapolated before the start of the track so
    the Kalman filter has warmed up by the time it reaches the real data; the
    extrapolated prefix is then dropped from the result.

    Args:
        points (:obj:`list` of :obj:`Point`): track to smooth.
        noise (float): Expected noise; the higher it is the more the path will
            be smoothed.
        n_points (int): number of points to extrapolate before the track.

    Returns:
        :obj:`list` of :obj:`Point`
    """
    # Bug fix: the parameter used to be shadowed by a hard-coded `n_points = 10`,
    # silently ignoring the caller's value.
    return kalman_filter(extrapolate_points(points, n_points) + points, noise)[n_points:]
Smooths a set of points, but it extrapolates some points at the beginning Args: points (:obj:`list` of :obj:`Point`) noise (float): Expected noise, the higher it is the more the path will be smoothed. Returns: :obj:`list` of :obj:`Point`
juraj-google-style
def _save_model(self, epoch, logs):
    """Save the model (or just its weights) for this epoch.

    Honours `save_best_only` by comparing the monitored metric against the
    best value seen so far, and `save_weights_only` when choosing what to
    write. Directory-shaped filepaths are rejected with a clear error.

    Args:
        epoch: the epoch this iteration is in.
        logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    """
    logs = logs or {}
    if isinstance(self.save_freq, int) or self.epochs_since_last_save >= self.period:
        # Materialise any tensors in logs before using them to format the path.
        logs = tf_utils.sync_to_numpy_or_python_type(logs)
        self.epochs_since_last_save = 0
        filepath = self._get_file_path(epoch, logs)
        try:
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    logging.warning('Can save best model only with %s available, skipping.', self.monitor)
                elif self.monitor_op(current, self.best):
                    if self.verbose > 0:
                        print('\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % (epoch + 1, self.monitor, self.best, current, filepath))
                    self.best = current
                    if self.save_weights_only:
                        self.model.save_weights(filepath, overwrite=True, options=self._options)
                    else:
                        self.model.save(filepath, overwrite=True, options=self._options)
                elif self.verbose > 0:
                    print('\nEpoch %05d: %s did not improve from %0.5f' % (epoch + 1, self.monitor, self.best))
            else:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
                if self.save_weights_only:
                    self.model.save_weights(filepath, overwrite=True, options=self._options)
                else:
                    self.model.save(filepath, overwrite=True, options=self._options)
            self._maybe_remove_file()
        except IsADirectoryError as e:
            raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))
        except IOError as e:
            # NOTE(review): presumably catches saving backends that report a
            # directory path with different wording than IsADirectoryError.
            if 'is a directory' in str(e.args[0]).lower():
                raise IOError('Please specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}'.format(filepath))
            raise e
Saves the model. Args: epoch: the epoch this iteration is in. logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
github-repos
def pull_doc(self, document):
    """Pull a document from the server, overwriting the passed-in document.

    Args:
        document (Document): the document to overwrite with server content.

    Raises:
        RuntimeError: if the connection was lost or the server replied with
            an ERROR message.
    """
    request = self._protocol.create('PULL-DOC-REQ')
    reply = self._send_message_wait_for_reply(request)
    if reply is None:
        raise RuntimeError("Connection to server was lost")
    if reply.header['msgtype'] == 'ERROR':
        raise RuntimeError("Failed to pull document: " + reply.content['text'])
    reply.push_to_document(document)
Pull a document from the server, overwriting the passed-in document Args: document : (Document) The document to overwrite with server content. Returns: None
juraj-google-style
def MakeMixture(metapmf, name='mix'):
    """Build the mixture distribution of a meta-Pmf.

    Args:
        metapmf: Pmf mapping component Pmfs to their probabilities.
        name: string name for the new Pmf.

    Returns:
        Pmf object, with each value weighted by (component prob * value prob).
    """
    mixture = Pmf(name=name)
    for inner_pmf, outer_prob in metapmf.Items():
        for value, inner_prob in inner_pmf.Items():
            mixture.Incr(value, outer_prob * inner_prob)
    return mixture
Make a mixture distribution. Args: metapmf: Pmf that maps from Pmfs to probs. name: string name for the new Pmf. Returns: Pmf object.
codesearchnet
def separate(df, column, into, sep='[\\W_]+', remove=True, convert=False, extra='drop', fill='right'):
    """Split one DataFrame column into multiple new columns.

    Args:
        df (pandas.DataFrame): frame to mutate.
        column (str): label of the column to split.
        into (list): names of the new columns.
        sep (str or list): regex to split on, or a list of integer cut points.
        remove (bool): drop the original column afterwards.
        convert (bool): convert the new columns to appropriate types.
        extra (str): 'drop' discards split pieces beyond `into`; 'merge' keeps
            the remainder in the final piece.
        fill (str): 'right' or 'left'; side on which missing pieces are
            padded with NaN.

    Returns:
        pandas.DataFrame: the (mutated) frame with the new columns.
    """
    assert isinstance(into, (tuple, list))
    if isinstance(sep, (tuple, list)):
        # Positional split: sep is a list of integer cut points; build the
        # slice boundaries, honouring the `extra` policy for surplus pieces.
        inds = ([0] + list(sep))
        if (len(inds) > len(into)):
            if (extra == 'drop'):
                inds = inds[:(len(into) + 1)]
            elif (extra == 'merge'):
                inds = (inds[:len(into)] + [None])
        else:
            inds = (inds + [None])
        splits = df[column].map((lambda x: [(str(x)[slice(inds[i], inds[(i + 1)])] if (i < (len(inds) - 1)) else np.nan) for i in range(len(into))]))
    else:
        # Regex split: 'merge' caps the number of splits so the last piece
        # keeps the remainder; otherwise split on every occurrence.
        maxsplit = ((len(into) - 1) if (extra == 'merge') else 0)
        splits = df[column].map((lambda x: re.split(sep, x, maxsplit)))
    # Pad short rows with NaN on the requested side.
    right_filler = (lambda x: (x + [np.nan for i in range((len(into) - len(x)))]))
    left_filler = (lambda x: ([np.nan for i in range((len(into) - len(x)))] + x))
    if (fill == 'right'):
        splits = [right_filler(x) for x in splits]
    elif (fill == 'left'):
        splits = [left_filler(x) for x in splits]
    for (i, split_col) in enumerate(into):
        # Empty-string pieces become NaN in the output columns.
        df[split_col] = [(x[i] if (not (x[i] == '')) else np.nan) for x in splits]
    if convert:
        df = convert_type(df, into)
    if remove:
        df.drop(column, axis=1, inplace=True)
    return df
Splits columns into multiple columns. Args: df (pandas.DataFrame): DataFrame passed in through the pipe. column (str, symbolic): Label of column to split. into (list): List of string names for new columns. Kwargs: sep (str or list): If a string, the regex string used to split the column. If a list, a list of integer positions to split strings on. remove (bool): Boolean indicating whether to remove the original column. convert (bool): Boolean indicating whether the new columns should be converted to the appropriate type. extra (str): either `'drop'`, where split pieces beyond the specified new columns are dropped, or `'merge'`, where the final split piece contains the remainder of the original column. fill (str): either `'right'`, where `np.nan` values are filled in the right-most columns for missing pieces, or `'left'` where `np.nan` values are filled in the left-most columns.
codesearchnet
def backend_config_to_configparser(config):
    """Serialise a backend config dict into a SafeConfigParser instance.

    Args:
        config (dict): dictionary of config key/value pairs.

    Returns:
        SafeConfigParser: parser with a populated 'Backend' section.

    Note:
        No validation of mandatory values is performed.
    """
    # day_start is stored as a time object; render it as HH:MM:SS text.
    day_start = config.get('day_start')
    if day_start:
        day_start = day_start.strftime('%H:%M:%S')
    cp_instance = SafeConfigParser()
    cp_instance.add_section('Backend')
    cp_instance.set('Backend', 'store', config.get('store'))
    cp_instance.set('Backend', 'day_start', day_start)
    cp_instance.set('Backend', 'fact_min_delta', text_type(config.get('fact_min_delta')))
    cp_instance.set('Backend', 'tmpfile_path', text_type(config.get('tmpfile_path')))
    cp_instance.set('Backend', 'db_engine', text_type(config.get('db_engine')))
    cp_instance.set('Backend', 'db_path', text_type(config.get('db_path')))
    cp_instance.set('Backend', 'db_host', text_type(config.get('db_host')))
    cp_instance.set('Backend', 'db_port', text_type(config.get('db_port')))
    cp_instance.set('Backend', 'db_name', text_type(config.get('db_name')))
    cp_instance.set('Backend', 'db_user', text_type(config.get('db_user')))
    cp_instance.set('Backend', 'db_password', text_type(config.get('db_password')))
    return cp_instance
Return a ConfigParser instance representing a given backend config dictionary. Args: config (dict): Dictionary of config key/value pairs. Returns: SafeConfigParser: SafeConfigParser instance representing config. Note: We do not provide *any* validation about mandatory values what so ever.
juraj-google-style
def set_attribute(self, key, value):
    """Record a key/value attribute in the `extra_data` dict.

    Useful for attaching attributes that were not available when profiling
    started.

    Args:
        key: the attribute name (must be a string).
        value: the attribute value (must be a string).

    Raises:
        ValueError: if either argument is not a string.
    """
    both_strings = isinstance(key, str) and isinstance(value, str)
    if not both_strings:
        raise ValueError(
            "The arguments 'key' and 'value' must both be "
            "strings. Instead they are {} and {}.".format(key, value))
    self.extra_data[key] = value
Add a key-value pair to the extra_data dict. This can be used to add attributes that are not available when ray.profile was called. Args: key: The attribute name. value: The attribute value.
juraj-google-style
def create(self, domain, type_name, search_command, body):
    """Create an entry in the ThreatConnect Data Store.

    Args:
        domain (string): one of 'local', 'organization', or 'system'.
        type_name (string): free-form index type name, used verbatim.
        search_command (string): search command passed through to ES.
        body (str): JSON serialized data.

    Returns:
        The underlying request's response.
    """
    http_method = 'POST'
    return self._request(domain, type_name, search_command, http_method, body)
Create entry in ThreatConnect Data Store Args: domain (string): One of 'local', 'organization', or 'system'. type_name (string): This is a free form index type name. The ThreatConnect API will use this resource verbatim. search_command (string): Search command to pass to ES. body (str): JSON serialized data.
juraj-google-style
def results_tc(self, key, value):
    """Persist a key/value pair to the results.tc file for the TC platform.

    Existing keys are updated in place; new keys are appended. Entries whose
    value is None are dropped on rewrite.

    Args:
        key (string): The data key to be stored.
        value (string): The data value to be stored.
    """
    # Prefer the configured output path when writable, else fall back to CWD.
    if os.access(self.default_args.tc_out_path, os.W_OK):
        results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
    else:
        results_file = 'results.tc'
    new = True
    open(results_file, 'a').close()  # ensure the file exists before r+ open
    with open(results_file, 'r+') as fh:
        results = ''
        # Rebuild the whole file, substituting the new value for a matching key.
        for line in fh.read().strip().split('\n'):
            if (not line):
                continue
            try:
                (k, v) = line.split(' = ')
            except ValueError:
                # Entries written with no value ("key =") lack the trailing space.
                (k, v) = line.split(' =')
            if (k == key):
                v = value
                new = False
            if (v is not None):
                results += '{} = {}\n'.format(k, v)
        if (new and (value is not None)):
            results += '{} = {}\n'.format(key, value)
        fh.seek(0)
        fh.write(results)
        fh.truncate()
Write data to results_tc file in TcEX specified directory. The TcEx platform support persistent values between executions of the App. This method will store the values for TC to read and put into the Database. Args: key (string): The data key to be stored. value (string): The data value to be stored.
codesearchnet
def plot_probabilities_histogram(Y_p, title=None):
    """Plot a histogram from a numpy array of probabilities.

    Args:
        Y_p: an [n] np.ndarray of probabilities (floats in [0, 1]).
        title: optional plot title (used only if it is a str).

    Raises:
        ValueError: if ``Y_p`` is not 1-dimensional.
    """
    if Y_p.ndim > 1:
        msg = f'Arg Y_p should be a 1-dimensional np.ndarray, not of shape {Y_p.shape}.'
        raise ValueError(msg)
    plt.hist(Y_p, bins=20)
    plt.xlim((0, 1.025))
    plt.xlabel('Probability')
    # Fix: the original label line was truncated to "plt.ylabel('" (a syntax
    # error, likely a '#' inside the string lost to comment stripping);
    # '# Predictions' restores a sensible count-axis label — confirm against
    # the upstream source if exact wording matters.
    plt.ylabel('# Predictions')
    if isinstance(title, str):
        plt.title(title)
    plt.show()
Plot a histogram from a numpy array of probabilities Args: Y_p: An [n] or [n, 1] np.ndarray of probabilities (floats in [0,1])
codesearchnet
def _validate_representative_dataset(representative_dataset: rd.RepresentativeDatasetOrMapping, signature_keys: Collection[str]) -> None: if isinstance(representative_dataset, Mapping): if set(signature_keys) != set(representative_dataset.keys()): raise ValueError(f'The signature keys and the keys of representative dataset map do not match. Signature keys: {set(signature_keys)}, representative dataset map: {set(representative_dataset.keys())}.') elif len(signature_keys) > 1: raise ValueError(f'Representative dataset is not a mapping (got: {type(representative_dataset)}), but there is more than one signature key provided. Please provide a map of {{signature_key -> dataset}} with more than one signature key.')
Validates the representative dataset, based on the signature keys. Representative dataset can be provided in two different forms: a single instance of `RepresentativeDataset` or a map of signature key to the corresponding `RepresentativeDataset`. These have a relationship with `signature_keys`. This function validates the following conditions: * If `len(signature_keys) > 1`, then `representative_dataset` should be a mapping where the keys exactly match the elements in `signature_keys`. * If `len(signature_keys) == 1`, then both a mapping and a single instance of `RepresentativeDataset` are allowed. * This function also assumes `len(signature_keys) > 0`. Args: representative_dataset: A `RepresentativeDataset` or a map of string to `RepresentativeDataset` to be validated. signature_keys: A collection of strings that contains the signature keys, each identifying a `SignatureDef`. Raises: ValueError: Iff `representative_dataset` does not satisfy the conditions above.
github-repos
def print_info(self, capture):
    """Print basic information about one frame read from *capture*.

    Reads a single frame to report dimensions, channel count, value range
    and dtype; increments ``frame_offset`` for the consumed frame.

    Args:
        capture: the source to read from (must provide ``read()``).
    """
    self.frame_offset += 1
    success, frame = capture.read()
    if not success:
        print('No source found.')
        return
    height, width = frame.shape[0:2]
    channels = frame.shape[2] if len(frame.shape) > 2 else 1
    print('Capture Information')
    print('\tDimensions (HxW): {}x{}'.format(height, width))
    print('\tColor channels: {}'.format(channels))
    print('\tColor range: {}-{}'.format(np.min(frame), np.max(frame)))
    print('\tdtype: {}'.format(frame.dtype))
Prints information about the unprocessed image. Reads one frame from the source to determine image colors, dimensions and data types. Args: capture: the source to read from.
juraj-google-style
def _zip_from_file_patterns(root, includes, excludes, follow_symlinks):
    """Generate an in-memory ZIP from include/exclude patterns under *root*.

    Args:
        root (str): base directory to list files from.
        includes (list[str]): inclusion patterns.
        excludes (list[str]): exclusion patterns (take precedence).
        follow_symlinks (bool): include symlink targets in the archive.

    Returns:
        The archive produced by :func:`_zip_files`.

    Raises:
        RuntimeError: when no files match and the archive would be empty.
    """
    logger.info('lambda: base directory: %s', root)
    matched = list(_find_files(root, includes, excludes, follow_symlinks))
    if not matched:
        raise RuntimeError('Empty list of files for Lambda payload. Check your include/exclude options for errors.')
    logger.info('lambda: adding %d files:', len(matched))
    for file_name in matched:
        logger.debug('lambda: + %s', file_name)
    return _zip_files(matched, root)
Generates a ZIP file in-memory from file search patterns. Args: root (str): base directory to list files from. includes (list[str]): inclusion patterns. Only files matching those patterns will be included in the result. excludes (list[str]): exclusion patterns. Files matching those patterns will be excluded from the result. Exclusions take precedence over inclusions. follow_symlinks (bool): If true, symlinks will be included in the resulting zip file See Also: :func:`_zip_files`, :func:`_find_files`. Raises: RuntimeError: when the generated archive would be empty.
codesearchnet
def notify_txn_invalid(self, txn_id, message=None, extended_data=None):
    """Mark every batch containing *txn_id* as invalid and notify observers.

    Records the rejected transaction (plus any error message / extended
    data) in the invalid cache, removes the affected batch ids from the
    pending set, and pushes an INVALID status update to observers.

    Args:
        txn_id (str): the id of the invalid transaction.
        message (str, optional): explanation of why the batch is invalid.
        extended_data (bytes, optional): additional error data.
    """
    invalid_txn_info = {'id': txn_id}
    if (message is not None):
        invalid_txn_info['message'] = message
    if (extended_data is not None):
        invalid_txn_info['extended_data'] = extended_data
    # All shared batch state is guarded by the same lock.
    with self._lock:
        for (batch_id, txn_ids) in self._batch_info.items():
            if (txn_id in txn_ids):
                # A batch may already hold earlier invalid txn records.
                if (batch_id not in self._invalid):
                    self._invalid[batch_id] = [invalid_txn_info]
                else:
                    self._invalid[batch_id].append(invalid_txn_info)
                self._pending.discard(batch_id)
                self._update_observers(batch_id, ClientBatchStatus.INVALID)
                return
Adds a batch id to the invalid cache along with the id of the transaction that was rejected and any error message or extended data. Removes that batch id from the pending set. The cache is only temporary, and the batch info will be purged after one hour. Args: txn_id (str): The id of the invalid batch message (str, optional): Message explaining why batch is invalid extended_data (bytes, optional): Additional error data
codesearchnet
def load(cls, path: str, password: str=None) -> 'Account':
    """Load an account from a keystore file.

    Args:
        path: full path to the keyfile.
        password: password to decrypt the key file, or None to leave it
            encrypted.

    Returns:
        Account built from the keystore contents.

    Raises:
        ValueError: if the file is not a valid keystore.
    """
    with open(path) as keyfile:
        keystore = json.load(keyfile)
    if check_keystore_json(keystore):
        return Account(keystore, password, path=path)
    raise ValueError('Invalid keystore file')
Load an account from a keystore file. Args: path: full path to the keyfile password: the password to decrypt the key file or `None` to leave it encrypted
codesearchnet
def splitext2(filepath):
    """Split *filepath* into (directory, file name without extension, extension).

    Args:
        filepath (str, path): file path.

    Returns:
        tuple: (root, filename, ext).
    """
    directory, basename = os.path.split(safepath(filepath))
    stem, extension = os.path.splitext(safepath(basename))
    return (directory, stem, extension)
Split filepath into root, filename, ext

    Args:
        filepath (str, path): file path

    Returns:
        tuple: (root, filename, ext) — the directory, the file name without
        its extension, and the extension (including the leading dot)
codesearchnet
def _GetRowValue(self, query_hash, row, value_name): keys_name_to_index_map = self._keys_per_query.get(query_hash, None) if (not keys_name_to_index_map): keys_name_to_index_map = {name: index for (index, name) in enumerate(row.keys())} self._keys_per_query[query_hash] = keys_name_to_index_map value_index = keys_name_to_index_map.get(value_name) return row[value_index]
Retrieves a value from the row. Args: query_hash (int): hash of the query, that uniquely identifies the query that produced the row. row (sqlite3.Row): row. value_name (str): name of the value. Returns: object: value.
codesearchnet
def cartesian(self, subsets=None, step_pixels=100, max_distance_pixels=150, *args, **kwargs):
    """Build a Cartesian layout object for honeycomb-style plots.

    Args:
        subsets (list): list of SubsetLogic objects.
        step_pixels (int): distance between hexagons.
        max_distance_pixels (int): radius around each point over which the
            phenotype quantity is calculated.
        *args, **kwargs: forwarded to Cartesian.read_cellframe; may also
            carry 'measured_regions' / 'measured_phenotypes' overrides.

    Returns:
        Cartesian: object holding the layout of the points to plot.
    """
    n = Cartesian.read_cellframe(self, *args, subsets=subsets, step_pixels=step_pixels, max_distance_pixels=max_distance_pixels, prune_neighbors=False, **kwargs)
    # Overrides from kwargs win; otherwise copy the values from this frame.
    if ('measured_regions' in kwargs):
        n.measured_regions = kwargs['measured_regions']
    else:
        n.measured_regions = self.get_measured_regions()
    if ('measured_phenotypes' in kwargs):
        n.measured_phenotypes = kwargs['measured_phenotypes']
    else:
        n.measured_phenotypes = self.phenotypes
    n.microns_per_pixel = self.microns_per_pixel
    return n
Return a class that can be used to create honeycomb plots Args: subsets (list): list of SubsetLogic objects step_pixels (int): distance between hexagons max_distance_pixels (int): the distance from each point by which to caclulate the quanitty of the phenotype for that area Returns: Cartesian: returns a class that holds the layout of the points to plot.
codesearchnet
def rank(input, name=None):
    """Return a 0-D `int32` Tensor holding the rank (number of dimensions) of *input*.

    Args:
        input: a `Tensor` or `SparseTensor`.
        name: optional name for the operation.

    Returns:
        A `Tensor` of type `int32` (equivalent to np.ndim).
    """
    # Delegates to the internal helper with shape-inference optimisation enabled.
    return rank_internal(input, name, optimize=True)
Returns the rank of a tensor. See also `tf.shape`. Returns a 0-D `int32` `Tensor` representing the rank of `input`. For example: ```python # shape of tensor 't' is [2, 2, 3] t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]) tf.rank(t) # 3 ``` **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank of a tensor is the number of indices required to uniquely select each element of the tensor. Rank is also known as "order", "degree", or "ndims." Args: input: A `Tensor` or `SparseTensor`. name: A name for the operation (optional). Returns: A `Tensor` of type `int32`. @compatibility(numpy) Equivalent to np.ndim @end_compatibility
github-repos
def get_valid_build_systems(working_dir, package=None):
    """Return the build system classes that could build the source in *working_dir*.

    If the (developer) package names a build system — or defines
    'build_command', which implies the 'custom' system — that single class is
    returned. Otherwise every registered build system is probed against the
    directory root.

    Args:
        working_dir (str): dir containing the package definition and
            potentially build files.
        package (`Package`): package to be built, if already loaded.

    Returns:
        List of valid build system class types.
    """
    from rez.plugin_managers import plugin_manager
    from rez.exceptions import PackageMetadataError
    try:
        package = (package or get_developer_package(working_dir))
    except PackageMetadataError:
        # No (valid) package definition here; fall back to root probing below.
        pass
    if package:
        if (getattr(package, 'build_command', None) is not None):
            # An explicit build_command always means the 'custom' build system.
            buildsys_name = 'custom'
        else:
            buildsys_name = getattr(package, 'build_system', None)
        if buildsys_name:
            cls = plugin_manager.get_plugin_class('build_system', buildsys_name)
            return [cls]
    clss = []
    for buildsys_name in get_buildsys_types():
        cls = plugin_manager.get_plugin_class('build_system', buildsys_name)
        if cls.is_valid_root(working_dir, package=package):
            clss.append(cls)
    # Drop any system that is reported as the child of another detected system,
    # so only the parent systems are offered.
    child_clss = set((x.child_build_system() for x in clss))
    clss = list((set(clss) - child_clss))
    return clss
Returns the build system classes that could build the source in given dir. Args: working_dir (str): Dir containing the package definition and potentially build files. package (`Package`): Package to be built. This may or may not be needed to determine the build system. For eg, cmake just has to look for a CMakeLists.txt file, whereas the 'build_command' package field must be present for the 'custom' build system type. Returns: List of class: Valid build system class types.
codesearchnet
def router_id(self, **kwargs):
    """Configure the device's router ID.

    Args:
        router_id (str): router ID for the device (required).
        rbridge_id (str): rbridge ID of the device in a VCS fabric
            (defaults to '1').
        callback (function): invoked with the generated ``ElementTree``
            config; defaults to this object's callback.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `router_id` is not specified.
    """
    rid = kwargs.pop('router_id')
    rbridge = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    config = self._rbridge.rbridge_id_ip_rtm_config_router_id(rbridge_id=rbridge, router_id=rid)
    return callback(config)
Configures device's Router ID. Args: router_id (str): Router ID for the device. rbridge_id (str): The rbridge ID of the device on which BGP will be configured in a VCS fabric. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `router_id` is not specified. Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.system.router_id(router_id='10.24.39.211', ... rbridge_id='225') ... dev.system.router_id() # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
codesearchnet
def process_actions(self, actions):
    """Execute the requested audit actions and collect per-contact notices.

    Each action is applied (REMOVE / STOP / FIXED / ALERT), the issue
    database state is updated and committed, and successful actions are
    grouped into notices keyed by notification contact. Failures on one
    resource are logged and do not stop processing of the rest.

    Args:
        actions (`list`): list of action dicts to process.

    Returns:
        dict mapping NotificationContact -> {'fixed': [...], 'not_fixed': [...]}.
    """
    notices = {}
    notification_contacts = {}
    for action in actions:
        resource = action['resource']
        action_status = ActionStatus.SUCCEED
        try:
            if (action['action'] == AuditActions.REMOVE):
                action_status = self.process_action(resource, AuditActions.REMOVE)
                if (action_status == ActionStatus.SUCCEED):
                    db.session.delete(action['issue'].issue)
            elif (action['action'] == AuditActions.STOP):
                action_status = self.process_action(resource, AuditActions.STOP)
                if (action_status == ActionStatus.SUCCEED):
                    action['issue'].update({'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action']})
            elif (action['action'] == AuditActions.FIXED):
                db.session.delete(action['issue'].issue)
            elif (action['action'] == AuditActions.ALERT):
                action['issue'].update({'missing_tags': action['missing_tags'], 'notes': action['notes'], 'last_alert': action['last_alert'], 'state': action['action']})
            db.session.commit()
            if (action_status == ActionStatus.SUCCEED):
                # Dedupe owners (plus permanent emails) via a set of item-tuples,
                # then fan the action out to each contact's notice bucket.
                for owner in [dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}]:
                    if (owner['value'] not in notification_contacts):
                        contact = NotificationContact(type=owner['type'], value=owner['value'])
                        notification_contacts[owner['value']] = contact
                        notices[contact] = {'fixed': [], 'not_fixed': []}
                    else:
                        contact = notification_contacts[owner['value']]
                    if (action['action'] == AuditActions.FIXED):
                        notices[contact]['fixed'].append(action)
                    else:
                        notices[contact]['not_fixed'].append(action)
        except Exception as ex:
            # Deliberate broad catch: one bad resource must not abort the batch.
            self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(action['resource'].account.account_name, action['resource'].id, action['resource'], ex))
    return notices
Process the actions we want to take Args: actions (`list`): List of actions we want to take Returns: `list` of notifications
codesearchnet
def __request_finish(self, queue_item, new_requests, request_failed=False):
    """Finalize a crawled queue item and decide whether crawling continues.

    Moves the item to ERRORED or FINISHED, queues any newly scraped
    requests, invokes the `request_after_finish` callback, and records the
    resulting crawler action.

    Args:
        queue_item (:class:`nyawc.QueueItem`): the finished request/response pair.
        new_requests list(:class:`nyawc.http.Request`): requests found during
            this request.
        request_failed (bool): True if the request failed (item is moved to
            errored instead).
    """
    if self.__stopping:
        return
    # The worker thread for this item is done; drop its bookkeeping entry.
    del self.__threads[queue_item.get_hash()]
    if request_failed:
        new_queue_items = []
        self.queue.move(queue_item, QueueItem.STATUS_ERRORED)
    else:
        self.routing.increase_route_count(queue_item.request)
        new_queue_items = self.__add_scraped_requests_to_queue(queue_item, new_requests)
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
    try:
        action = self.__options.callbacks.request_after_finish(self.queue, queue_item, new_queue_items)
    except Exception as e:
        # A faulty user callback must not kill the crawler; report and carry on.
        action = None
        print(e)
        print(traceback.format_exc())
    queue_item.decompose()
    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True
    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        self.__should_spawn_new_requests = True
Called when the crawler finished the given queue item. Args: queue_item (:class:`nyawc.QueueItem`): The request/response pair that finished. new_requests list(:class:`nyawc.http.Request`): All the requests that were found during this request. request_failed (bool): True if the request failed (if needs to be moved to errored).
juraj-google-style
def __init__(self, env_id):
    """Initialize an OpenAI universe environment wrapper.

    Args:
        env_id: string id/descriptor of the universe environment,
            e.g. 'HarvestDay-v0'.
    """
    self.env_id = env_id
    self.env = gym.make(env_id)  # constructs the underlying gym environment
Initialize OpenAI universe environment. Args: env_id: string with id/descriptor of the universe environment, e.g. 'HarvestDay-v0'.
juraj-google-style
def updateParams(self, newvalues):
    """Update model parameters and invalidate cached likelihood state.

    This is the only acceptable way to change model parameters; internal
    quantities are recomputed as needed.

    Args:
        newvalues (dict): map of parameter name to new value. Every name
            must be a valid free parameter of the model.

    Raises:
        RuntimeError: if a name is not in `model.freeparams`.
    """
    for param in newvalues:
        if param not in self.model.freeparams:
            raise RuntimeError("Can't handle param: {0}".format(param))
    if newvalues:
        self.model.updateParams(newvalues)
    self._updateInternals()
    # Cached flat parameter array is stale after any update.
    self._paramsarray = None
Update model parameters and re-compute likelihoods. This method is the **only** acceptable way to update model parameters. The likelihood is re-computed as needed by this method. Args: `newvalues` (dict) A dictionary keyed by param name and with value as new value to set. Each parameter name must either be a valid model parameter (in `model.freeparams`).
codesearchnet
def _index_filter(index_data, filter_value, filter_operator, field_converter=None): filtered_data = [] if filter_operator == operator.eq: if field_converter is not None: filter_value = field_converter(filter_value) filtered_data = index_data.get(filter_value) else: for field, data_obj_list in index_data.items(): if field_converter is not None: field = field_converter(field) if filter_operator(field, filter_value): filtered_data.extend(data_obj_list) return filtered_data
Post Filter Args: index_data (dictionary): The indexed data for the provided field. field (string): The field to filter on. filter_value (string | list): The value to match. filter_operator (string): The operator for comparison. field_converter (method): A method used to convert the field before comparison. Returns: (list): Matching data objects
juraj-google-style
def unpack_iterator_input(iterator): try: next_element = iterator.get_next() except errors.OutOfRangeError: raise RuntimeError('Your dataset iterator ran out of data; Make sure that your dataset can generate required number of samples.') if isinstance(next_element, (list, tuple)): if len(next_element) not in [2, 3]: raise ValueError('Please provide model inputs as a list or tuple of 2 or 3 elements: (input, target) or (input, target, sample_weights) Received %s' % next_element) if len(next_element) == 2: x, y = next_element weights = None else: x, y, weights = next_element else: x = next_element y = None weights = None return (x, y, weights)
Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`. Args: iterator: Instance of a dataset iterator. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
github-repos
def delete_subscription(self, subscription_id):
    """Unsubscribe: delete the customer's relationship with the plan.

    Args:
        subscription_id: identification of the subscription.

    Returns:
        The client's response to the DELETE request.
    """
    endpoint = self.url + 'subscriptions/{}'.format(subscription_id)
    return self.client._delete(endpoint, headers=self.get_headers())
Unsubscribe: delete the relationship of the customer with the plan.

        Args:
            subscription_id: Identification of the subscription.

        Returns:
            The client's response to the DELETE request.
codesearchnet
def conformPadding(cls, chars):
    """Normalise alternate padding notations to the formats defined in PAD_MAP.

    Input already in a PAD_MAP format (or empty) is returned unmodified;
    anything else is converted via the class's padding helpers, e.g.
    '%04d' -> '#'.

    Args:
        chars (str): input padding chars.

    Returns:
        str: conformed padding chars.

    Raises:
        ValueError: if chars contains invalid padding characters.
    """
    if not chars or chars[0] in PAD_MAP:
        return chars
    return cls.getPaddingChars(cls.getPaddingNum(chars))
Ensure alternate input padding formats are conformed to formats defined in PAD_MAP If chars is already a format defined in PAD_MAP, then it is returned unmodified. Example:: '#' -> '#' '@@@@' -> '@@@@' '%04d' -> '#' Args: chars (str): input padding chars Returns: str: conformed padding chars Raises: ValueError: If chars contains invalid padding characters
juraj-google-style
def ExamineEvent(self, mediator, event):
    """Collect a Windows Service from a Registry service event; ignore others.

    Args:
        mediator (AnalysisMediator): mediates interactions between analysis
            plugins and other components.
        event (EventObject): event to examine.
    """
    if getattr(event, 'data_type', '') != 'windows:registry:service':
        return
    self._service_collection.AddService(WindowsService.FromEvent(event))
Analyzes an event and creates Windows Services as required. At present, this method only handles events extracted from the Registry. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event to examine.
codesearchnet
def parse_psqs(psqs_results_file):
    """Parse a PSQS result file into a Pandas DataFrame summary.

    Args:
        psqs_results_file: path to a tab-separated PSQS results file whose
            columns are: pdb path, psqs_local, psqs_burial, psqs_contact,
            psqs_total.

    Returns:
        pandas.DataFrame with columns pdb_file, psqs_local, psqs_burial,
        psqs_contact, psqs_total, u_pdb (4-char PDB ids, upper-cased) and
        i_entry_name (longer model entry names); rows without a psqs_total
        value are dropped.
    """
    def _clean_name(value):
        # Bug fix: the original used str.strip('./') / str.strip('.pdb'), which
        # strips *any* of those characters from both ends and mangles names
        # starting or ending with them (e.g. 'bad.pdb' -> 'a'). Remove the
        # literal prefix/suffix instead.
        name = str(value)
        if name.startswith('./'):
            name = name[2:]
        if name.endswith('.pdb'):
            name = name[:-len('.pdb')]
        return name

    psqs_results = pd.read_csv(psqs_results_file, sep='\t', header=None)
    psqs_results['pdb_file'] = psqs_results[0].apply(_clean_name)
    psqs_results = psqs_results.rename(columns={1: 'psqs_local', 2: 'psqs_burial', 3: 'psqs_contact', 4: 'psqs_total'}).drop(0, axis=1)
    # 4-character names are PDB ids; anything longer is a model entry name.
    psqs_results['u_pdb'] = psqs_results['pdb_file'].apply(lambda x: x.upper() if len(x) == 4 else np.nan)
    psqs_results['i_entry_name'] = psqs_results['pdb_file'].apply(lambda x: x.split('_model1')[0] if len(x) > 4 else np.nan)
    psqs_results = psqs_results[pd.notnull(psqs_results.psqs_total)]
    return psqs_results
Parse a PSQS result file and returns a Pandas DataFrame of the results Args: psqs_results_file: Path to psqs results file Returns: Pandas DataFrame: Summary of PSQS results
codesearchnet
def _sorted_results(self, results_dicts): print('results dicts:', results_dicts) sorted_dict = sorted(results_dicts, key=(lambda k: k['start_time'])) results = [] for entry in sorted_dict: results.append(entry['dt']) return results
Sorts dicts of results based on log start_time.

        Sorts the results and returns an array with only the values,
        ordered by oldest start_time first.

        Args:
            results_dicts: List of result dicts

        Returns:
            List of only the 'dt' values, sorted oldest first.
codesearchnet
def fuse_awq_modules(model, quantization_config):
    """Optionally fuse AWQ modules (layernorm / MLP / attention) for faster inference.

    Args:
        model (`~PreTrainedModel`): model already converted to AWQ format.
        quantization_config (`Union[AwqConfig, dict]`): quantization
            configuration to use.

    Returns:
        The same model with eligible modules fused in place.
    """
    if isinstance(quantization_config, dict):
        quantization_config = AwqConfig.from_dict(quantization_config)
    backend = quantization_config.backend
    modules_to_fuse = get_modules_to_fuse(model, quantization_config)
    modules_to_not_convert = getattr(quantization_config, 'modules_to_not_convert', None)
    if backend == AwqBackendPackingMethod.AUTOAWQ:
        # Imported lazily so autoawq is only required when fusing is requested.
        from awq.modules.fused.attn import QuantAttentionFused
        from awq.modules.fused.mlp import QuantFusedMLP
        from awq.modules.fused.norm import FasterTransformerRMSNorm
    else:
        raise ValueError('Fusing is only supported for the AutoAWQ backend')
    fused_attention_modules = []
    for name, module in model.named_modules():
        if modules_to_not_convert is not None:
            # Skip any module explicitly excluded from conversion.
            if any((module_name_to_not_convert in name for module_name_to_not_convert in modules_to_not_convert)):
                continue
        _fuse_awq_layernorm(modules_to_fuse['layernorm'], module, FasterTransformerRMSNorm)
        if quantization_config.version != 'ipex':
            _fuse_awq_mlp(model, name, modules_to_fuse['mlp'], module, QuantFusedMLP)
        else:
            logger.info('The IPEX version AWQ does not support fuse mlp for now.')
        attention_has_been_fused = _fuse_awq_attention_layers(model, module, modules_to_fuse, name, QuantAttentionFused)
        if attention_has_been_fused:
            fused_attention_modules.append(name.split('.')[0])
    if len(fused_attention_modules) > 0:
        # Fused attention brings its own forward; mark configs so the standard
        # attention dispatch is bypassed.
        for module_name, module in model.named_modules():
            # NOTE(review): the generator below never uses its loop variable, so
            # this reduces to `module_name in fused_attention_modules` -- confirm
            # whether a parent/prefix check was intended instead.
            if any((module_name in fused_attention_modules for fused_attention_parent_module in fused_attention_modules)):
                if hasattr(module, 'config') and hasattr(module.config, '_attn_implementation'):
                    module.config._attn_implementation = 'custom'
    return model
Optionally fuse some modules in the model to speedup inference. Args: model (`~PreTrainedModel`): The model to fuse - note this model should have been converted into AWQ format beforehand. quantization_config (`Union[AwqConfig, dict]`): The quantization configuration to use.
github-repos
def dbmin_stddev(self, value=None):
    """Corresponds to IDD Field `dbmin_stddev`.

    Standard deviation of extreme annual minimum dry-bulb temperature.

    Args:
        value (float): value for IDD Field `dbmin_stddev` (unit: C).
            ``None`` is treated as a missing value and stored unchecked.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is None:
        self._dbmin_stddev = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `dbmin_stddev`'.format(value))
    self._dbmin_stddev = converted
Corresponds to IDD Field `dbmin_stddev` Standard deviation of extreme annual minimum dry-bulb temperature Args: value (float): value for IDD Field `dbmin_stddev` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def world_info(world_name, world_config=None, initial_indent='', next_indent=' '):
    """Print a summary of a world's configuration.

    Args:
        world_name (str): Name of the world to look up.
        world_config (dict, optional): Pre-fetched world configuration; when
            ``None`` the installed packages are scanned for a matching world.
        initial_indent (str, optional): Indent applied to every output line.
        next_indent (str, optional): Extra indent added per nesting level.

    Raises:
        HolodeckException: If no world named ``world_name`` can be found.
    """
    if world_config is None:
        # Scan every installed package; the last matching world wins,
        # mirroring the original lookup behavior.
        for config, _ in _iter_packages():
            for world in config['maps']:
                if world['name'] == world_name:
                    world_config = world

    if world_config is None:
        raise HolodeckException("Couldn't find world " + world_name)

    second_indent = initial_indent + next_indent
    agent_indent = second_indent + next_indent
    sensor_indent = agent_indent + next_indent

    print(initial_indent, world_config['name'])
    print(second_indent, 'Resolution:', world_config['window_width'], 'x', world_config['window_height'])
    print(second_indent, 'Agents:')
    for agent in world_config['agents']:
        print(agent_indent, 'Name:', agent['agent_name'])
        print(agent_indent, 'Type:', agent['agent_type'])
        print(agent_indent, 'Sensors:')
        for sensor in agent['sensors']:
            print(sensor_indent, sensor)
Gets and prints the information of a world. Args: world_name (str): the name of the world to retrieve information for world_config (dict, optional): A dictionary containing the world's configuration. Will find the config if None. Defaults to None. initial_indent (str, optional): This indent will apply to each output line. Defaults to "". next_indent (str, optional): This indent will be applied within each nested line. Defaults to " ".
codesearchnet
def get_tests_from_description(name, descriptions, parsed=None):
    """Recursively collect all tests referenced by a test description.

    Args:
        name (str): Yaml test description file name.
        descriptions (dict): Maps description names (keys) to absolute file
            paths (values).
        parsed (list): Description paths already processed; used to break
            include cycles and avoid re-parsing.

    Returns:
        list: The expanded test file names.

    Raises:
        IpaUtilsException: If ``name`` is not present in ``descriptions``.
    """
    if not parsed:
        parsed = []

    description = descriptions.get(name, None)
    if not description:
        raise IpaUtilsException(
            'Test description file with name: %s cannot be located.' % name
        )

    # A description seen before contributes nothing more; this also prevents
    # infinite recursion on cyclic includes.
    if description in parsed:
        return []
    parsed.append(description)

    test_data = get_yaml_config(description)
    collected = []
    if 'tests' in test_data:
        collected += test_data.get('tests')
    if 'include' in test_data:
        for include_name in test_data.get('include'):
            collected += get_tests_from_description(
                include_name, descriptions, parsed
            )
    return collected
Recursively collect all tests in test description. Args: name (str): Yaml test description file name. descriptions (dict): Dict of test description name (key) and absolute file paths (value). parsed (list): List of description paths which have already been parsed to prevent infinite recursion. Returns: A list of expanded test files.
juraj-google-style
def _path_formatter(self, suffix): if (suffix.lower() == 'mirror'): path_items = [self.bucket, self.s3path] else: path_items = [self.bucket, self.s3path, suffix] path = '/'.join(path_items) s3_format = 's3: formatted_path = path.replace(' full_path = s3_format.format(formatted_path) return full_path
Format the s3 path properly. Args: suffix (str): suffix to add on to an s3 path Returns: str: formatted path
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses an OLE Compound File (OLECF) file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.
    """
    olecf_file = pyolecf.file()
    # Use the mediator's codepage so string items decode consistently.
    olecf_file.set_ascii_codepage(parser_mediator.codepage)
    try:
        olecf_file.open_file_object(file_object)
    except IOError as exception:
        # An unreadable file is reported as a warning, not a hard failure.
        parser_mediator.ProduceExtractionWarning(
            'unable to open file with error: {0!s}'.format(exception))
        return
    root_item = olecf_file.root_item
    if not root_item:
        return
    # Plugins declare the set of item names they require; collect the names
    # once so each plugin can be matched with a cheap subset test.
    item_names = [item.name for item in root_item.sub_items]
    item_names = frozenset(item_names)
    try:
        for plugin in self._plugins:
            if parser_mediator.abort:
                break
            if not plugin.REQUIRED_ITEMS.issubset(item_names):
                continue
            try:
                plugin.UpdateChainAndProcess(parser_mediator, root_item=root_item)
            except Exception as exception:
                # A failing plugin must not prevent the remaining plugins
                # from running; report and continue.
                parser_mediator.ProduceExtractionWarning((
                    'plugin: {0:s} unable to parse OLECF file with error: '
                    '{1!s}').format(plugin.NAME, exception))
        # The default plugin runs last, over the same root item.
        if self._default_plugin and not parser_mediator.abort:
            try:
                self._default_plugin.UpdateChainAndProcess(
                    parser_mediator, root_item=root_item)
            except Exception as exception:
                parser_mediator.ProduceExtractionWarning((
                    'plugin: {0:s} unable to parse OLECF file with error: '
                    '{1!s}').format(self._default_plugin.NAME, exception))
    finally:
        # Always release the libolecf handle, even on abort or plugin error.
        olecf_file.close()
Parses an OLE Compound File (OLECF) file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object.
juraj-google-style
def get_structural_variant(self, variant):
    """Return the closest overlapping SV cluster, if any.

    Searches stored structural variants on chrom/end_chrom/sv_type where the
    cluster's left interval spans the variant's start position, then among
    hits whose right interval spans the variant's end, picks the one whose
    interval midpoints are closest to the variant's coordinates.

    Args:
        variant (dict): A variant dictionary.

    Returns:
        dict or None: The best matching cluster document, or None if no
        cluster overlaps.
    """
    query = {
        'chrom': variant['chrom'],
        'end_chrom': variant['end_chrom'],
        'sv_type': variant['sv_type'],
        '$and': [
            {'pos_left': {'$lte': variant['pos']}},
            {'pos_right': {'$gte': variant['pos']}},
        ]
    }

    cursor = self.db.structural_variant.find(query).sort('pos_left', 1)

    best = None
    best_distance = None
    for candidate in cursor:
        # The candidate's right interval must span the variant's end.
        if not (candidate['end_left'] <= variant['end'] <= candidate['end_right']):
            continue
        # Sum of distances between the variant breakpoints and the
        # candidate's interval midpoints.
        left_mid = (candidate['pos_left'] + candidate['pos_right']) / 2
        right_mid = (candidate['end_left'] + candidate['end_right']) / 2
        candidate_distance = (abs(variant['pos'] - left_mid) +
                              abs(variant['end'] - right_mid))
        # Strict comparison: on a tie the earlier (lower pos_left) hit wins.
        if best_distance is None or candidate_distance < best_distance:
            best = candidate
            best_distance = candidate_distance
    return best
Check if there are any overlapping sv clusters Search the sv variants with chrom start end_chrom end and sv_type Args: variant (dict): A variant dictionary Returns: variant (dict): A variant dictionary
juraj-google-style
def make_transaction(self):
    """Create the transaction for this RecurredCost.

    May only be used to create the RecurredCost's initial transaction.

    Returns:
        Transaction: The created transaction, also assigned to
        self.transaction. None if the amount is zero.

    Raises:
        CannotRecreateTransactionOnRecurredCost: If this RecurredCost has
            already been saved (and therefore already has its transaction).
    """
    # A saved instance (pk set) already has its transaction; refuse to redo it.
    if self.pk:
        raise CannotRecreateTransactionOnRecurredCost('The transaction for this recurred cost has already been created. You cannot create it again.')
    amount = self.recurring_cost.get_amount(self.billing_cycle)
    # A zero/empty amount produces no transaction at all.
    if (not amount):
        return None
    self.transaction = Transaction.objects.create(description='Created by recurring cost', date=self.billing_cycle.date_range.lower)
    # Divide the total amount across the configured splits.
    splits = self.recurring_cost.splits.all().split(amount)
    # Credit leg: the full amount into the recurring cost's destination account.
    self.transaction.legs.add(Leg.objects.create(transaction=self.transaction, amount=Money(amount, self.recurring_cost.currency), account=self.recurring_cost.to_account))
    # Debit legs: one negated leg per non-zero split, out of each source account.
    for (split, split_amount) in splits:
        if split_amount:
            self.transaction.legs.add(Leg.objects.create(transaction=self.transaction, amount=Money((split_amount * (- 1)), self.recurring_cost.currency), account=split.from_account))
    return self.transaction
Create the transaction for this RecurredCost May only be used to create the RecurredCost's initial transaction. Returns: Transaction: The created transaction, also assigned to self.transaction. None if the amount is zero.
codesearchnet
def metadata_path(self, m_path):
    """Set pointers to the metadata file's directory and name.

    Clearing the path (falsy ``m_path``) resets both pointers; otherwise the
    path is validated, split into directory/file parts, and the metadata file
    is parsed and merged into this object.

    Args:
        m_path: Path to metadata file, or a falsy value to unset it.

    Raises:
        OSError: If ``m_path`` is given but does not exist.
    """
    if not m_path:
        self.metadata_dir = None
        self.metadata_file = None
        return

    if not op.exists(m_path):
        raise OSError('{}: file does not exist!'.format(m_path))

    # An empty dirname means the file lives in the current directory.
    self.metadata_dir = op.dirname(m_path) or '.'
    self.metadata_file = op.basename(m_path)

    # Re-read the metadata through the metadata_path property and merge it in.
    self.update(parse_kegg_gene_metadata(self.metadata_path), overwrite=True)
Provide pointers to the paths of the metadata file Args: m_path: Path to metadata file
juraj-google-style
def parse_GSE(filepath):
    """Parse GSE SOFT file.

    Args:
        filepath (:obj:`str`): Path to GSE SOFT file.

    Returns:
        :obj:`GEOparse.GSE`: A GSE object.

    Raises:
        Exception: If the file contains more than one SERIES entry.
    """
    gpls = {}
    gsms = {}
    series_counter = 0
    database = None
    metadata = {}
    gse_name = None
    with utils.smart_open(filepath) as soft:
        # Alternate between "entry header" groups (lines starting with '^')
        # and the data groups that follow them. groupby yields groups lazily
        # and in order, so each header group must be consumed before
        # advancing the outer iterator to its data group.
        groupper = groupby(soft, (lambda x: x.startswith('^')))
        for (is_new_entry, group) in groupper:
            if is_new_entry:
                (entry_type, entry_name) = __parse_entry(next(group))
                logger.debug(('%s: %s' % (entry_type.upper(), entry_name)))
                if (entry_type == 'SERIES'):
                    gse_name = entry_name
                    series_counter += 1
                    if (series_counter > 1):
                        raise Exception('GSE file should contain only one series entry!')
                    # Pull the data group that follows this header.
                    (is_data, data_group) = next(groupper)
                    message = 'The key is not False, probably there is an error in the SOFT file'
                    assert (not is_data), message
                    metadata = parse_metadata(data_group)
                elif (entry_type == 'SAMPLE'):
                    (is_data, data_group) = next(groupper)
                    gsms[entry_name] = parse_GSM(data_group, entry_name)
                elif (entry_type == 'PLATFORM'):
                    (is_data, data_group) = next(groupper)
                    gpls[entry_name] = parse_GPL(data_group, entry_name)
                elif (entry_type == 'DATABASE'):
                    (is_data, data_group) = next(groupper)
                    database_metadata = parse_metadata(data_group)
                    database = GEODatabase(name=entry_name, metadata=database_metadata)
                else:
                    # Unknown entry types are logged and skipped.
                    logger.error(('Cannot recognize type %s' % entry_type))
    gse = GSE(name=gse_name, metadata=metadata, gpls=gpls, gsms=gsms, database=database)
    return gse
Parse GSE SOFT file. Args: filepath (:obj:`str`): Path to GSE SOFT file. Returns: :obj:`GEOparse.GSE`: A GSE object.
codesearchnet
def _cell_magic(line, query):
    """Underlying function for the 'bigquery' cell magic.

    Note:
        This function contains the underlying logic for the 'bigquery' cell
        magic and is not meant to be called directly.

    Args:
        line (str): "%%bigquery" followed by arguments as required.
        query (str): SQL query to run.

    Returns:
        pandas.DataFrame: The query results, unless ``--destination_var`` was
        given, in which case the result is pushed into the IPython namespace
        and nothing is returned.
    """
    magic_args = magic_arguments.parse_argstring(_cell_magic, line)

    query_params = []
    if magic_args.params is not None:
        # --params arrives as a list of string fragments; evaluate the joined
        # literal and convert it into BigQuery query parameters.
        try:
            query_params = _helpers.to_query_parameters(
                ast.literal_eval("".join(magic_args.params))
            )
        except Exception:
            raise SyntaxError(
                "--params is not a correctly formatted JSON string or a JSON "
                "serializable dictionary"
            )

    project_id = magic_args.project or context.project
    bq_client = bigquery.Client(project=project_id, credentials=context.credentials)
    storage_client = _make_bqstorage_client(
        magic_args.use_bqstorage_api or context.use_bqstorage_api, context.credentials
    )

    config = bigquery.job.QueryJobConfig()
    config.query_parameters = query_params
    config.use_legacy_sql = magic_args.use_legacy_sql
    job = _run_query(bq_client, query, config)

    if not magic_args.verbose:
        # Hide the intermediate query-progress output once the job is done.
        display.clear_output()

    dataframe = job.to_dataframe(bqstorage_client=storage_client)
    if magic_args.destination_var:
        IPython.get_ipython().push({magic_args.destination_var: dataframe})
        return None
    return dataframe
Underlying function for bigquery cell magic Note: This function contains the underlying logic for the 'bigquery' cell magic. This function is not meant to be called directly. Args: line (str): "%%bigquery" followed by arguments as required query (str): SQL query to run Returns: pandas.DataFrame: the query results.
juraj-google-style
def overlay(self, dimensions=None, **kwargs):
    """Group by the remaining dimensions and overlay along the supplied ones.

    Groups data by the key dimensions not listed in ``dimensions``,
    overlaying the groups along the supplied dimension(s).

    Args:
        dimensions: Dimension(s) to overlay along; defaults to all kdims.

    Returns:
        NdOverlay object(s) with the supplied dimensions.
    """
    if dimensions is None:
        overlay_dims = self.kdims
    else:
        if not isinstance(dimensions, (list, tuple)):
            dimensions = [dimensions]
        # Resolve names/objects to actual Dimension instances, erroring on
        # anything unknown (strict=True).
        overlay_dims = [self.get_dimension(d, strict=True) for d in dimensions]
    remaining = [kdim for kdim in self.kdims if kdim not in overlay_dims]
    return self.groupby(remaining, group_type=NdOverlay)
Group by supplied dimension(s) and overlay each group Groups data by supplied dimension(s) overlaying the groups along the dimension(s). Args: dimensions: Dimension(s) of dimensions to group by Returns: NdOverlay object(s) with supplied dimensions
juraj-google-style
def gradients(loss, variables):
    """Return the gradients of `loss` w.r.t. `variables`.

    Args:
        loss: Scalar tensor to minimize.
        variables: List of variables.

    Returns:
        A gradients tensor.
    """
    # Co-locating gradient ops with their forward ops keeps device
    # placement consistent.
    grads = gradients_module.gradients(
        loss, variables, colocate_gradients_with_ops=True)
    return grads
Returns the gradients of `loss` w.r.t. `variables`. Args: loss: Scalar tensor to minimize. variables: List of variables. Returns: A gradients tensor.
github-repos
def has_entities(status):
    """Return True if a Status object carries at least one entity.

    Args:
        status: Either a tweepy.Status object or a dict returned from the
            Twitter API.

    Returns:
        bool: True when any entity list is non-empty, False otherwise.
    """
    # Attribute access works for tweepy.Status; a plain API dict raises
    # AttributeError and is handled via item access instead.
    try:
        if sum(len(values) for values in status.entities.values()) > 0:
            return True
    except AttributeError:
        if sum(len(values) for values in status['entities'].values()) > 0:
            return True
    return False
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
codesearchnet
def quarter_boundaries(quarter):
    """Return the first and last day of a quarter.

    Args:
        quarter (str): Quarter in the format '2015Q1'.

    Returns:
        tuple: ``(first_day, last_day)`` as ``datetime.date`` objects.
    """
    year_str, quarter_str = quarter.split('Q')
    year = int(year_str)
    quarter_num = int(quarter_str)

    start_month = 3 * quarter_num - 2
    end_month = 3 * quarter_num

    first_day = date(year, start_month, 1)
    # monthrange yields (weekday_of_first_day, number_of_days_in_month).
    last_day = date(year, end_month, monthrange(year, end_month)[1])
    return first_day, last_day
Returns first and last day of a quarter Args: quarter (str) quarter, in format '2015Q1' Returns: (tuple) datetime.dates for the first and last days of the quarter
codesearchnet
def gen_ref_docs(gen_index=False):
    """Generate reference documentation for the project.

    Uses **sphinx-refdoc** to generate the source .rst files for the
    reference documentation.

    Args:
        gen_index (bool): Set to **True** to also generate an index file
            listing the top-level packages. Off by default since most
            projects have a single package, and sphinx would complain about
            a file not included in any toctree.
    """
    try:
        from refdoc import generate_docs
    except ImportError as ex:
        # sphinx-refdoc is an optional dependency; fail with a clear message.
        msg = ('You need to install sphinx-refdoc if you want to '
               'generate code reference docs.')
        print(msg, file=sys.stderr)
        log.err('Exception: {}'.format(ex))
        sys.exit(-1)

    pretend = context.get('pretend', False)
    docs_dir = conf.get_path('docs.path', 'docs')
    docs_ref_dir = os.path.join(docs_dir, 'ref')
    refdoc_paths = conf.get('docs.reference', [])

    # Stale reference docs are wiped before regeneration.
    if os.path.exists(docs_ref_dir):
        if pretend:
            log.info('Would remove old reference docs')
        else:
            log.info('Removing existing reference docs')
            shutil.rmtree(docs_ref_dir)

    args = {'out_dir': docs_ref_dir, 'verbose': context.get('verbose', 0)}
    if gen_index:
        args['gen_index'] = True

    pkg_paths = [conf.proj_path(p) for p in refdoc_paths]
    if pretend:
        log.info('Would generate reference docs with the following params')
        shell.cprint('<90>{}', util.yaml_dump(args).rstrip())
        shell.cprint('<90>paths:\n<34>{}', util.yaml_dump(pkg_paths).rstrip())
    else:
        log.info('Generating reference documentation')
        generate_docs(pkg_paths, **args)
Generate reference documentation for the project. This will use **sphinx-refdoc** to generate the source .rst files for the reference documentation. Args: gen_index (bool): Set it to **True** if you want to generate the index file with the list of top-level packages. This is set to default as in most cases you only have one package per project so you can link directly to that package reference (and if index were generated sphinx would complain about file not included in toctree).
codesearchnet
def DeviceReadThread(hid_device):
    """Binds a device to this thread's run loop, then starts the run loop.

    The HID manager requires a run loop to handle Report reads; this thread
    function serves that purpose.

    Args:
        hid_device: The MacOsHidDevice object.
    """
    # Capture the run loop of *this* thread so other threads can stop it later.
    hid_device.run_loop_ref = cf.CFRunLoopGetCurrent()
    if (not hid_device.run_loop_ref):
        logger.error('Failed to get current run loop')
        return
    # Schedule the device's input reports onto this thread's run loop.
    iokit.IOHIDDeviceScheduleWithRunLoop(hid_device.device_handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE)
    # Keep pumping the loop while it merely timed out or handled a source;
    # any other exit code (e.g. "stopped") ends the thread.
    run_loop_run_result = K_CF_RUN_LOOP_RUN_TIMED_OUT
    while ((run_loop_run_result == K_CF_RUN_LOOP_RUN_TIMED_OUT) or (run_loop_run_result == K_CF_RUN_LOOP_RUN_HANDLED_SOURCE)):
        run_loop_run_result = cf.CFRunLoopRunInMode(K_CF_RUNLOOP_DEFAULT_MODE, 1000, False)
    if (run_loop_run_result != K_CF_RUN_LOOP_RUN_STOPPED):
        logger.error('Unexpected run loop exit code: %d', run_loop_run_result)
    # Unschedule before exiting so the device no longer targets a dead loop.
    iokit.IOHIDDeviceUnscheduleFromRunLoop(hid_device.device_handle, hid_device.run_loop_ref, K_CF_RUNLOOP_DEFAULT_MODE)
Binds a device to the thread's run loop, then starts the run loop. Args: hid_device: The MacOsHidDevice object The HID manager requires a run loop to handle Report reads. This thread function serves that purpose.
codesearchnet
def read_video_torchvision(video_path: str, sample_indices_fn: Callable, **kwargs):
    """Decode a video with the torchvision backend.

    Args:
        video_path (`str`): Path to the video file.
        sample_indices_fn (`Callable`): Callable returning the frame indices
            at which the video should be sampled; it receives the decoded
            video's metadata plus any extra keyword arguments.

    Returns:
        Tuple[`np.array`, `VideoMetadata`]: The sampled frames in THWC layout
        as a numpy array, and the populated metadata object.
    """
    frames, _, info = torchvision_io.read_video(
        video_path,
        start_pts=0.0,
        end_pts=None,
        pts_unit='sec',
        output_format='THWC',
    )
    fps = info['video_fps']
    n_frames = frames.size(0)
    # Guard against a zero/None fps reported by the container.
    duration = n_frames / fps if fps else 0

    metadata = VideoMetadata(
        total_num_frames=int(n_frames),
        fps=float(fps),
        duration=float(duration),
        video_backend='torchvision',
    )
    indices = sample_indices_fn(metadata=metadata, **kwargs)
    metadata.frames_indices = indices
    sampled = frames[indices].contiguous().numpy()
    return sampled, metadata
Decode the video with torchvision decoder. Args: video_path (`str`): Path to the video file. sample_indices_fn (`Callable`, *optional*): A callable function that will return indices at which the video should be sampled. If the video has to be loaded using by a different sampling technique than provided by `num_frames` or `fps` arguments, one should provide their own `sample_indices_fn`. If not provided, simple uniform sampling with fps is performed. Example: def sample_indices_fn(metadata, **kwargs): return np.linspace(0, metadata.total_num_frames - 1, num_frames, dtype=int) Returns: Tuple[`np.array`, `VideoMetadata`]: A tuple containing: - Numpy array of frames in RGB (shape: [num_frames, height, width, 3]). - `VideoMetadata` object.
github-repos
def view_as(self, cls: Type[PipelineOptionsT]) -> PipelineOptionsT:
    """Returns a view of the current object as the given PipelineOptions subclass.

    Example Usage::

        options = PipelineOptions(['--runner', 'Direct', '--streaming'])
        standard_options = options.view_as(StandardOptions)
        if standard_options.streaming:
            # ... start a streaming job ...

    Note that options objects may have multiple views, and modifications
    of values in any view-object will apply to the current object and all
    other view-objects.

    Args:
        cls: PipelineOptions class or any of its subclasses.

    Returns:
        An instance of cls that is initialized using options contained in
        the current object.
    """
    view = cls(self._flags)
    for option_name in view._visible_option_list():
        # Seed the shared option dict with the view's defaults for any
        # option not already known, so later views see consistent values.
        if option_name not in self._all_options:
            self._all_options[option_name] = getattr(view._visible_options, option_name)
    # Share a single underlying dict: writes through any view are visible
    # to this object and every other view created from it.
    view._all_options = self._all_options
    return view
Returns a view of current object as provided PipelineOption subclass. Example Usage:: options = PipelineOptions(['--runner', 'Direct', '--streaming']) standard_options = options.view_as(StandardOptions) if standard_options.streaming: # ... start a streaming job ... Note that options objects may have multiple views, and modifications of values in any view-object will apply to current object and other view-objects. Args: cls: PipelineOptions class or any of its subclasses. Returns: An instance of cls that is initialized using options contained in current object.
github-repos
def find_subclass_in_module(base_class, module):
    """Find the single subclass of ``base_class`` defined in ``module``.

    Args:
        base_class: class, the base class to look for a subclass of.
        module: module, the module to search.

    Returns:
        The single subclass of the given base class.

    Raises:
        ValueError: If the number of subclasses found is not exactly one.
    """
    matches = find_subclasses_in_module([base_class], module)
    if len(matches) == 1:
        return matches[0]
    found_names = [match.__name__ for match in matches]
    raise ValueError('Expected 1 subclass of %s per module, found %s.'
                     % (base_class.__name__, found_names))
Finds the single subclass of the given base class in the given module. Args: base_class: class, the base class to look for a subclass of in the module. module: module, the module to look for the single subclass in. Returns: The single subclass of the given base class. Raises: ValueError: If the number of subclasses found was not exactly one.
github-repos
def _GetArgSpecInfo(fn): skip_arg = False if inspect.isclass(fn): skip_arg = True elif inspect.ismethod(fn): skip_arg = fn.__self__ is not None elif inspect.isbuiltin(fn): if not isinstance(fn.__self__, types.ModuleType): skip_arg = True elif not inspect.isfunction(fn): skip_arg = True return (fn, skip_arg)
Gives information pertaining to computing the ArgSpec of fn. Determines if the first arg is supplied automatically when fn is called. This arg will be supplied automatically if fn is a bound method or a class with an __init__ method. Also returns the function who's ArgSpec should be used for determining the calling parameters for fn. This may be different from fn itself if fn is a class with an __init__ method. Args: fn: The function or class of interest. Returns: A tuple with the following two items: fn: The function to use for determining the arg spec of this function. skip_arg: Whether the first argument will be supplied automatically, and hence should be skipped when supplying args from a Fire command.
github-repos
def _load_callwraps(packname, package):
    """Load the call-wrapping settings for functions in the given package.

    Call wrapping lets the result of the original method call be cast to a
    different type, or passed to a different constructor, before the wrapped
    function returns.

    Args:
        packname (str): Name of the package to read config settings for.
        package: The actual package object.
    """
    global _callwraps
    from acorn.config import settings
    from acorn.logging.descriptors import _obj_getattr

    spack = settings(packname)
    if spack is None or not spack.has_section("callwrap"):
        return

    # Map each fully-qualified name to the callable resolved on the package.
    for fqdn, target in dict(spack.items("callwrap")).items():
        _callwraps[fqdn] = _obj_getattr(package, target)
Loads the special call wrapping settings for functions in the specified package. This allows the result of the original method call to be cast as a different type, or passed to a different constructor before returning from the wrapped function. Args: packname (str): name of the package to get config settings for. package: actual package object.
juraj-google-style