code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __fetch_route53_zones(self):
    """Return all DNS zones hosted in Route53 for this account.

    Pages through ``list_hosted_zones`` using the marker-based pagination
    protocol until ``IsTruncated`` is false.

    Returns:
        dict: mapping of resource id (``get_resource_id('r53z', ...)``) to a
        dict of zone metadata (name, source, comment, zone_id, private_zone,
        tags).
    """
    done = False
    marker = None
    zones = {}
    route53 = self.session.client('route53')
    try:
        while (not done):
            if marker:
                response = route53.list_hosted_zones(Marker=marker)
            else:
                response = route53.list_hosted_zones()
            if response['IsTruncated']:
                marker = response['NextMarker']
            else:
                done = True
            for zone_data in response['HostedZones']:
                zones[get_resource_id('r53z', zone_data['Id'])] = {
                    # Trailing dot on zone names is stripped for display.
                    'name': zone_data['Name'].rstrip('.'),
                    'source': 'AWS/{}'.format(self.account),
                    # 'Comment' is optional in the hosted-zone Config.
                    'comment': (zone_data['Config']['Comment'] if ('Comment' in zone_data['Config']) else None),
                    'zone_id': zone_data['Id'],
                    'private_zone': zone_data['Config']['PrivateZone'],
                    'tags': self.__fetch_route53_zone_tags(zone_data['Id'])
                }
        return zones
    finally:
        # Drop the boto3 client reference once finished.
        del route53
Return a list of all DNS zones hosted in Route53 Returns: :obj:`list` of `dict`
codesearchnet
def prepare_partition_index(config: Config, chunk_size: t.Optional[int]=None) -> t.Iterator[t.Tuple[Config, t.List[Index]]]:
    """Produce indexes over client parameters, partitioning over `partition_keys`.

    This produces a Cartesian cross over the ranges of the partition keys,
    emitted in chunks of `chunk_size` index tuples.

    Args:
        config: Run configuration; `selection` and `partition_keys` drive the
            partitioning.
        chunk_size: Number of index tuples per yielded chunk (default 1000).

    Yields:
        Tuples of (config, list of index tuples).
    """
    dims = [range(len(config.selection[key])) for key in config.partition_keys]
    n_partitions = math.prod([len(d) for d in dims])
    logger.info(f'Creating {n_partitions} partitions.')
    if chunk_size is None:
        chunk_size = 1000
    # Lazily chunk the Cartesian product so huge partition spaces are never
    # fully materialized.
    for option_idx in ichunked(itertools.product(*dims), chunk_size):
        yield (config, list(option_idx))
Produce indexes over client parameters, partitioning over `partition_keys` This produces a Cartesian-Cross over the range of keys. For example, if the keys were 'year' and 'month', it would produce an iterable like: ( (0, 0), (0, 1), (0, 2), ...) After the indexes were converted back to keys, it would produce values like: ( ('2020', '01'), ('2020', '02'), ('2020', '03'), ...) Returns: An iterator of index tuples.
github-repos
def to(self, jid: str):
    """Set the jid of the receiver.

    Args:
        jid (str): the jid of the receiver, or None to clear it.

    Raises:
        TypeError: if jid is neither None nor a string.
    """
    if jid is None:
        self._to = None
        return
    if not isinstance(jid, str):
        raise TypeError("'to' MUST be a string")
    self._to = aioxmpp.JID.fromstr(jid)
Set jid of the receiver. Args: jid (str): the jid of the receiver.
juraj-google-style
def parameterized_truncated_normal(shape, means=0.0, stddevs=1.0, minvals=-2.0, maxvals=2.0, dtype=dtypes.float32, seed=None, name=None):
    """Outputs random values from a truncated normal distribution.

    Values are drawn from a normal distribution with the given means/stddevs,
    truncated to [minvals, maxvals].

    Args:
        shape: A 1-D integer Tensor or Python array; shape of the output.
        means: A 0-D Tensor or Python value of type `dtype`; the mean.
        stddevs: A 0-D Tensor or Python value of type `dtype`; the stddev.
        minvals: A 0-D Tensor or Python value of type `dtype`; lower bound.
        maxvals: A 0-D Tensor or Python value of type `dtype`; upper bound.
        dtype: The type of the output.
        seed: A Python integer used to seed the distribution.
        name: A name for the operation (optional).

    Returns:
        A tensor of the specified shape filled with truncated normal values.
    """
    with ops.name_scope(name, 'parameterized_truncated_normal', [shape, means, stddevs, minvals, maxvals]) as name:
        shape_tensor = shape_util.shape_tensor(shape)
        means_tensor = ops.convert_to_tensor(means, dtype=dtype, name='means')
        stddevs_tensor = ops.convert_to_tensor(stddevs, dtype=dtype, name='stddevs')
        minvals_tensor = ops.convert_to_tensor(minvals, dtype=dtype, name='minvals')
        maxvals_tensor = ops.convert_to_tensor(maxvals, dtype=dtype, name='maxvals')
        # Split the user seed into the two op-level seeds the kernel expects.
        seed1, seed2 = random_seed.get_seed(seed)
        rnd = gen_random_ops.parameterized_truncated_normal(shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed=seed1, seed2=seed2)
        # Propagate any statically known shape onto the result tensor.
        shape_util.maybe_set_static_shape(rnd, shape)
        return rnd
Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. means: A 0-D Tensor or Python value of type `dtype`. The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type `dtype`. The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type `dtype`. The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type `dtype`. The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See `tf.random.set_seed` for behavior. name: A name for the operation (optional). Returns: A tensor of the specified shape filled with random truncated normal values.
github-repos
def van_enc_2d(x, first_depth, reuse=False):
    """The higher level structure encoder for the VAN.

    The high level structure is a vector instead of an image.

    Args:
        x: The higher level structure to encode.
        first_depth: The depth of the first layer; depth grows in later layers.
        reuse: Whether to reuse the variable scope.

    Returns:
        Tuple of (encoded tensor, concatenated higher-level feature vector).
    """
    with tf.variable_scope('van_enc', reuse=reuse):
        # Fixed 4x4 spatial grid that the dense projection is reshaped into.
        a = 4
        b = 4
        enc = tf.nn.relu(x)
        enc = tf.layers.dense(enc, ((first_depth * a) * b), tf.nn.relu)
        enc = tf.contrib.layers.layer_norm(enc)
        enc = tf.reshape(enc, [(- 1), a, b, first_depth])
        enc = tf.layers.conv2d_transpose(enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1)
        enc = tf.contrib.layers.layer_norm(enc)
        # Upsample 2x and widen; keep a flattened snapshot for skip use.
        enc = tf.layers.conv2d_transpose(enc, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=2)
        van_higher_level_2 = tf.reshape(enc, [(- 1), (((((a * 2) * b) * 2) * first_depth) * 2)])
        enc = tf.layers.conv2d_transpose(enc, (first_depth * 2), 3, padding='same', activation=tf.nn.relu, strides=1)
        enc = tf.contrib.layers.layer_norm(enc)
        enc = tf.layers.conv2d_transpose(enc, (first_depth * 4), 3, padding='same', activation=tf.nn.relu, strides=1)
        van_higher_level_4 = tf.reshape(enc, [(- 1), (((((a * 2) * b) * 2) * first_depth) * 4)])
        # Concatenate the input with both intermediate flattened features.
        van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1)
        return (enc, van_higher_level)
The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image.
codesearchnet
def same_dynamic_shape(a, b):
    """Returns whether a and b have the same dynamic shape.

    Args:
        a: `Tensor`
        b: `Tensor`

    Returns:
        `bool` `Tensor` that is True iff both tensors have the same shape.
    """
    a = ops.convert_to_tensor(a, name='a')
    b = ops.convert_to_tensor(b, name='b')

    def all_shapes_equal():
        # Comparing concat([shape(a), shape(b)]) with concat([shape(b),
        # shape(a)]) works because the ranks are already known to be equal
        # when this branch executes.
        return math_ops.reduce_all(math_ops.equal(array_ops.concat([array_ops.shape(a), array_ops.shape(b)], 0), array_ops.concat([array_ops.shape(b), array_ops.shape(a)], 0)))
    # Only compare element-wise shapes when the ranks agree; otherwise the
    # concat comparison would be ill-formed.
    return tf_cond.cond(math_ops.equal(array_ops.rank(a), array_ops.rank(b)), all_shapes_equal, lambda: constant_op.constant(False))
Returns whether a and b have the same dynamic shape. Args: a: `Tensor` b: `Tensor` Returns: `bool` `Tensor` representing if both tensors have the same shape.
github-repos
def __init__(self, path):
    """Constructor.

    Args:
        path (str): Top level directory to search for template files; the
            path must exist and must be a directory.

    Raises:
        NotADirectoryError: If the resolved path is not a directory (this
            also covers a non-existent path, since ``is_dir`` is False then).
    """
    self.path = Path(path).resolve()
    if self.path.is_dir():
        return
    log.warning("path given to render engine is not a directory")
    raise NotADirectoryError("path '%s' is not a directory" % path)
Constructor Args: path (str): Top level directory to search for template files - the path must exist and the path must be a directory. Raises: FileNotFoundError: If the provided path does not exists. NotADirectoryError: If the path is not a directory.
juraj-google-style
def transform_op_tree(root: OP_TREE, op_transformation: Callable[([Operation], OP_TREE)]=(lambda e: e), iter_transformation: Callable[([Iterable[OP_TREE]], OP_TREE)]=(lambda e: e), preserve_moments: bool=False) -> OP_TREE:
    """Maps transformation functions onto the nodes of an OP_TREE.

    Args:
        root: The operation or tree of operations to transform.
        op_transformation: How to transform the operations (i.e. leaves).
        iter_transformation: How to transform the iterables (i.e. internal
            nodes).
        preserve_moments: Whether to leave Moments alone. If True, the
            transformation functions are not applied to Moments or the
            operations within them.

    Returns:
        A transformed operation tree.

    Raises:
        TypeError: root isn't a valid OP_TREE.
    """
    if isinstance(root, Operation):
        return op_transformation(root)
    if (preserve_moments and isinstance(root, Moment)):
        return root
    # Fix: `collections.Iterable` was a deprecated alias removed in Python
    # 3.10; the ABC lives in `collections.abc`.
    if isinstance(root, collections.abc.Iterable):
        return iter_transformation((transform_op_tree(subtree, op_transformation, iter_transformation, preserve_moments) for subtree in root))
    raise TypeError('Not a collections.Iterable or an Operation: {}'.format(root))
Maps transformation functions onto the nodes of an OP_TREE. Args: root: The operation or tree of operations to transform. op_transformation: How to transform the operations (i.e. leaves). iter_transformation: How to transform the iterables (i.e. internal nodes). preserve_moments: Whether to leave Moments alone. If True, the transformation functions will not be applied to Moments or the operations within them. Returns: A transformed operation tree. Raises: TypeError: root isn't a valid OP_TREE.
codesearchnet
def GetTestConfigs():
    """Get all the valid test configs to run.

    Returns:
        All the valid test configs (data-format strings).
    """
    return ['NHWC', 'NCHW']
Get all the valid tests configs to run. Returns: all the valid test configs
github-repos
def _assert_tensorlike_all_close(self, sess: session.Session, tensorlike_value_1: core.TensorLike, tensorlike_value_2: core.TensorLike) -> None:
    """Asserts that two different TensorLike values are "all close".

    Symbolic tensors are first evaluated in `sess`; plain array-likes are
    compared as-is.

    Args:
        sess: Session instance used to evaluate any tf.Tensors.
        tensorlike_value_1: A TensorLike value.
        tensorlike_value_2: A TensorLike value.
    """
    if isinstance(tensorlike_value_1, core.Tensor):
        tensorlike_value_1 = tensorlike_value_1.eval(session=sess)
    if isinstance(tensorlike_value_2, core.Tensor):
        tensorlike_value_2 = tensorlike_value_2.eval(session=sess)
    self.assertAllClose(tensorlike_value_1, tensorlike_value_2)
Asserts that two different TensorLike values are "all close". Args: sess: Session instance used to evaluate any tf.Tensors. tensorlike_value_1: A TensorLike value. tensorlike_value_2: A TensorLike value.
github-repos
def get_method_returning_field_value(self, field_name):
    """Return the object method usable to get the field value.

    Args:
        field_name: name of the field.

    Returns:
        The callable attribute of that name, or None when it is missing,
        falsy, or not callable.
    """
    candidate = getattr(self, field_name, None)
    if candidate and callable(candidate):
        return candidate
    return None
Method should return object method that can be used to get field value. Args: field_name: name of the field Returns: method for obtaining a field value
juraj-google-style
def resource_input_index(tensor_name, input_names, node_defs, functions):
    """Returns the index of the input corresponding to `tensor_name`.

    Traces an arbitrary resource tensor in a function back to a function
    input, assuming resource handles are never created inside functions.
    Works on FunctionDef-level structures so it can recurse into function
    call ops without building a FuncGraph.

    Args:
        tensor_name: the name of the resource tensor to resolve to an input.
        input_names: a list of the names of all inputs to the function.
        node_defs: a dict mapping op name -> NodeDef for every op in the
            function.
        functions: a dict mapping function name -> AtomicFunction.

    Returns:
        The index into input_names corresponding to `tensor_name`.
    """
    while tensor_name not in input_names:
        # Tensor names look like "op", "op:idx" or "op:arg:idx".
        parts = tensor_name.split(':')
        if len(parts) == 3:
            op_name, _, output_idx = parts
        elif len(parts) == 2:
            op_name, output_idx = parts
        else:
            assert len(parts) == 1
            op_name = parts[0]
            output_idx = 0
            # Normalize to "op:0" in case the input list uses that form.
            tensor_name = '%s:%d' % (tensor_name, output_idx)
            if tensor_name in input_names:
                break
        output_idx = int(output_idx)
        node_def = node_defs[op_name]

        def _extract_input_index(function_attribute_name):
            # Follow the tensor through the called function: map the output
            # index back to that function's input index, recursively.
            func_name = node_def.attr[function_attribute_name].func.name
            fdef = functions[func_name].cached_definition
            output_arg_name = fdef.signature.output_arg[output_idx].name
            output_tensor_name = fdef.ret[output_arg_name]
            return resource_input_index(output_tensor_name, [arg.name for arg in fdef.signature.input_arg], {ndef.name: ndef for ndef in fdef.node_def}, functions)
        if node_def.op in ('Identity', 'While'):
            # These ops pass resources through positionally.
            tensor_name = node_def.input[output_idx]
        elif node_def.op in ('PartitionedCall', 'StatefulPartitionedCall'):
            tensor_name = node_def.input[_extract_input_index('f')]
        elif node_def.op in ('If', 'StatelessIf'):
            input_index = _extract_input_index('then_branch')
            if input_index != _extract_input_index('else_branch'):
                raise AssertionError('Expected cond branches ({} op) to each have the same input->output mapping of resources.'.format(node_def.op))
            # Offset by 1 to skip the If op's condition input.
            tensor_name = node_def.input[input_index + 1]
        else:
            raise ValueError('Taking gradient of a while loop which creates a resource in its body is not supported: %s (%s)' % (op_name, node_def.op))
    return input_names.index(tensor_name)
Returns the index of the input corresponding to `tensor_name`. This method is used to find the corresponding index of an arbitrary resource tensor in a function (the function could be a loop body). We assume that resource handles are never created in functions, so that every resource tensor can be traced back to a function input. The awkward signature of this method is to make it work with both FuncGraphs and FunctionDefs. This is so we can recurse on function call ops without building the corresponding FuncGraph (note that even if a FuncGraph for a FunctionDef already exists, the input/output/node names may have been changed when the FuncGraph was serialized to the FunctionDef, which makes it unusable with this algorithm). Args: tensor_name: the name of the resource tensor to be resolved to an input. input_names: a list of the names of all inputs to the function. node_defs: a dict mapping op name -> NodeDef for every op in the function. functions: a dict mapping function name -> AtomicFunction. Returns: The index into input_names corresponding to `tensor_name`.
github-repos
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """Register a new handler for a specific action `callback_id`.

    Optional routing based on the action name too; useful for actions of
    type `interactive_message` to provide a different handler per action.

    Args:
        callback_id: Callback_id the handler is interested in.
        handler: Callback.
        name: Name of the action (optional, defaults to the wildcard "*").
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    handlers = self._routes[callback_id].setdefault(name, [])
    handlers.append(handler)
Register a new handler for a specific :class:`slack.actions.Action` `callback_id`. Optional routing based on the action name too. The name argument is useful for actions of type `interactive_message` to provide a different handler for each individual action. Args: callback_id: Callback_id the handler is interested in handler: Callback name: Name of the action (optional).
juraj-google-style
def get_node_angle(self, node):
    """Get the angle between 2 nodes relative to the horizon.

    Args:
        node (object): The other node.

    Returns:
        float: The angle in radians.
    """
    delta_x = self.pos[0] - node.pos[0]
    delta_y = self.pos[1] - node.pos[1]
    return atan2(delta_x, delta_y) - (pi / 2)
Get the angle between 2 nodes relative to the horizon. Args: node (object): The other node. Returns: rad: The angle
codesearchnet
def msgconvert(email):
    """Exec the msgconvert tool to convert an Outlook msg mail to eml format.

    Args:
        email (string): file path of the Outlook msg mail.

    Returns:
        tuple: (file path of the converted mail, stripped stdout text).

    Raises:
        MailParserOSError: if the msgconvert tool is not installed.
    """
    log.debug('Started converting Outlook email')
    # mkstemp returns (fd, path); the fd is closed in the finally block.
    (temph, temp) = tempfile.mkstemp(prefix='outlook_')
    command = ['msgconvert', '--outfile', temp, email]
    try:
        if six.PY2:
            # Python 2 has no subprocess.DEVNULL; use os.devnull instead.
            with open(os.devnull, 'w') as devnull:
                out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=devnull)
        elif six.PY3:
            out = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    except OSError:
        message = "To use this function you must install 'msgconvert' tool"
        log.exception(message)
        raise MailParserOSError(message)
    else:
        (stdoutdata, _) = out.communicate()
        return (temp, stdoutdata.decode('utf-8').strip())
    finally:
        os.close(temph)
Exec msgconvert tool, to convert msg Outlook mail in eml mail format Args: email (string): file path of Outlook msg mail Returns: tuple with file path of mail converted and standard output data (unicode Python 2, str Python 3)
codesearchnet
def variables(self):
    """Fetches a list of optimizer variables in the default graph.

    This wraps `variables()` from the actual optimizer. It does not include
    the `SyncReplicasOptimizer`'s local step.

    Returns:
        A list of variables.
    """
    wrapped_optimizer = self._opt
    return wrapped_optimizer.variables()
Fetches a list of optimizer variables in the default graph. This wraps `variables()` from the actual optimizer. It does not include the `SyncReplicasOptimizer`'s local step. Returns: A list of variables.
github-repos
def matches_to_marker_results(df):
    """Parse perfect BLAST matches into a marker results dict.

    Args:
        df (pandas.DataFrame): DataFrame of perfect BLAST matches.

    Returns:
        dict: cgMLST marker names to allele result dicts.

    Raises:
        Exception: if a marker ends up with an empty match list.
    """
    assert isinstance(df, pd.DataFrame)
    from collections import defaultdict
    # Group match rows by marker name.
    d = defaultdict(list)
    for idx, row in df.iterrows():
        marker = row['marker']
        d[marker].append(row)
    marker_results = {}
    for k,v in d.items():
        if len(v) > 1:
            # Multiple candidate matches: pick the one on the longest contig.
            logging.debug('Multiple potential cgMLST allele matches (n=%s) found for marker %s. Selecting match on longest contig.', len(v), k)
            df_marker = pd.DataFrame(v)
            df_marker.sort_values('slen', ascending=False, inplace=True)
            for i,r in df_marker.iterrows():
                allele = r['allele_name']
                slen = r['slen']
                logging.debug('Selecting allele %s from contig with length %s', allele, slen)
                seq = r['sseq']
                if '-' in seq:
                    # Alignment gaps are stripped before recomputing the
                    # allele name from the sequence.
                    logging.warning('Gaps found in allele. Removing gaps. %s', r)
                    seq = seq.replace('-', '').upper()
                allele = allele_name(seq)
                marker_results[k] = allele_result_dict(allele, seq, r.to_dict())
                # Only the top (longest-contig) match is kept.
                break
        elif len(v) == 1:
            row = v[0]
            seq = row['sseq']
            if '-' in seq:
                logging.warning('Gaps found in allele. Removing gaps. %s', row)
                seq = seq.replace('-', '').upper()
            allele = allele_name(seq)
            marker_results[k] = allele_result_dict(allele, seq, row.to_dict())
        else:
            err_msg = 'Empty list of matches for marker {}'.format(k)
            logging.error(err_msg)
            raise Exception(err_msg)
    return marker_results
Perfect BLAST matches to marker results dict Parse perfect BLAST matches to marker results dict. Args: df (pandas.DataFrame): DataFrame of perfect BLAST matches Returns: dict: cgMLST330 marker names to matching allele numbers
juraj-google-style
def get_list_index(lst, index_or_name):
    """Return the index of an element in the list.

    Args:
        lst (list): The list.
        index_or_name (int or str): The value of the reference element, or
            directly its numeric index.

    Returns:
        int: The index of the element in the list.

    Raises:
        ValueError: If index_or_name is a value not present in lst.
    """
    # `six.integer_types` existed only to cover Python 2's int/long split;
    # on Python 3 a plain isinstance check against int is equivalent and
    # drops the third-party dependency.
    if isinstance(index_or_name, int):
        return index_or_name
    return lst.index(index_or_name)
Return the index of an element in the list. Args: lst (list): The list. index_or_name (int or str): The value of the reference element, or directly its numeric index. Returns: (int) The index of the element in the list.
codesearchnet
def read_trailer_lines(self):
    """Read HTTP trailer headers and yield them.

    Yields:
        bytes: CRLF-terminated header lines (the terminating blank CRLF line
        is consumed but not yielded).

    Raises:
        ValueError: if the body has not been fully read, the stream ends
            prematurely, or a line is not CRLF-terminated.
        IOError: if the accumulated read size exceeds ``self.maxlen``.
    """
    if (not self.closed):
        raise ValueError('Cannot read trailers until the request body has been read.')
    while True:
        line = self.rfile.readline()
        if (not line):
            raise ValueError('Illegal end of headers.')
        self.bytes_read += len(line)
        if (self.maxlen and (self.bytes_read > self.maxlen)):
            raise IOError('Request Entity Too Large')
        if (line == CRLF):
            # Blank line terminates the trailer section.
            break
        if (not line.endswith(CRLF)):
            raise ValueError('HTTP requires CRLF terminators')
        (yield line)
Read HTTP headers and yield them. Returns: Generator: yields CRLF separated lines.
codesearchnet
def _dispatch_event(self, event, data=None):
    """Dispatches the event and executes any associated callbacks.

    Note:
        To prevent the app from crashing due to callback errors, exceptions
        are logged before being re-raised.

    Args:
        event (str): The type of event, e.g. 'bot_added'.
        data (dict): The data Slack sent with the event.
    """
    for callback in self._callbacks[event]:
        self._logger.debug("Running %s callbacks for event: '%s'", len(self._callbacks[event]), event)
        try:
            # Once stopped, only lifecycle events keep being dispatched.
            if (self._stopped and (event not in ['close', 'error'])):
                break
            if self.run_async:
                self._execute_callback_async(callback, data)
            else:
                self._execute_callback(callback, data)
        except Exception as err:
            name = callback.__name__
            module = callback.__module__
            # NOTE(review): the original error-message f-string was truncated
            # in the source we received; reconstructed a plausible message —
            # confirm against upstream.
            msg = f"When calling '{name}' in the '{module}' module the following error occurred: {err}"
            self._logger.error(msg)
            raise
Dispatches the event and executes any associated callbacks. Note: To prevent the app from crashing due to callback errors. We catch all exceptions and send all data to the logger. Args: event (str): The type of event. e.g. 'bot_added' data (dict): The data Slack sent. e.g. { "type": "bot_added", "bot": { "id": "B024BE7LH", "app_id": "A4H1JB4AZ", "name": "hugbot" } }
codesearchnet
def get_course_completions(self, enterprise_customer, days):
    """Get course completions via PersistentCourseGrade for an enterprise's learners.

    Args:
        enterprise_customer (EnterpriseCustomer): include completions for
            learners of this enterprise customer.
        days (int): only include completions within this many days.

    Returns:
        QuerySet: PersistentCourseGrade objects passed within the window by
        the customer's learners.
    """
    # NOTE(review): uses naive local time (datetime.now()) for the cutoff —
    # confirm whether passed_timestamp is stored in UTC.
    return PersistentCourseGrade.objects.filter(passed_timestamp__gt=(datetime.datetime.now() - datetime.timedelta(days=days))).filter(user_id__in=enterprise_customer.enterprise_customer_users.values_list('user_id', flat=True))
Get course completions via PersistentCourseGrade for all the learners of given enterprise customer. Arguments: enterprise_customer (EnterpriseCustomer): Include Course enrollments for learners of this enterprise customer. days (int): Include course enrollment of this number of days. Returns: (list): A list of PersistentCourseGrade objects.
codesearchnet
def _begin(self, retry_id=None):
    """Begin the transaction.

    Args:
        retry_id (Optional[bytes]): Transaction ID of a transaction to be
            retried.

    Raises:
        ValueError: If the current transaction has already begun.
    """
    if self.in_progress:
        msg = _CANT_BEGIN.format(self._id)
        raise ValueError(msg)
    transaction_response = self._client._firestore_api.begin_transaction(self._client._database_string, options_=self._options_protobuf(retry_id), metadata=self._client._rpc_metadata)
    # Store the server-assigned transaction id; in_progress is now True.
    self._id = transaction_response.transaction
Begin the transaction. Args: retry_id (Optional[bytes]): Transaction ID of a transaction to be retried. Raises: ValueError: If the current transaction has already begun.
codesearchnet
def convert_response(allocate_quota_response, project_id):
    """Computes a http status code and message from an `AllocateQuotaResponse`.

    Args:
        allocate_quota_response: the AllocateQuotaResponse from calling an api.
        project_id: the project id used to fill in templated messages.

    Returns:
        tuple(code, message)
    """
    if not allocate_quota_response or not allocate_quota_response.allocateErrors:
        return _IS_OK
    first_error = allocate_quota_response.allocateErrors[0]
    code, template = _QUOTA_ERROR_CONVERSION.get(first_error.code, _IS_UNKNOWN)
    # Messages without a '{' placeholder need no templating.
    if u'{' not in template:
        return code, template
    detail = first_error.description or u''
    return code, template.format(project_id=project_id, detail=detail)
Computes a http status code and message `AllocateQuotaResponse` The return value a tuple (code, message) where code: is the http status code message: is the message to return Args: allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`): the response from calling an api Returns: tuple(code, message)
juraj-google-style
def insert_system(cur, system_name, encoded_data=None):
    """Insert a system name into the cache.

    Args:
        cur (sqlite3.Cursor): An sqlite3 cursor; meant to be used within a
            `with` statement.
        system_name (str): The unique name of a system.
        encoded_data (dict, optional): If provided, it is populated with the
            serialized data, which is useful for avoiding re-encoding the
            same information repeatedly.
    """
    data = {} if encoded_data is None else encoded_data
    data.setdefault('system_name', system_name)
    cur.execute("INSERT OR IGNORE INTO system(system_name) VALUES (:system_name);", data)
Insert a system name into the cache. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. system_name (str): The unique name of a system encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times.
juraj-google-style
def _preprocess_journal_query_value(third_journal_field, old_publication_info_values):
    """Transforms the given journal query value (old publication info) to the new one.

    Args:
        third_journal_field (six.text_type): The final field used to populate
            the old publication info.
        old_publication_info_values (six.text_type): The old publication
            info; one of {only title, title & volume,
            title & volume & artid/page_start}.

    Returns:
        (dict) The new publication info.
    """
    keys = (
        ElasticSearchVisitor.JOURNAL_TITLE,
        ElasticSearchVisitor.JOURNAL_VOLUME,
        third_journal_field,
    )
    stripped_values = [part.strip() for part in old_publication_info_values.split(',') if part]
    # Pair up keys with values, skipping empty values.
    old_publication_info = [{key: value for key, value in zip(keys, stripped_values) if value}]
    return convert_old_publication_info_to_new(old_publication_info)[0]
Transforms the given journal query value (old publication info) to the new one. Args: third_journal_field (six.text_type): The final field to be used for populating the old publication info. old_publication_info_values (six.text_type): The old publication info. It must be one of {only title, title & volume, title & volume & artid/page_start}. Returns: (dict) The new publication info.
juraj-google-style
def register(self, alias, service_class, configs=None, start_service=True):
    """Registers a service.

    Creates a service instance, optionally starts it, and adds it to the
    manager.

    Args:
        alias: string, the alias for this instance.
        service_class: class, the service class to instantiate.
        configs: (optional) config object passed to the service class's
            constructor.
        start_service: bool, whether to start the service instance. Default
            is True.

    Raises:
        Error: if service_class is not a BaseService subclass or the alias
            is already taken.
    """
    if not inspect.isclass(service_class):
        raise Error(self._device, '"%s" is not a class!' % service_class)
    if not issubclass(service_class, base_service.BaseService):
        raise Error(self._device, 'Class %s is not a subclass of BaseService!' % service_class)
    if alias in self._service_objects:
        raise Error(self._device, 'A service is already registered with alias "%s".' % alias)
    service_obj = service_class(self._device, configs)
    service_obj.alias = alias
    if start_service:
        service_obj.start()
    self._service_objects[alias] = service_obj
Registers a service. This will create a service instance, start the service, and add the instance to the manager. Args: alias: string, the alias for this instance. service_class: class, the service class to instantiate. configs: (optional) config object to pass to the service class's constructor. start_service: bool, whether to start the service instance or not. Default is True.
github-repos
def start_tpot(automated_run, session, path):
    """Starts a TPOT automated run that exports directly to base learner setup.

    Args:
        automated_run (xcessiv.models.AutomatedRun): Automated run object.
        session: Valid SQLAlchemy session.
        path (str, unicode): Path to project folder.
    """
    # The run's source is executed as a module; it must define `tpot_learner`.
    module = functions.import_string_code_as_module(automated_run.source)
    extraction = session.query(models.Extraction).first()
    X, y = extraction.return_train_dataset()
    tpot_learner = module.tpot_learner
    tpot_learner.fit(X, y)
    # Export TPOT's generated pipeline code to a pid-unique temp file, then
    # read it back as the base learner source.
    temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
    tpot_learner.export(temp_filename)
    with open(temp_filename) as f:
        base_learner_source = f.read()
    base_learner_source = constants.tpot_learner_docstring + base_learner_source
    # Best-effort cleanup of the temp file.
    try:
        os.remove(temp_filename)
    except OSError:
        pass
    blo = models.BaseLearnerOrigin(
        source=base_learner_source,
        name='TPOT Learner',
        meta_feature_generator='predict'
    )
    session.add(blo)
    session.commit()
Starts a TPOT automated run that exports directly to base learner setup Args: automated_run (xcessiv.models.AutomatedRun): Automated run object session: Valid SQLAlchemy session path (str, unicode): Path to project folder
juraj-google-style
def _polar(abs_, angle):
    """Internal implementation of the polar function.

    Args:
        abs_: The magnitude (absolute value) of the complex number.
        angle: The angle (in radians) of the complex number.

    Returns:
        A complex tensor with the same shape as `abs_` and `angle`.
    """
    magnitude = backend.convert_to_tensor(abs_)
    theta = backend.convert_to_tensor(angle)
    real_part = magnitude * backend.numpy.cos(theta)
    imag_part = magnitude * backend.numpy.sin(theta)
    return backend.math._get_complex_tensor_from_tuple((real_part, imag_part))
Internal implementation of the polar function. Args: abs_: The magnitude (absolute value) of the complex number. angle: The angle (in radians) of the complex number. Returns: A complex number (or array of complex numbers) with the same shape as `abs_` and `angle`.
github-repos
def DeregisterDecrypter(cls, decrypter):
    """Deregisters a decrypter for a specific encryption method.

    Args:
        decrypter (type): decrypter class.

    Raises:
        KeyError: if the corresponding decrypter is not set.
    """
    method_key = decrypter.ENCRYPTION_METHOD.lower()
    if method_key not in cls._decrypters:
        raise KeyError('Decrypter for encryption method: {0:s} not set.'.format(decrypter.ENCRYPTION_METHOD))
    del cls._decrypters[method_key]
Deregisters a decrypter for a specific encryption method. Args: decrypter (type): decrypter class. Raises: KeyError: if the corresponding decrypter is not set.
codesearchnet
def prefix(self, imod: YangIdentifier, mid: ModuleId) -> YangIdentifier:
    """Return the prefix corresponding to an implemented module.

    Args:
        imod: Name of an implemented module.
        mid: Identifier of the context module.

    Raises:
        ModuleNotImplemented: If `imod` is not implemented.
        ModuleNotRegistered: If `mid` is not registered in YANG library.
        ModuleNotImported: If `imod` is not imported in `mid`.
    """
    try:
        # (module name, revision) pair identifying the implemented module.
        did = (imod, self.implement[imod])
    except KeyError:
        raise ModuleNotImplemented(imod) from None
    try:
        pmap = self.modules[mid].prefix_map
    except KeyError:
        raise ModuleNotRegistered(*mid) from None
    # Reverse lookup: find the prefix whose mapping matches the module.
    for p in pmap:
        if pmap[p] == did:
            return p
    raise ModuleNotImported(imod, mid)
Return the prefix corresponding to an implemented module. Args: imod: Name of an implemented module. mid: Identifier of the context module. Raises: ModuleNotImplemented: If `imod` is not implemented. ModuleNotRegistered: If `mid` is not registered in YANG library. ModuleNotImported: If `imod` is not imported in `mid`.
juraj-google-style
def read(self, vals):
    """Read values.

    The first value is the count of DesignCondition records that follow;
    each record consumes `obj.field_count` consecutive values.

    Args:
        vals (list): list of strings representing values.
    """
    i = 0
    count = int(vals[i])
    i += 1
    for _ in range(count):
        obj = DesignCondition()
        obj.read(vals[i:i + obj.field_count])
        self.add_design_condition(obj)
        i += obj.field_count
Read values. Args: vals (list): list of strings representing values
juraj-google-style
def residual_block_layer(inputs, hparams):
    """Residual block over inputs.

    Runs a residual block consisting of a kernel_size x kernel_size conv,
    a 1x1 conv, then dropout/add/normalize per
    hparams.layer_postprocess_sequence.

    Args:
        inputs: Tensor of shape [batch, height, width, hparams.hidden_size].
        hparams: HParams.

    Returns:
        Tensor of shape [batch, height, width, hparams.hidden_size].
    """
    kernel = (hparams.res_kernel_size, hparams.res_kernel_size)
    x = inputs
    for i in range(hparams.num_res_layers):
        with tf.variable_scope("res_conv_%d" % i):
            # kxk conv over the layer-normed input.
            y = common_layers.conv_block(
                common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"),
                hparams.hidden_size,
                [((1, 1), kernel)],
                strides=(1, 1),
                padding="SAME",
                name="residual_conv")
            # 1x1 conv.
            y = common_layers.conv_block(
                y,
                hparams.hidden_size,
                [((1, 1), (1, 1))],
                strides=(1, 1),
                padding="SAME",
                name="residual_dense")
            # Residual add + postprocess (dropout/norm per hparams).
            x = common_layers.layer_postprocess(x, y, hparams)
    return x
Residual block over inputs. Runs a residual block consisting of conv: kernel_size x kernel_size conv: 1x1 dropout, add and normalize according to hparams.layer_postprocess_sequence. Args: inputs: Tensor of shape [batch, height, width, hparams.hidden_size]. hparams: HParams. Returns: Tensor of shape [batch, height, width, hparams.hidden_size].
juraj-google-style
def push_image(registry, image):
    """Push the given image to the selected registry.

    Args:
        registry (str): The registry address, without protocol (no
            http(s)://).
        image (dict[str, Any]): The dict describing the image; must contain
            the image 'name'.
    """
    image_name = image['name']
    log.info('Pushing <33>{registry}<35>/{image}'.format(registry=registry, image=image_name))
    shell.run('docker push {registry}/{image}'.format(registry=registry, image=image_name))
Push the given image to selected repository. Args: registry (str): The name of the registry we're pushing to. This is the address of the repository without the protocol specification (no http(s)://) image (dict[str, Any]): The dict containing the information about the image. This is the same dictionary as defined in DOCKER_IMAGES variable.
codesearchnet
def compute_video_metrics_from_predictions(predictions, decode_hparams):
    """Computes metrics from predictions.

    Args:
        predictions: list of list of dicts; outer length: num_decodes,
            inner length: num_samples.
        decode_hparams: Decode hparams, instance of HParams.
            (Currently unused by the visible body.)

    Returns:
        dict of Tensors keyed by metric name, each shaped
        (num_samples, num_frames).
    """
    all_results = {}
    ssim_all_decodes, psnr_all_decodes = [], []
    for single_decode in predictions:
        args = get_zipped_dataset_from_predictions(single_decode)
        psnr_single, ssim_single = compute_one_decoding_video_metrics(*args)
        psnr_all_decodes.append(psnr_single)
        ssim_all_decodes.append(ssim_single)
    # Stack per-decode results into arrays before aggregating statistics.
    psnr_all_decodes = np.array(psnr_all_decodes)
    ssim_all_decodes = np.array(ssim_all_decodes)
    all_results.update({"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes})
    return compute_all_metrics_statistics(all_results)
Computes metrics from predictions. Args: predictions: list of list of dicts. outer length: num_decodes, inner_length: num_samples decode_hparams: Decode hparams. instance of HParams. Returns: statistics: dict of Tensors, key being the metric with each Tensor having the shape (num_samples, num_frames).
juraj-google-style
def _name_to_tensor(self, tensor_name):
    """The tensor with the given name.

    Args:
        tensor_name: a string, name of a tensor in the graph.

    Returns:
        a tf.Tensor or mtf.Tensor
    """
    operation_id, output_id = self._tensor_name_to_ids[tensor_name]
    return self._operations[operation_id].outputs[output_id]
The tensor with the given name. Args: tensor_name: a string, name of a tensor in the graph. Returns: a tf.Tensor or mtf.Tensor
codesearchnet
def load(filename):
    """Load a variable from a Pickle file.

    Args:
        filename (str): path of the file to load.

    Returns:
        The variable read from the file.
    """
    # Context manager guarantees the handle is closed even when
    # pickle.load raises; the original leaked the handle on error and
    # opened without an explicit binary mode.
    with open(filename, 'rb') as file_obj:
        return pickle.load(file_obj)
Load variable from Pickle file Args: path (str): path of the file to load Returns: variable read from path
juraj-google-style
def market_info(ticker: str) -> dict:
    """Get info for the given market.

    Args:
        ticker: Bloomberg full ticker, e.g. 'SHCOMP Index', 'CL1 Comdty'.

    Returns:
        dict: the matching asset-info dict, or an empty dict when nothing
        matches. (If the ticker suffix matches none of the handled asset
        classes, the function falls off the end and returns None —
        presumably callers only pass known suffixes; verify.)
    """
    t_info = ticker.split()
    assets = param.load_info('assets')
    # Single-name equities (no '=' in the ticker): match by exchange code.
    if ((t_info[(- 1)] == 'Equity') and ('=' not in t_info[0])):
        exch = t_info[(- 2)]
        for info in assets.get('Equity', [dict()]):
            if ('exch_codes' not in info):
                continue
            if (exch in info['exch_codes']):
                return info
        return dict()
    # Currencies: match base ticker, allowing '+' suffixes and a trailing
    # digit (futures-style, e.g. 'INT1').
    if (t_info[(- 1)] == 'Curncy'):
        for info in assets.get('Curncy', [dict()]):
            if ('tickers' not in info):
                continue
            if ((t_info[0].split('+')[0] in info['tickers']) or (t_info[0][(- 1)].isdigit() and (t_info[0][:(- 1)] in info['tickers']))):
                return info
        return dict()
    # Commodities: strip the trailing contract digit (e.g. 'CL1' -> 'CL').
    if (t_info[(- 1)] == 'Comdty'):
        for info in assets.get('Comdty', [dict()]):
            if ('tickers' not in info):
                continue
            if (t_info[0][:(- 1)] in info['tickers']):
                return info
        return dict()
    # Indices, plus synthetic equities of the form 'XXX=n ... Equity'.
    if ((t_info[(- 1)] == 'Index') or ((t_info[(- 1)] == 'Equity') and ('=' in t_info[0]))):
        if (t_info[(- 1)] == 'Equity'):
            tck = t_info[0].split('=')[0]
        else:
            tck = ' '.join(t_info[:(- 1)])
        for info in assets.get('Index', [dict()]):
            if ('tickers' not in info):
                continue
            # Special-case VIX futures ('UX...').
            if ((tck[:2] == 'UX') and ('UX' in info['tickers'])):
                return info
            if (tck in info['tickers']):
                if (t_info[(- 1)] == 'Equity'):
                    return info
                if (not info.get('is_fut', False)):
                    return info
            # Trailing digit stripped: only futures entries qualify.
            if (tck[:(- 1)].rstrip() in info['tickers']):
                if info.get('is_fut', False):
                    return info
        return dict()
    # Corp bonds always resolve to an empty dict (per the doctest examples);
    # NOTE(review): the loop body appears truncated in the received source —
    # confirm against upstream.
    if (t_info[(- 1)] == 'Corp'):
        for info in assets.get('Corp', [dict()]):
            if ('ticker' not in info):
                continue
        return dict()
Get info for given market Args: ticker: Bloomberg full ticker Returns: dict Examples: >>> info = market_info('SHCOMP Index') >>> info['exch'] 'EquityChina' >>> info = market_info('ICICIC=1 IS Equity') >>> info['freq'], info['is_fut'] ('M', True) >>> info = market_info('INT1 Curncy') >>> info['freq'], info['is_fut'] ('M', True) >>> info = market_info('CL1 Comdty') >>> info['freq'], info['is_fut'] ('M', True) >>> # Wrong tickers >>> market_info('C XX Equity') {} >>> market_info('XXX Comdty') {} >>> market_info('Bond_ISIN Corp') {} >>> market_info('XYZ Index') {} >>> market_info('XYZ Curncy') {}
codesearchnet
def build_tfexample_transfored_training_input_fn(schema, features, analysis_output_dir, raw_data_file_pattern, training_batch_size, num_epochs=None, randomize_input=False, min_after_dequeue=1, reader_num_threads=1, allow_smaller_final_batch=True):
    """Creates a training input_fn that reads transformed tf.example files.

    Args:
        schema: schema list.
        features: features dict.
        analysis_output_dir: output folder from analysis.
        raw_data_file_pattern: file path, or list of file paths/patterns.
        training_batch_size: An int specifying the batch size to use.
        num_epochs: number of epochs to read from the files; None reads
            forever.
        randomize_input: If true, input rows are read out of order, limited
            by min_after_dequeue.
        min_after_dequeue: Minimum number of elements in the reading queue
            after a dequeue; only used if randomize_input is True.
        reader_num_threads: The number of threads enqueuing data.
        allow_smaller_final_batch: If false, fractional final batches are
            dropped.

    Returns:
        An input_fn suitable for training that reads transformed data from
        gzipped tf record files of tf.example.
    """
    def transformed_training_input_fn():
        'Training input function that reads transformed data.'
        # Expand the pattern(s) into a concrete file list.
        if isinstance(raw_data_file_pattern, six.string_types):
            filepath_list = [raw_data_file_pattern]
        else:
            filepath_list = raw_data_file_pattern
        files = []
        for path in filepath_list:
            files.extend(file_io.get_matching_files(path))
        filename_queue = tf.train.string_input_producer(files, num_epochs=num_epochs, shuffle=randomize_input)
        options = tf.python_io.TFRecordOptions(compression_type=tf.python_io.TFRecordCompressionType.GZIP)
        (ex_id, ex_str) = tf.TFRecordReader(options=options).read_up_to(filename_queue, training_batch_size)
        queue_capacity = (((reader_num_threads + 3) * training_batch_size) + min_after_dequeue)
        if randomize_input:
            (_, batch_ex_str) = tf.train.shuffle_batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, min_after_dequeue=min_after_dequeue, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)
        else:
            (_, batch_ex_str) = tf.train.batch(tensors=[ex_id, ex_str], batch_size=training_batch_size, capacity=queue_capacity, enqueue_many=True, num_threads=reader_num_threads, allow_smaller_final_batch=allow_smaller_final_batch)
        # Build the parse spec from the transformed-feature metadata.
        feature_spec = {}
        feature_info = get_transformed_feature_info(features, schema)
        for (name, info) in six.iteritems(feature_info):
            if (info['size'] is None):
                feature_spec[name] = tf.VarLenFeature(dtype=info['dtype'])
            else:
                feature_spec[name] = tf.FixedLenFeature(shape=[info['size']], dtype=info['dtype'])
        parsed_tensors = tf.parse_example(batch_ex_str, feature_spec)
        transformed_features = {}
        for (k, v) in six.iteritems(parsed_tensors):
            # Rank-1 dense tensors get an explicit trailing dimension.
            if (isinstance(v, tf.Tensor) and (v.get_shape().ndims == 1)):
                transformed_features[k] = tf.expand_dims(v, (- 1))
            else:
                transformed_features[k] = v
        transformed_features = image_feature_engineering(features=features, feature_tensors_dict=transformed_features)
        # Split the target out of the feature dict.
        target_name = get_target_name(features)
        if ((not target_name) or (target_name not in transformed_features)):
            raise ValueError('Cannot find target transform in features')
        transformed_target = transformed_features.pop(target_name)
        return (transformed_features, transformed_target)
    return transformed_training_input_fn
Creates training input_fn that reads transformed tf.example files. Args: schema: schema list features: features dict analysis_output_dir: output folder from analysis raw_data_file_pattern: file path, or list of files training_batch_size: An int specifying the batch size to use. num_epochs: number of epochs to read from the files. Use None to read forever. randomize_input: If true, the input rows are read out of order. This randomness is limited by the min_after_dequeue value. min_after_dequeue: Minimum number of elements in the reading queue after a dequeue, used to ensure a level of mixing of elements. Only used if randomize_input is True. reader_num_threads: The number of threads enqueuing data. allow_smaller_final_batch: If false, fractional batches at the end of training or evaluation are not used. Returns: An input_fn suitable for training that reads transformed data in tf record files of tf.example.
codesearchnet
def register_repeating_metric(self, metric_name, frequency, getter):
    """Record hits to a metric at a specified interval.

    Args:
        metric_name: The name of the metric to record with Carbon.
        frequency: The frequency with which to poll the getter and record
            the value with Carbon.
        getter: A function which takes no arguments and returns the value
            to record with Carbon.

    Returns:
        RepeatingMetricHandle instance. Call .stop() on it to stop
        recording the metric.
    """
    # LoopingCall polls the getter and publishes each reading.
    l = task.LoopingCall(self._publish_repeating_metric, metric_name, getter)
    repeating_metric_handle = RepeatingMetricHandle(l, frequency)
    self._repeating_metric_handles.append(repeating_metric_handle)
    # Start immediately only if the service is already running; otherwise
    # the stored handle is presumably started on service start -- TODO confirm.
    if self.running:
        repeating_metric_handle.start()
    return repeating_metric_handle
Record hits to a metric at a specified interval. Args: metric_name: The name of the metric to record with Carbon. frequency: The frequency with which to poll the getter and record the value with Carbon. getter: A function which takes no arguments and returns the value to record with Carbon. Returns: RepeatingMetricHandle instance. Call .stop() on it to stop recording the metric.
codesearchnet
def _is_molecule_linear(self, mol): if (mol.NumAtoms() < 3): return True a1 = mol.GetAtom(1) a2 = mol.GetAtom(2) for i in range(3, (mol.NumAtoms() + 1)): angle = float(mol.GetAtom(i).GetAngle(a2, a1)) if (angle < 0.0): angle = (- angle) if (angle > 90.0): angle = (180.0 - angle) if (angle > self._angle_tolerance): return False return True
Is the molecule a linear one Args: mol: The molecule. OpenBabel OBMol object. Returns: Boolean value.
codesearchnet
def seek(self, offset=None, whence=0, position=None):
    """Seeks to the offset in the file.

    Args:
        offset: The byte count relative to the whence argument.
        whence: Valid values for whence are:
            0: start of the file (default)
            1: relative to the current position of the file
            2: relative to the end of file. `offset` is usually negative.
        position: Legacy alias for `offset`; exactly one of the two must
            be provided.

    Raises:
        TypeError: if neither or both of `offset` and `position` are given.
        errors.InvalidArgumentError: if `whence` is not 0, 1 or 2.
    """
    self._preread_check()
    if offset is None and position is None:
        raise TypeError('seek(): offset argument required')
    if offset is not None and position is not None:
        raise TypeError('seek(): offset and position may not be set simultaneously.')
    # `position` is the deprecated spelling of `offset`.
    if position is not None:
        offset = position
    if whence == 0:
        pass
    elif whence == 1:
        offset += self.tell()
    elif whence == 2:
        offset += self.size()
    else:
        raise errors.InvalidArgumentError(None, None, 'Invalid whence argument: {}. Valid values are 0, 1, or 2.'.format(whence))
    self._read_buf.seek(offset)
Seeks to the offset in the file. Args: offset: The byte count relative to the whence argument. whence: Valid values for whence are: 0: start of the file (default) 1: relative to the current position of the file 2: relative to the end of file. `offset` is usually negative.
github-repos
def Unzip(iterable):
    """Unzips an iterable of pairs into a pair of lists.

    This is an inversion of the standard `zip` function:

        >>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
        (["foo", "bar", "baz"], [1, 2, 3])

    Args:
        iterable: An iterable of pairs to unzip.

    Returns:
        A pair of lists: (all first elements, all second elements).
    """
    firsts, seconds = [], []
    for first, second in iterable:
        firsts.append(first)
        seconds.append(second)
    return firsts, seconds
Unzips specified iterable of pairs to pair of two iterables. This function is an inversion of the standard `zip` function and the following hold: * ∀ l, r. l, r == unzip(zip(l, r)) * ∀ p. p == zip(unzip(p)) Examples: >>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)]) (["foo", "bar", "baz"], [1, 2, 3]) Args: iterable: An iterable of pairs to unzip. Returns: A pair of iterables after unzipping.
juraj-google-style
def create_bmi_config_file(self, filename: str = 'bmi_config.txt') -> None:
    """Create a BMI config file to initialize the model.

    Args:
        filename: The filename with which the config file should be saved.
    """
    # Serialize the default initial state as CSV, indexed by variable name.
    initial_state = self.construct_default_initial_state()
    initial_state.to_csv(filename, index_label='variable')
Create a BMI config file to initialize the model. Args: filename: The filename with which the config file should be saved.
codesearchnet
def set_notify_dispatch_request(self, notify_dispatch_request, *args):
    """Set function to call just before requests are dispatched.

    Args:
        notify_dispatch_request (callable): function that will be called
            with the request as its first argument, followed by the extra
            ``*args``, just before each request is dispatched.
        *args: Extra positional arguments stored for each notification.
    """
    self._notify_dispatch_request = notify_dispatch_request
    self._notify_args = args
Set function to call just before requests are dispatched Args: notify_dispatch_request (callable): function will be called with request as single arg just before request is dispatched
codesearchnet
def get_queue_name(queue_name):
    """Determine which queue the MR should run on.

    How to choose the queue:
    1. If the user provided one, use that.
    2. If we are starting an MR from the taskqueue, inherit that queue.
       If it is a special queue (name prefixed with "__"), fall back to
       the default queue.
    3. Default queue.

    If the user is using any MR pipeline interface, pipeline.start takes a
    "queue_name" argument. The pipeline will run on that queue and MR will
    simply inherit the queue_name.

    Args:
        queue_name: queue_name from the user. May be None.

    Returns:
        The queue name to run on.
    """
    if queue_name:
        return queue_name
    queue_name = os.environ.get('HTTP_X_APPENGINE_QUEUENAME', parameters.config.QUEUE_NAME)
    # Special queues (e.g. "__cron") cannot be targeted directly; fall
    # back to the configured default. startswith('__') replaces the
    # equivalent but clunkier `len(s) > 1 and s[0:2] == '__'`.
    if queue_name.startswith('__'):
        return parameters.config.QUEUE_NAME
    else:
        return queue_name
Determine which queue MR should run on. How to choose the queue: 1. If user provided one, use that. 2. If we are starting a mr from taskqueue, inherit that queue. If it's a special queue, fall back to the default queue. 3. Default queue. If user is using any MR pipeline interface, pipeline.start takes a "queue_name" argument. The pipeline will run on that queue and MR will simply inherit the queue_name. Args: queue_name: queue_name from user. Maybe None. Returns: The queue name to run on.
codesearchnet
def generate_host_passthrough(self, vcpu_num):
    """Generate host-passthrough XML cpu node.

    Args:
        vcpu_num(str): number of virtual CPUs

    Returns:
        lxml.etree.Element: CPU XML node
    """
    cpu_node = ET.Element('cpu', mode='host-passthrough')
    cpu_node.append(self.generate_topology(vcpu_num))
    # Multi-vCPU guests additionally get a NUMA layout node.
    if vcpu_num > 1:
        cpu_node.append(self.generate_numa(vcpu_num))
    return cpu_node
Generate host-passthrough XML cpu node Args: vcpu_num(str): number of virtual CPUs Returns: lxml.etree.Element: CPU XML node
codesearchnet
def compute(self, x_arr, y_arr):
    """Compute distance between two vectors via their mixture.

    Both inputs are L1-normalized, then the parent distance of each from
    their 50/50 mixture is averaged.

    Args:
        x_arr: `np.ndarray` of vectors.
        y_arr: `np.ndarray` of vectors.

    Returns:
        `np.ndarray` of distances.
    """
    x_norm = x_arr / np.linalg.norm(x_arr, ord=1)
    y_norm = y_arr / np.linalg.norm(y_arr, ord=1)
    midpoint = 0.5 * (x_norm + y_norm)
    half_x = super().compute(x_norm, midpoint)
    half_y = super().compute(y_norm, midpoint)
    return 0.5 * (half_x + half_y)
Compute distance. Args: x_arr: `np.ndarray` of vectors. y_arr: `np.ndarray` of vectors. Retruns: `np.ndarray` of distances.
juraj-google-style
def download_tile(map_layer, zoom, x, y):
    """Download a given tile from the tile server.

    Args:
        map_layer (MapLayer): MapLayer object which provides the tile-url.
        zoom (int): zoom level
        x (int): Tile-x-coordinate
        y (int): Tile-y-coordinate

    Returns:
        tuple: ``((x, y), path)`` where path is a temporary file containing
        the downloaded image, or a bundled empty tile when the download
        failed.
    """
    try:
        tile_url = map_layer.get_tile_url(zoom, x, y)
        tmp_file, headers = urllib.request.urlretrieve(tile_url)
        return (x, y), tmp_file
    except URLError as e:
        # Failed downloads degrade to a transparent placeholder tile so
        # the overall map request can still succeed.
        app.logger.info("Error downloading tile x={}, y={}, z={} for layer {}: {}".format(
            x, y, zoom, map_layer, e.reason))
        return (x, y), pkg_resources.resource_filename("geos", "static/empty_tile.png")
Download a given tile from the tile server. Args: map_layer (MapLayer): MapLayer object which provides the tile-url. zoom (int): zoom level x (int): Tile-x-coordinate y (int): Tile-y-coordinate Returns: file: temporary file containing the downloaded image.
juraj-google-style
def check_coordinates(chromosome, pos, coordinates):
    """Check if the variant is in the interval given by the coordinates.

    Args:
        chromosome(str): Variant chromosome
        pos(int): Variant position
        coordinates(dict): Region of interest with 'chrom', 'start' and
            'end' keys.

    Returns:
        bool: True when the variant lies inside the (inclusive) region.
    """
    # CHR_PATTERN strips any chromosome prefix before the name comparison.
    normalized_chrom = CHR_PATTERN.match(chromosome).group(2)
    if normalized_chrom != coordinates['chrom']:
        return False
    return coordinates['start'] <= pos <= coordinates['end']
Check if the variant is in the interval given by the coordinates Args: chromosome(str): Variant chromosome pos(int): Variant position coordinates(dict): Dictionary with the region of interest
codesearchnet
def split_if_relative_reference(reference: message.Message) -> None:
    """If possible, parses a `Reference` `uri` into more structured fields.

    This is only possible for two forms of reference uris:
    * Relative references of the form $TYPE/$ID, e.g., "Patient/1234",
      parsed into the matching typed reference-id field.
    * Fragments of the form "#$FRAGMENT", e.g., "#vs1", parsed into the
      `fragment` field.

    If the reference URI matches one of these schemas, the `uri` field is
    cleared and the appropriate structured fields set. Otherwise the
    reference is unchanged.

    Args:
        reference: The FHIR reference to potentially split.

    Raises:
        ValueError: If the message is not a valid FHIR Reference proto.
    """
    _validate_reference(reference)
    uri_field = reference.DESCRIPTOR.fields_by_name.get('uri')
    if not proto_utils.field_is_set(reference, uri_field):
        return
    uri = proto_utils.get_value_at_field(reference, uri_field)
    # Case 1: relative reference "$TYPE/$ID" -> typed reference-id field.
    internal_match = re.fullmatch(_INTERNAL_REFERENCE_PATTERN, uri.value)
    if internal_match is not None:
        reference_id_field = get_reference_id_field_for_resource(reference, internal_match.group('resource_type'))
        reference_id = proto_utils.create_message_from_descriptor(reference_id_field.message_type)
        populate_typed_reference_id(reference_id, internal_match.group('resource_id'), internal_match.group('version'))
        # Carry the original uri's id/extension over to the typed field.
        proto_utils.copy_common_field(uri, reference_id, 'id')
        proto_utils.copy_common_field(uri, reference_id, 'extension')
        proto_utils.set_value_at_field(reference, reference_id_field, reference_id)
        return
    # Case 2: fragment reference "#$FRAGMENT" -> fragment field (leading
    # '#' stripped).
    fragment_match = re.fullmatch(_FRAGMENT_REFERENCE_PATTERN, uri.value)
    if fragment_match is not None:
        fragment_field = reference.DESCRIPTOR.fields_by_name['fragment']
        fragment = proto_utils.create_message_from_descriptor(fragment_field.message_type)
        value_field = fragment.DESCRIPTOR.fields_by_name['value']
        proto_utils.set_value_at_field(fragment, value_field, uri.value[1:])
        proto_utils.copy_common_field(uri, fragment, 'id')
        proto_utils.copy_common_field(uri, fragment, 'extension')
        proto_utils.set_value_at_field(reference, fragment_field, fragment)
        return
If possible, parses a `Reference` `uri` into more structured fields. This is only possible for two forms of reference uris: * Relative references of the form $TYPE/$ID, e.g., "Patient/1234" In this case, this will be parsed to a proto of the form: {patient_id: {value: "1234"}} * Fragments of the form "#$FRAGMENT", e.g., "#vs1". In this case, this would be parsed into a proto of the form: {fragment: {value: "vs1"} } If the reference URI matches one of these schemas, the `uri` field will be cleared, and the appropriate structured fields set. Otherwise, the reference will be unchanged. Args: reference: The FHIR reference to potentially split. Raises: ValueError: If the message is not a valid FHIR Reference proto.
github-repos
def make_hex_texture(grid_size = 2, resolution=1):
    """Makes a texture consisting of a grid of hexagons.

    Args:
        grid_size (int): the number of hexagons along each dimension of
            the grid
        resolution (int): the number of midpoints along the line of each
            hexagon

    Returns:
        A texture.
    """
    grid_x, grid_y = np.meshgrid(
        np.arange(grid_size),
        np.arange(grid_size)
    )
    ROOT_3_OVER_2 = np.sqrt(3) / 2
    ONE_HALF = 0.5
    # Offset odd rows by half a hexagon width so the grid interlocks.
    grid_x = (grid_x * np.sqrt(3) + (grid_y % 2) * ROOT_3_OVER_2).flatten()
    grid_y = grid_y.flatten() * 1.5
    grid_points = grid_x.shape[0]  # (unused; kept as-is)
    # Interpolated per-hexagon corner offsets, `resolution` points per edge.
    x_offsets = np.interp(np.arange(4 * resolution),
                          np.arange(4) * resolution, [
                              ROOT_3_OVER_2,
                              0.,
                              -ROOT_3_OVER_2,
                              -ROOT_3_OVER_2,
                          ])
    y_offsets = np.interp(np.arange(4 * resolution),
                          np.arange(4) * resolution, [
                              -ONE_HALF,
                              -1.,
                              -ONE_HALF,
                              ONE_HALF
                          ])
    tmx = 4 * resolution
    x_t = np.tile(grid_x, (tmx, 1)) + x_offsets.reshape((tmx, 1))
    y_t = np.tile(grid_y, (tmx, 1)) + y_offsets.reshape((tmx, 1))
    # A NaN row separates the per-hexagon polylines once flattened
    # column-major below.
    x_t = np.vstack([x_t, np.tile(np.nan, (1, grid_x.size))])
    y_t = np.vstack([y_t, np.tile(np.nan, (1, grid_y.size))])
    return fit_texture((x_t.flatten('F'), y_t.flatten('F')))
Makes a texture consisting of a grid of hexagons. Args: grid_size (int): the number of hexagons along each dimension of the grid resolution (int): the number of midpoints along the line of each hexagon Returns: A texture.
juraj-google-style
def opt(parser: Union[Parser, Sequence[Input]]) -> OptionalParser:
    """Optionally match a parser.

    An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it
    returns a list of length one containing the parsed value; if it fails,
    it returns an empty list.

    Args:
        parser: Parser or literal
    """
    # Bare strings are promoted to literal parsers first.
    if isinstance(parser, str):
        return OptionalParser(lit(parser))
    return OptionalParser(parser)
Optionally match a parser. An ``OptionalParser`` attempts to match ``parser``. If it succeeds, it returns a list of length one with the value returned by the parser as the only element. If it fails, it returns an empty list. Args: parser: Parser or literal
juraj-google-style
def add_license(self, contents):
    """Adds the given ``contents`` as a new custom license to the J-Link.

    Args:
        self (JLink): the ``JLink`` instance
        contents: the string contents of the new custom license

    Returns:
        ``True`` if license was added, ``False`` if license already existed.

    Raises:
        JLinkException: if the write fails.

    Note:
        J-Link V9 and J-Link ULTRA/PRO V4 have 336 Bytes of memory for
        licenses, while older versions have 80 bytes.
    """
    buf_size = len(contents)
    # NUL-terminated C string buffer for the DLL call.
    buf = (ctypes.c_char * (buf_size + 1))(*contents.encode())
    res = self._dll.JLINK_EMU_AddLicense(buf)
    if (res == (- 1)):
        raise errors.JLinkException('Unspecified error.')
    elif (res == (- 2)):
        raise errors.JLinkException('Failed to read/write license area.')
    elif (res == (- 3)):
        raise errors.JLinkException('J-Link out of space.')
    # 0 means newly added; any other non-negative result means it existed.
    return (res == 0)
Adds the given ``contents`` as a new custom license to the J-Link. Args: self (JLink): the ``JLink`` instance contents: the string contents of the new custom license Returns: ``True`` if license was added, ``False`` if license already existed. Raises: JLinkException: if the write fails. Note: J-Link V9 and J-Link ULTRA/PRO V4 have 336 Bytes of memory for licenses, while older versions of 80 bytes.
codesearchnet
def connect_raise_node(self, node, except_guards):
    """Adds extra connections between a raise node and containing except guards.

    The node is a graph node, not an ast node.

    Args:
        node: Node, the graph node for the raise statement.
        except_guards: Tuple[ast.AST, ...], the except sections that guard
            node.
    """
    for guard in except_guards:
        # setdefault collapses the membership-check-then-append pattern
        # into a single lookup.
        self.raises.setdefault(guard, []).append(node)
Adds extra connection between a raise node and containing except guards. The node is a graph node, not an ast node. Args: node: Node except_guards: Tuple[ast.AST, ...], the except sections that guard node
github-repos
def update(self, b):
    """Update the HyperLogLog with a new data value in bytes.

    The value is hashed with the ``hashfunc`` supplied at construction.
    The low bits of the digest select a register; the remaining bits feed
    the rank estimate stored in that register.

    Args:
        b: The value to be hashed using the hash function specified.
    """
    digest = self.hashfunc(b)
    register = digest & (self.m - 1)
    remaining_bits = digest >> self.p
    rank = self._get_rank(remaining_bits)
    # Registers only ever grow: keep the maximum observed rank.
    if rank > self.reg[register]:
        self.reg[register] = rank
Update the HyperLogLog with a new data value in bytes. The value will be hashed using the hash function specified by the `hashfunc` argument in the constructor. Args: b: The value to be hashed using the hash function specified. Example: To update with a new string value (using the default SHA1 hash function, which requires bytes as input): .. code-block:: python hll = HyperLogLog() hll.update("new value".encode('utf-8')) We can also use a different hash function, for example, `pyfarmhash`: .. code-block:: python import farmhash def _hash_32(b): return farmhash.hash32(b) hll = HyperLogLog(hashfunc=_hash_32) hll.update("new value")
codesearchnet
def _check_module_is_text_embedding(module_spec):
    """Raises ValueError if `module_spec` is not a text-embedding module.

    Args:
        module_spec: A `ModuleSpec` to test.

    Raises:
        ValueError: if `module_spec` default signature is not compatible
            with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
    """
    issues = []
    # Input: exactly one argument, a rank-1 string tensor of any length.
    input_info_dict = module_spec.get_input_info_dict()
    if len(input_info_dict) != 1:
        issues.append("Module default signature must require only one input")
    else:
        input_info, = input_info_dict.values()
        input_shape = input_info.get_shape()
        if not (input_info.dtype == tf.string and input_shape.ndims == 1 and input_shape.as_list() == [None]):
            issues.append(
                "Module default signature must have only one input "
                "tf.Tensor(shape=(?,), dtype=string)"
            )
    # Output: a 'default' rank-2 float32 tensor of shape (?, K) where the
    # batch dimension is unknown and K is known.
    output_info_dict = module_spec.get_output_info_dict()
    if "default" not in output_info_dict:
        issues.append("Module default signature must have a 'default' output.")
    else:
        output_info = output_info_dict["default"]
        output_shape = output_info.get_shape()
        if not (output_info.dtype == tf.float32 and output_shape.ndims == 2 and not output_shape.as_list()[0] and output_shape.as_list()[1]):
            issues.append(
                "Module default signature must have a 'default' output of "
                "tf.Tensor(shape=(?,K), dtype=float32)."
            )
    if issues:
        raise ValueError("Module is not a text-embedding: %r" % issues)
Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
juraj-google-style
def get_block(self, parent, config='running_config'):
    """Scans the config and returns a block of code.

    Args:
        parent (str): The parent string to search the config for; the
            returned block starts at this line.
        config (str): A text config string to be searched. Default is to
            search the running-config of the Node.

    Returns:
        A string object that represents the block from the config, or
        None when the parent string is not found.
    """
    try:
        # Anchor the parent so only whole-line matches are used.
        anchored = '^%s$' % parent
        return self.node.section(anchored, config=config)
    except TypeError:
        return None
Scans the config and returns a block of code Args: parent (str): The parent string to search the config for and return the block config (str): A text config string to be searched. Default is to search the running-config of the Node. Returns: A string object that represents the block from the config. If the parent string is not found, then this method will return None.
codesearchnet
def GetMap(self, cache_info):
    """Creates a Map from the cache_info data.

    Args:
        cache_info: file-like object containing the data to parse.

    Returns:
        A child of Map containing the cache data.
    """
    # Parse into a freshly created (empty) map of the appropriate type.
    return self.GetParser().GetMap(cache_info, self.CreateMap())
Creates a Map from the cache_info data. Args: cache_info: file-like object containing the data to parse Returns: A child of Map containing the cache data.
github-repos
def remove_dimensions(self, dimension_names):
    """Removes extra dimensions added by the add_dimensions() function.

    Ignores dimension names that don't exist.

    Args:
        dimension_names (list): List of dimension names to remove.
    """
    with self._lock:
        for dimension in dimension_names:
            # pop with a default replaces the membership-check-then-del
            # pattern and ignores unknown names in one step.
            self._extra_dimensions.pop(dimension, None)
Removes extra dimensions added by the add_dimensions() function. Ignores dimension names that don't exist. Args: dimension_names (list): List of dimension names to remove.
juraj-google-style
def if_sqlserver_disable_constraints_triggers(session: SqlASession, tablename: str) -> None:
    """If we're running under SQL Server, disable triggers AND constraints
    for the specified table while the resource is held.

    Args:
        session: SQLAlchemy :class:`Session`
        tablename: table name
    """
    # Nest the two context managers so both are re-enabled on exit
    # (triggers first, then constraints).
    with if_sqlserver_disable_constraints(session, tablename):
        with if_sqlserver_disable_triggers(session, tablename):
            yield
If we're running under SQL Server, disable triggers AND constraints for the specified table while the resource is held. Args: session: SQLAlchemy :class:`Session` tablename: table name
juraj-google-style
def record(ekey, entry, diff=False):
    """Records the specified entry to the key-value store under the
    specified entity key.

    Args:
        ekey (str): fqdn/uuid of the method/object to store the entry for.
        entry (dict): attributes and values gleaned from the execution.
        diff (bool): when True, the "c" element of `entry` will be diffed
            against previous entries under the same `ekey` if their method
            (attribute "m") matches.
    """
    taskdb = active_db()
    taskdb.record(ekey, entry, diff)
    # Persist immediately so the entry survives the process.
    taskdb.save()
Records the specified entry to the key-value store under the specified entity key. Args: ekey (str): fqdn/uuid of the method/object to store the entry for. entry (dict): attributes and values gleaned from the execution. diff (bool): when True, the "c" element of `entry` will be diffed against previous entries under the same `ekey` if their method (attribute "m") matches.
juraj-google-style
def funds(self, term, field=None, **kwargs):
    """Search for funds matching a search term.

    Args:
        term (str): Search term for the funds endpoint.
        field (str): The field to search on; defaults to the organisation
            name field ('fu.org.n').
        kwargs (dict): additional keywords passed into the request params.

    Returns:
        The HTTP response from the funds endpoint.
    """
    params = dict(kwargs)
    params['q'] = term
    # Default to searching by organisation name when no field is given.
    params['f'] = field if field else 'fu.org.n'
    response = self.session.get(self._BASE_URI + 'funds', params=params)
    self.handle_http_error(response)
    return response
Search for funds matching a search term. Args: term (str): Fund id to search on field (str): The field to search on. Options are title, amount, org_name and type. kwargs (dict): additional keywords passed into requests.session.get params keyword.
juraj-google-style
def __init__(self, Outer, Inner, *l):
    """Initialize the container with its outer/inner element classes.

    Args:
        Outer (class): One of the possible outer classes.
        Inner (class): One of the possible inner classes.
        *l: Items to be processed (via ``add``) into ``value``.
    """
    super().__init__()
    # Start with one empty Outer; `self.l` aliases that Outer's value
    # list, so mutations through either name are shared.
    self.value = [Outer()]
    self.l = self.value[0].value
    self.Outer = Outer
    self.Inner = Inner
    self.add(l)
init Args: Outer (class): One of the possible outer classes. Inner (class): One of the possible inner classes. *l: To be processed and set to value
juraj-google-style
def set_number_of_shards(self, number_of_shards):
    """Sets the number of shards to use for the InfeedQueue.

    Args:
        number_of_shards: number of ways to shard the InfeedQueue.

    Raises:
        ValueError: if number_of_shards is not > 0; or the policies have
            been frozen and number_of_shards was already set to something
            else.
    """
    for policy in self._sharding_policies:
        policy.set_number_of_shards(number_of_shards)
        policy.set_number_of_partitions(self._number_of_partitions)
    # Re-check invariants after updating every per-tensor policy.
    self._validate()
Sets the number of shards to use for the InfeedQueue. Args: number_of_shards: number of ways to shard the InfeedQueue. Raises: ValueError: if number_of_shards is not > 0; or the policies have been frozen and number_of_shards was already set to something else.
github-repos
def has_shell_command(self, command):
    """Checks to see if a given check command exists on the device.

    Args:
        command: A string that is the name of the command to check.

    Returns:
        A boolean that is True if the command exists and False otherwise.
    """
    try:
        # `command -v` resolves shell builtins and PATH entries alike.
        output = self.shell(['command', '-v', command]).decode('utf-8').strip()
        return (command in output)
    except AdbError:
        # A missing command makes `command -v` exit non-zero.
        return False
Checks to see if a given check command exists on the device. Args: command: A string that is the name of the command to check. Returns: A boolean that is True if the command exists and False otherwise.
codesearchnet
def execute(self, correlation_id, args):
    """Executes the command given specific arguments as an input.

    Args:
        correlation_id: a unique correlation/transaction id
        args: command arguments

    Returns:
        an execution result.

    Raises:
        MicroserviceError: when execution fails for whatever reason.
    """
    # NOTE(review): `_next` is not defined in this scope; presumably a
    # module-level continuation, or possibly a bug (self._next?) -- confirm.
    return self._intercepter.execute(_next, correlation_id, args)
Executes the command given specific arguments as an input. Args: correlation_id: a unique correlation/transaction id args: command arguments Returns: an execution result. Raises: MicroserviceError: when execution fails for whatever reason.
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    """Parses an Opera typed history file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    # Cheap signature check before handing the file to the XML parser.
    data = file_object.read(self._HEADER_READ_SIZE)
    if (not data.startswith(b'<?xml')):
        raise errors.UnableToParseFile('Not an Opera typed history file [not a XML]')
    (_, _, data) = data.partition(b'\n')
    if (not data.startswith(b'<typed_history')):
        raise errors.UnableToParseFile('Not an Opera typed history file [wrong XML root key]')
    # Rewind so ElementTree parses from the start.
    file_object.seek(0, os.SEEK_SET)
    xml = ElementTree.parse(file_object)
    for history_item in xml.iterfind('typed_history_item'):
        event_data = OperaTypedHistoryEventData()
        event_data.entry_type = history_item.get('type', None)
        event_data.url = history_item.get('content', None)
        if (event_data.entry_type == 'selected'):
            event_data.entry_selection = 'Filled from autocomplete.'
        elif (event_data.entry_type == 'text'):
            event_data.entry_selection = 'Manually typed.'
        last_typed_time = history_item.get('last_typed', None)
        if (last_typed_time is None):
            # Entries without a timestamp produce warnings, not events.
            parser_mediator.ProduceExtractionWarning('missing last typed time.')
            continue
        date_time = dfdatetime_time_elements.TimeElements()
        try:
            date_time.CopyFromStringISO8601(last_typed_time)
        except ValueError as exception:
            parser_mediator.ProduceExtractionWarning('unsupported last typed time: {0:s} with error: {1!s}.'.format(last_typed_time, exception))
            continue
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an Opera typed history file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def add(self, datum, location_ids):
    """Adds a sample data point.

    Args:
        datum: `ProfileDatum` to add a sample for.
        location_ids: List of numeric location ids for this sample.
    """
    node_name = datum.node_exec_stats.node_name
    if node_name in self._node_name_to_sample:
        # Existing sample: extend its location chain only.
        sample = self._node_name_to_sample[node_name]
        sample.location_id.extend(location_ids)
    else:
        sample = profile_pb2.Sample()
        # Three accumulators: [count, all-time micros, op-time micros],
        # incremented below for new and existing samples alike.
        sample.value.extend([0, 0, 0])
        label = sample.label.add()
        label.key = self._string_table.index_of('node_name')
        label.str = self._string_table.index_of(node_name)
        label = sample.label.add()
        label.key = self._string_table.index_of('op_type')
        label.str = self._string_table.index_of(datum.op_type)
        self._node_name_to_sample[node_name] = sample
    sample.value[0] += 1
    sample.value[1] += datum.node_exec_stats.all_end_rel_micros
    sample.value[2] += datum.node_exec_stats.op_end_rel_micros - datum.node_exec_stats.op_start_rel_micros
Adds a sample data point. Args: datum: `ProfileDatum` to add a sample for. location_ids: List of numberic location ids for this sample.
github-repos
def header_string_from_file(filename='feff.inp'):
    """Reads Header string from either a HEADER file or feff.inp file.

    Will also read a header from a non-pymatgen generated feff.inp file.

    Args:
        filename: File name containing the Header data.

    Returns:
        The header string.
    """
    with zopen(filename, "r") as fobject:
        f = fobject.readlines()
    feff_header_str = []
    ln = 0
    # Pymatgen-generated files carry a "pymatgen" marker on line one.
    try:
        feffpmg = f[0].find("pymatgen")
    except IndexError:
        feffpmg = False
    # NOTE(review): find() returns -1 (truthy) when absent and 0 (falsy)
    # when the marker starts the line -- confirm this branch is intended.
    if feffpmg:
        nsites = int(f[8].split()[2])
        # Header spans a fixed preamble of 9 lines plus one line per site.
        for line in f:
            ln += 1
            if ln <= nsites + 9:
                feff_header_str.append(line)
    else:
        # Non-pymatgen file: header is the leading run of comment ('*')
        # or title ('T') lines.
        end = 0
        for line in f:
            if (line[0] == "*" or line[0] == "T") and end == 0:
                feff_header_str.append(line)
            else:
                end = 1
    return ''.join(feff_header_str)
Reads Header string from either a HEADER file or feff.inp file Will also read a header from a non-pymatgen generated feff.inp file Args: filename: File name containing the Header data. Returns: Reads header string.
juraj-google-style
def _craft_s3_keys(self): now = time.gmtime() stub = 'templates/{stack_name}/{version}'.format(stack_name=self._config.get('environment', {}).get('stack_name', None), version=self._config.get('codeVersion')) stub = ((stub + '/') + str(now.tm_year)) stub = ((stub + '/') + str(('%02d' % now.tm_mon))) stub = ((stub + '/') + str(('%02d' % now.tm_mday))) stub = ((stub + '/') + str(('%02d' % now.tm_hour))) stub = ((stub + ':') + str(('%02d' % now.tm_min))) stub = ((stub + ':') + str(('%02d' % now.tm_sec))) if self._yaml: template_key = (stub + '/stack.yaml') else: template_key = (stub + '/stack.json') property_key = (stub + '/stack.properties') return (template_key, property_key)
We are putting objects into S3; we were supplied the bucket. Here we craft the keys of the elements we are putting up there in the internet clouds. Args: None Returns: a tuple of template file key and property file key
codesearchnet
def __init__(self, cache_file_name=None, update_cache=True, req_timeout=90.0):
    """Establishes basic HTTP params and loads a cache.

    Args:
        cache_file_name: String file name of cache. When None, no cache
            is used.
        update_cache: Determines whether cache should be written out back
            to the disk when closing it. Default is `True`.
        req_timeout: Maximum number of seconds to wait without reading a
            response byte before deciding an error has occurred. Default
            is 90.0 seconds.
    """
    # Caps concurrency at two in-flight requests -- TODO confirm why.
    self._requests = MultiRequest(max_requests=2, req_timeout=req_timeout)
    self._cache = ApiCache(cache_file_name, update_cache) if cache_file_name else None
Establishes basic HTTP params and loads a cache. Args: cache_file_name: String file name of cache. update_cache: Determines whether cache should be written out back to the disk when closing it. Default is `True`. req_timeout: Maximum number of seconds to wait without reading a response byte before deciding an error has occurred. Default is 90.0 seconds.
juraj-google-style
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
    """Overwrite the default Process function so it always triggers.

    Process() normally checks if the current plist being processed matches
    a plugin by comparing its PATH and KEY requirements, raising
    WrongPlistPlugin otherwise. The purpose of the default plugin is to
    trigger on any plist, so it skips that comparison entirely.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        plist_name (str): name of the plist.
        top_level (dict[str, object]): plist top-level key.
    """
    logger.debug('Plist {0:s} plugin used for: {1:s}'.format(self.NAME, plist_name))
    self.GetEntries(parser_mediator, top_level=top_level, **kwargs)
Overwrite the default Process function so it always triggers. Process() checks if the current plist being processed is a match for a plugin by comparing the PATH and KEY requirements defined by a plugin. If both match processing continues; else raise WrongPlistPlugin. The purpose of the default plugin is to always trigger on any given plist file, thus it needs to overwrite the default behavior of comparing PATH and KEY. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. plist_name (str): name of the plist. top_level (dict[str, object]): plist top-level key.
codesearchnet
def get_command_from_result(script, result, debug=False):
    """Return the command that is needed to obtain a certain result.

    Args:
        script (str): The ns-3 script the result was produced by.
        result (dict): A result dict containing a 'params' mapping of
            parameter: value pairs.
        debug (bool): Whether the command should include the gdb
            debugging template.

    Returns:
        str: The ``waf --run`` command line.
    """
    # The parameter string is identical in both branches; build it once.
    param_args = " ".join('--%s=%s' % (param, value)
                          for param, value in result['params'].items())
    if not debug:
        command = "python waf --run \"" + script + " " + param_args + "\""
    else:
        command = "python waf --run " + script + \
            " --command-template=\"" + "gdb --args %s " + param_args + "\""
    return command
Return the command that is needed to obtain a certain result. Args: params (dict): Dictionary containing parameter: value pairs. debug (bool): Whether the command should include the debugging template.
juraj-google-style
def begin_statement(self, stmt):
    """Marks the beginning of a statement.

    Args:
        stmt: Hashable, a key by which the statement can be identified in
            the CFG's stmt_prev and stmt_next attributes.
    """
    self.active_stmts.add(stmt)
Marks the beginning of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes
github-repos
def _build(self, inputs, memory, treat_input_as_matrix=False):
    """Adds relational memory to the TensorFlow graph.

    Args:
        inputs: Tensor input.
        memory: Memory output from the previous time step.
        treat_input_as_matrix: Optional, whether to treat `inputs` as a
            sequence of matrices. Defaults to False, in which case the
            input is flattened into a vector.

    Returns:
        output: This time step's output.
        next_memory: The next version of memory to use.
    """
    if treat_input_as_matrix:
        # Keep the row dimension and project each row to the memory size.
        inputs = basic.BatchFlatten(preserve_dims=2)(inputs)
        inputs_reshape = basic.BatchApply(basic.Linear(self._mem_size), n_dims=2)(inputs)
    else:
        inputs = basic.BatchFlatten()(inputs)
        inputs = basic.Linear(self._mem_size)(inputs)
        inputs_reshape = tf.expand_dims(inputs, 1)
    memory_plus_input = tf.concat([memory, inputs_reshape], axis=1)
    next_memory = self._attend_over_memory(memory_plus_input)
    n = inputs_reshape.get_shape().as_list()[1]
    # Drop the appended input rows, keeping only the memory slots.
    # BUG FIX: the original subscript `[(:, :(- n), :)]` is not valid
    # Python syntax; this is the standard slice form.
    next_memory = next_memory[:, :(- n), :]
    if ((self._gate_style == 'unit') or (self._gate_style == 'memory')):
        (self._input_gate, self._forget_gate) = self._create_gates(inputs_reshape, memory)
        next_memory = (self._input_gate * tf.tanh(next_memory))
        next_memory += (self._forget_gate * memory)
    output = basic.BatchFlatten()(next_memory)
    return (output, next_memory)
Adds relational memory to the TensorFlow graph. Args: inputs: Tensor input. memory: Memory output from the previous time step. treat_input_as_matrix: Optional, whether to treat `input` as a sequence of matrices. Defaulta to False, in which case the input is flattened into a vector. Returns: output: This time step's output. next_memory: The next version of memory to use.
codesearchnet
def _FindLargestIdPostfixNumber(self, schedule): postfix_number_re = re.compile('(\d+)$') def ExtractPostfixNumber(entity_id): if entity_id is None: return 0 match = postfix_number_re.search(entity_id) if match is not None: return int(match.group(1)) else: return 0 id_data_sets = {'agency_id': schedule.GetAgencyList(), 'stop_id': schedule.GetStopList(), 'route_id': schedule.GetRouteList(), 'trip_id': schedule.GetTripList(), 'service_id': schedule.GetServicePeriodList(), 'fare_id': schedule.GetFareAttributeList(), 'shape_id': schedule.GetShapeList()} max_postfix_number = 0 for id_name, entity_list in id_data_sets.items(): for entity in entity_list: entity_id = getattr(entity, id_name) postfix_number = ExtractPostfixNumber(entity_id) max_postfix_number = max(max_postfix_number, postfix_number) return max_postfix_number
Finds the largest integer used as the ending of an id in the schedule. Args: schedule: The schedule to check. Returns: The maximum integer used as an ending for an id.
juraj-google-style
def statement(self) -> Statement:
    """Parse YANG statement.

    Returns:
        The parsed Statement, with substatements attached and their
        ``superstmt`` back-references set.

    Raises:
        EndOfInput: If past the end of input.
        UnexpectedInput: If no syntactically correct statement is found.
    """
    (pref, kw) = self.keyword()
    pres = self.opt_separator()
    # Renamed from `next`, which shadowed the builtin.
    lookahead = self.peek()
    if (lookahead == ';'):
        # Argument-less statement terminated immediately.
        arg = None
        sub = False
    elif (lookahead == '{'):
        # Argument-less statement with a substatement block.
        arg = None
        sub = True
    elif (not pres):
        raise UnexpectedInput(self, 'separator')
    else:
        # Statement with an argument; `argument` reports whether a
        # substatement block follows.
        self._arg = ''
        sub = self.argument()
        arg = self._arg
    self.offset += 1
    res = Statement(kw, arg, pref=pref)
    if sub:
        res.substatements = self.substatements()
        for sub in res.substatements:
            sub.superstmt = res
    return res
Parse YANG statement. Raises: EndOfInput: If past the end of input. UnexpectedInput: If no syntactically correct statement is found.
codesearchnet
def insert(self, point, data=None):
    """Insert a new node in the tree.

    Args:
        point (:obj:`tuple` of float or int): Stores the position of the
            node.
        data (:obj, optional): The information stored by the node.

    Returns:
        int: The identifier of the new node.

    Example:
        >>> tree = Tree(4, 800)
        >>> point = (3, 7)
        >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2}
        >>> node_id = tree.insert(point, data)
    """
    assert (len(point) == self.k)
    if (self.size == 0):
        # First node: initialize the (implicit) infinite root region.
        if (self.region is None):
            self.region = ([[(- math.inf), math.inf]] * self.k)
        axis = 0
        return self.new_node(point, self.region, axis, data)
    # Walk down the tree to the parent whose child slot is free.
    current_id = 0
    while True:
        parent_node = self.node_list[current_id]
        axis = parent_node.axis
        if (point[axis] < parent_node.point[axis]):
            (next_id, left) = (parent_node.left, True)
        else:
            (next_id, left) = (parent_node.right, False)
        if (next_id is None):
            break
        current_id = next_id
    # Copy the parent region (outer list plus the split axis's interval)
    # and shrink it along the split axis at the parent's coordinate.
    region = parent_node.region[:]
    region[axis] = parent_node.region[axis][:]
    limit = parent_node.point[axis]
    if left:
        self.node_list[current_id] = parent_node._replace(left=self.size)
        region[axis][1] = limit
    else:
        self.node_list[current_id] = parent_node._replace(right=self.size)
        region[axis][0] = limit
    # The child splits on the next axis, cycling through all k axes.
    return self.new_node(point, region, ((axis + 1) % self.k), data)
Insert a new node in the tree. Args: point (:obj:`tuple` of float or int): Stores the position of the node. data (:obj, optional): The information stored by the node. Returns: int: The identifier of the new node. Example: >>> tree = Tree(4, 800) >>> point = (3, 7) >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2} >>> node_id = tree.insert(point, data)
codesearchnet
def to_json(self):
    """Serializes all data in this mapreduce spec into json form.

    Returns:
        data in json format.
    """
    return {'name': self.name,
            'mapreduce_id': self.mapreduce_id,
            'mapper_spec': self.mapper.to_json(),
            'params': self.params,
            'hooks_class_name': self.hooks_class_name}
Serializes all data in this mapreduce spec into json form. Returns: data in json format.
codesearchnet
def cast(self, value):
    """Cast a value to the type required by the option, if one is set.

    This is used to cast the string values gathered from environment
    variables into their required type.

    Args:
        value: The value to cast.

    Returns:
        The value cast to the expected type for the option.

    Raises:
        errors.BisonError: on a failed or unsupported cast.
    """
    target = self.type
    # No declared type: pass the value through untouched.
    if target is None:
        return value
    if target in (str, int, float):
        try:
            return target(value)
        except Exception as e:
            raise errors.BisonError(
                'Failed to cast {} to {}'.format(value, target)) from e
    # Booleans cannot use bool('false') semantics; compare the text.
    if target == bool:
        return value.lower() == 'true'
    raise errors.BisonError('Unsupported type for casting: {}'.format(target))
Cast a value to the type required by the option, if one is set. This is used to cast the string values gathered from environment variable into their required type. Args: value: The value to cast. Returns: The value casted to the expected type for the option.
codesearchnet
def parse_op_and_node(line):
    """Parse a line containing an op type followed by a node name.

    For example, the line "  [Variable] hidden/weights" parses to
    ("Variable", "hidden/weights").

    Args:
        line: The line to be parsed, as a str.

    Returns:
        A tuple of (op type, node name).
    """
    tokens = line.strip().split(' ')
    # The op type token is wrapped in square brackets; strip them.
    op_type = tokens[0].replace('[', '').replace(']', '')
    return op_type, tokens[1]
Parse a line containing an op node followed by a node name. For example, if the line is " [Variable] hidden/weights", this function will return ("Variable", "hidden/weights") Args: line: The line to be parsed, as a str. Returns: Name of the parsed op type. Name of the parsed node.
github-repos
def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):
    """Create the documentation page for a single module.

    Generates the RST needed to document the module and saves it to its
    own page under ``content/``.

    Args:
        mod (tuple): ``(name, module)`` pair for the module to document.
        showprivate (bool): A flag for whether or not to display private
            members.
        showinh (bool): Whether to display inherited members.

    Returns:
        str: The file name ready to be appended to a toctree.

    Raises:
        RuntimeError: if the module does not define ``__all__``.
    """
    # (shadows builtin `all`; kept as-is)
    try:
        all = mod[1].__all__
    except AttributeError:
        raise RuntimeError('Module (%s) MUST have `__all__` defined.' % mod[1].__name__)
    # Prefer a human-friendly display name when the module defines one.
    try:
        name = mod[1].__displayname__
    except AttributeError:
        name = mod[0]
    # Track category usage counts for modules that declare one.
    try:
        category = mod[1].__category__
        self.__categories.setdefault(category, 0)
        self.__categories[category] += 1
    except AttributeError:
        pass
    feats = inspect.getmembers(mod[1])
    fname = 'content/' + mod[1].__name__.replace('.', '/').replace(' ', '-')+'.rst'
    # Keep only public members listed in __all__ (or all listed members
    # when showprivate is set).
    feats = [f for f in feats if f[0] in all and (showprivate or not f[0][0:1] == '_')]
    with open(fname, 'w') as fid:
        fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))
        for f in feats:
            if inspect.isclass(f[1]) or inspect.isfunction(f[1]):
                try:
                    featname = f[1].__displayname__
                except AttributeError:
                    featname = f[1].__name__
                # Per-member category bookkeeping mirrors the module-level
                # handling above.
                try:
                    category = f[1].__category__
                    self.__categories.setdefault(category, 0)
                    self.__categories[category] += 1
                except AttributeError:
                    pass
                if inspect.isclass(f[1]):
                    fid.write(Classifier.GetClassText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__), showprivate=showprivate, showinh=showinh))
                elif inspect.isfunction(f[1]):
                    fid.write(Classifier.GetFunctionText(featname, '%s.%s' % (mod[1].__name__, f[1].__name__)))
        # Redundant with the `with` block's close; kept as-is.
        fid.close()
    return '\n        %s' % (fname.split('/')[-1])
An internal helper to create a page for a single module. This will automatically generate the needed RSF to document the module and save the module to its own page in its appropriate location. Args: mod (module): The single module to document as its own page showprivate (bool): A flag for whether or not to display private members Returns: str: The file name ready to be appended to a toctree
juraj-google-style
def _exponent_handler_factory(ion_type, exp_chars, parse_func, first_char=None):
    """Generates a handler co-routine which tokenizes a numeric exponent.

    Args:
        ion_type (IonType): The type of the value with this exponent.
        exp_chars (sequence): The set of ordinals of the legal exponent
            characters for this component.
        parse_func (callable): Called upon ending the numeric value.
            Accepts the current token value and returns a thunk that
            lazily parses the token.
        first_char (Optional[int]): The ordinal of the character that
            should be appended instead of the character that occurs first
            in this component. This is useful when a character peculiar to
            the Ion format (e.g. 'd' for decimal exponents) must be
            replaced with 'e' for compatibility with python's Decimal
            type.
    """
    def transition(prev, c, ctx, trans):
        # A sign is only legal immediately after the exponent character.
        if ((c in _SIGN) and (prev in exp_chars)):
            ctx.value.append(c)
        else:
            _illegal_character(c, ctx)
        return trans
    # Neither an exponent character nor a sign may end the token.
    illegal = (exp_chars + _SIGN)
    return _numeric_handler_factory(_DIGITS, transition, (lambda c, ctx: (c in exp_chars)), illegal, parse_func, illegal_at_end=illegal, ion_type=ion_type, first_char=first_char)
Generates a handler co-routine which tokenizes an numeric exponent. Args: ion_type (IonType): The type of the value with this exponent. exp_chars (sequence): The set of ordinals of the legal exponent characters for this component. parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a thunk that lazily parses the token. first_char (Optional[int]): The ordinal of the character that should be appended instead of the character that occurs first in this component. This is useful for preparing the token for parsing in the case where a particular character is peculiar to the Ion format (e.g. 'd' to denote the exponent of a decimal value should be replaced with 'e' for compatibility with python's Decimal type).
codesearchnet
def read(self, n=-1):
    """Read bytes/characters from the current position in the file.

    Args:
        n: Number of units to read; ``-1`` (the default) reads to EOF.

    Returns:
        The requested slice of the file, post-processed by
        ``self._prepare_value``.
    """
    self._preread_check()
    # -1 means "everything remaining from the current position".
    length = self.size() - self.tell() if n == -1 else n
    return self._prepare_value(self._read_buf.read(length))
Returns the contents of a file as a string. Starts reading from current position in file. Args: n: Read `n` bytes if `n != -1`. If `n = -1`, reads to end of file. Returns: `n` bytes of the file (or whole file) in bytes mode or `n` bytes of the string if in string (regular) mode.
github-repos
def _value_loss(self, observ, reward, length):
    """Compute the loss for the value baseline.

    The loss is half the squared difference between the empirical
    discounted return and the value predicted by the network, averaged
    over valid (unmasked) timesteps.

    Args:
        observ: Sequences of observations.
        reward: Sequences of rewards.
        length: Batch of sequence lengths.

    Returns:
        Tuple of scalar loss tensor (NaN-checked) and merged summary tensor.
    """
    with tf.name_scope('value_loss'):
        # Predicted value baseline from the policy/value network.
        value = self._network(observ, length).value
        # Empirical discounted return over each episode.
        return_ = utility.discounted_return(
            reward, length, self._config.discount)
        advantage = return_ - value
        # Mask out timesteps beyond each sequence's length.
        value_loss = 0.5 * self._mask(advantage ** 2, length)
        summary = tf.summary.merge([
            tf.summary.histogram('value_loss', value_loss),
            tf.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))])
        value_loss = tf.reduce_mean(value_loss)
        # check_numerics guards against NaN/Inf propagating into training.
        return tf.check_numerics(value_loss, 'value_loss'), summary
Compute the loss function for the value baseline. The value loss is the difference between empirical and approximated returns over the collected episodes. Returns the loss tensor and a summary string. Args: observ: Sequences of observations. reward: Sequences of reward. length: Batch of sequence lengths. Returns: Tuple of loss tensor and summary tensor.
juraj-google-style
def __init__(self, export_dir):
    """Create a `SavedModelLoader` for the SavedModel at `export_dir`.

    Args:
        export_dir: Directory containing the SavedModel protocol buffer
            and the variables to be loaded.
    """
    self._export_dir = export_dir
    # Parse the SavedModel proto eagerly so errors surface at construction.
    self._saved_model = parse_saved_model(export_dir)
    self._variables_path = path_helpers.get_variables_path(export_dir)
Creates a `SavedModelLoader`. Args: export_dir: Directory in which the SavedModel protocol buffer and variables to be loaded are located.
github-repos
def add_polyhedron(self, neighbors, center, color, opacity=1.0,
                   draw_edges=False, edges_color=(0.0, 0.0, 0.0),
                   edges_linewidth=2):
    """Add a polyhedron actor to the render window.

    Args:
        neighbors: Neighbors of the polyhedron (the vertices).
        center: The atom in the center of the polyhedron.
        color: RGB color as a sequence, or the string 'element' to color
            by the dominant species of the center atom.
        opacity: Opacity of the polyhedron.
        draw_edges: If True, a line is drawn at each edge.
        edges_color: Color of the edge lines (default black). Changed from
            a mutable list default to a tuple to avoid shared-state bugs.
        edges_linewidth: Width of the edge lines.
    """
    points = vtk.vtkPoints()
    conv = vtk.vtkConvexPointSet()
    # Register each vertex both as a point and as a member of the convex set.
    for i, site in enumerate(neighbors):
        x, y, z = site.coords
        points.InsertPoint(i, x, y, z)
        conv.GetPointIds().InsertId(i, i)
    grid = vtk.vtkUnstructuredGrid()
    grid.Allocate(1, 1)
    grid.InsertNextCell(conv.GetCellType(), conv.GetPointIds())
    grid.SetPoints(points)
    dsm = vtk.vtkDataSetMapper()
    # Map this actor back to the sites it represents (center + vertices).
    polysites = [center]
    polysites.extend(neighbors)
    self.mapper_map[dsm] = polysites
    if vtk.VTK_MAJOR_VERSION <= 5:
        dsm.SetInputConnection(grid.GetProducerPort())
    else:
        dsm.SetInputData(grid)
    ac = vtk.vtkActor()
    ac.SetMapper(dsm)
    ac.GetProperty().SetOpacity(opacity)
    if color == 'element':
        # Color by the species with the highest occupancy at the center site.
        myspecie = max(center.species.items(), key=lambda kv: kv[1])[0]
        color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
        ac.GetProperty().SetColor(color)
    else:
        ac.GetProperty().SetColor(color)
    if draw_edges:
        ac.GetProperty().SetEdgeColor(edges_color)
        ac.GetProperty().SetLineWidth(edges_linewidth)
        ac.GetProperty().EdgeVisibilityOn()
    self.ren.AddActor(ac)
Adds a polyhedron. Args: neighbors: Neighbors of the polyhedron (the vertices). center: The atom in the center of the polyhedron. color: Color for text as RGB. opacity: Opacity of the polyhedron draw_edges: If set to True, the a line will be drawn at each edge edges_color: Color of the line for the edges edges_linewidth: Width of the line drawn for the edges
juraj-google-style
def CopyMicrosecondsToFractionOfSecond(cls, microseconds):
    """Copies the number of microseconds to a fraction of second value.

    Args:
        microseconds (int): number of microseconds.

    Returns:
        decimal.Decimal: fraction of second, a value between 0.0 and 1.0.

    Raises:
        ValueError: if the number of microseconds is out of bounds.
    """
    if not 0 <= microseconds < definitions.MICROSECONDS_PER_SECOND:
        raise ValueError(
            'Number of microseconds value: {0:d} out of bounds.'.format(
                microseconds))
    # Truncate to whole milliseconds before converting to a fraction.
    milliseconds = microseconds // definitions.MICROSECONDS_PER_MILLISECOND
    return decimal.Decimal(milliseconds) / definitions.MILLISECONDS_PER_SECOND
Copies the number of microseconds to a fraction of second value. Args: microseconds (int): number of microseconds. Returns: decimal.Decimal: fraction of second, which must be a value between 0.0 and 1.0. Raises: ValueError: if the number of microseconds is out of bounds.
juraj-google-style
def json_using_iso8601(__obj: Dict) -> Dict:
    """Parse ISO-8601 values from JSON databases.

    Each value is run through the datetime parser and then the delta
    parser; parse failures are silently ignored so non-temporal values
    pass through unchanged. See :class:`json.JSONDecoder`.

    Args:
        __obj: Object to decode.
    """
    for key in __obj:
        raw = __obj[key]
        # Try datetime first, then duration — both against the raw value.
        with suppress(TypeError, ValueError):
            __obj[key] = parse_datetime(raw)
        with suppress(TypeError, ValueError):
            __obj[key] = parse_delta(raw)
    return __obj
Parse ISO-8601 values from JSON databases. See :class:`json.JSONDecoder` Args: __obj: Object to decode
juraj-google-style
def save(self, resource):
    """Save group|indicator dict or object to shelve.

    Best effort to persist group/indicator data to disk. If for any
    reason the shelf write fails, the data stays in the in-memory dict
    so it remains accessible.

    Args:
        resource (dict|obj): The Group or Indicator dict or object.
    """
    resource_type = None
    xid = None
    # Resources may arrive either as plain dicts or as objects.
    if isinstance(resource, dict):
        resource_type = resource.get('type')
        xid = resource.get('xid')
    else:
        resource_type = resource.type
        xid = resource.xid
    if ((resource_type is not None) and (xid is not None)):
        saved = True
        if (resource_type in self.tcex.group_types):
            try:
                # Persist to the disk-backed group shelf.
                self.groups_shelf[xid] = resource
            except Exception:
                saved = False
            if saved:
                # Only drop the in-memory copy once safely persisted.
                try:
                    del self._groups[xid]
                except KeyError:
                    pass
        elif (resource_type in self.tcex.indicator_types_data.keys()):
            try:
                # Persist to the disk-backed indicator shelf.
                self.indicators_shelf[xid] = resource
            except Exception:
                saved = False
            if saved:
                # Only drop the in-memory copy once safely persisted.
                try:
                    del self._indicators[xid]
                except KeyError:
                    pass
Save group|indicator dict or object to shelve. Best effort to save group/indicator data to disk. If for any reason the save fails the data will still be accessible from list in memory. Args: resource (dict|obj): The Group or Indicator dict or object.
codesearchnet
def IsAllocated(self):
    """Determines if the file entry is allocated.

    Returns:
        bool: True if the file entry is allocated.
    """
    # Lazily fetch and cache the stat object on first use.
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    stat = self._stat_object
    return stat and stat.is_allocated
Determines if the file entry is allocated. Returns: bool: True if the file entry is allocated.
codesearchnet
def allan_variance(data, dt, tmax=10): allanvar = [] nmax = (len(data) if (len(data) < (tmax / dt)) else int((tmax / dt))) for i in range(1, (nmax + 1)): databis = data[(len(data) % i):] y = databis.reshape((len(data) allanvar.append((((y[1:] - y[:(- 1)]) ** 2).mean() / 2)) return ((dt * np.arange(1, (nmax + 1))), np.array(allanvar))
Calculate Allan variance. Args: data (np.ndarray): Input data. dt (float): Time between each data. tmax (float): Maximum time. Returns: vk (np.ndarray): Frequency. allanvar (np.ndarray): Allan variance.
codesearchnet
def list_of_vars(arg_plot):
    """Construct list of variables per plot.

    Args:
        arg_plot (str): string with variable names separated with ``_``
            (figures: ``-``), ``.`` (subplots) and ``,`` (same subplot).

    Returns:
        three nested lists of str

        - variables on the same subplot;
        - subplots on the same figure;
        - figures.
    """
    figures = []
    # Split into figures, then subplots, then individual variables,
    # dropping empty tokens produced by adjacent separators.
    for pvars in arg_plot.split('-'):
        if not pvars:
            continue
        fig = []
        for svars in pvars.split('.'):
            if not svars:
                continue
            fig.append([var for var in svars.split(',') if var])
        figures.append(fig)
    # Remove subplots that ended up empty, then figures that ended up empty.
    figures = [[sub for sub in fig if sub] for fig in figures if fig]
    return [fig for fig in figures if fig]
Construct list of variables per plot. Args: arg_plot (str): string with variable names separated with ``_`` (figures), ``.`` (subplots) and ``,`` (same subplot). Returns: three nested lists of str - variables on the same subplot; - subplots on the same figure; - figures.
codesearchnet
def get_sso(self, role):
    """Builds the SSO (Single Sign-On) URL parameters for this enclosure.

    This allows the user to log in to the enclosure without providing
    credentials. Only supported by C7000 enclosures.

    Args:
        role: Role

    Returns:
        SSO (Single Sign-On) URL parameters.
    """
    sso_uri = f"{self.data['uri']}/sso?role={role}"
    return self._helper.do_get(sso_uri)
Builds the SSO (Single Sign-On) URL parameters for the specified enclosure. This allows the user to log in to the enclosure without providing credentials. This API is currently only supported by C7000 enclosures. Args: role: Role Returns: SSO (Single Sign-On) URL parameters.
juraj-google-style
def make_parts_for(self, field_name, field_data):
    """Create the relevant parts for this field.

    Dispatches on the field's (type, subtype) pair to the specialised
    part builder methods.

    Args:
        field_name (str): Short field name, e.g. VAL
        field_data (FieldData): Field data object

    Raises:
        ValueError: If the type/subtype combination is unrecognised.
    """
    typ = field_data.field_type
    subtyp = field_data.field_subtype
    # 'read' and 'xadc' fields are read-only from software.
    if (typ in ('read', 'xadc')):
        writeable = False
    else:
        writeable = True
    if ((typ == 'time') or ((typ in ('param', 'read')) and (subtyp == 'time'))):
        self._make_time_parts(field_name, field_data, writeable)
    elif ((typ == 'write') and (subtyp == 'action')):
        self._make_action_part(field_name, field_data)
    elif (typ in ('param', 'read', 'write', 'xadc')):
        self._make_param_part(field_name, field_data, writeable)
    elif (typ == 'bit_out'):
        self._make_out(field_name, field_data, 'bit')
    elif (typ == 'pos_out'):
        # pos_out fields additionally get scale/offset and capture parts.
        self._make_out(field_name, field_data, 'pos')
        self._make_scale_offset(field_name)
        self._make_out_capture(field_name, field_data)
    elif (typ == 'ext_out'):
        self._make_out_capture(field_name, field_data)
    elif (typ == 'bit_mux'):
        # bit muxes also get a configurable delay part.
        self._make_mux(field_name, field_data, 'bit')
        self._make_mux_delay(field_name)
    elif (typ == 'pos_mux'):
        self._make_mux(field_name, field_data, 'pos')
    elif (typ == 'table'):
        self._make_table(field_name, field_data)
    else:
        raise ValueError(('Unknown type %r subtype %r' % (typ, subtyp)))
Create the relevant parts for this field Args: field_name (str): Short field name, e.g. VAL field_data (FieldData): Field data object
codesearchnet
class DacOutput(ModelOutput):
    """
    Args:
        loss (`torch.Tensor`):
            Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses.
        audio_values (`torch.Tensor` of shape `(batch_size, input_length)`):
            Reconstructed audio data.
        quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
            Quantized continuous representation of input.
        audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`):
            Codebook indices for each codebook (quantized discrete representation of input).
        projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
            Projected latents (continuous representation of input before quantization).
    """

    loss: Optional[torch.FloatTensor] = None
    audio_values: Optional[torch.FloatTensor] = None
    quantized_representation: Optional[torch.FloatTensor] = None
    audio_codes: Optional[torch.LongTensor] = None
    projected_latents: Optional[torch.FloatTensor] = None
Args: loss (`torch.Tensor`): Loss from the encoder model, comprising the weighted combination of the commitment and codebook losses. audio_values (`torch.Tensor` of shape `(batch_size, input_length)`): Reconstructed audio data. quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`): Quantized continuous representation of input. audio_codes (`torch.LongTensor` of shape `(batch_size, num_codebooks, time_steps)`): Codebook indices for each codebook (quantized discrete representation of input). projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`): Projected latents (continuous representation of input before quantization).
github-repos
def _create_forward(out_node): retval = out_node.body[0].body[(- 1)] if (len(retval.value.elts) == 1): retval.value = retval.value.elts[0] return out_node
Create a user-friendly forward function. Ensures that a single value instead of a tuple is returned if the user asked for the gradient with respect to only one input. Args: out_node: The function definition AST. Returns: The function definition with potentially changed return statement.
codesearchnet
def read(self, *labels, **args):
    """Return the PCollection contents along with a version number.

    Abstract; subclasses must implement the actual cache read.

    Args:
        *labels: List of labels for the PCollection instance.
        **args: Additional arguments. Currently only 'tail' (bool) is
            recognised; when True, implementations wait for and read new
            elements until the cache is complete.

    Returns:
        A tuple of (iterator over the PCollection items, version number).

    Raises:
        NotImplementedError: Always, in this base implementation.
    """
    raise NotImplementedError
Return the PCollection as a list as well as the version number. Args: *labels: List of labels for PCollection instance. **args: Dict of additional arguments. Currently only 'tail' as a boolean. When tail is True, will wait and read new elements until the cache is complete. Returns: A tuple containing an iterator for the items in the PCollection and the version number. It is possible that the version numbers from read() and _latest_version() are different. This usually means that the cache's been evicted (thus unavailable => read() returns version = -1), but it had reached version n before eviction.
github-repos
def query_file(self, file_sha, verbose=False):
    """Query the VirusTotal service for a file hash.

    Args:
        file_sha (str): The file sha1 (40 chars) or sha256 (64 chars) hash.
        verbose (bool): Passed through to the underlying query.

    Returns:
        dict: The query result, or a ``malformed`` marker dict when the
        hash length is neither 40 nor 64 characters.
    """
    # Guard clause: reject hashes that are neither sha1 nor sha256 length.
    if len(file_sha) not in (40, 64):
        print('File sha looks malformed: {:s}'.format(file_sha))
        return {'file_sha': file_sha, 'malformed': True}
    return self._query('file', file_sha, verbose)
Query the VirusTotal Service Args: file_sha (str): The file sha1 or sha256 hash url (str): The domain/url to be queried (default=None)
juraj-google-style
def functions(start=None, end=None):
    """Iterate over all functions in the given address range.

    Args:
        start: Start address of the range. Defaults to the IDB start.
        end: End address of the range. Defaults to the IDB end.

    Yields:
        Function: a wrapper for each function found in the range.
    """
    # Normalise missing bounds to the IDB's start/end addresses.
    start, end = fix_addresses(start, end)
    for func_ea in idautils.Functions(start, end):
        yield Function(func_ea)
Get all functions in range. Args: start: Start address of the range. Defaults to IDB start. end: End address of the range. Defaults to IDB end. Returns: This is a generator that iterates over all the functions in the IDB.
codesearchnet