code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def read_gold_standard_file(data_dir, fileroot, encoding=None, cetr=False):
    """Read the gold standard content file for ``fileroot``.

    Reads the file from the gold standard directory below ``data_dir``,
    trying several encodings if none is given.

    Args:
        data_dir (str): Root data directory.
        fileroot (str): Identifier of the gold standard file (no extension).
        encoding (str): If given, only this encoding is tried.
        cetr (bool): If True, assume no comments and parse the gold standard
            as HTML to remove tags.

    Returns:
        List[str, str]: contents string and comments string, respectively.
    """
    fname = os.path.join(data_dir, GOLD_STANDARD_DIRNAME, (fileroot + GOLD_STANDARD_EXT))
    # Try the caller's encoding only, or fall back through common encodings.
    encodings = ((encoding,) if encoding else ('utf-8', 'utf-16', 'iso-8859-1'))
    for encoding in encodings:
        try:
            with io.open(fname, mode='rt', encoding=encoding) as f:
                gold_standard = f.read()
            break
        except (UnicodeDecodeError, UnicodeError):
            gold_standard = None
    if (not gold_standard):
        return [u'', u'']
    if (not cetr):
        # Split content from comments on the delimiter; at most one split.
        content_comments = RE_COMMENTS_DELIM.split(gold_standard, maxsplit=1)
        if (len(content_comments) == 1):
            # No comments section present.
            content_comments = [content_comments[0], u'']
    else:
        # CETR data: parse as HTML and join all text nodes; no comments.
        tree = etree.fromstring(gold_standard, parser=etree.HTMLParser())
        content_comments = [u' '.join(text_from_subtree(tree)), u'']
    # Repair any mojibake and trim surrounding whitespace.
    content_comments = [ftfy.fix_encoding(content_comments[0]).strip(), ftfy.fix_encoding(content_comments[1]).strip()]
    return content_comments
Read the gold standard content file corresponding to identifier ``fileroot`` in the gold standard directory below the root ``data_dir``. Args: data_dir (str) fileroot (str) encoding (str) cetr (bool): if True, assume no comments and parse the gold standard to remove tags Returns: List[str, str]: contents string and comments string, respectively
codesearchnet
def AsJsonString(self):
    """Serialize this User instance to a JSON string with sorted keys."""
    as_dict = self.AsDict(dt=False)
    return json.dumps(as_dict, sort_keys=True)
A JSON string representation of this User instance. Returns: A JSON string representation of this User instance
codesearchnet
def local_symbol_table(imports=None, symbols=()):
    """Construct a mutable local symbol table.

    Args:
        imports (Optional[SymbolTable]): Shared symbol tables to import.
        symbols (Optional[Iterable[Unicode]]): Initial local symbols to add.

    Returns:
        SymbolTable: A mutable local symbol table seeded with *symbols*.
    """
    table = SymbolTable(
        table_type=LOCAL_TABLE_TYPE,
        symbols=symbols,
        imports=imports,
    )
    return table
Constructs a local symbol table. Args: imports (Optional[SymbolTable]): Shared symbol tables to import. symbols (Optional[Iterable[Unicode]]): Initial local symbols to add. Returns: SymbolTable: A mutable local symbol table with the seeded local symbols.
juraj-google-style
def print_tools(self, buf=sys.stdout, verbose=False, context_name=None):
    """Print a table of the tools available in the suite.

    Args:
        buf (file-like): Stream to print the table to.
        verbose (bool): If True, also show hidden and conflicting tools and
            list every package involved in a conflict.
        context_name (str): If provided, only print the tools from this
            context.
    """
    def _get_row(entry):
        # Build one table row [alias, name, package, context, notes] plus an
        # optional color for the row.
        context_name_ = entry["context_name"]
        tool_alias = entry["tool_alias"]
        tool_name = entry["tool_name"]
        properties = []
        col = None
        variant = entry["variant"]
        if isinstance(variant, set):
            # A set of variants means several packages provide this tool.
            properties.append("(in conflict)")
            col = critical
            if verbose:
                package = ", ".join(x.qualified_package_name for x in variant)
            else:
                v = next(iter(variant))  # py3 fix: was iter(variant).next()
                package = "%s (+%d more)" % (v.qualified_package_name,
                                             len(variant) - 1)
        else:
            package = variant.qualified_package_name

        if tool_name == tool_alias:
            tool_name = "-"
        else:
            properties.append("(aliased)")
            if col is None:
                col = alias_col

        msg = " ".join(properties)
        row = [tool_alias, tool_name, package, context_name_, msg]
        return row, col

    if context_name:
        self._context(context_name)  # validates that the context exists
        context_names = [context_name]
    else:
        context_names = sorted(self.contexts.keys())  # py3 fix: was iterkeys()

    rows = [["TOOL", "ALIASING", "PACKAGE", "CONTEXT", ""],
            ["----", "--------", "-------", "-------", ""]]
    colors = [None, None]

    entries_dict = defaultdict(list)
    for d in self.get_tools().values():  # py3 fix: was itervalues()
        entries_dict[d["context_name"]].append(d)

    if verbose:
        # Also show hidden tools and tools that lost a conflict.
        for d in self.hidden_tools:
            d_ = d.copy()
            d_["hidden"] = True
            entries_dict[d["context_name"]].append(d_)

        for docs in self.tool_conflicts.values():  # py3 fix: was itervalues()
            for d in docs:
                d_ = d.copy()
                d_["conflicting"] = True
                entries_dict[d["context_name"]].append(d_)

    for i, context_name in enumerate(context_names):
        entries = entries_dict.get(context_name, [])
        if entries:
            if i:
                # Blank separator row between contexts.
                rows.append(('', '', '', '', ''))
                colors.append(None)
            entries = sorted(entries, key=lambda x: x["tool_alias"].lower())
            for entry in entries:
                row, col = _get_row(entry)
                if "hidden" in entry:
                    row[-1] = "(hidden)"
                    rows.append(row)
                    colors.append(warning)
                elif "conflicting" in entry:
                    row[-1] = "(not visible)"
                    rows.append(row)
                    colors.append(warning)
                else:
                    rows.append(row)
                    colors.append(col)

    # Bug fix: _pr was only defined in the `if rows` branch, so the
    # "No tools available." path raised NameError.
    _pr = Printer(buf)
    if rows:
        for col, line in zip(colors, columnise(rows)):
            _pr(line, col)
    else:
        _pr("No tools available.")
Print table of tools available in the suite. Args: context_name (str): If provided, only print the tools from this context.
juraj-google-style
def parse_section_links(self, section_title):
    """Parse all links within a section.

    Args:
        section_title (str): Name of the section to pull.

    Returns:
        list: List of (title, url) tuples, or None if the section title is
        not found.

    Note:
        Side effect is to also pull the html which can be slow.
    """
    wanted_title = BeautifulSoup(section_title, 'html.parser').get_text().lower()
    page_soup = BeautifulSoup(self.html, 'html.parser')
    headlines = page_soup.find_all('span', {'class': 'mw-headline'})
    # Find the id of the first headline whose text matches the section title.
    id_tag = next(
        (headline.get('id') for headline in headlines
         if headline.text.lower() == wanted_title),
        None)
    if id_tag is None:
        return None
    return self._parse_section_links(id_tag)
Parse all links within a section Args: section_title (str): Name of the section to pull Returns: list: List of (title, url) tuples Note: Returns **None** if section title is not found Note: Side effect is to also pull the html which can be slow Note: This is a parsing operation and not part of the standard API
codesearchnet
def _filter_returned_ops(fn):
    """Wrap *fn* so that any `Operation` outputs are replaced by None.

    Args:
        fn: a function.

    Returns:
        A tuple of (wrapped function that returns `None` in place of any ops,
        dict mapping the index in the flat output structure to the returned
        op).
    """
    returned_ops = {}

    def wrap_and_filter_returned_ops(*args, **kwargs):
        outputs = fn(*args, **kwargs)
        flat_outputs = nest.flatten(outputs)
        for idx, output in enumerate(flat_outputs):
            if isinstance(output, ops.Operation):
                # Remember the op and blank it out of the structure.
                returned_ops[idx] = output
                flat_outputs[idx] = None
        return nest.pack_sequence_as(outputs, flat_outputs)

    return (wrap_and_filter_returned_ops, returned_ops)
Filtering out any ops returned by function. Args: fn: a function Returns: A tuple of ( Wrapped function that returns `None` in place of any ops, dict that maps the index in the flat output structure to the returned op )
github-repos
def seek(self, n):
    """Move to marker index *n* in the BED file.

    Args:
        n (int): The index of the marker to seek to.

    Raises:
        UnsupportedOperation: If the file is not opened for reading.
        ValueError: If *n* is outside the valid marker range.
    """
    if self._mode != 'r':
        raise UnsupportedOperation("not available in 'w' mode")
    if not (0 <= n < self._nb_markers):
        raise ValueError('invalid position in BED: {}'.format(n))
    self._n = n
    self._bed.seek(self._get_seek_position(n))
Gets to a certain marker position in the BED file. Args: n (int): The index of the marker to seek to.
codesearchnet
def receive(self, sequence, args):
    """Receive one packet, reordering out-of-sequence arrivals.

    Packets with a sequence number lower than the next expected one are
    dropped.  Otherwise the packet is queued and the queue is drained for
    as long as its head matches the next expected sequence number.

    Args:
        sequence (int): The sequence number of the received packet.
        args (list): Packet contents passed to the callback as callback(*args).
    """
    if not self._reorder:
        # Reordering disabled: deliver immediately.
        self._callback(*args)
        return

    if self._next_expected is not None and sequence < self._next_expected:
        print("Dropping out of order packet, seq=%d" % sequence)
        return

    self._out_of_order.append((sequence, args))
    self._out_of_order.sort(key=lambda item: item[0])

    # Deliver queued packets while the head is the next expected one.
    while self._out_of_order:
        seq, queued_args = self._out_of_order[0]
        if self._next_expected is not None and seq != self._next_expected:
            break
        self._callback(*queued_args)
        del self._out_of_order[0]
        self._next_expected = seq + 1
Receive one packet If the sequence number is one we've already seen before, it is dropped. If it is not the next expected sequence number, it is put into the _out_of_order queue to be processed once the holes in sequence number are filled in. Args: sequence (int): The sequence number of the received packet args (list): The list of packet contents that will be passed to callback as callback(*args)
juraj-google-style
def depth(self):
    """Depth of this node in the tree, cached after first computation.

    Returns:
        int: the node's depth in the tree (1 for a root node).
    """
    if self._depth_cache is None:
        level, current = 1, self
        # Walk up through parents until the root (package is None).
        while current.package is not None:
            level += 1
            current = current.package
        self._depth_cache = level
    return self._depth_cache
Property to tell the depth of the node in the tree. Returns: int: the node's depth in the tree.
codesearchnet
def _update_exit_code_from_error(self, error): for error_type, exit_code in self.ERROR_CODE_MAP.items(): if isinstance(error, error_type): self.update_exit_code(exit_code) break else: self.update_exit_code(ExitStatus.generic_error)
Set the exit code based on the error type. Args: error (:class:`Exception`): An exception instance.
juraj-google-style
async def send_heartbeat(self, name):
    """Send a heartbeat for a service.

    Args:
        name (string): The name of the service to send a heartbeat for.
    """
    payload = {'name': name}
    await self.send_command(
        OPERATIONS.CMD_HEARTBEAT,
        payload,
        MESSAGES.HeartbeatResponse,
        timeout=5.0,
    )
Send a heartbeat for a service. Args: name (string): The name of the service to send a heartbeat for
juraj-google-style
def _checkMode(mode): if not isinstance(mode, str): raise TypeError('The {0} should be a string. Given: {1!r}'.format("mode", mode)) if mode not in [MODE_RTU, MODE_ASCII]: raise ValueError("Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.".format(mode))
Check that the Modbus mode is valid. Args: mode (string): The Modbus mode (MODE_RTU or MODE_ASCII) Raises: TypeError, ValueError
juraj-google-style
def _get_bonds(self, mol):
    """Find all bonds in a molecule.

    Args:
        mol: the molecule, a pymatgen Molecule object.

    Returns:
        List of tuples; each tuple is a bond represented by the indices of
        its two end atoms.
    """
    num_atoms = len(mol)
    # Optionally exclude ionic elements from bond detection.
    if self.ignore_ionic_bond:
        covalent_atoms = [i for i in range(num_atoms) if (mol.species[i].symbol not in self.ionic_element_list)]
    else:
        covalent_atoms = list(range(num_atoms))
    all_pairs = list(itertools.combinations(covalent_atoms, 2))
    pair_dists = [mol.get_distance(*p) for p in all_pairs]
    # Every element must have a known covalent radius.
    elements = mol.composition.as_dict().keys()
    unavailable_elements = list((set(elements) - set(self.covalent_radius.keys())))
    if (len(unavailable_elements) > 0):
        raise ValueError('The covalent radius for element {} is not available'.format(unavailable_elements))
    bond_13 = self.get_13_bonds(self.priority_bonds)
    # Maximum allowed bond length per pair: sum of covalent radii scaled by
    # a cap factor (priority / default / 1-3 bond), then shrunk to 10% for
    # halogen-halogen pairs when halogen self-bonding is ignored.
    max_length = [(((self.covalent_radius[mol.sites[p[0]].specie.symbol] + self.covalent_radius[mol.sites[p[1]].specie.symbol]) * (1 + (self.priority_cap if (p in self.priority_bonds) else (self.bond_length_cap if (p not in bond_13) else self.bond_13_cap)))) * (0.1 if (self.ignore_halogen_self_bond and (p not in self.priority_bonds) and (mol.sites[p[0]].specie.symbol in self.halogen_list) and (mol.sites[p[1]].specie.symbol in self.halogen_list)) else 1.0)) for p in all_pairs]
    # A pair is bonded when its distance is within the allowed maximum.
    bonds = [bond for (bond, dist, cap) in zip(all_pairs, pair_dists, max_length) if (dist <= cap)]
    return bonds
Find all the bonds in a molecule Args: mol: the molecule. pymatgen Molecule object Returns: List of tuple. Each tuple corresponds to a bond represented by the id of the two end atoms.
codesearchnet
def VerifyStructure(self, parser_mediator, line):
    """Verify that this file is an IIS log file.

    Resets the parser state before checking the line for the IIS signature.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        line (str): line from a text file.

    Returns:
        bool: True if the line contains the IIS signature.
    """
    # Reset per-file parser state.
    self._line_structures = self.LINE_STRUCTURES
    self._day_of_month = None
    self._month = None
    self._year = None
    return self._SIGNATURE in line
Verify that this file is an IIS log file. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. line (str): line from a text file. Returns: bool: True if the line was successfully parsed.
juraj-google-style
def _powerset(iterable): s = list(iterable) return itertools.chain.from_iterable((itertools.combinations(s, r) for r in range(len(s) + 1)))
Helper for generating all possible reduction_axes arguments. Example: powerset([0,1,2]): () (0,) (1,) (2,) (0,1) (0,2) (1,2) (0,1,2) Args: iterable: An iterable of items to generate the powerset of. Returns: The powerset of all items in iterable.
github-repos
def get_hash(self):
    """Return the hash for this template version.

    The hash is fetched from the source on first access and cached.

    Returns:
        str: Hash for this version, stripped of surrounding whitespace.
    """
    if self._hash is None:
        raw_hash = self._source.get_hash(self._handle)
        self._hash = raw_hash.strip()
    return self._hash
Returns the associated hash for this template version Returns: str: Hash for this version
codesearchnet
def relu_layer(x, weights, biases, name=None):
    """Computes Relu(x * weights + biases).

    Args:
        x: a 2D tensor. Dimensions typically: batch, in_units.
        weights: a 2D tensor. Dimensions typically: in_units, out_units.
        biases: a 1D tensor. Dimensions: out_units.
        name: A name for the operation (optional). If not specified,
            "relu_layer" is used.

    Returns:
        A 2-D Tensor computing relu(matmul(x, weights) + biases).
    """
    with ops.name_scope(name, 'relu_layer', [x, weights, biases]) as name:
        x = ops.convert_to_tensor(x, name='x')
        weights = ops.convert_to_tensor(weights, name='weights')
        biases = ops.convert_to_tensor(biases, name='biases')
        # Affine transform followed by the rectifier.
        linear_output = nn_ops.bias_add(math_ops.matmul(x, weights), biases)
        return nn_ops.relu(linear_output, name=name)
Computes Relu(x * weight + biases). Args: x: a 2D tensor. Dimensions typically: batch, in_units weights: a 2D tensor. Dimensions typically: in_units, out_units biases: a 1D tensor. Dimensions: out_units name: A name for the operation (optional). If not specified "nn_relu_layer" is used. Returns: A 2-D Tensor computing relu(matmul(x, weights) + biases). Dimensions typically: batch, out_units.
github-repos
def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):
    """Benchmark the op by executing it `iters` times.

    Args:
        op: The tf op to benchmark.
        iters: Number of times to repeat the timing.
        warmup: If true, warms up the session caches with an untimed run.
        session_config: A ConfigProto with session configuration options;
            applicable only when benchmarking in graph mode.

    Returns:
        A float: the median per-execution wall time of the op in seconds.
    """
    if context.executing_eagerly():
        return self._run_eager_benchmark(iterable=op, iters=iters,
                                         warmup=warmup)
    # Graph mode: a session (with optional config) is needed.
    return self._run_graph_benchmark(iterable=op, iters=iters, warmup=warmup,
                                     session_config=session_config)
Benchmarks the op. Runs the op `iters` times. In each iteration, the benchmark measures the time it takes to go execute the op. Args: op: The tf op to benchmark. iters: Number of times to repeat the timing. warmup: If true, warms up the session caches by running an untimed run. session_config: A ConfigProto protocol buffer with configuration options for the session. Applicable only for benchmarking in graph mode. Returns: A float, representing the per-execution wall time of the op in seconds. This is the median time (with respect to `iters`) it takes for the op to be executed `iters` num of times.
github-repos
def _create_variables(self, num_clusters):
    """Creates clustering variables.

    Args:
        num_clusters: an integer Tensor providing the number of clusters.

    Returns:
        Tuple with the following elements:
        - cluster_centers: a Tensor for storing cluster centers.
        - cluster_centers_initialized: bool Variable indicating whether
          clusters are initialized.
        - cluster_counts: a Tensor for storing counts of points assigned to
          clusters (mini-batch training only), else None.
        - cluster_centers_updated: Tensor holding the copy of cluster
          centers that is updated every step.
        - update_in_steps: number of steps left before syncing
          cluster_centers_updated back to cluster_centers, else None.
    """
    # Empty placeholder default lets the centers be assigned later;
    # validate_shape=False because the cluster count is not static.
    init_value = array_ops.placeholder_with_default([], shape=None)
    cluster_centers = variable_v1.VariableV1(init_value, name=CLUSTERS_VAR_NAME, validate_shape=False)
    cluster_centers_initialized = variable_v1.VariableV1(False, dtype=dtypes.bool, name='initialized')
    if self._use_mini_batch and self._mini_batch_steps_per_iteration > 1:
        # Mini-batch with several steps per iteration: keep a separate copy
        # of the centers that is updated each step and synced back later.
        cluster_centers_updated = variable_v1.VariableV1(init_value, name='clusters_updated', validate_shape=False)
        update_in_steps = variable_v1.VariableV1(self._mini_batch_steps_per_iteration, dtype=dtypes.int64, name='update_in_steps')
        cluster_counts = variable_v1.VariableV1(array_ops.zeros([num_clusters], dtype=dtypes.int64))
    else:
        # Single-step updates write directly into cluster_centers.
        cluster_centers_updated = cluster_centers
        update_in_steps = None
        cluster_counts = variable_v1.VariableV1(array_ops.ones([num_clusters], dtype=dtypes.int64)) if self._use_mini_batch else None
    return (cluster_centers, cluster_centers_initialized, cluster_counts, cluster_centers_updated, update_in_steps)
Creates variables. Args: num_clusters: an integer Tensor providing the number of clusters. Returns: Tuple with following elements: - cluster_centers: a Tensor for storing cluster centers - cluster_centers_initialized: bool Variable indicating whether clusters are initialized. - cluster_counts: a Tensor for storing counts of points assigned to this cluster. This is used by mini-batch training. - cluster_centers_updated: Tensor representing copy of cluster centers that are updated every step. - update_in_steps: numbers of steps left before we sync cluster_centers_updated back to cluster_centers.
github-repos
def _source_is_newer(src_fs, src_path, dst_fs, dst_path): try: if dst_fs.exists(dst_path): namespace = ('details', 'modified') src_modified = src_fs.getinfo(src_path, namespace).modified if (src_modified is not None): dst_modified = dst_fs.getinfo(dst_path, namespace).modified return ((dst_modified is None) or (src_modified > dst_modified)) return True except FSError: return True
Determine if source file is newer than destination file. Arguments: src_fs (FS): Source filesystem (instance or URL). src_path (str): Path to a file on the source filesystem. dst_fs (FS): Destination filesystem (instance or URL). dst_path (str): Path to a file on the destination filesystem. Returns: bool: `True` if the source file is newer than the destination file or file modification time cannot be determined, `False` otherwise.
codesearchnet
def get_config(self):
    """Returns the config of the layer.

    A layer config is a Python dictionary (serializable) containing the
    configuration of a layer. The same layer can be reinstantiated later
    (without its trained weights) from this configuration.

    Note that `get_config()` does not guarantee to return a fresh copy of
    dict every time it is called. The callers should make a copy of the
    returned dict if they want to modify it.

    Returns:
        Python dictionary.
    """
    all_args = tf_inspect.getfullargspec(self.__init__).args
    config = {'name': self.name, 'trainable': self.trainable}
    if hasattr(self, '_batch_input_shape'):
        config['batch_input_shape'] = self._batch_input_shape
    config['dtype'] = policy.serialize(self._dtype_policy)
    if hasattr(self, 'dynamic'):
        # Only serialize `dynamic` when True; otherwise remove it from the
        # argument list so it is not flagged as an unhandled argument below.
        if self.dynamic:
            config['dynamic'] = self.dynamic
        elif 'dynamic' in all_args:
            all_args.remove('dynamic')
    expected_args = config.keys()
    extra_args = [arg for arg in all_args if arg not in expected_args]
    # Threshold is > 1 rather than > 0, presumably to account for `self`
    # in the argspec — TODO confirm.
    if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'):
        raise NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__)
    return config
Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by `Network` (one layer of abstraction above). Note that `get_config()` does not guarantee to return a fresh copy of dict every time it is called. The callers should make a copy of the returned dict if they want to modify it. Returns: Python dictionary.
github-repos
async def import_image(self, data, stream: bool=False):
    """Import a tarball of an image into docker.

    Args:
        data: tarball data of the image to be imported.
        stream (bool): whether to stream the JSON result.

    Returns:
        The JSON result of the load operation.
    """
    headers = {'Content-Type': 'application/x-tar'}
    response = await self.docker._query_chunked_post(
        'images/load', 'POST', data=data, headers=headers)
    return await json_stream_result(response, stream=stream)
Import tarball of image to docker. Args: data: tarball data of image to be imported Returns: Tarball of the image
codesearchnet
def initialize_or_restore(self, session=None):
    """Runs initialization ops for variables.

    Objects which would be saved by `Saver.save` will be initialized, unless
    those variables are being restored by a later call to
    `tf.train.Checkpoint.restore()`.

    This method does nothing when executing eagerly (initializers get run
    eagerly).

    Args:
        session: The session to run initialization ops in. If `None`, uses
            the default session.
    """
    if context.executing_eagerly():
        # Initializers have already run in eager mode.
        return
    if session is None:
        session = get_session()
    trackable_objects = util.list_objects(self._object_graph_view)
    # Skip variables with a pending newer restore: those have an
    # _update_uid at or above this restore's uid.
    initializers = [c.initializer for c in trackable_objects if hasattr(c, 'initializer') and c.initializer is not None and (getattr(c, '_update_uid', self._restore_uid - 1) < self._restore_uid)]
    session.run(initializers)
Runs initialization ops for variables. Objects which would be saved by `Saver.save` will be initialized, unless those variables are being restored by a later call to `tf.train.Checkpoint.restore()`. This method does nothing when executing eagerly (initializers get run eagerly). Args: session: The session to run initialization ops in. If `None`, uses the default session.
github-repos
def handle_run_exception(self, pipeline_key, pipeline_func, e):
    """Handles an exception raised by a Pipeline's user code.

    Args:
        pipeline_key: The pipeline that raised the error.
        pipeline_func: The Pipeline instance that was running.
        e: The exception that was raised.

    Returns:
        True if the exception should be re-raised up through the calling
        stack by the caller of this method.
    """
    if isinstance(e, Retry):
        # User explicitly requested a retry.
        retry_message = str(e)
        logging.warning('User forced retry for pipeline ID "%s" of %r: %s',
                        pipeline_key.name(), pipeline_func, retry_message)
        self.transition_retry(pipeline_key, retry_message)
    elif isinstance(e, Abort):
        # User explicitly requested an abort.
        abort_message = str(e)
        logging.warning('User forced abort for pipeline ID "%s" of %r: %s',
                        pipeline_key.name(), pipeline_func, abort_message)
        pipeline_func.abort(abort_message)
    else:
        # Unexpected error: log with traceback and schedule a retry.
        retry_message = '%s: %s' % (e.__class__.__name__, str(e))
        # NOTE(review): the original logging format string was corrupted in
        # this source (unterminated literal); reconstructed so all three
        # arguments are consumed — confirm against upstream.
        logging.exception('Generator %r for pipeline ID "%s" raised: %s',
                          pipeline_func, pipeline_key.name(), retry_message)
        self.transition_retry(pipeline_key, retry_message)
    return pipeline_func.task_retry
Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method.
juraj-google-style
def create_dir(path):
    """Creates a directory (and any missing parents) if it does not exist.

    Args:
        path: The path of the directory to create.
    """
    full_path = abs_path(path)
    # Bug fix: the original caught OSError and compared against
    # `os.errno.EEXIST`, but `os.errno` does not exist on Python 3.7+,
    # turning the benign already-exists case into an AttributeError.
    # `exist_ok=True` also removes the exists()/makedirs() race.
    os.makedirs(full_path, exist_ok=True)
Creates a directory if it does not exist already. Args: path: The path of the directory to create.
juraj-google-style
def process_event(self, event_name: str, data: dict) -> None:
    """Copy training progress counters from an after-epoch event.

    Args:
        event_name: whether the event is sent after an epoch or a batch.
            Set of values: ``"after_epoch"``, ``"after_batch"``.
        data: event data (dictionary).

    Returns:
        None
    """
    if event_name != "after_epoch":
        # Only epoch events carry the counters we track.
        return
    self.epochs_done = data["epochs_done"]
    self.batches_seen = data["batches_seen"]
    self.train_examples_seen = data["train_examples_seen"]
Process event after epoch Args: event_name: whether event is send after epoch or batch. Set of values: ``"after_epoch", "after_batch"`` data: event data (dictionary) Returns: None
juraj-google-style
def parse_transcripts(transcript_lines):
    """Parse and merge Ensembl transcript information.

    There can be multiple input rows describing the same transcript, so the
    rows are parsed first and then merged into one entry per transcript id.

    Args:
        transcript_lines: An iterable of strings or a pandas.DataFrame.

    Returns:
        parsed_transcripts(dict): Map from ensembl transcript id to the
            merged transcript info.
    """
    LOG.info('Parsing transcripts')
    # A DataFrame comes from a biomart request; otherwise assume raw lines.
    if isinstance(transcript_lines, DataFrame):
        transcripts = parse_ensembl_transcript_request(transcript_lines)
    else:
        transcripts = parse_ensembl_transcripts(transcript_lines)
    parsed_transcripts = {}
    for tx in transcripts:
        tx_id = tx['ensembl_transcript_id']
        ens_gene_id = tx['ensembl_gene_id']
        if (not (tx_id in parsed_transcripts)):
            # First row for this transcript: create its base entry.
            tx_info = {'chrom': tx['chrom'], 'transcript_start': tx['transcript_start'], 'transcript_end': tx['transcript_end'], 'mrna': set(), 'mrna_predicted': set(), 'nc_rna': set(), 'ensembl_gene_id': ens_gene_id, 'ensembl_transcript_id': tx_id}
            parsed_transcripts[tx_id] = tx_info
        tx_info = parsed_transcripts[tx_id]
        # Merge this row's RefSeq identifiers into the entry's sets.
        if tx.get('refseq_mrna_predicted'):
            tx_info['mrna_predicted'].add(tx['refseq_mrna_predicted'])
        if tx.get('refseq_mrna'):
            tx_info['mrna'].add(tx['refseq_mrna'])
        if tx.get('refseq_ncrna'):
            tx_info['nc_rna'].add(tx['refseq_ncrna'])
    return parsed_transcripts
Parse and massage the transcript information There could be multiple lines with information about the same transcript. This is why it is necessary to parse the transcripts first and then return a dictionary where all information has been merged. Args: transcript_lines(): This could be an iterable with strings or a pandas.DataFrame Returns: parsed_transcripts(dict): Map from enstid -> transcript info
codesearchnet
def compose_path(pub, uuid_url=False):
    """Compose the absolute url-path for the given publication.

    Args:
        pub (obj): :class:`.DBPublication` instance.
        uuid_url (bool, default False): Compose the URL using the UUID.

    Returns:
        str: Absolute url-path of the publication, without the server's
        address and protocol.
    """
    if uuid_url:
        return join('/', UUID_DOWNLOAD_KEY, str(pub.uuid))
    return join(
        '/',
        DOWNLOAD_KEY,
        basename(pub.file_pointer),
        basename(pub.filename),
    )
Compose absolute path for given `pub`. Args: pub (obj): :class:`.DBPublication` instance. uuid_url (bool, default False): Compose URL using UUID. Returns: str: Absolute url-path of the publication, without server's address \ and protocol. Raises: PrivatePublicationError: When the `pub` is private publication.
codesearchnet
def feedforward(inputs, num_units, scope='multihead_attention'):
    """Point-wise feed-forward net with a residual connection.

    Args:
        inputs: A 3d tensor with shape of [N, T, C].
        num_units: A list of two integers (inner and output widths).
        scope: Optional scope for `variable_scope`.

    Returns:
        A 3d tensor with the same shape and dtype as inputs.
    """
    with tf.variable_scope(scope):
        # Inner projection with ReLU activation (kernel size 1 == pointwise).
        hidden = tf.layers.conv1d(
            inputs=inputs, filters=num_units[0], kernel_size=1,
            activation=tf.nn.relu, use_bias=True)
        # Linear output projection back to the input width.
        projected = tf.layers.conv1d(
            inputs=hidden, filters=num_units[1], kernel_size=1,
            activation=None, use_bias=True)
        # Residual connection followed by normalization.
        outputs = projected + inputs
        return normalize(outputs)
Point-wise feed forward net. Args: inputs: A 3d tensor with shape of [N, T, C]. num_units: A list of two integers. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A 3d tensor with the same shape and dtype as inputs
codesearchnet
def create_local_copy(self, effects=None, store=None):
    """Create a local copy of the file on Uploadcare storage.

    Args:
        effects: Adds CDN image effects, combined with
            ``self.default_effects`` if that is set.
        store: If False-y, the copy is subject to deletion after 24 hours
            (when `autostore` is enabled in the project).

    Returns:
        The REST API response for the copy request.
    """
    effects = self._build_effects(effects)
    store = store or ''
    payload = {'source': self.cdn_path(effects)}
    if store:
        payload['store'] = store
    return rest_request('POST', 'files/', data=payload)
Creates a Local File Copy on Uploadcare Storage. Args: - effects: Adds CDN image effects. If ``self.default_effects`` property is set effects will be combined with default effects. - store: If ``store`` option is set to False the copy of your file will be deleted in 24 hour period after the upload. Works only if `autostore` is enabled in the project.
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Windows Restore Point (rp.log) log file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    file_size = file_object.get_size()
    file_header_map = self._GetDataTypeMap('rp_log_file_header')
    try:
        file_header, _ = self._ReadStructureFromFileObject(
            file_object, 0, file_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile(
            'Unable to parse file header with error: {0!s}'.format(
                exception))
    # The file footer sits at the very end of the file.
    file_footer_map = self._GetDataTypeMap('rp_log_file_footer')
    file_footer_offset = file_size - file_footer_map.GetByteSize()
    try:
        file_footer, _ = self._ReadStructureFromFileObject(
            file_object, file_footer_offset, file_footer_map)
    except (ValueError, errors.ParseError) as exception:
        # A bad footer produces a warning, not a parse failure.
        parser_mediator.ProduceExtractionWarning(
            'unable to parse file footer with error: {0!s}'.format(exception))
        return
    # Strip trailing NUL padding from the description string.
    description = file_header.description.rstrip('\0')
    if file_footer.creation_time == 0:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    else:
        date_time = dfdatetime_filetime.Filetime(
            timestamp=file_footer.creation_time)
    event_data = RestorePointEventData()
    event_data.description = description
    event_data.restore_point_event_type = file_header.event_type
    event_data.restore_point_type = file_header.restore_point_type
    event_data.sequence_number = file_header.sequence_number
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Windows Restore Point (rp.log) log file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
def _maybe_name(obj) -> str: if obj is None: return 'None' elif hasattr(obj, 'name'): return obj.name else: return '<no name for %s>' % type(obj)
Returns object name if it has one, or a message otherwise. This is useful for names that appear in error messages. Args: obj: Object to get the name of. Returns: name, "None", or a "no name" message.
github-repos
def count(self) -> 'Builder':
    """The FHIRPath count() function.

    Returns:
        An expression that evaluates to the count of items in the parent.
    """
    count_function = _evaluation.CountFunction(self.node.context, self.node, [])
    return self._to_builder(count_function)
The FHIRPath count() function. Returns: An expression that evaluates to the count of items in the parent.
github-repos
def _generate_schedule(self, cpn_frequency, roll_convention):
    """Method to generate coupon dates.

    Args:
        cpn_frequency: A `PeriodTensor` specifying the frequency of coupon
            payments.
        roll_convention: Scalar of type `BusinessDayConvention` specifying
            how dates are rolled if they fall on holidays.

    Returns:
        A tuple containing the generated date schedule and a boolean
        `Tensor` of the same shape as the schedule specifying whether the
        coupons are regular coupons.
    """
    if self._first_coupon_date is None and self._penultimate_coupon_date is None:
        # Regular schedule from start to end: every coupon is regular.
        cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()
        is_regular_cpn = tf.constant(True, dtype=bool, shape=cpn_dates[:, :-1].shape)
    elif self._first_coupon_date is not None:
        # Explicit first coupon: prepend the start date and mark the
        # initial stub period as irregular.
        cpn_dates = dates.PeriodicSchedule(start_date=self._first_coupon_date, end_date=self._end_date, tenor=cpn_frequency, roll_convention=roll_convention).dates()
        cpn_dates = dates.DateTensor.concat([self._start_date.expand_dims(-1), cpn_dates], axis=1)
        is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._start_date.shape)
        is_regular_cpn = tf.concat([tf.expand_dims(is_irregular_cpn, axis=-1), tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape)], axis=1)
    else:
        # Explicit penultimate coupon: generate backwards, append the end
        # date, and mark the final stub period as irregular.
        cpn_dates = dates.PeriodicSchedule(start_date=self._start_date, end_date=self._penultimate_coupon_date, backward=True, tenor=cpn_frequency, roll_convention=roll_convention).dates()
        cpn_dates = dates.DateTensor.concat([cpn_dates, self._end_date.expand_dims(-1)], axis=1)
        is_irregular_cpn = tf.constant(False, dtype=bool, shape=self._end_date.shape)
        is_regular_cpn = tf.concat([tf.constant(True, dtype=bool, shape=cpn_dates[:, :-2].shape), tf.expand_dims(is_irregular_cpn, axis=-1)], axis=1)
    return (cpn_dates, is_regular_cpn)
Method to generate coupon dates. Args: cpn_frequency: A `PeriodTensor` specifying the frequency of coupon payments. roll_convention: Scalar of type `BusinessDayConvention` specifying how dates are rolled if they fall on holidays. Returns: A tuple containing the generated date schedule and a boolean `Tensor` of the same shape as the schedule specifying whether the coupons are regular coupons.
github-repos
def __init__(self, resolver_context):
    """Initializes a file-like object.

    Args:
        resolver_context (Context): resolver context.
    """
    super(CPIOFile, self).__init__(resolver_context)
    # Archive handle and entry — populated elsewhere (presumably when the
    # file is opened); TODO confirm.
    self._cpio_archive_file = None
    self._cpio_archive_file_entry = None
    # Current read offset within the entry data.
    self._current_offset = 0
    self._file_system = None
    # Size of the entry data in bytes.
    self._size = 0
Initializes a file-like object. Args: resolver_context (Context): resolver context.
juraj-google-style
def verify_profile_name(msg, cfg):
    """Verify that the message's profile name exists in the config.

    Args:
        msg: (Message class) an instance of a message class.
        cfg: (jsonconfig.Config) config instance.

    Raises:
        UnknownProfileError: If the profile is not present in the config.
    """
    known_profiles = cfg.data
    if msg.profile not in known_profiles:
        raise UnknownProfileError(msg.profile)
Verifies the profile name exists in the config.json file. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
codesearchnet
def compute_bleu_summaries(hook_args):
    """Compute BLEU summaries using the decoder output.

    Args:
        hook_args: DecodeHookArgs namedtuple.

    Returns:
        A list of tf.Summary values if hook_args.hparams contains the
        reference file and the translated file, else None.
    """
    decode_hparams = hook_args.decode_hparams
    if (not (decode_hparams.decode_reference and decode_hparams.decode_to_file)):
        return None
    values = []
    bleu = (100 * bleu_hook.bleu_wrapper(decode_hparams.decode_reference, decode_hparams.decode_to_file))
    values.append(tf.Summary.Value(tag='BLEU', simple_value=bleu))
    tf.logging.info(('%s: BLEU = %6.2f' % (decode_hparams.decode_to_file, bleu)))
    if hook_args.hparams.mlperf_mode:
        current_step = decode_hparams.mlperf_decode_step
        mlperf_log.transformer_print(key=mlperf_log.EVAL_TARGET,
                                     value=decode_hparams.mlperf_threshold)
        # NOTE(review): the EVAL_ACCURACY value expression was corrupted in
        # this source; reconstructed as epoch derived from the decode step —
        # confirm against the upstream tensor2tensor implementation.
        mlperf_log.transformer_print(
            key=mlperf_log.EVAL_ACCURACY,
            value={
                'epoch': max((current_step //
                              decode_hparams.iterations_per_loop) - 1, 0),
                'value': bleu,
            })
        mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
        if (bleu >= decode_hparams.mlperf_threshold):
            decode_hparams.set_hparam('mlperf_success', True)
    return values
Compute BLEU core summaries using the decoder output. Args: hook_args: DecodeHookArgs namedtuple Returns: A list of tf.Summary values if hook_args.hparams contains the reference file and the translated file.
codesearchnet
def from_api_repr(cls, resource):
    """Factory: construct a model reference given its API representation.

    Args:
        resource (Dict[str, object]): Model reference representation
            returned from the API.

    Returns:
        google.cloud.bigquery.model.ModelReference: Model reference parsed
        from ``resource``.
    """
    ref = cls()
    parsed_proto = json_format.ParseDict(resource, types.ModelReference())
    ref._proto = parsed_proto
    return ref
Factory: construct a model reference given its API representation Args: resource (Dict[str, object]): Model reference representation returned from the API Returns: google.cloud.bigquery.model.ModelReference: Model reference parsed from ``resource``.
juraj-google-style
def single_qubit_matrix_to_phased_x_z(mat: np.ndarray, atol: float=0) -> List[ops.SingleQubitGate]:
    """Implements a single-qubit operation with a PhasedX and Z gate.

    If one of the gates isn't needed, it will be omitted.

    Args:
        mat: The 2x2 unitary matrix of the operation to implement.
        atol: A limit on the amount of error introduced by the construction.

    Returns:
        A list of gates that, when applied in order, perform the desired
        operation.
    """
    # Decompose the unitary into an XY-plane rotation plus a Z rotation.
    (xy_turn, xy_phase_turn, total_z_turn) = _deconstruct_single_qubit_matrix_into_gate_turns(mat)
    result = [ops.PhasedXPowGate(exponent=(2 * xy_turn), phase_exponent=(2 * xy_phase_turn)), (ops.Z ** (2 * total_z_turn))]
    # Drop gates whose effect is within the tolerance of identity.
    result = [g for g in result if (protocols.trace_distance_bound(g) > atol)]
    # A near-half-turn PhasedX can absorb the trailing Z into its phase,
    # collapsing the pair into a single gate.
    if ((len(result) == 2) and (abs(xy_turn) >= (0.5 - atol))):
        return [ops.PhasedXPowGate(phase_exponent=((2 * xy_phase_turn) + total_z_turn))]
    return result
Implements a single-qubit operation with a PhasedX and Z gate. If one of the gates isn't needed, it will be omitted. Args: mat: The 2x2 unitary matrix of the operation to implement. atol: A limit on the amount of error introduced by the construction. Returns: A list of gates that, when applied in order, perform the desired operation.
codesearchnet
def interface_required(interface):
    """Decorator requiring a particular interface type for a method.

    Args:
        interface (int): attribute of ``JLinkInterfaces``.

    Returns:
        A decorator function.
    """
    def _interface_required(func):
        """Wraps *func* with a check of the JLink's current interface."""
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            """Raise ``JLinkException`` unless ``self.tif`` matches the
            required interface, then delegate to the wrapped function."""
            if self.tif != interface:
                raise errors.JLinkException('Unsupported for current interface.')
            return func(self, *args, **kwargs)
        return wrapper
    return _interface_required
Decorator to specify that a particular interface type is required for the given method to be used. Args: interface (int): attribute of ``JLinkInterfaces`` Returns: A decorator function.
codesearchnet
def write(self): none_type = type(None) attrs = self.attribute_string() for attr in self.__dict__.keys(): if (type(attr) == none_type): setattr(self, attr, '.') fstr = '{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}{9}'.format(self.seqid, self.source, self.type, str(self.start), str(self.end), self._score_str, self.strand, self.phase, attrs, os.linesep) return fstr
Restore GFF3 entry to original format Returns: str: properly formatted string containing the GFF3 entry
codesearchnet
def find_template_filename(self, template_name):
    """Search for a file matching the given template name.

    The bare name is tried first, then the name with each of the default
    file extensions appended (when ``default_file_extensions`` exists).

    Args:
        template_name (str): Name of the template, with or without a file
            extension.

    Returns:
        pathlib.Path: Path to the first matching file, or None if no
        candidate exists.
    """
    base = self.path / template_name
    candidates = [base]
    try:
        extensions = self.default_file_extensions
    except AttributeError:
        extensions = ()
    base_str = str(base)
    candidates.extend(Path(base_str + ext) for ext in extensions)
    for candidate in candidates:
        if candidate.is_file():
            return candidate
Searches for a file matching the given template name. If found, this method returns the pathlib.Path object of the found template file. Args: template_name (str): Name of the template, with or without a file extension. Returns: pathlib.Path: Path to the matching filename.
juraj-google-style
def configs_for_writer(writer=None, ppp_config_dir=None):
    """Generator of writer configuration files for one or more writers.

    Args:
        writer (Optional[str]): Yield configs only for this writer.
        ppp_config_dir (Optional[str]): Additional configuration directory
            to search for writer configuration files.

    Yields:
        Lists of configuration file paths, one list per writer.
    """
    search_paths = ((ppp_config_dir,) if ppp_config_dir else tuple())
    if (writer is not None):
        if (not isinstance(writer, (list, tuple))):
            writer = [writer]
        # Normalize bare writer names to their YAML config file names.
        config_files = [(w if w.endswith('.yaml') else (w + '.yaml')) for w in writer]
    else:
        # No writer given: discover every writer config on the search paths.
        writer_configs = glob_config(os.path.join('writers', '*.yaml'), *search_paths)
        config_files = set(writer_configs)
    for config_file in config_files:
        config_basename = os.path.basename(config_file)
        writer_configs = config_search_paths(os.path.join('writers', config_basename), *search_paths)
        if (not writer_configs):
            LOG.warning("No writer configs found for '%s'", writer)
            continue
        (yield writer_configs)
Generator of writer configuration files for one or more writers Args: writer (Optional[str]): Yield configs only for this writer ppp_config_dir (Optional[str]): Additional configuration directory to search for writer configuration files. Returns: Generator of lists of configuration files
codesearchnet
def read_byte(self, do_ord=True) -> int:
    """Read a single byte from the underlying stream.

    Args:
        do_ord (bool): when True (default), convert the byte to its
            ordinal (int) value; otherwise return the raw byte.

    Returns:
        int: the byte's ordinal value, or the raw byte when ``do_ord``
        is False.

    Raises:
        SDKException: wrapping any error raised while reading.

    NOTE(review): the upstream docstring claims 0 is returned on error,
    but this code always re-raises as SDKException instead.
    """
    try:
        if do_ord:
            return ord(self.stream.read(1))
        else:
            return self.stream.read(1)
    except Exception as e:
        raise SDKException(ErrorCode.read_byte_error(e.args[0]))
Read a single byte. Args: do_ord (bool): (default True) convert the byte to an ordinal first. Returns: bytes: a single byte if successful. 0 (int) if an exception occurred.
juraj-google-style
def maybe_center_plot(result):
    """Wrap a matplotlib2tikz figure in a LaTeX ``center`` environment.

    The matplotlib2tikz banner comment is used to detect a tikz picture;
    everything up to and including that comment line is dropped.

    Args:
        result: The code execution result.

    Returns:
        The centered tikz code, or ``result`` unchanged when no
        matplotlib2tikz banner is present.
    """
    marker = re.search('(% .* matplotlib2tikz v.*)', result)
    if not marker:
        return result
    body = result[marker.end():]
    return '\\begin{center}\n' + body + '\n\\end{center}'
Embeds a possible tikz image inside a center environment.

Searches for matplotlib2tikz's last comment line to detect tikz images.

Args:
    result: The code execution result

Returns:
    The input result if no tikzpicture was found, otherwise a centered
    version.
juraj-google-style
def contains(self, x: int, y: int) -> bool:
    """Report whether the point ``(x, y)`` lies inside this node.

    Args:
        x (int): X position to check.
        y (int): Y position to check.

    Returns:
        bool: True if this node contains these coordinates, else False.
    """
    within_x = self.x <= x < self.x + self.width
    within_y = self.y <= y < self.y + self.height
    return within_x and within_y
Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False.
codesearchnet
def add(self, selected: 'SelectedMailbox', *, replace: 'SelectedMailbox' = None) -> None:
    """Register ``selected`` in the tracked set of selected mailboxes.

    Args:
        selected: The new selected mailbox object.
        replace: Optional existing selected mailbox object to drop from
            the set before adding the new one.
    """
    if replace is None:
        self._set.add(selected)
    else:
        self._set.discard(replace)
        self._set.add(selected)
Add a new selected mailbox object to the set, which may then be returned by :meth:`.any_selected`. Args: selected: The new selected mailbox object. replace: An existing selected mailbox object that should be removed from the weak set.
juraj-google-style
def pkg_version_list(self, pkg_id):
    """Return the installed version numbers for a package.

    Args:
        pkg_id (str): Package Id of the software/component.

    Returns:
        list: version numbers installed, oldest first; empty list when
        the package is unknown.
    """
    pkg_data = self.__reg_software.get(pkg_id, None)
    if not pkg_data:
        return []

    # Some registry entries are already stored as a plain list of versions.
    if isinstance(pkg_data, list):
        return pkg_data

    installed_versions = list(pkg_data.get('version').keys())
    # Sort with the registry's own comparator (oldest to latest).
    return sorted(installed_versions, key=cmp_to_key(self.__oldest_to_latest_version))
Returns information on a package. Args: pkg_id (str): Package Id of the software/component. Returns: list: List of version numbers installed.
juraj-google-style
def __init__(self, dtypes, shapes, names, queue_ref):
    """Constructs a queue object from a queue reference.

    `shapes` and `names`, when provided, must match `dtypes` in length;
    the values at index `i` give the shape and name of the i-th queue
    component.

    Args:
        dtypes: A list of types, one per tensor in each element.
        shapes: Optional list of shape tuples, same length as `dtypes`,
            or None to leave all component shapes unconstrained.
        names: Optional list of names, same length as `dtypes`; when
            given, `enqueue()`/`dequeue()` use dictionaries keyed by
            these names.
        queue_ref: The queue reference, i.e. the output of the queue op.

    Raises:
        ValueError: If `shapes` or `names` length mismatches `dtypes`.
    """
    self._dtypes = dtypes
    if shapes is not None:
        if len(shapes) != len(dtypes):
            raise ValueError(f'Queue shapes must have the same length as dtypes, received len(shapes)={len(shapes)}, len(dtypes)={len(dtypes)}')
        self._shapes = [tensor_shape.TensorShape(s) for s in shapes]
    else:
        # No constraints supplied: every component gets an unknown shape.
        self._shapes = [tensor_shape.unknown_shape() for _ in self._dtypes]
    if names is not None:
        if len(names) != len(dtypes):
            raise ValueError(f'Queue names must have the same length as dtypes,received len(names)={len(names)},len {len(dtypes)}')
        self._names = names
    else:
        self._names = None
    self._queue_ref = queue_ref
    if isinstance(queue_ref, ops.EagerTensor):
        # Eager mode: derive the name from the current scope and register
        # a deleter so the underlying resource is freed with this object.
        if context.context().scope_name:
            self._name = context.context().scope_name
        else:
            self._name = 'Empty'
        self._resource_deleter = resource_variable_ops.EagerResourceDeleter(queue_ref, None)
    else:
        # Graph mode: take the final component of the queue op's name.
        self._name = self._queue_ref.op.name.split('/')[-1]
Constructs a queue object from a queue reference. The two optional lists, `shapes` and `names`, must be of the same length as `dtypes` if provided. The values at a given index `i` indicate the shape and name to use for the corresponding queue component in `dtypes`. Args: dtypes: A list of types. The length of dtypes must equal the number of tensors in each element. shapes: Constraints on the shapes of tensors in an element: A list of shape tuples or None. This list is the same length as dtypes. If the shape of any tensors in the element are constrained, all must be; shapes can be None if the shapes should not be constrained. names: Optional list of names. If provided, the `enqueue()` and `dequeue()` methods will use dictionaries with these names as keys. Must be None or a list or tuple of the same length as `dtypes`. queue_ref: The queue reference, i.e. the output of the queue op. Raises: ValueError: If one of the arguments is invalid.
github-repos
def waiting_config_state(self, timeout=300):
    """Block until the device's real state matches its config state.

    Polls ``check_config_state`` every 0.1 seconds.

    Args:
        timeout: Maximum number of seconds to wait before giving up.

    Returns:
        True when the states converged, False on timeout.
    """
    deadline = time.time() + timeout
    while True:
        if self.check_config_state():
            return True
        if time.time() > deadline:
            return False
        time.sleep(0.1)
Wait until the device's real state equals its config state.

Args:
    timeout: specify how long, in seconds, the operation may take before
        it times out.

Returns:
    True if the states converged within the timeout, otherwise False.
juraj-google-style
def inputs_valid(self, outputs=None):
    """Validates the Inputs in the Transaction against given Outputs.

    Note:
        For a `CREATE` Transaction, dummy values are substituted for the
        Outputs so that parts of the validation checks evaluate to True.

    Args:
        outputs (list of Output): A list of Outputs to check the Inputs
            against.

    Returns:
        bool: If all Inputs are valid.

    Raises:
        TypeError: if the operation is neither CREATE nor TRANSFER.
    """
    if (self.operation == Transaction.CREATE):
        # CREATE has no previous outputs; feed one placeholder per input.
        return self._inputs_valid(['dummyvalue' for _ in self.inputs])
    elif (self.operation == Transaction.TRANSFER):
        return self._inputs_valid([output.fulfillment.condition_uri for output in outputs])
    else:
        allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
        raise TypeError('`operation` must be one of {}'.format(allowed_ops))
Validates the Inputs in the Transaction against given Outputs. Note: Given a `CREATE` Transaction is passed, dummy values for Outputs are submitted for validation that evaluate parts of the validation-checks to `True`. Args: outputs (:obj:`list` of :class:`~bigchaindb.common. transaction.Output`): A list of Outputs to check the Inputs against. Returns: bool: If all Inputs are valid.
codesearchnet
def set_iterator_element_layouts(self, iterator_resource_dtensor, layouts: List[layout_lib.Layout]):
    """Sets the element layouts on an iterator resource tensor.

    Args:
        iterator_resource_dtensor: a DTensor created by packing the
            individual iterator resource tensors.
        layouts: the flattened list of layouts to be applied to the
            elements emitted by the iterator resource DTensor.
    """
    # Layouts are serialized to strings for the C++ binding.
    _pywrap_dtensor_device.SetIteratorElementLayouts(context.context()._handle, iterator_resource_dtensor, [layout.to_string() for layout in layouts], self._device_info)
Sets the element layouts on an iterator resource tensor. Args: iterator_resource_dtensor: a DTensor created by packing the individiual iterator resource tensors. layouts: the flattened list of layouts to be applied to the elements emitted by the iterator resource DTensor.
github-repos
def encode(self, input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Run only the encoder half of the model.

    Missing ``attention_mask``/``position_ids`` are synthesized from
    ``input_ids``; unset output flags default to the model config.

    Args:
        input_ids: token ids of shape (batch, seq_len).
        attention_mask: optional mask; defaults to all ones.
        position_ids: optional positions; defaults to 0..seq_len-1
            broadcast over the batch.
        output_attentions / output_hidden_states / return_dict: optional
            overrides for the corresponding config flags.
        train: when True, dropout is active (deterministic=False).
        params: optional parameter dict overriding ``self.params``.
        dropout_rng: optional PRNG key for dropout.

    Returns:
        The encoder outputs produced by ``self.module.apply``.
    """
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    if attention_mask is None:
        attention_mask = jnp.ones_like(input_ids)
    if position_ids is None:
        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
    rngs = {}
    if dropout_rng is not None:
        rngs['dropout'] = dropout_rng

    def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
        # Restrict the forward pass to the encoder sub-module.
        encode_module = module._get_encoder_module()
        return encode_module(input_ids, attention_mask, position_ids, **kwargs)
    return self.module.apply({'params': params or self.params}, input_ids=jnp.array(input_ids, dtype='i4'), attention_mask=jnp.array(attention_mask, dtype='i4'), position_ids=jnp.array(position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward)
Returns: Example: ```python >>> from transformers import AutoTokenizer, FlaxMarianMTModel >>> tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=64, return_tensors="jax") >>> encoder_outputs = model.encode(**inputs) ```
github-repos
def construct_channel(self, **kwargs):
    """Delegate channel construction to the chef script.

    Used only in compatibility mode.

    Args:
        kwargs (dict): additional keyword arguments forwarded to the chef
            module's ``construct_channel``.

    Returns:
        The channel populated by the chef module.

    Raises:
        NotImplementedError: when not running in compatibility mode.
    """
    if not self.compatibility_mode:
        raise NotImplementedError('Your chef class must overrride the construct_channel method')
    config.LOGGER.info("Populating channel... ")
    return self.chef_module.construct_channel(**kwargs)
Calls chef script's construct_channel method. Used only in compatibility mode. Args: kwargs (dict): additional keyword arguments that `uploadchannel` received Returns: channel populated from construct_channel method
juraj-google-style
def _SetAllFieldTypes(self, package, desc_proto, scope):
    """Sets all the descriptor's fields's types.

    This method also sets the containing types on any extensions.

    Args:
        package: The current package of desc_proto.
        desc_proto: The message descriptor to update.
        scope: Enclosing scope of available types.
    """
    package = _PrefixWithDot(package)

    main_desc = self._GetTypeFromScope(package, desc_proto.name, scope)

    # Build the fully-qualified package name used for nested lookups.
    if package == '.':
      nested_package = _PrefixWithDot(desc_proto.name)
    else:
      nested_package = '.'.join([package, desc_proto.name])

    for field_proto, field_desc in zip(desc_proto.field, main_desc.fields):
      self._SetFieldType(field_proto, field_desc, nested_package, scope)

    for extension_proto, extension_desc in (
        zip(desc_proto.extension, main_desc.extensions)):
      extension_desc.containing_type = self._GetTypeFromScope(
          nested_package, extension_proto.extendee, scope)
      self._SetFieldType(extension_proto, extension_desc, nested_package, scope)

    # Recurse into nested message types.
    for nested_type in desc_proto.nested_type:
      self._SetAllFieldTypes(nested_package, nested_type, scope)
Sets all the descriptor's fields's types. This method also sets the containing types on any extensions. Args: package: The current package of desc_proto. desc_proto: The message descriptor to update. scope: Enclosing scope of available types.
juraj-google-style
def stop_batch_gradient(cls, x: 'TensorFluent', stop_batch: tf.Tensor) -> 'TensorFluent':
    """Return a copy of ``x`` with gradients conditionally stopped per batch.

    Args:
        x: The input fluent.
        stop_batch: A boolean tf.Tensor with shape=(batch_size, ...);
            True entries get their gradient stopped.

    Returns:
        A TensorFluent that conditionally stops backpropagation of
        gradient computations.
    """
    scope = x.scope.as_list()
    batch = x.batch
    # Select the gradient-stopped tensor only where stop_batch is True.
    tensor = tf.where(stop_batch, tf.stop_gradient(x.tensor), x.tensor)
    return TensorFluent(tensor, scope, batch)
Returns a copy of the inputs fluent with stop_gradient applied at batch level. Args: x: The input fluent. stop_batch: A boolean tf.Tensor with shape=(batch_size, ...) Returns: A TensorFluent that conditionally stops backpropagation of gradient computations.
juraj-google-style
def last_updated(self, path):
    """Return the file's modification time as UNIX epoch seconds.

    Args:
        path: string path of file.

    Returns:
        float UNIX Epoch time.

    Raises:
        BeamIOError: if ``path`` does not exist.
    """
    if self.exists(path):
        return os.path.getmtime(path)
    raise BeamIOError('Path does not exist: %s' % path)
Get UNIX Epoch time in seconds on the FileSystem. Args: path: string path of file. Returns: float UNIX Epoch time Raises: ``BeamIOError``: if path doesn't exist.
github-repos
def from_config(cls, config: dict):
    """Build an event object from an event configuration dictionary.

    Args:
        config (dict): Event Configuration dictionary.

    Returns:
        A new instance of ``cls`` populated from ``config``.
    """
    get = config.get
    return cls(
        get('id'),
        get('type'),
        get('data', dict()),
        get('origin', None),
        get('timestamp', None),
        get('object_type', None),
        get('object_id', None),
        get('object_key', None),
    )
Create an event object from an event dictionary object. Args: config (dict): Event Configuration dictionary.
codesearchnet
def get_input_at(self, node_index):
    """Retrieves the input tensor(s) of a layer at a given node.

    Args:
        node_index: Integer, index of the node from which to retrieve the
            attribute. E.g. ``node_index=0`` corresponds to the layer's
            first input node.

    Returns:
        A tensor (or list of tensors if the layer has multiple inputs).
    """
    # Delegates to the shared node-attribute lookup helper.
    return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')
Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. `node_index=0` will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.
github-repos
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True):
    """Compute absolute errors between predictions and ground truth.

    If ``smooth`` is True, the error array is smoothed with an
    exponentially weighted moving average (EWMA).

    Args:
        y (array): Ground truth, shape (n, 1).
        y_hat (array): Predictions array, shape (n, 1).
        smoothing_window (float): Size of the smoothing window, expressed
            as a proportion of the total length of ``y``.
        smooth (bool): whether the returned errors should be smoothed
            with EWMA.

    Returns:
        array: (smoothed) absolute errors.
    """
    # Bug fix: the original indexed with `[(:, 0)]`, which is a syntax
    # error; `[:, 0]` selects the first (only) column.
    errors = np.abs(y - y_hat)[:, 0]
    if not smooth:
        return errors
    smoothing_window = int(smoothing_window * len(y))
    return pd.Series(errors).ewm(span=smoothing_window).mean().values
Compute an array of absolute errors comparing predictions and expected output. If smooth is True, apply EWMA to the resulting array of errors. Args: y (array): Ground truth. y_hat (array): Predictions array. smoothing_window (float): Size of the smoothing window, expressed as a proportion of the total length of y. smooth (bool): whether the returned errors should be smoothed with EWMA. Returns: (array): errors
codesearchnet
def __init__(self, sess, thread_name_filter=None, pass_through_operrors=False):
    """Constructor of `BaseDebugWrapperSession`.

    Args:
        sess: An (unwrapped) TensorFlow session instance, a subtype of
            `BaseSession` or `tf.MonitoredSession`.
        thread_name_filter: Start-anchored regular-expression allowlist
            for names of threads on which the wrapper session is active;
            the default None means all threads.
        pass_through_operrors: If True, all captured OpErrors are
            propagated; by default all OpErrors are captured.

    Raises:
        ValueError: On invalid `OnSessionInitAction` value.
        NotImplementedError: If the REMOTE_INSTR_LOOP action is requested.
    """
    _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession))
    self._sess = sess
    self._thread_name_filter_pattern = re.compile(thread_name_filter) if thread_name_filter else None
    self._pass_through_operrors = pass_through_operrors
    # Number of times `run` has been called on this wrapper.
    self._run_call_count = 0
    # Give the subclass a chance to veto or redirect initialization.
    response = self.on_session_init(OnSessionInitRequest(self._sess))
    _check_type(response, OnSessionInitResponse)
    if response.action == OnSessionInitAction.PROCEED:
        pass
    elif response.action == OnSessionInitAction.REMOTE_INSTR_LOOP:
        raise NotImplementedError('OnSessionInitAction REMOTE_INSTR_LOOP has not been implemented.')
    else:
        raise ValueError('Invalid OnSessionInitAction value: %s' % response.action)
    self._default_session_context_manager = None
    # Cache of callables built from run options, to avoid rebuilding them.
    self._cached_callables_from_options = {}
Constructor of `BaseDebugWrapperSession`. Args: sess: An (unwrapped) TensorFlow session instance. It should be a subtype of `BaseSession` or `tf.MonitoredSession`. thread_name_filter: Regular-expression filter (allowlist) for name(s) of thread(s) on which the wrapper session will be active. This regular expression is used in a start-anchored fashion on the thread name, i.e., by applying the `match` method of the compiled pattern. The default `None` means that the wrapper session will be active on all threads. E.g., r"MainThread$", r"QueueRunnerThread.*". pass_through_operrors: If True, all captured OpErrors will be propagated. By default this captures all OpErrors. Raises: ValueError: On invalid `OnSessionInitAction` value. NotImplementedError: If a non-DirectSession sess object is received.
github-repos
def GetPathInfo(self, timestamp=None):
    """Generates a summary about the path record.

    Args:
        timestamp: A point in time from which the data should be
            retrieved.

    Returns:
        A `rdf_objects.PathInfo` instance.
    """
    path_info_timestamp = self._LastEntryTimestamp(self._path_infos, timestamp)
    try:
      result = self._path_infos[path_info_timestamp].Copy()
    except KeyError:
      # No stored snapshot at or before `timestamp`: build a fresh one.
      result = rdf_objects.PathInfo(
          path_type=self._path_type, components=self._components)

    # Attach the most recent stat and hash entries (None when absent).
    stat_entry_timestamp = self._LastEntryTimestamp(self._stat_entries, timestamp)
    result.last_stat_entry_timestamp = stat_entry_timestamp
    result.stat_entry = self._stat_entries.get(stat_entry_timestamp)

    hash_entry_timestamp = self._LastEntryTimestamp(self._hash_entries, timestamp)
    result.last_hash_entry_timestamp = hash_entry_timestamp
    result.hash_entry = self._hash_entries.get(hash_entry_timestamp)

    return result
Generates a summary about the path record. Args: timestamp: A point in time from which the data should be retrieved. Returns: A `rdf_objects.PathInfo` instance.
juraj-google-style
def _ReadSupportedOS(self, definition_values, definition_object, name):
    """Reads the optional artifact or source type supported OS.

    Args:
        definition_values (dict[str, object]): artifact definition values.
        definition_object (ArtifactDefinition|SourceType): the definition
            object.
        name (str): name of the artifact definition.

    Raises:
        FormatError: if there are undefined supported operating systems
            or the value is not a list.
    """
    supported_os = definition_values.get('supported_os', [])

    if not isinstance(supported_os, list):
      raise errors.FormatError(
          'Invalid supported_os type: {0!s}'.format(type(supported_os)))

    # Every listed OS must be one of the known supported names.
    undefined_supported_os = set(supported_os).difference(self.supported_os)
    if undefined_supported_os:
      error_string = (
          'Artifact definition: {0:s} undefined supported operating system: '
          '{1:s}.').format(name, ', '.join(undefined_supported_os))
      raise errors.FormatError(error_string)

    definition_object.supported_os = supported_os
Reads the optional artifact or source type supported OS. Args: definition_values (dict[str, object]): artifact definition values. definition_object (ArtifactDefinition|SourceType): the definition object. name (str): name of the artifact definition. Raises: FormatError: if there are undefined supported operating systems.
juraj-google-style
def page(title, description, element_list=None, tab_list=None):
    """Build the dictionary describing a renderable page.

    A simple container for displaying multiple types of information;
    single elements or tabs are wrapped into one-item lists.

    Args:
        title: The title to display.
        description: A description of the section.
        element_list: The list of elements to display. A single element
            is wrapped in a list.
        tab_list: A list of tabs to display. A single tab is wrapped in
            a list.

    Returns:
        A dictionary with metadata specifying that it is to be rendered
        as a page containing multiple elements and/or tabs.
    """
    data = {}
    if element_list is not None:
        data['Elements'] = element_list if isinstance(element_list, list) else [element_list]
    if tab_list is not None:
        data['Tabs'] = tab_list if isinstance(tab_list, list) else [tab_list]
    return {'Type': 'Page', 'Title': title, 'Description': description, 'Data': data}
Returns a dictionary representing a new page to display elements. This can be thought of as a simple container for displaying multiple types of information. The ``section`` method can be used to create separate tabs. Args: title: The title to display description: A description of the section element_list: The list of elements to display. If a single element is given it will be wrapped in a list. tab_list: A list of tabs to display. Returns: A dictionary with metadata specifying that it is to be rendered as a page containing multiple elements and/or tabs.
codesearchnet
def reindex(self, kdims=None, vdims=None):
    """Reindexes Dataset, dropping static or supplied kdims.

    Creates a new object with a reordered or reduced set of key
    dimensions. By default drops all non-varying key dimensions.

    Args:
        kdims (optional): New list of key dimensions.
        vdims (optional): New list of value dimensions.

    Returns:
        Reindexed object.
    """
    gridded = self.interface.gridded
    scalars = []
    if gridded:
        # On gridded data, dimensions with a single coordinate are scalar
        # and can be dropped without losing information.
        coords = [(d, self.interface.coords(self, d.name)) for d in self.kdims]
        scalars = [d for (d, vs) in coords if (len(vs) == 1)]
    if (kdims is None):
        key_dims = [d for d in self.kdims if (((not vdims) or (d not in vdims)) and (not (d in scalars)))]
    elif (not isinstance(kdims, list)):
        key_dims = [self.get_dimension(kdims, strict=True)]
    else:
        key_dims = [self.get_dimension(k, strict=True) for k in kdims]
    dropped = [d for d in self.kdims if ((not (d in key_dims)) and (not (d in scalars)))]
    new_type = None
    if (vdims is None):
        val_dims = [d for d in self.vdims if ((not kdims) or (d not in kdims))]
    else:
        val_dims = [self.get_dimension(v, strict=True) for v in vdims]
        # Reducing the number of value dimensions may map to another type.
        new_type = self._vdim_reductions.get(len(val_dims), type(self))
    data = self.interface.reindex(self, key_dims, val_dims)
    datatype = self.datatype
    if (gridded and dropped):
        # Dropping key dims breaks gridded layouts; restrict the datatype
        # list to non-gridded interfaces.
        interfaces = self.interface.interfaces
        datatype = [dt for dt in datatype if (not getattr(interfaces.get(dt, None), 'gridded', True))]
    return self.clone(data, kdims=key_dims, vdims=val_dims, new_type=new_type, datatype=datatype)
Reindexes Dataset dropping static or supplied kdims

Creates a new object with a reordered or reduced set of key
dimensions. By default drops all non-varying key dimensions.

Args:
    kdims (optional): New list of key dimensions
    vdims (optional): New list of value dimensions

Returns:
    Reindexed object
codesearchnet
def AddInformationalOptions(self, argument_group):
    """Adds the informational options to the argument group.

    Args:
        argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    options = (
        ('-d', '--debug', 'debug', 'Enable debug output.'),
        ('-q', '--quiet', 'quiet', 'Disable informational output.'),
    )
    for short_flag, long_flag, dest, help_text in options:
        argument_group.add_argument(
            short_flag, long_flag, dest=dest, action='store_true',
            default=False, help=help_text)
Adds the informational options to the argument group. Args: argument_group (argparse._ArgumentGroup): argparse argument group.
juraj-google-style
def get_category(self, column):
    """Map numeric values back to their categories.

    Args:
        column (pandas.Series): Values to transform into categories.

    Returns:
        pandas.Series: category labels aligned with ``column``; entries
        whose value falls in no interval remain NaN.
    """
    result = pd.Series(index=column.index)
    for category, stats in self.probability_map.items():
        lower, upper = stats[0]
        # Strictly-inside-the-interval membership mask.
        in_interval = (lower < column) & (column < upper)
        result[in_interval] = category
    return result
Returns categories for the specified numeric values Args: column(pandas.Series): Values to transform into categories Returns: pandas.Series
juraj-google-style
def dump(self, file, payload):
    """Serialize ``payload`` as JSON to an open file.

    Writes human-readable JSON: two-space indentation and unescaped
    non-ASCII characters.

    Args:
        file: Open file-like object. Must be open for writing.
        payload: The Json object to write to file.

    Returns:
        None.
    """
    text = json.dumps(payload, indent=2, ensure_ascii=False)
    file.write(text)
Dump json oject to open file output. Writes json with 2 spaces indentation. Args: file: Open file-like object. Must be open for writing. payload: The Json object to write to file. Returns: None.
juraj-google-style
def ignore(wrapped):
    """Decorator marking a Python callable to be ignored.

    If a Python callable is decorated with ``@spl.ignore``, the function
    is ignored by ``spl-python-extract.py``.

    Args:
        wrapped: Function that will be ignored.

    Returns:
        A wrapper that forwards all calls to ``wrapped`` and carries the
        SPL "ignore" markers.
    """
    @functools.wraps(wrapped)
    def _ignore(*args, **kwargs):
        return wrapped(*args, **kwargs)
    # Markers consumed by the SPL extraction tooling.
    _ignore._splpy_optype = _OperatorType.Ignore
    _ignore._splpy_file = inspect.getsourcefile(wrapped)
    return _ignore
Decorator to ignore a Python function. If a Python callable is decorated with ``@spl.ignore`` then function is ignored by ``spl-python-extract.py``. Args: wrapped: Function that will be ignored.
codesearchnet
def parse_newsgroup(line):
    """Parse a newsgroup info line into native Python types.

    Args:
        line: An info response line containing newsgroup info.

    Returns:
        A tuple of group name, low-water as integer, high-water as
        integer and posting status.

    Raises:
        ValueError: If the newsgroup info cannot be parsed.
    """
    fields = line.split()
    try:
        group, low, high, status = fields[0], int(fields[1]), int(fields[2]), fields[3]
    except (IndexError, ValueError):
        raise ValueError("Invalid newsgroup info")
    return group, low, high, status
Parse a newsgroup info line to python types.

Args:
    line: An info response line containing newsgroup info.

Returns:
    A tuple of group name, low-water as integer, high-water as integer
    and posting status.

Raises:
    ValueError: If the newsgroup info cannot be parsed.

Note:
    Posting status is a character that is one of (but not limited to):
        "y" posting allowed
        "n" posting not allowed
        "m" posting is moderated
juraj-google-style
def get_by_name(self, name):
    """Retrieve a template by its name.

    Args:
        name (str): Name of the template to retrieve.

    Returns:
        Template: the template with all of its versions resolved.

    Raises:
        LagoMissingTemplateError: if no template is found.
    """
    try:
        spec = self._dom.get('templates', {})[name]
    except KeyError:
        raise LagoMissingTemplateError(name, self._path)
    # Build one TemplateVersion per declared version; the qualified name
    # is "<repo>:<template>:<version>".
    return Template(
        name=name,
        versions={
            ver_name: TemplateVersion(
                name='%s:%s:%s' % (self.name, name, ver_name),
                source=self._providers[ver_spec['source']],
                handle=ver_spec['handle'],
                timestamp=ver_spec['timestamp'],
            )
            for ver_name, ver_spec in spec['versions'].items()
        },
    )
Retrieve a template by it's name Args: name (str): Name of the template to retrieve Raises: LagoMissingTemplateError: if no template is found
juraj-google-style
def extend(self, name, opts, info):
    """Construct a sub-type derived from this type.

    Args:
        name (str): The name of the new sub-type.
        opts (dict): The type options for the sub-type.
        info (dict): The type info for the sub-type.

    Returns:
        A new instance of this type's class with merged info/opts and
        ``subof`` set to this type's name.
    """
    merged_info = self.info.copy()
    merged_info.update(info)
    merged_opts = self.opts.copy()
    merged_opts.update(opts)
    subtype = self.__class__(self.modl, name, merged_info, merged_opts)
    subtype.subof = self.name
    return subtype
Extend this type to construct a sub-type. Args: name (str): The name of the new sub-type. opts (dict): The type options for the sub-type. info (dict): The type info for the sub-type. Returns: (synapse.types.Type): A new sub-type instance.
codesearchnet
def plot_to_svg(plot, width, height, unit=''):
    """Converts a plot (list of layers) into an SVG document.

    Args:
        plot (list): list of layers that make up the plot.
        width (float): the width of the resulting image.
        height (float): the height of the resulting image.
        unit (str): the units of the resulting image if not pixels.

    Returns:
        str: A stringified XML document representing the image.

    NOTE(review): the xmlns URL strings below appear truncated (likely an
    extraction artifact) and the flip comprehension unpacks `plot` as
    points although it is documented as a list of layers -- verify
    against the upstream source.
    """
    # Flip the y axis so the plot renders with y increasing upward.
    flipped_plot = [(x, -y) for x, y in plot]
    aspect_ratio = height / width
    view_box = calculate_view_box(flipped_plot, aspect_ratio=aspect_ratio)
    view_box_str = '{} {} {} {}'.format(*view_box)
    # Stroke width scales with the view-box width.
    stroke_thickness = STROKE_THICKNESS * (view_box[2])
    svg = ET.Element('svg', attrib={
        'xmlns': 'http:
        'xmlns:inkscape': 'http:
        'width': '{}{}'.format(width, unit),
        'height': '{}{}'.format(height, unit),
        'viewBox': view_box_str})
    for i, layer in enumerate(flipped_plot):
        # Each layer becomes an Inkscape layer group with a cycling color.
        group = ET.SubElement(svg, 'g', attrib={
            'inkscape:label': '{}-layer'.format(i),
            'inkscape:groupmode': 'layer',
        })
        color = PLOT_COLORS[i % len(PLOT_COLORS)]
        ET.SubElement(group, 'path', attrib={
            'style': 'stroke-width: {}; stroke: {};'.format(stroke_thickness, color),
            'fill': 'none',
            'd': layer_to_path(layer)
        })
    try:
        return ET.tostring(svg, encoding='unicode')
    except LookupError:
        # Python 2 fallback: 'unicode' codec lookup fails, return bytes.
        return ET.tostring(svg)
Converts a plot (list of layers) into an SVG document. Args: plot (list): list of layers that make up the plot width (float): the width of the resulting image height (float): the height of the resulting image unit (str): the units of the resulting image if not pixels Returns: str: A stringified XML document representing the image
juraj-google-style
def _locate_element(dom, el_content, transformer=None):
    """Find elements in ``dom`` whose content matches ``el_content``.

    Args:
        dom (obj): HTMLElement tree.
        el_content (str): Content of the element to pick from ``dom``.
        transformer (fn, default None): Optional function applied to each
            element's content before comparison (e.g. ``str.strip``).

    Returns:
        list: Matching HTMLElements.
    """
    return dom.find(None, fn=utils.content_matchs(el_content, transformer))
Find element containing `el_content` in `dom`.

Apply the `transformer` function to the content of all elements in `dom`
in order to correctly transform them before matching against
`el_content`.

Args:
    dom (obj): HTMLElement tree.
    el_content (str): Content of the element to pick from `dom`.
    transformer (fn, default None): Transforming function.

Note:
    The `transformer` parameter can be, for example, a simple lambda::

        lambda x: x.strip()

Returns:
    list: Matching HTMLElements.
codesearchnet
def _parse_alt_title(html_chunk):
    """Parse the book title from the ``alt`` attribute of a cover image.

    Used when the title is not found where it should be.

    Args:
        html_chunk (obj): HTMLElement containing a slice of the page with
            details.

    Returns:
        str: Book's title.

    Raises:
        UserWarning: when no ``<img alt="...">`` element is present.
    """
    images = html_chunk.find("img", fn=has_param("alt"))
    if not images:
        raise UserWarning("Can't find alternative title source!")
    first_image = images[0]
    return first_image.params["alt"].strip()
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
juraj-google-style
def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype: if not dtype.is_floating_point: raise ValueError(f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype") logger.info(f'Instantiating {cls.__name__} model under default dtype {dtype}.') dtype_orig = torch.get_default_dtype() torch.set_default_dtype(dtype) return dtype_orig
Change the default dtype and return the previous one. This is needed when wanting to instantiate the model under specific dtype. Args: dtype (`torch.dtype`): a floating dtype to set to. Returns: `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was modified. If it wasn't, returns `None`. Note `set_default_dtype` currently only works with floating-point types and asserts if for example, `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception.
github-repos
def _ParseRecord(self, parser_mediator, file_object):
    """Parses an event record.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        ParseError: if the event record cannot be read.
    """
    header_record_offset = file_object.tell()
    token_type = self._ParseTokenType(file_object, header_record_offset)
    if (token_type not in self._HEADER_TOKEN_TYPES):
        raise errors.ParseError('Unsupported header token type: 0x{0:02x}'.format(token_type))
    (token_type, token_data) = self._ParseToken(file_object, header_record_offset)
    if (token_data.format_version != 11):
        raise errors.ParseError('Unsupported format version type: {0:d}'.format(token_data.format_version))
    timestamp = (token_data.microseconds + (token_data.timestamp * definitions.MICROSECONDS_PER_SECOND))
    event_type = token_data.event_type
    header_record_size = token_data.record_size
    record_end_offset = (header_record_offset + header_record_size)
    event_tokens = []
    return_token_values = None
    file_offset = file_object.tell()
    # Read tokens until the trailer token or the end of the record.
    while (file_offset < record_end_offset):
        (token_type, token_data) = self._ParseToken(file_object, file_offset)
        if (not token_data):
            raise errors.ParseError('Unsupported token type: 0x{0:02x}'.format(token_type))
        file_offset = file_object.tell()
        if (token_type == self._TOKEN_TYPE_AUT_TRAILER):
            break
        token_type_string = self._TOKEN_TYPES.get(token_type, 'UNKNOWN')
        token_values = self._FormatTokenData(token_type, token_data)
        event_tokens.append({token_type_string: token_values})
        # Remember the (last) return token's values for the event data.
        if (token_type in (self._TOKEN_TYPE_AUT_RETURN32, self._TOKEN_TYPE_AUT_RETURN64)):
            return_token_values = token_values
    # The trailer token must match the header's signature and size.
    if (token_data.signature != self._TRAILER_TOKEN_SIGNATURE):
        raise errors.ParseError('Unsupported signature in trailer token.')
    if (token_data.record_size != header_record_size):
        raise errors.ParseError('Mismatch of event record size between header and trailer token.')
    event_data = BSMEventData()
    event_data.event_type = event_type
    event_data.extra_tokens = event_tokens
    event_data.offset = header_record_offset
    event_data.record_length = header_record_size
    event_data.return_value = return_token_values
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an event record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. Raises: ParseError: if the event record cannot be read.
codesearchnet
def AllTypes():
    """Get a list of all available asset types.

    Returns:
        list: of AssetType items.
    """
    return [AssetType.CreditFlag, AssetType.DutyFlag, AssetType.GoverningToken, AssetType.UtilityToken, AssetType.Currency, AssetType.Share, AssetType.Invoice, AssetType.Token]
Get a list of all available asset types. Returns: list: of AssetType items.
codesearchnet
def IsCompatible(self, allow_py3=False, raise_exception=False):
    """Ensure that the Python version we are using is compatible.

    This prints an error message if not compatible. Compatible versions
    are 2.6 and 2.7, and Python 3 at or above the minimum supported
    version when `allow_py3` is True. Support for 2.6 is not guaranteed,
    so a warning is emitted for it.

    Args:
        allow_py3: bool, True if a Python 3 interpreter may run gcloud;
            if False, Python 3 produces an error.
        raise_exception: bool, True to raise an exception rather than
            printing the error and returning.

    Raises:
        Error: If not compatible and raise_exception is True.

    Returns:
        bool, True if the version is valid, False otherwise.
    """
    error = None
    if not self.version:
        error = 'ERROR: Your current version of Python is not compatible with the Google Cloud SDK. {0}\n'.format(self.SupportedVersionMessage(allow_py3))
    elif self.version[0] < 3:
        # Python 2: only versions at or above the required minimum pass.
        if self.version < PythonVersion.MIN_REQUIRED_PY2_VERSION:
            error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))
    elif not allow_py3:
        error = 'ERROR: Python 3 and later is not compatible with the Google Cloud SDK. {0}\n'.format(self.SupportedVersionMessage(allow_py3))
    elif self.version < PythonVersion.MIN_SUPPORTED_PY3_VERSION:
        error = 'ERROR: Python {0}.{1} is not compatible with the Google Cloud SDK. {2}\n'.format(self.version[0], self.version[1], self.SupportedVersionMessage(allow_py3))
    if error:
        if raise_exception:
            raise Error(error)
        sys.stderr.write(error)
        sys.stderr.write(PythonVersion.ENV_VAR_MESSAGE)
        return False
    if self.version >= self.MIN_REQUIRED_PY2_VERSION and self.version < self.MIN_SUPPORTED_PY2_VERSION:
        # Python 2.6.x still runs but support is withdrawn: warn only.
        sys.stderr.write('WARNING: Python 2.6.x is no longer officially supported by the Google Cloud SDK\nand may not function correctly. {0}\n{1}'.format(self.SupportedVersionMessage(allow_py3), PythonVersion.ENV_VAR_MESSAGE))
    return True
Ensure that the Python version we are using is compatible. This will print an error message if not compatible. Compatible versions are 2.6 and 2.7 and > 3.4 if allow_py3 is True. We don't guarantee support for 2.6 so we want to warn about it. Args: allow_py3: bool, True if we should allow a Python 3 interpreter to run gcloud. If False, this returns an error for Python 3. raise_exception: bool, True to raise an exception rather than printing the error and exiting. Raises: Error: If not compatible and raise_exception is True. Returns: bool, True if the version is valid, False otherwise.
github-repos
def __init__(self, sbi_id: str):
    """Create a SBI object.

    Args:
        sbi_id (str): SBI Identifier.

    Raises:
        KeyError: if the specified SBI does not exist.
    """
    SchedulingObject.__init__(self, SBI_KEY, sbi_id)
    # Fail fast with KeyError when the SBI id is unknown.
    self._check_object_exists()
Create a SBI object. Args: sbi_id (str): SBI Identifier Raises: KeyError, if the specified SBI does not exist.
juraj-google-style
def reduce_sum(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None):
    """Sum-reduction over 1 or more axes.

    If `reduced_dim` is present, then only that dimension is reduced out.
    Alternatively, specify `output_shape`. Do not specify both. If
    neither is specified, all dimensions are reduced out.

    Args:
        x: a Tensor.
        disable_positional_args: None (placeholder; must not be set).
        output_shape: an optional Shape. Must be a subsequence of x.shape.
        reduced_dim: an optional mtf.Dimension.
        name: an optional string.

    Returns:
        a Tensor.
    """
    output_shape = convert_to_shape(output_shape)
    reduced_dim = convert_to_dimension(reduced_dim)
    assert disable_positional_args is None
    output_shape = _reduction_output_shape(x, output_shape, reduced_dim)
    if output_shape == x.shape:
        # Nothing to reduce: reducing to the same shape is the identity.
        return x
    return ReduceOperation(x, output_shape, "SUM", name=name).outputs[0]
Reduction on 1 or more axes. If reduced_dim is present, then only that dimension is reduced out. Alternatively, specify output_shape. Do not specify both reduced_dim and output_shape. If neither is specified, then all dimensions are reduced out. Args: x: a Tensor disable_positional_args: None output_shape: an optional Shape. Must be a subsequence of x.shape. reduced_dim: a mtf.Dimension name: an optional string Returns: a Tensor
juraj-google-style
def inspect(logdir='', event_file='', tag=''):
    """Main function for inspector that prints out a digest of event files.

    Args:
        logdir: A log directory that contains event files.
        event_file: Or, a particular event file path.
        tag: An optional tag name to query for.

    Raises:
        ValueError: If neither logdir and event_file are given, or both
            are given.

    NOTE(review): this function shadows the stdlib `inspect` module name
    within this module.
    """
    print(((PRINT_SEPARATOR + 'Processing event files... (this can take a few minutes)\n') + PRINT_SEPARATOR))
    inspection_units = get_inspection_units(logdir, event_file, tag)
    for unit in inspection_units:
        if tag:
            print('Event statistics for tag {} in {}:'.format(tag, unit.name))
        else:
            # Without a tag filter, list the unique tags first.
            print('These tags are in {}:'.format(unit.name))
            print_dict(get_unique_tags(unit.field_to_obs))
            print(PRINT_SEPARATOR)
            print('Event statistics for {}:'.format(unit.name))
        print_dict(get_dict_to_print(unit.field_to_obs), show_missing=(not tag))
        print(PRINT_SEPARATOR)
Main function for inspector that prints out a digest of event files. Args: logdir: A log directory that contains event files. event_file: Or, a particular event file path. tag: An optional tag name to query for. Raises: ValueError: If neither logdir and event_file are given, or both are given.
codesearchnet
def generate_test_run_log_path(self):
    """Generates the log path for a test run.

    The log path includes a timestamp that is set in this call; there is
    usually a minor difference between this timestamp and the actual
    start of the test run, because the path must exist *before* the run
    starts so all of its information can be captured.

    The generated value can also be accessed via `self.root_output_path`.

    Returns:
        String, the generated log path.
    """
    self._logger_start_time = logger.get_log_file_timestamp()
    self.root_output_path = os.path.join(self._log_dir, self._testbed_name, self._logger_start_time)
    return self.root_output_path
Geneartes the log path for a test run. The log path includes a timestamp that is set in this call. There is usually a minor difference between this timestamp and the actual starting point of the test run. This is because the log path must be set up *before* the test run actually starts, so all information of a test run can be properly captured. The generated value can be accessed via `self.root_output_path`. Returns: String, the generated log path.
github-repos
def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False):
    """Build the Parallelism object used to split each training batch.

    Reads every remaining `data_parallelism` argument from tf.flags.FLAGS,
    so flag names must match the parameter names of `data_parallelism`.

    Args:
      daisy_chain_variables: whether to copy variables in a daisy chain on GPUs.
      all_workers: whether the devices are all async workers or just this one.

    Returns:
      an expert_utils.Parallelism.
    """
    # inspect.getargspec was removed in Python 3.11; prefer getfullargspec
    # when present while staying compatible with older interpreters.
    argspec_fn = getattr(inspect, 'getfullargspec', inspect.getargspec)
    dp_arg_names = argspec_fn(data_parallelism).args
    # These two are passed explicitly below, so never read them from FLAGS.
    blacklist = ['daisy_chain_variables', 'all_workers']
    kwargs = {}
    for arg in dp_arg_names:
        if arg in blacklist:
            continue
        kwargs[arg] = getattr(tf.flags.FLAGS, arg)
    return data_parallelism(
        daisy_chain_variables=daisy_chain_variables,
        all_workers=all_workers,
        **kwargs)
Over which devices do we split each training batch. In old-fashioned async mode, we split the batch over all GPUs on the current worker. In sync mode, we split the batch over all the parameter server GPUs. This function returns an expert_utils.Parallelism object, which can be used to build the model. It is configured in a way that any variables created by `tf.get_variable` will be assigned to the parameter servers and shared between datashards. Args: daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. all_workers: whether the devices are all async workers or just this one. Returns: a expert_utils.Parallelism.
codesearchnet
def _parse_apps_to_ignore(self): apps_to_ignore = set() section_title = 'applications_to_ignore' if self._parser.has_section(section_title): apps_to_ignore = set(self._parser.options(section_title)) return apps_to_ignore
Parse the applications to ignore in the config. Returns: set
codesearchnet
def generate_workflow_description(self):
    """Generate the workflow JSON used to launch this workflow via the GBDX API.

    Builds `self.definition` from the workflow skeleton, batch values,
    per-task JSON, and the optional callback, then returns it.

    Returns:
        dict: the workflow definition (also stored on `self.definition`).

    Raises:
        WorkflowError: if the workflow contains no tasks.
    """
    if not self.tasks:
        raise WorkflowError('Workflow contains no tasks, and cannot be executed.')
    self.definition = self.workflow_skeleton()
    if self.batch_values:
        self.definition["batch_values"] = self.batch_values
    # Every value currently wired into any task's input port; used below to
    # decide which multiplex output ports are actually consumed.
    all_input_port_values = [t.inputs.__getattribute__(input_port_name).value for t in self.tasks for input_port_name in t.inputs._portnames]
    for task in self.tasks:
        output_multiplex_ports_to_exclude = []
        # Multiplex output ports declared by this task.
        multiplex_output_port_names = [portname for portname in task.outputs._portnames if task.outputs.__getattribute__(portname).is_multiplex]
        for p in multiplex_output_port_names:
            # 'source:<task>:<port>' is how downstream inputs reference this
            # output; exclude the port if nothing references it.
            output_port_reference = 'source:' + task.name + ':' + p
            if output_port_reference not in all_input_port_values:
                output_multiplex_ports_to_exclude.append(p)
        task_def = task.generate_task_workflow_json(
            output_multiplex_ports_to_exclude=output_multiplex_ports_to_exclude)
        self.definition['tasks'].append(task_def)
    if self.callback:
        self.definition['callback'] = self.callback
    return self.definition
Generate workflow json for launching the workflow against the gbdx api Args: None Returns: json string
juraj-google-style
def MakePartialStat(self, fd):
    """Build a best-effort 'stat' dict for an object with no real OS stat.

    Args:
        fd: The object with no stat.

    Returns:
        A dictionary mimicking an OS-level stat for objects which are not
        actually files; unknown fields default to 0.
    """
    # Objects with the 'Container' behaviour are presented as directories.
    is_directory = 'Container' in fd.behaviours
    mode = self.default_dir_mode if is_directory else self.default_file_mode
    partial_stat = {
        'pathspec': fd.Get(fd.Schema.PATHSPEC, ''),
        'st_atime': fd.Get(fd.Schema.LAST, 0),
        'st_blksize': 0,
        'st_blocks': 0,
        'st_ctime': 0,
        'st_dev': 0,
        'st_gid': 0,
        'st_ino': 0,
        'st_mode': mode,
        'st_mtime': 0,
        'st_nlink': 0,
        'st_rdev': 0,
        'st_size': fd.Get(fd.Schema.SIZE, 0),
        'st_uid': 0,
    }
    return partial_stat
Try and give a 'stat' for something not in the data store. Args: fd: The object with no stat. Returns: A dictionary corresponding to what we'll say the 'stat' is for objects which are not actually files, so have no OS level stat.
codesearchnet
def db996(self, value=None):
    """Set IDD Field `db996`.

    Dry-bulb temperature corresponding to 99.6% annual cumulative
    frequency of occurrence (cold conditions), in degrees C.

    Args:
        value (float): value for IDD Field `db996`. A None value is
            treated as missing and stored unchecked.

    Raises:
        ValueError: if `value` is not coercible to float
    """
    if value is None:
        # Missing value: store as-is without validation.
        self._db996 = None
        return
    try:
        coerced = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `db996`'.format(value))
    self._db996 = coerced
Corresponds to IDD Field `db996` Dry-bulb temperature corresponding to 99.6% annual cumulative frequency of occurrence (cold conditions) Args: value (float): value for IDD Field `db996` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def assert_integer(x, message=None, name=None):
    """Assert that `x` is of (non-quantized) integer dtype.

    Example of adding a dependency to an operation:

    ```python
    with tf.control_dependencies([tf.compat.v1.assert_integer(x)]):
      output = tf.reduce_sum(x)
    ```

    Args:
      x: `Tensor` whose basetype is integer and is not quantized.
      message: A string to prefix to the default message.
      name: A name for this operation (optional). Defaults to "assert_integer".

    Raises:
      TypeError: If `x.dtype` is anything other than non-quantized integer.

    Returns:
      A `no_op` that does nothing.  Type can be determined statically.
    """
    with ops.name_scope(name, 'assert_integer', [x]):
        x = ops.convert_to_tensor(x, name='x')
        if x.dtype.is_integer:
            # Statically proven: emit a no-op dependency anchor.
            return control_flow_ops.no_op('statically_determined_was_integer')
        # Eager tensors have no meaningful graph name; use a generic label.
        tensor_label = 'tensor' if context.executing_eagerly() else x.name
        raise TypeError(
            '%sExpected "x" to be integer type.  Found: %s of dtype %s'
            % (_message_prefix(message), tensor_label, x.dtype))
Assert that `x` is of integer dtype. Example of adding a dependency to an operation: ```python with tf.control_dependencies([tf.compat.v1.assert_integer(x)]): output = tf.reduce_sum(x) ``` Args: x: `Tensor` whose basetype is integer and is not quantized. message: A string to prefix to the default message. name: A name for this operation (optional). Defaults to "assert_integer". Raises: TypeError: If `x.dtype` is anything other than non-quantized integer. Returns: A `no_op` that does nothing. Type can be determined statically.
github-repos
def delete(self, paths):
    """Delete files or directories at the provided paths.

    Directories are deleted recursively.

    Args:
        paths: list of paths giving the file objects to be deleted

    Raises:
        ``BeamIOError``: if any of the delete operations fail
    """
    failures = {}
    for path, error in self._blobstorageIO().delete_paths(paths).items():
        if error is not None:
            failures[path] = error
    if failures:
        raise BeamIOError('Delete operation failed', failures)
Deletes files or directories at the provided paths. Directories will be deleted recursively. Args: paths: list of paths that give the file objects to be deleted Raises: ``BeamIOError``: if any of the delete operations fail
github-repos
def _make_token_async(scopes, service_account_id):
    """Get a fresh authentication token (NDB tasklet).

    Args:
        scopes: A list of scopes.
        service_account_id: Internal-use only.

    Raises:
        ndb.Return with a tuple (token, expiration_time) where
        expiration_time is seconds since the epoch — this is the tasklet
        idiom for returning a value, not an error.
    """
    rpc = app_identity.create_rpc()
    app_identity.make_get_access_token_call(rpc, scopes, service_account_id)
    # Yielding the RPC suspends the tasklet until the call completes.
    (token, expires_at) = (yield rpc)
    raise ndb.Return((token, expires_at))
Get a fresh authentication token. Args: scopes: A list of scopes. service_account_id: Internal-use only. Raises: An ndb.Return with a tuple (token, expiration_time) where expiration_time is seconds since the epoch.
codesearchnet
def FindCheckMacro(line):
    """Find a replaceable CHECK-like macro on a line.

    Args:
        line: line to search on.

    Returns:
        (macro name, start position), or (None, -1) if no replaceable
        macro is found.
    """
    for macro in _CHECK_MACROS:
        position = line.find(macro)
        if position < 0:
            continue
        # Confirm the macro is a whole word followed by an opening paren
        # (find() alone can hit substrings of longer identifiers).
        matched = Match('^(.*\\b' + macro + '\\s*)\\(', line)
        if matched:
            return (macro, len(matched.group(1)))
    return (None, -1)
Find a replaceable CHECK-like macro. Args: line: line to search on. Returns: (macro name, start position), or (None, -1) if no replaceable macro is found.
codesearchnet
def set_approvers(self, approver_ids=None, approver_group_ids=None, **kwargs):
    """Change MR-level allowed approvers and approver groups.

    Args:
        approver_ids (list): User IDs that can approve MRs
        approver_group_ids (list): Group IDs whose members can approve MRs

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabUpdateError: If the server failed to perform the request
    """
    # Fix: avoid mutable default arguments ([]); None stands in for
    # "no approvers" and is normalized here, preserving prior behavior.
    approver_ids = [] if approver_ids is None else approver_ids
    approver_group_ids = [] if approver_group_ids is None else approver_group_ids
    path = ('%s/%s/approvers' % (self._parent.manager.path, self._parent.get_id()))
    data = {'approver_ids': approver_ids, 'approver_group_ids': approver_group_ids}
    self.gitlab.http_put(path, post_data=data, **kwargs)
Change MR-level allowed approvers and approver groups. Args: approver_ids (list): User IDs that can approve MRs approver_group_ids (list): Group IDs whose members can approve MRs Raises: GitlabAuthenticationError: If authentication is not correct GitlabUpdateError: If the server failed to perform the request
codesearchnet
def is_feature_enabled(self, feature_key, user_id, attributes=None):
    """Return True if the feature is enabled for the given user.

    Args:
        feature_key: The key of the feature for which we are determining if
            it is enabled or not for the given user.
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        True if the feature is enabled for the user. False otherwise
        (including on any invalid input or missing feature).
    """
    # Input validation: each failure is logged and short-circuits to False.
    if (not self.is_valid):
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('is_feature_enabled'))
        return False
    if (not validator.is_non_empty_string(feature_key)):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('feature_key'))
        return False
    if (not isinstance(user_id, string_types)):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return False
    if (not self._validate_user_inputs(attributes)):
        return False
    feature = self.config.get_feature_from_key(feature_key)
    if (not feature):
        return False
    feature_enabled = False
    source_info = {}
    # Ask the decision service which experiment/variation (if any) applies.
    decision = self.decision_service.get_variation_for_feature(feature, user_id, attributes)
    is_source_experiment = (decision.source == enums.DecisionSources.FEATURE_TEST)
    if decision.variation:
        if (decision.variation.featureEnabled is True):
            feature_enabled = True
        # Impression events are only sent when the decision came from a
        # feature test (experiment), not from a rollout.
        if is_source_experiment:
            source_info = {'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key}
            self._send_impression_event(decision.experiment, decision.variation, user_id, attributes)
    if feature_enabled:
        self.logger.info(('Feature "%s" is enabled for user "%s".' % (feature_key, user_id)))
    else:
        self.logger.info(('Feature "%s" is not enabled for user "%s".' % (feature_key, user_id)))
    # Notify listeners of the decision regardless of the outcome.
    self.notification_center.send_notifications(enums.NotificationTypes.DECISION, enums.DecisionNotificationTypes.FEATURE, user_id, (attributes or {}), {'feature_key': feature_key, 'feature_enabled': feature_enabled, 'source': decision.source, 'source_info': source_info})
    return feature_enabled
Returns true if the feature is enabled for the given user. Args: feature_key: The key of the feature for which we are determining if it is enabled or not for the given user. user_id: ID for user. attributes: Dict representing user attributes. Returns: True if the feature is enabled for the user. False otherwise.
codesearchnet
def add_c_function(self, c_func):
    """Add a C API TF_Function to the context.

    Once added, the function (identified by its name) can be executed like
    any other operation.

    Args:
        c_func: A wrapped TF_Function (returned from
            TF_GraphToFunction_wrapper).
    """
    # The context handle must exist before registering the function.
    self.ensure_initialized()
    pywrap_tfe.TFE_ContextAddFunction(self._handle, c_func)
Add a C API TF_Function to the context. Once added, the function (identified by its name) can be executed like any other operation. Args: c_func: A wrapped TF_Function (returned from TF_GraphToFunction_wrapper).
github-repos
def _handle_emailauth(maildomain='', message=''):
    """Prompt the user for the SteamGuard code sent via e-mail.

    Args:
        maildomain: Optional. The mail domain of the e-mail address the
            SteamGuard code is sent to.
        message: Optional. A message from the Steam service (currently unused
            here; kept for interface compatibility).

    Returns:
        A string containing the code, upper-cased.
    """
    print('SteamGuard requires email authentication...')
    emailauth = input('Please enter the code sent to your mail address at "%s": ' % maildomain)
    # Bug fix: str.upper() returns a new string; the original discarded the
    # result and returned the code exactly as typed.
    return emailauth.upper()
Called when SteamGuard requires authentication via e-mail. Asks the user to enter the code. Args: maildomain: Optional. The mail domain of the e-mail address the SteamGuard code is send to. message: Optional. A message from Steam service. Returns: A string containing the code.
juraj-google-style
def plot_tcoords(array, coords, scantypes=None, ax=None, **kwargs):
    """Plot coordinates related to the time axis.

    Args:
        array (xarray.DataArray): Array which includes the coordinate
            information.
        coords (list): Names of the x-axis and y-axis coordinates.
        scantypes (list): Scantypes to plot. If None, all scantypes are
            plotted together as a single 'ALL' series.
        ax (matplotlib.axes): Axis to plot on. Defaults to the current axis.
        kwargs (optional): Plot options passed to ax.plot().
    """
    if ax is None:
        ax = plt.gca()
    if scantypes is None:
        # Single series over the whole array.
        ax.plot(array[coords[0]], array[coords[1]], label='ALL', **kwargs)
    else:
        # One series per requested scantype, selected by boolean mask.
        for scantype in scantypes:
            ax.plot(array[coords[0]][array.scantype == scantype],
                    array[coords[1]][array.scantype == scantype],
                    label=scantype, **kwargs)
    ax.set_xlabel(coords[0])
    ax.set_ylabel(coords[1])
    ax.set_title('{} vs {}'.format(coords[1], coords[0]))
    ax.legend()
    logger.info('{} vs {} has been plotted.'.format(coords[1], coords[0]))
Plot coordinates related to the time axis. Args: array (xarray.DataArray): Array in which the coordinate information is included. coords (list): Name of x axis and y axis. scantypes (list): Scantypes. If None, all scantypes are used. ax (matplotlib.axes): Axis you want to plot on. kwargs (optional): Plot options passed to ax.plot().
juraj-google-style
def cctop_submit(seq_str):
    """Submit a protein sequence string to CCTOP and return the job ID.

    Args:
        seq_str (str): Protein sequence as a string

    Returns:
        str: Job ID on the CCTOP server
    """
    # NOTE(review): the URL literal below appears truncated (likely mangled
    # by extraction) — as written this is a syntax error and seq_str is never
    # sent; restore the full CCTOP submission URL with the sequence parameter.
    url = 'http:
    r = requests.post(url)
    # The job ID follows the 'ID: ' marker in the response body.
    jobid = r.text.split('ID: ')[1]
    return jobid
Submit a protein sequence string to CCTOP and return the job ID. Args: seq_str (str): Protein sequence as a string Returns: str: Job ID on the CCTOP server
juraj-google-style
def Uninstall(self, package_name, keep_data=False, timeout_ms=None):
    """Remove a package from the device via `pm uninstall`.

    Args:
        package_name: Package name of target package.
        keep_data: whether to keep the data and cache directories
        timeout_ms: Expected timeout for the shell command.

    Returns:
        The pm uninstall output.
    """
    parts = ['pm uninstall']
    if keep_data:
        parts.append('-k')  # keep the app's data and cache directories
    parts.append('"%s"' % package_name)
    command = ' '.join(parts)
    return self.Shell(command, timeout_ms=timeout_ms)
Removes a package from the device. Args: package_name: Package name of target package. keep_data: whether to keep the data and cache directories timeout_ms: Expected timeout for pushing and installing. Returns: The pm uninstall output.
juraj-google-style
def latest_vcf_filename(build):
    """Determine the filename of the most recent comprehensive ClinVar VCF.

    Connects anonymously to the NCBI FTP site and lists the VCF directory
    for the requested genome build.

    Args:
        build: (type: string) genome build, either 'b37' or 'b38'

    Returns:
        (type: string) Filename of the most recent comprehensive ClinVar VCF.

    Raises:
        IOError: if exactly one date-stamped clinvar VCF cannot be found.
    """
    ftp = FTP('ftp.ncbi.nlm.nih.gov')
    ftp.login()
    # Navigate to the per-build VCF directory (project helper).
    nav_to_vcf_dir(ftp, build=build)
    # Date-stamped comprehensive files look like clinvar_YYYYMMDD.vcf.gz.
    clinvar_datestamped = [f for f in ftp.nlst() if re.match('^clinvar_[0-9]{8}.vcf.gz$', f)]
    if len(clinvar_datestamped) == 1:
        return clinvar_datestamped[0]
    raise IOError("Unable to determine the most recent ClinVar VCF file on " +
                  "NCBI's FTP site.")
Determine the filename for the most recent comprehensive ClinVar VCF. Args: build: (type: string) genome build, either 'b37' or 'b38' Returns: (type: string) Filename of the most recent comprehensive ClinVar VCF.
juraj-google-style