code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def search(self, search_phrase, limit=None):
    """Find partitions by search phrase.

    Args:
        search_phrase (str or unicode): phrase to search for; '-' is
            normalized to '_' before parsing.
        limit (int, optional): how many results to generate. None means
            no limit.

    Yields:
        PartitionSearchResult: matching partitions, filtered by any
        from/to year terms in the phrase.
    """
    search_phrase = search_phrase.replace('-', '_')
    terms = SearchTermParser().parse(search_phrase)
    # Year-range terms are handled in Python, not in the SQL query.
    from_year = terms.pop('from', None)
    to_year = terms.pop('to', None)
    query, query_params = self._make_query_from_terms(terms)
    self._parsed_query = (query, query_params)
    connection = self.backend.library.database.connection
    connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))
    results = connection.execute(query, query_params).fetchall()
    yielded = 0
    for vid, dataset_vid, score, db_from_year, db_to_year in results:
        if from_year and from_year < db_from_year:
            continue
        if to_year and to_year > db_to_year:
            continue
        # BUG FIX: the original accepted `limit` but never used it.
        if limit is not None and yielded >= limit:
            return
        yielded += 1
        yield PartitionSearchResult(vid=vid, dataset_vid=dataset_vid, score=score)
juraj-google-style
def sg_lookup(tensor, opt):
    r"""Look up rows of the embedding matrix `opt.emb` by id.

    Args:
        tensor: An id tensor (automatically given by chain).
        opt:
            emb: A 2-D `Tensor`, the embedding matrix (mandatory).
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor` of embeddings.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
juraj-google-style
def __driver_completer(self, toks, text, state):
    """Driver-level readline completer.

    Args:
        toks: A list of tokens, tokenized from the original input line.
        text: A string, the text to be replaced if a completion candidate
            is chosen.
        state: An integer, the index of the candidate out of the list of
            candidates.

    Returns:
        A string, the candidate at index `state`.
    """
    # readline calls this repeatedly with increasing `state`; candidates
    # are only recomputed on the first call (state == 0).
    if (state != 0):
        return self.__completion_candidates[state]
    # Completing the command name itself: no tokens yet, or the cursor is
    # still inside the first token.
    if ((not toks) or ((len(toks) == 1) and (text == toks[0]))):
        try:
            self.__completion_candidates = self.__complete_cmds(text)
        except:
            # Report the error on stderr instead of letting readline
            # swallow it silently.
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
        return self.__completion_candidates[state]
    cmd = toks[0]
    args = (toks[1:] if (len(toks) > 1) else None)
    # If the user is mid-token, the partial token is `text`, not an arg.
    if (text and args):
        del args[(- 1)]
    # Dispatch to the command-specific completer method, if registered.
    if (cmd in self._completer_map.keys()):
        completer_name = self._completer_map[cmd]
        completer_method = getattr(self, completer_name)
        try:
            self.__completion_candidates = completer_method(cmd, args, text)
        except:
            self.stderr.write('\n')
            self.stderr.write(traceback.format_exc())
            self.__completion_candidates = []
    else:
        self.__completion_candidates = []
    return self.__completion_candidates[state]
codesearchnet
def get_version(package_name):
    """Find __version__ of a prosper sub-package.

    Args:
        package_name (str): sub-package name under the `prosper` namespace.

    Returns:
        str: the package's __version__ value.
    """
    version_module = importlib.import_module('prosper.' + package_name + '._version')
    return version_module.__version__
juraj-google-style
def delaunay_reduce(lattice, eps=1e-05):
    """Run Delaunay reduction.

    Args:
        lattice: Lattice parameters [[a_x, a_y, a_z], [b_x, b_y, b_z],
            [c_x, c_y, c_z]].
        eps (float): Tolerance for the near-zero volume and orthogonality
            checks.

    Returns:
        Reduced lattice parameters as a numpy 'double' array in the same
        row layout, or None if the reduction failed.
    """
    _set_no_error()
    # spglib works on column vectors, hence the transposes in and out.
    columns = np.array(np.transpose(lattice), dtype='double', order='C')
    status = spg.delaunay_reduce(columns, float(eps))
    _set_error_message()
    if status == 0:
        return None
    return np.array(np.transpose(columns), dtype='double', order='C')
codesearchnet
def clear_worker_output(self):
    """Drop all of the worker output collections, then reload plugins and
    refresh stored information.
    """
    self.data_store.clear_worker_output()
    self.plugin_manager.load_all_plugins()
    self._store_information()
juraj-google-style
def port(alias_name, default=None, allow_none=False):
    """Get the port from the docker link alias or return the default.

    Args:
        alias_name: The docker link alias.
        default: The default value if the link isn't available.
        allow_none: If the return value can be `None` (i.e. optional).

    Examples:
        With ``docker --link postgres:db`` and the resulting environment
        variable ``DB_PORT=tcp://172.17.0.82:5432``:

        >>> envitro.docker.port('DB')
        5432
    """
    warnings.warn('Will be removed in v1.0', DeprecationWarning, stacklevel=2)
    try:
        return int(_split_docker_link(alias_name)[2])
    except KeyError:
        # BUG FIX: `if (default or allow_none)` re-raised whenever
        # `default` was falsy (e.g. 0), even though 0 is a valid default.
        if default is not None or allow_none:
            return default
        raise
codesearchnet
def decode_list_oov(self, ids, source_oov_id_to_token):
    """Decode vocab ids back to tokens, resolving temporary OOV ids.

    Args:
        ids: vocab ids; may include source temporary OOV ids starting
            from vocab_size.
        source_oov_id_to_token: source OOV tokens, ordered as they appear
            in the source.

    Returns:
        Decoded tokens, possibly including source OOV tokens.
    """
    ordered = reversed(ids) if self._reverse else ids
    return [
        self._id_to_token[i] if i in self._id_to_token
        else source_oov_id_to_token[i - self.vocab_size]
        for i in ordered
    ]
juraj-google-style
def add_loss(self, loss, name=None, regularization=False, add_summaries=True):
    """Append a loss to the total loss for the network.

    Args:
        loss: the loss operation to append.
        name: unused; kept for interface compatibility.
        regularization: True if this is a regularization loss.
        add_summaries: True to emit scalar and average summaries.
    """
    del name  # unused, kept for API compatibility
    if regularization:
        self._g.add_to_collection(GraphKeys.REGULARIZATION_LOSSES, loss)
    tf.add_to_collection(GraphKeys.LOSSES, loss)
    if add_summaries:
        self.add_scalar_summary(loss, 'loss')
        self.add_average_summary(loss, 'loss_average')
codesearchnet
def import_subview(self, idx, subview):
    """Add the given subview to the corpus.

    Args:
        idx (str): corpus-unique id for the subview; an existing subview
            with the same id is overridden.
        subview (Subview): the subview to add; its `corpus` backref is
            set to this corpus.
    """
    self._subviews[idx] = subview
    subview.corpus = self
juraj-google-style
def _set_verbosity_from(posarg):
    """Decorator to set the verbosity for a function that takes an options arg.

    Assumes the wrapped function has an argument named `options` that is a
    config.Options object, passed either positionally at index `posarg` or
    as a keyword.

    Args:
        posarg: The index of `options` in the positional arguments.

    Returns:
        The decorator.
    """
    def decorator(f):
        def wrapper(*args, **kwargs):
            # BUG FIX: the original used kwargs.get('options', args[posarg]),
            # which evaluates args[posarg] eagerly and raises IndexError
            # whenever `options` is passed as a keyword with few positionals.
            if 'options' in kwargs:
                options = kwargs['options']
            else:
                options = args[posarg]
            with config.verbosity_from(options):
                return f(*args, **kwargs)
        return wrapper
    return decorator
github-repos
def default_search_space():
    """The default search space in NAS-Bench.

    Mutates candidate ops and their connections: an upper-triangular
    adjacency matrix of optional edges plus a list of interior ops.

    Returns:
        A hyper model object that represents the search space.
    """
    adjacency = [
        [pg.oneof([0, 1], hints=EDGE_HINTS) if col > row else 0
         for col in range(NUM_VERTICES)]
        for row in range(NUM_VERTICES)
    ]
    return model_spec(
        pg.manyof(NUM_VERTICES - 2, ALLOWED_OPS,
                  choices_distinct=False, hints=OP_HINTS),
        adjacency)
github-repos
def _is_definition_section(source):
    """Determine if the source is a definition section of a usage string.

    Args:
        source: The usage string source that may be a section.

    Returns:
        True if every line after the header matches the two-column
        definition layout; otherwise, False.
    """
    try:
        body = textwrap.dedent(source).split('\n', 1)[1]
    except IndexError:
        # No body after the header line.
        return False
    pattern = '\\s\\s+((?!\\s\\s).+)\\s\\s+.+'
    return all(re.match(pattern, line) for line in body.splitlines())
codesearchnet
def pkg_config(pkg_libraries):
    """Use pkg-config to query for libraries, library dirs and include dirs.

    Args:
        pkg_libraries (list): package names as strings.

    Returns:
        tuple: (libraries, library_dirs, include_dirs), each a list.
    """
    libraries, library_dirs, include_dirs = [], [], []
    # Fail fast if any requested package is unknown to pkg-config.
    for pkg in pkg_libraries:
        if os.system('pkg-config --exists %s 2>/dev/null' % pkg) != 0:
            print("Could not find library {0}".format(pkg))
            sys.exit(1)
    if len(pkg_libraries) > 0:
        cmd = ("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s"
               % ' '.join(pkg_libraries))
        for token in getoutput(cmd).split():
            if token.startswith("-l"):
                libraries.append(token[2:])
            elif token.startswith("-L"):
                library_dirs.append(token[2:])
            elif token.startswith("-I"):
                include_dirs.append(token[2:])
    return libraries, library_dirs, include_dirs
juraj-google-style
def add_columns(tree_view, df_py_dtypes, list_store):
    """Add columns to a `gtk.TreeView` for the types listed in `df_py_dtypes`.

    Args:
        tree_view (gtk.TreeView): Tree view to append columns to.
        df_py_dtypes (pandas.DataFrame): Data frame containing type
            information (`i`, `dtype`) for one or more columns in
            `list_store`.
        list_store (gtk.ListStore): Model data.

    Returns:
        None

    Raises:
        ValueError: if a dtype has no matching cell renderer.
    """
    tree_view.set_model(list_store)
    for (column_i, (i, dtype_i)) in df_py_dtypes[['i', 'dtype']].iterrows():
        tree_column_i = gtk.TreeViewColumn(column_i)
        tree_column_i.set_name(column_i)
        # Pick a renderer and render property matching the column dtype.
        # NOTE(review): `long` implies Python 2 — confirm before porting.
        if (dtype_i in (int, long)):
            property_name = 'text'
            cell_renderer_i = gtk.CellRendererSpin()
        elif (dtype_i == float):
            property_name = 'text'
            cell_renderer_i = gtk.CellRendererSpin()
        elif (dtype_i in (bool,)):
            property_name = 'active'
            cell_renderer_i = gtk.CellRendererToggle()
        elif (dtype_i in (str,)):
            property_name = 'text'
            cell_renderer_i = gtk.CellRendererText()
        else:
            raise ValueError(('No cell renderer for dtype: %s' % dtype_i))
        # Stash the model column index and owning column on the renderer
        # so edit handlers can find them later.
        cell_renderer_i.set_data('column_i', i)
        cell_renderer_i.set_data('column', tree_column_i)
        tree_column_i.pack_start(cell_renderer_i, True)
        tree_column_i.add_attribute(cell_renderer_i, property_name, i)
        tree_view.append_column(tree_column_i)
codesearchnet
def _bfs_path_states(self, graph, start):
    """Find state access strings (DFA shortest paths for every state) using BFS.

    Args:
        graph (DFA): The DFA states.
        start: The DFA initial state.

    Returns:
        dict: mapping of state id -> access string (concatenation of the
        arc input labels along the shortest path from `start`).
    """
    pathstates = {}
    queue = []
    visited = []
    # Each queue entry is a path: a list of [input_char, state] pairs.
    queue.append([['', start]])
    while queue:
        path = queue.pop(0)
        node = path[-1][1]
        # NOTE(review): stateid == len(states) appears to denote a
        # sink/dead state that is deliberately excluded — confirm.
        if node.stateid not in pathstates and node.stateid != len(list(graph.states)):
            pathstates[node.stateid] = ''.join(
                [mnode[0] for mnode in path])
            visited.append(node.stateid)
        for arc in node.arcs:
            char = graph.isyms.find(arc.ilabel)
            next_state = graph[arc.nextstate]
            if next_state.stateid not in visited:
                new_path = list(path)
                new_path.append([char, next_state])
                queue.append(new_path)
    return pathstates
juraj-google-style
def dataframe(self, force_refresh=False):
    """A pandas dataframe summarizing this object, fetched lazily and cached.

    Args:
        force_refresh (bool): True to clear the cache and fetch the latest
            data from the SageMaker API.

    Returns:
        The cached (or freshly fetched) dataframe.
    """
    if force_refresh:
        self.clear_cache()
    if self._dataframe is None:
        # Populate the cache on first access (or after a refresh).
        self._dataframe = self._fetch_dataframe()
    return self._dataframe
juraj-google-style
def wind_direction(self, value=999.0):
    """Set IDD Field `wind_direction` (degrees, 0.0..360.0).

    Args:
        value (float): value for the field; 999.0 is the missing-value
            marker. None skips validation and is stored as-is.

    Raises:
        ValueError: if `value` is not coercible to float or out of range.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float for field `wind_direction`'.format(value))
        if value < 0.0:
            raise ValueError('value need to be greater or equal 0.0 for field `wind_direction`')
        if value > 360.0:
            raise ValueError('value need to be smaller 360.0 for field `wind_direction`')
    self._wind_direction = value
codesearchnet
def size(self, name=None):
    """Compute the number of elements in this table.

    Args:
        name: A name for the operation (optional).

    Returns:
        A scalar tensor containing the number of elements in this table.
    """
    default_scope = '%s_Size' % self.name
    with ops.name_scope(name, default_scope, [self.resource_handle]):
        with ops.colocate_with(self.resource_handle):
            return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)
github-repos
def _add_dispatcher(self, path_regex, dispatch_function):
    """Register a dispatch handler for request paths matching `path_regex`.

    Args:
        path_regex: regex string matched against incoming request paths.
        dispatch_function: callable taking (request, start_response) and
            returning the response body for matching requests.
    """
    compiled = re.compile(path_regex)
    self._dispatchers.append((compiled, dispatch_function))
codesearchnet
def add_group_maintainer(self, name, user):
    """Add the given user as a maintainer of the named group.

    Both group and user must already exist for this to succeed.

    Args:
        name (string): Name of group.
        user (string): User to add to group.

    Raises:
        requests.HTTPError: on failure.
    """
    self.service.add_group_maintainer(
        name, user,
        self.url_prefix, self.auth, self.session, self.session_send_opts)
juraj-google-style
def check(self, check_req):
    """Process a check request, preferring the aggregator cache.

    The request is first passed to the check aggregator. A valid cached
    response is returned directly; otherwise a response is obtained from
    the transport and added to the cache.

    Args:
        check_req (``ServicecontrolServicesCheckRequest``): request to be
            sent to the service control service.

    Returns:
        ``CheckResponse``: the cached response if applicable, a response
        from the transport, or None if the transport request fails.
    """
    self.start()
    cached = self._check_aggregator.check(check_req)
    if cached:
        # BUG FIX: the log calls referenced the undefined name
        # `check_request`, raising NameError; the parameter is `check_req`.
        _logger.debug(u'using cached check response for %s: %s',
                      check_req, cached)
        return cached
    try:
        transport = self._create_transport()
        resp = transport.services.Check(check_req)
        self._check_aggregator.add_response(check_req, resp)
        return resp
    except exceptions.Error:
        _logger.error(u'direct send of check request failed %s',
                      check_req, exc_info=True)
        return None
codesearchnet
def with_context(cls, setup_phases, teardown_phases):
    """Create a PhaseGroup factory with fixed setup and teardown phases.

    Args:
        setup_phases: phases (descriptors/groups/callables/iterables) to
            run during setup of groups produced by the returned function.
        teardown_phases: phases to run during teardown of groups produced
            by the returned function.

    Returns:
        Function that takes *phases and returns a PhaseGroup with the
        predefined setup and teardown, with *phases as the main phases.
    """
    fixed_setup = flatten_phases_and_groups(setup_phases)
    fixed_teardown = flatten_phases_and_groups(teardown_phases)

    def _context_wrapper(*phases):
        return cls(setup=fixed_setup,
                   main=flatten_phases_and_groups(phases),
                   teardown=fixed_teardown)
    return _context_wrapper
codesearchnet
def period(self, value: float):
    """Set the period.

    Args:
        value (float): seconds; must be non-negative.

    Raises:
        ValueError: if `value` is negative.
    """
    if value < 0:
        raise ValueError("Period must be greater or equal than zero.")
    self._period = timedelta(seconds=value)
juraj-google-style
def ReverseCloseExpression(clean_lines, linenum, pos):
    """If input points to ) or } or ] or >, find the position that opens it.

    Scans backwards across lines, tracking unmatched closers in `stack`,
    until the matching opener is found.

    Args:
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        pos: A position on the line.

    Returns:
        A tuple (line, linenum, pos) pointing *at* the opening brace, or
        (line, 0, -1) if no matching opener is found.
    """
    line = clean_lines.elided[linenum]
    if line[pos] not in ')}]>':
        return (line, 0, -1)
    start_pos, stack = FindStartOfExpressionInLine(line, pos, [])
    if start_pos > -1:
        return (line, linenum, start_pos)
    # Walk backwards through earlier lines until the stack is resolved.
    while stack and linenum > 0:
        linenum -= 1
        line = clean_lines.elided[linenum]
        start_pos, stack = FindStartOfExpressionInLine(line, len(line) - 1, stack)
        if start_pos > -1:
            return (line, linenum, start_pos)
    return (line, 0, -1)
codesearchnet
def __init__(self, package, ad):
    """Initializes a SnippetClient.

    Args:
        package: (str) The package name of the apk where the snippets are
            defined.
        ad: (AndroidDevice) the device object associated with this client.
    """
    super().__init__(app_name=package, ad=ad)
    self.package = package
    # Cache device handles; process and user id are resolved lazily.
    self._ad = ad
    self._adb = ad.adb
    self._proc = None
    self._user_id = None
github-repos
def nb_ll_row(params, data_row):
    """Return the negative log-likelihood of a single row under a
    negative-binomial model.

    Args:
        params (array): [p, r] — success probability and dispersion.
        data_row (array): 1d array of data.

    Returns:
        float: negative log-likelihood of the row.
    """
    p, r = params[0], params[1]
    n = len(data_row)
    ll = np.sum(gammaln(data_row + r)) - np.sum(gammaln(data_row + 1))
    ll -= n * gammaln(r)
    ll += np.sum(data_row) * np.log(p)
    ll += n * r * np.log(1 - p)
    return -ll
codesearchnet
def decrypt(key, ciphertext):
    """Decrypt Vigenere-encrypted ``ciphertext`` using ``key``.

    Punctuation, whitespace and digits pass through unchanged and do not
    advance the key position.

    Example:
        >>> decrypt("KEY", "RIJVS")
        HELLO

    Args:
        key (iterable): The key to use.
        ciphertext (str): The text to decrypt.

    Returns:
        Decrypted ciphertext.
    """
    passthrough = string.punctuation + string.whitespace + string.digits
    key_pos = 0
    plain = []
    for char in ciphertext:
        if char in passthrough:
            plain.append(char)
            continue
        alphabet = (string.ascii_uppercase if key[key_pos].isupper()
                    else string.ascii_lowercase)
        plain.append(''.join(shift.decrypt(int(alphabet.index(key[key_pos])), char)))
        key_pos = (key_pos + 1) % len(key)
    return ''.join(plain)
codesearchnet
def add_send_last_message(self, connection, send_last_message):
    """Register a send-last-message callback for a connection.

    Args:
        connection (str): locally unique identifier provided by the
            receiver of messages.
        send_last_message (fn): called by the dispatcher to respond to
            messages arriving via `connection` when the connection should
            be closed after the message has been sent.
    """
    self._send_last_message[connection] = send_last_message
    LOGGER.debug('Added send_last_message function for connection %s', connection)
codesearchnet
def __setitem__(self, key, item):
    """Set a column or row for a dataset.

    Args:
        key (str or int or slice): string referencing a column, integer or
            slice referencing row(s).
        item (list or dict): column values, a row dict, or a list of row
            dicts to set in the dataset.

    Raises:
        ValueError: if a row is not a dict.
        TypeError: if `key` is not a str, slice or int.
    """
    if isinstance(key, str):
        # Column assignment: pad rows or pad the column with None so
        # lengths line up.
        column = item
        self.columns.add(key)
        if len(column) > len(self.rows):
            # More values than rows: extend with new single-key rows.
            for i, value in enumerate(column):
                if i < len(self.rows):
                    self.rows[i][key] = value
                else:
                    self.rows.append({key: value})
        else:
            # Fewer values than rows: missing cells become None.
            for i, row in enumerate(self.rows):
                if i < len(column):
                    self.rows[i][key] = column[i]
                else:
                    self.rows[i][key] = None
    elif isinstance(key, slice):
        # Multi-row assignment: validate and record any new column names.
        rows = item
        for row in rows:
            if not isinstance(row, dict):
                raise ValueError('Row must be a dict.')
            self.columns.update(row.keys())
        self.rows[key] = rows
    elif isinstance(key, int):
        # Single-row assignment.
        row = item
        if not isinstance(row, dict):
            raise ValueError('Row must be a dict.')
        self.columns.update(row.keys())
        self.rows[key] = row
    else:
        raise TypeError('Invalid argument type.')
juraj-google-style
def create_in_hdx(self):
    """Check if resource exists in HDX; update it if so, otherwise create it.

    Returns:
        None
    """
    self.check_required_fields()
    resource_id = self.data.get('id')
    if resource_id and self._load_from_hdx('resource', resource_id):
        logger.warning('%s exists. Updating %s' % ('resource', resource_id))
        # A freshly uploaded file supersedes any stored URL.
        if self.file_to_upload and 'url' in self.data:
            del self.data['url']
        self._merge_hdx_update('resource', 'id', self.file_to_upload)
    else:
        self._save_to_hdx('create', 'name', self.file_to_upload)
codesearchnet
def molecule(lines):
    """Parse a molfile part into a Compound object.

    Args:
        lines (list): lines of the molfile part; line 4 is the counts
            line (atom count in cols 0-3, bond count in cols 3-6).

    Raises:
        ValueError: symbol not defined in periodictable.yaml
            (polymer expressions not supported yet).
    """
    counts_line = lines[3]
    num_atoms = int(counts_line[0:3])
    num_bonds = int(counts_line[3:6])
    atom_end = num_atoms + 4
    bond_end = atom_end + num_bonds
    compound = Compound()
    compound.graph._node = atoms(lines[4:atom_end])
    compound.graph._adj = bonds(lines[atom_end:bond_end], compound.graph._node.keys())
    add_properties(properties(lines[bond_end:]), compound)
    return compound
codesearchnet
def most_frequent_terms(self, depth):
    """Get the `depth` most frequent terms, extended with all other terms
    tied with the last term's count.

    Args:
        depth (int): The number of terms.

    Returns:
        set: The set of frequent terms.
    """
    counts = self.term_counts()
    leading = list(counts.keys())[:depth]
    # Count of the last term inside the depth cutoff.
    cutoff_count = list(counts.values())[:depth][-1]
    tied = self.term_count_buckets()[cutoff_count]
    return set(leading).union(tied)
codesearchnet
def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue):
    """Add the following state to the analysis queue.

    Assume the current state is `previous_node`, reached with `penalty`.
    Insert a line break if `newline` is True.

    Arguments:
        penalty: (int) penalty of the path up to this point.
        previous_node: (_StateNode) last node inserted into the queue.
        newline: (bool) add a newline if True.
        count: (int) number of elements in the queue.
        p_queue: (heapq) priority queue representing the solution space.

    Returns:
        The updated number of elements in the queue.
    """
    must_split = previous_node.state.MustSplit()
    # Reject states that contradict the split constraints.
    if newline and not previous_node.state.CanSplit(must_split):
        return count
    if not newline and must_split:
        return count
    node = _StateNode(previous_node.state, newline, previous_node)
    penalty += node.state.AddTokenToState(
        newline=newline, dry_run=True, must_split=must_split)
    # `count` breaks penalty ties to keep ordering deterministic.
    heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node))
    return count + 1
github-repos
def _save_model(self):
    """Save the model weights plus resume metadata to the backup dir.

    Writes weights to `self._weights_path` and a JSON blob with the
    current epoch/batch to `self._training_metadata_path`. When
    `self.double_checkpoint` is set, the previous weight/metadata files
    are first copied aside so an interrupted write cannot lose both
    copies.
    """
    if not file_utils.exists(self.backup_dir):
        file_utils.makedirs(self.backup_dir)
    # Preserve the previous checkpoint before overwriting.
    if self.double_checkpoint and file_utils.exists(self._weights_path):
        file_utils.copy(self._weights_path, self._prev_weights_path)
    if self.double_checkpoint and file_utils.exists(self._training_metadata_path):
        file_utils.copy(self._training_metadata_path, self._prev_training_metadata_path)
    self.model.save_weights(filepath=self._weights_path, overwrite=True)
    with file_utils.File(self._training_metadata_path, 'w') as f:
        training_metadata = {'epoch': self._current_epoch, 'batch': self._last_batch_seen}
        f.write(json.dumps(training_metadata))
github-repos
def ensure_tf_install():
    """Attempt to import tensorflow, and ensure its version is sufficient.

    Raises:
        ImportError: if either tensorflow is not importable or its version
            is inadequate.
    """
    try:
        import tensorflow as tf
    except ImportError:
        # BUG FIX: the message string literal was truncated/unterminated in
        # the source ("...at https: raise"); reconstructed and the original
        # exception is now properly re-raised.
        print('\n\nFailed to import TensorFlow. Please note that TensorFlow '
              'is not installed by default when you install TensorFlow '
              'Datasets. This is so that users can decide whether to install '
              'the GPU-enabled TensorFlow package. To use TensorFlow '
              'Datasets, please install the most recent version of '
              'TensorFlow, by following instructions at '
              'https://tensorflow.org/install.\n\n')
        raise
    tf_version = distutils.version.LooseVersion(tf.__version__)
    v_1_12 = distutils.version.LooseVersion('1.12.0')
    if tf_version < v_1_12:
        raise ImportError(
            'This version of TensorFlow Datasets requires TensorFlow version '
            '>= {required}; Detected an installation of version {present}. '
            'Please upgrade TensorFlow to proceed.'.format(
                required='1.12.0', present=tf.__version__))
    _patch_tf(tf)
codesearchnet
def __init__(self, name="", default=None, description="", friendly_name="",
             hidden=False):
    """Build a TypeInfo type descriptor.

    Args:
        name: The name of the parameter that this Type info corresponds to.
        default: The default value if the parameter was not set.
        description: A string describing this flow argument.
        friendly_name: A human readable name which may be provided.
        hidden: Should the argument be hidden from the UI.
    """
    self.name = name
    self.default = default
    self.description = description
    self.hidden = hidden
    # Derive a human-readable name when none was supplied.
    self.friendly_name = friendly_name or name.replace("_", " ").capitalize()
juraj-google-style
def CreateDataTypeMap(self, definition_name):
    """Creates a specific data type map by name.

    Args:
        definition_name (str): name of the data type definition.

    Returns:
        DataTypeMap: data type map, or None if the data type definition
        is not available.
    """
    definition = self._definitions_registry.GetDefinitionByName(definition_name)
    if not definition:
        return None
    return DataTypeMapFactory.CreateDataTypeMapByType(definition)
juraj-google-style
def memory_write64(self, addr, data, zone=None):
    """Writes 64-bit long words to memory of a target system (little-endian:
    low 32-bit word first).

    Args:
        addr (int): start address to write to.
        data (list): list of long words to write.
        zone (str): optional memory zone to access.

    Returns:
        Number of long words written to target.

    Raises:
        JLinkException: on memory access error.
    """
    mask = 0xFFFFFFFF
    words = [half
             for value in data
             for half in (value & mask, (value >> 32) & mask)]
    return self.memory_write32(addr, words, zone=zone)
juraj-google-style
def gripper_factory(name):
    """Generator for grippers: instantiate the Gripper class named `name`.

    Args:
        name: the name of the gripper class.

    Returns:
        gripper: Gripper instance.

    Raises:
        ValueError: if the name is not a known gripper.
    """
    # Lambdas defer both name resolution and instantiation to lookup time.
    builders = {
        "TwoFingerGripper": lambda: TwoFingerGripper(),
        "LeftTwoFingerGripper": lambda: LeftTwoFingerGripper(),
        "PR2Gripper": lambda: PR2Gripper(),
        "RobotiqGripper": lambda: RobotiqGripper(),
        "PushingGripper": lambda: PushingGripper(),
        "RobotiqThreeFingerGripper": lambda: RobotiqThreeFingerGripper(),
    }
    if name in builders:
        return builders[name]()
    raise ValueError("Unkown gripper name {}".format(name))
juraj-google-style
def plot_kdes(self, mnemonic, alias=None, uwi_regex=None, return_fig=False):
    """Plot KDEs for all curves with the given name, one axis per well.

    Args:
        mnemonic (str): the name of the curve to look for.
        alias (dict): a welly alias dictionary.
        uwi_regex (str): a regex pattern; only group 1 of the UWI is
            displayed on the plot of KDEs.
        return_fig (bool): whether to return the matplotlib figure object.

    Returns:
        None or figure.
    """
    wells = self.find_wells_with_curve(mnemonic, alias=alias)
    (fig, axs) = plt.subplots(len(self), 1, figsize=(10, (1.5 * len(self))))
    curves = [w.get_curve(mnemonic, alias=alias) for w in wells]
    # Shared x-limits for all axes: 1st..99th percentile over all samples.
    all_data = np.hstack(curves)
    all_data = all_data[(~ np.isnan(all_data))]
    amax = np.percentile(all_data, 99)
    amin = np.percentile(all_data, 1)
    for (i, w) in enumerate(self):
        c = w.get_curve(mnemonic, alias=alias)
        if (uwi_regex is not None):
            label = re.sub(uwi_regex, '\\1', w.uwi)
        else:
            label = w.uwi
        # Wells without the curve leave their axis empty.
        if (c is not None):
            axs[i] = c.plot_kde(ax=axs[i], amax=amax, amin=amin, label=((label + '-') + str(c.mnemonic)))
        else:
            continue
    if return_fig:
        return fig
    else:
        return
codesearchnet
def GetAnalyzerInstances(cls, analyzer_names):
    """Retrieves instances for all the specified analyzers.

    Args:
        analyzer_names (list[str]): names of the analyzers to retrieve.

    Returns:
        list[BaseAnalyzer]: analyzer instances.
    """
    return [
        analyzer_class()
        for analyzer_name, analyzer_class in iter(cls.GetAnalyzers())
        if analyzer_name in analyzer_names
    ]
codesearchnet
def __init__(self, solution_size, population_size=20):
    """Create an object that optimizes a given fitness function.

    Args:
        solution_size: The number of bits in every solution.
        population_size: The number of solutions in every iteration.
    """
    super(ExhaustiveBinary, self).__init__(solution_size, population_size)
    # Counter for the next integer to enumerate as a bit string.
    self._next_int = 0
juraj-google-style
def delete_existing_cname(env, zone_id, dns_name):
    """Delete an existing CNAME record.

    Used when updating to multi-region, for deleting old records: the
    record cannot simply be upserted since it changes types.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 zone id.
        dns_name (str): FQDN of application's dns entry to add/update.
    """
    client = boto3.Session(profile_name=env).client('route53')
    existing = find_existing_record(env, zone_id, dns_name,
                                    check_key='Type', check_value='CNAME')
    if existing:
        LOG.info("Deleting old record: %s", dns_name)
        _response = client.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={'Changes': [{
                'Action': 'DELETE',
                'ResourceRecordSet': existing
            }]})
        LOG.debug('Response from deleting %s: %s', dns_name, _response)
juraj-google-style
def to_qsw(orbit):
    """Matrix to convert from the inertial frame to the QSW local orbital
    frame.

    In QSW (also called RSW or LVLH), x is oriented along the position
    vector, z along the angular momentum, and y completes the frame.

    Args:
        orbit (list): Array of length 6 (position then velocity).

    Returns:
        numpy.ndarray: matrix to convert from inertial frame to QSW.

    >>> delta_qsw = [1, 0, 0]
    >>> p = [-6142438.668, 3492467.560, -25767.25680]
    >>> v = [505.8479685, 942.7809215, 7435.922231]
    >>> pv = p + v
    >>> mat = to_qsw(pv).T
    >>> delta_inert = mat @ delta_qsw
    >>> all(delta_inert == p / norm(p))
    True
    """
    pos, vel = _split(orbit)
    q = pos / norm(pos)
    w = np.cross(pos, vel) / (norm(pos) * norm(vel))
    s = np.cross(w, q)
    return np.array([q, s, w])
codesearchnet
def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
    """Retrieve a special-tokens mask for a token list.

    Called when adding special tokens using the tokenizer
    `prepare_for_model` method.

    Args:
        token_ids_0 (`List[int]`): List of ids.
        token_ids_1 (`List[int]`, *optional*, defaults to `None`):
            Optional second list of IDs for sequence pairs.
        already_has_special_tokens (`bool`, *optional*, defaults to `False`):
            Set to True if the token list is already formatted with
            special tokens for the model.

    Returns:
        `List[int]`: integers in [0, 1]: 1 for a special token, 0 for a
        sequence token.
    """
    if already_has_special_tokens:
        if token_ids_1 is not None:
            raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formatted with special tokens for the model.')
        special_ids = (self.sep_token_id, self.cls_token_id)
        return [1 if token in special_ids else 0 for token in token_ids_0]
    # [CLS] seq0 [SEP] (seq1 [SEP])
    mask = [1] + [0] * len(token_ids_0) + [1]
    if token_ids_1 is not None:
        mask += [0] * len(token_ids_1) + [1]
    return mask
github-repos
def TSKVolumeGetBytesPerSector(tsk_volume):
    """Retrieves the number of bytes per sector from a TSK volume object.

    Args:
        tsk_volume (pytsk3.Volume_Info): TSK volume information.

    Returns:
        int: number of bytes per sector, or 512 by default when the volume
        carries no usable info.
    """
    volume_info = getattr(tsk_volume, 'info', None)
    if volume_info is None:
        return 512
    return getattr(volume_info, 'block_size', 512)
codesearchnet
def mock(self, slot, rpc_id, value):
    """Store a mock return value for an RPC.

    Args:
        slot (SlotIdentifier): The slot we are mocking.
        rpc_id (int): The rpc we are mocking.
        value (int): The value returned when the RPC is called.
    """
    self.mock_rpcs.setdefault(slot.address, {})[rpc_id] = value
juraj-google-style
class PerKey(PTransform):
    """Per-key approximate quantiles.

    For a PCollection of KV pairs, produces for each key a single value that
    is the list of approximate N-tiles of that key's input elements.
    """

    def __init__(self, num_quantiles, key=None, reverse=False, weighted=False, input_batched=False):
        """Store the quantile configuration.

        Args:
            num_quantiles: number of elements in each resulting quantiles list.
            key: optional mapping of elements to a comparable sort key.
            reverse: order largest-to-smallest when True.
            weighted: expect (value, weight) tuples when True.
            input_batched: expect batched input elements when True.
        """
        self._num_quantiles = num_quantiles
        self._key = key
        self._reverse = reverse
        self._weighted = weighted
        self._input_batched = input_batched

    def expand(self, pcoll):
        # Delegate the actual quantile estimation to the combine fn, per key.
        return pcoll | CombinePerKey(ApproximateQuantilesCombineFn.create(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched))

    def display_data(self):
        # Reuse the shared display-data helper so Global and PerKey match.
        return ApproximateQuantiles._display_data(num_quantiles=self._num_quantiles, key=self._key, reverse=self._reverse, weighted=self._weighted, input_batched=self._input_batched)
PTransform takes PCollection of KV and returns a list based on each key whose single value is list of approximate N-tiles of the input element of the key. Args: num_quantiles: number of elements in the resulting quantiles values list. key: (optional) Key is a mapping of elements to a comparable key, similar to the key argument of Python's sorting methods. reverse: (optional) whether to order things smallest to largest, rather than largest to smallest. weighted: (optional) if set to True, the transform returns weighted quantiles. The input PCollection is then expected to contain tuples of input values with the corresponding weight. input_batched: (optional) if set to True, the transform expects each element of input PCollection to be a batch, which is a list of elements for non-weighted case and a tuple of lists of elements and weights for weighted. Provides a way to accumulate multiple elements at a time more efficiently.
github-repos
def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    Also advances the internal section state so the following include is
    checked against the new position.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an error
      message describing what's wrong.
    """
    error_message = ('Found %s after %s' % (self._TYPE_NAMES[header_type], self._SECTION_NAMES[self._section]))
    last_section = self._section
    if (header_type == _C_SYS_HEADER):
        # C system headers may only appear in or before the C section.
        if (self._section <= self._C_SECTION):
            self._section = self._C_SECTION
        else:
            # Out of order: reset alphabetical-order tracking and report.
            self._last_header = ''
            return error_message
    elif (header_type == _CPP_SYS_HEADER):
        if (self._section <= self._CPP_SECTION):
            self._section = self._CPP_SECTION
        else:
            self._last_header = ''
            return error_message
    elif (header_type == _LIKELY_MY_HEADER):
        # The file's own header may come first; later it counts as "other".
        if (self._section <= self._MY_H_SECTION):
            self._section = self._MY_H_SECTION
        else:
            self._section = self._OTHER_H_SECTION
    elif (header_type == _POSSIBLE_MY_HEADER):
        if (self._section <= self._MY_H_SECTION):
            self._section = self._MY_H_SECTION
        else:
            self._section = self._OTHER_H_SECTION
    else:
        assert (header_type == _OTHER_HEADER)
        self._section = self._OTHER_H_SECTION
    if (last_section != self._section):
        # Entering a new section invalidates the duplicate/order tracker.
        self._last_header = ''
    return ''
Returns a non-empty error message if the next header is out of order. This function also updates the internal state to be ready to check the next include. Args: header_type: One of the _XXX_HEADER constants defined above. Returns: The empty string if the header is in the right order, or an error message describing what's wrong.
codesearchnet
def get_as(access_token, subscription_id, resource_group, as_name):
    """Fetch availability set details from the Azure RM REST API.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        as_name (str): Name of the availability set.

    Returns:
        HTTP response with the availability set properties as JSON.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Compute/availabilitySets/' + as_name
        + '?api-version=' + COMP_API
    )
    return do_get(endpoint, access_token)
Get availability set details. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. as_name (str): Name of the new availability set. Returns: HTTP response. JSON body of the availability set properties.
juraj-google-style
def list_matching(self, ref_name: str, filter_: str) -> Iterable[ListEntry]:
    """Yield list-tree entries whose names match ``ref_name + filter_``.

    Args:
        ref_name: Mailbox reference name.
        filter_: Mailbox name with possible wildcards.
    """
    pattern, pattern_ci = self._get_pattern(ref_name + filter_)
    for entry in self.list():
        # INBOX is matched case-insensitively, per IMAP convention.
        if entry.name == 'INBOX':
            matched = pattern_ci.match('INBOX')
        else:
            matched = pattern.match(entry.name)
        if matched:
            yield entry
Return all the entries in the list tree that match the given query. Args: ref_name: Mailbox reference name. filter_: Mailbox name with possible wildcards.
codesearchnet
def call(self, inputs_embeds, attention_mask: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: Optional[bool]=None) -> Union[Tuple, TFBaseModelOutput]:
    """Run the encoder layer stack over pre-computed input embeddings.

    Args:
        inputs_embeds: float tensor of embedded inputs (not token ids).
        attention_mask: optional mask; 1 = attend, 0 = masked.
        output_attentions: return per-layer attention tensors when True.
        output_hidden_states: return per-layer hidden states when True.
        return_dict: return a `TFBaseModelOutput` instead of a tuple.
        training: dropout/training-mode flag forwarded to each layer.

    Returns:
        `TFBaseModelOutput` or a tuple of the non-None outputs.
    """
    # Fall back to the model config for any flags not explicitly given.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    encoder_states = () if output_hidden_states else None
    all_attentions = () if output_attentions else None
    hidden_states = inputs_embeds
    for idx, encoder_layer in enumerate(self.layers):
        if output_hidden_states:
            # Collect the input to each layer; the final output is added below.
            encoder_states = encoder_states + (hidden_states,)
        layer_outputs = encoder_layer(hidden_states, attention_mask, output_attentions=output_attentions, training=training)
        hidden_states = layer_outputs[0]
        if output_attentions:
            all_attentions = all_attentions + (layer_outputs[1],)
    if output_hidden_states:
        encoder_states = encoder_states + (hidden_states,)
    if not return_dict:
        # Tuple form drops None entries while preserving order.
        return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
    return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Args: inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Embedded representation of the inputs. Should be float, not int tokens. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
github-repos
def _batched_mask_to_box(masks: 'torch.Tensor'): if torch.numel(masks) == 0: return torch.zeros(*masks.shape[:-2], 4, device=masks.device) shape = masks.shape height, width = shape[-2:] in_height, _ = torch.max(masks, dim=-1) in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :] bottom_edges, _ = torch.max(in_height_coords, dim=-1) in_height_coords = in_height_coords + height * ~in_height top_edges, _ = torch.min(in_height_coords, dim=-1) in_width, _ = torch.max(masks, dim=-2) in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :] right_edges, _ = torch.max(in_width_coords, dim=-1) in_width_coords = in_width_coords + width * ~in_width left_edges, _ = torch.min(in_width_coords, dim=-1) empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) out = out * (~empty_filter).unsqueeze(-1) out = out.reshape(*shape[:-2], 4) return out
Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which corresponds the following required indices: - LEFT: left hand side of the bounding box - TOP: top of the bounding box - RIGHT: right of the bounding box - BOTTOM: bottom of the bounding box Return [0,0,0,0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output shape is channel_1 x channel_2 x ... x 4. Args: - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
github-repos
def accountSummary(self, account: str = '') -> List[AccountValue]:
    """Return cached account values, optionally filtered by account name.

    Blocking on first run (issues the summary request), non-blocking after.

    Args:
        account: If specified, only values for this account are returned.
    """
    if not self.wrapper.acctSummary:
        # First call: populate the cache synchronously.
        self.reqAccountSummary()
    values = list(self.wrapper.acctSummary.values())
    if not account:
        return values
    return [v for v in values if v.account == account]
List of account values for the given account, or of all accounts if account is left blank. This method is blocking on first run, non-blocking after that. Args: account: If specified, filter for this account name.
juraj-google-style
def stop_standing_subprocess(proc):
    """Stops a subprocess started by start_standing_subprocess.

    Kills the whole process tree (children first), then closes the pipes and
    reaps the process. Catches and ignores the PermissionError which only
    happens on Macs.

    Args:
        proc: Subprocess to terminate.

    Raises:
        Error: if any process in the tree could not be killed.
    """
    import psutil
    pid = proc.pid
    logging.debug('Stopping standing subprocess %d', pid)
    process = psutil.Process(pid)
    failed = []
    try:
        children = process.children(recursive=True)
    except AttributeError:
        # psutil < 3.0 named this get_children().
        children = process.get_children(recursive=True)
    for child in children:
        try:
            child.kill()
            child.wait(timeout=10)
        except psutil.NoSuchProcess:
            # Already gone; nothing to do.
            pass
        except:
            failed.append(child.pid)
            logging.exception('Failed to kill standing subprocess %d', child.pid)
    try:
        process.kill()
        process.wait(timeout=10)
    except psutil.NoSuchProcess:
        pass
    except:
        failed.append(pid)
        logging.exception('Failed to kill standing subprocess %d', pid)
    if failed:
        raise Error('Failed to kill standing subprocesses: %s' % failed)
    # Close pipes to release file descriptors, then reap the child.
    if proc.stdout:
        proc.stdout.close()
    if proc.stderr:
        proc.stderr.close()
    proc.wait()
    logging.debug('Stopped standing subprocess %d', pid)
Stops a subprocess started by start_standing_subprocess. Before killing the process, we check if the process is running, if it has terminated, Error is raised. Catches and ignores the PermissionError which only happens on Macs. Args: proc: Subprocess to terminate. Raises: Error: if the subprocess could not be stopped.
juraj-google-style
def compute_nats_and_bits_per_dim(data_dim, latent_dim, average_reconstruction, average_prior):
    """Compute the negative ELBO per data dimension, in nats and in bits.

    Args:
        data_dim: int-like data dimensionality.
        latent_dim: int-like latent dimensionality.
        average_reconstruction: scalar Tensor, reconstruction cost averaged
            over data dimensions (and batches).
        average_prior: scalar Tensor, negative log-prior averaged over latent
            dimensions (and batches).

    Returns:
        Tuple of scalar Tensors: (nats_per_dim, bits_per_dim).
    """
    with tf.name_scope(None, default_name='compute_nats_per_dim'):
        data_dim = tf.cast(data_dim, average_reconstruction.dtype)
        latent_dim = tf.cast(latent_dim, average_prior.dtype)
        # Un-average to get totals, then sum for the negative ELBO.
        nll_total = data_dim * average_reconstruction
        neg_log_prior_total = latent_dim * average_prior
        neg_elbo = nll_total + neg_log_prior_total
        nats_per_dim = tf.divide(neg_elbo, data_dim, name='nats_per_dim')
        # log(2) converts nats to bits.
        bits_per_dim = tf.divide(nats_per_dim, tf.log(2.0), name='bits_per_dim')
        return (nats_per_dim, bits_per_dim)
Computes negative ELBO, which is an upper bound on the negative likelihood. Args: data_dim: int-like indicating data dimensionality. latent_dim: int-like indicating latent dimensionality. average_reconstruction: Scalar Tensor indicating the reconstruction cost averaged over all data dimensions and any data batches. average_prior: Scalar Tensor indicating the negative log-prior probability averaged over all latent dimensions and any data batches. Returns: Tuple of scalar Tensors, representing the nats and bits per data dimension (e.g., subpixels) respectively.
codesearchnet
def CreateCampaignWithBiddingStrategy(client, bidding_strategy_id, budget_id):
    """Create a Campaign with a Shared Bidding Strategy.

    Args:
        client: AdWordsClient the client to run the example with.
        bidding_strategy_id: string the bidding strategy ID to use.
        budget_id: string the shared budget ID to use.

    Returns:
        dict An object representing a campaign.
    """
    # Local import: only needed to build a unique campaign name.
    import uuid
    campaign_service = client.GetService('CampaignService', version='v201809')
    campaign = {
        # NOTE(review): the original literal was truncated mid-string by the
        # extraction pipeline; restored to the standard unique-name pattern.
        'name': 'Interplanetary Cruise #%s' % uuid.uuid4(),
        'budget': {
            'budgetId': budget_id
        },
        'biddingStrategyConfiguration': {
            'biddingStrategyId': bidding_strategy_id
        },
        'advertisingChannelType': 'SEARCH',
        'networkSetting': {
            'targetGoogleSearch': 'true',
            'targetSearchNetwork': 'true',
            'targetContentNetwork': 'true'
        }
    }
    operation = {
        'operator': 'ADD',
        'operand': campaign
    }
    response = campaign_service.mutate([operation])
    new_campaign = response['value'][0]
    print('Campaign with name "%s", ID "%s" and bidding scheme ID "%s" '
          'was created.' % (new_campaign['name'], new_campaign['id'],
                            new_campaign['biddingStrategyConfiguration']['biddingStrategyId']))
    return new_campaign
Create a Campaign with a Shared Bidding Strategy. Args: client: AdWordsClient the client to run the example with. bidding_strategy_id: string the bidding strategy ID to use. budget_id: string the shared budget ID to use. Returns: dict An object representing a campaign.
juraj-google-style
def register_array_types_from_sources(self, source_files):
    """Parse each VHDL source file and register any array type definitions.

    Args:
        source_files (list of str): Files to scan for array definitions.
    """
    # Non-VHDL files are skipped entirely.
    vhdl_sources = (path for path in source_files if is_vhdl(path))
    for path in vhdl_sources:
        self._register_array_types(self.extract_objects(path))
Add array type definitions from a file list to internal registry Args: source_files (list of str): Files to parse for array definitions
juraj-google-style
def _generate_legacy_type_checks(types=()):
    """Generate newer-style type checks from JSON-type-name-to-type mappings.

    Arguments:
        types (dict): A mapping of type names to their Python types.

    Returns:
        A dictionary of definitions to pass to `TypeChecker`.
    """
    type_map = dict(types)

    def _make_check(pytypes):
        flat = _utils.flatten(pytypes)

        def _check(checker, instance):
            # bool subclasses int; only accept it when explicitly listed.
            if isinstance(instance, bool) and bool not in flat:
                return False
            return isinstance(instance, flat)
        return _check

    return {name: _make_check(pts) for (name, pts) in iteritems(type_map)}
Generate newer-style type checks out of JSON-type-name-to-type mappings. Arguments: types (dict): A mapping of type names to their Python types Returns: A dictionary of definitions to pass to `TypeChecker`
juraj-google-style
def around(A, decimals=0):
    """Evenly round to the given number of decimals.

    Args:
        A (Poly, numpy.ndarray): Input data.
        decimals (int): Number of decimal places to round to (default: 0).
            If negative, rounds to the left of the decimal point.

    Returns:
        (Poly, numpy.ndarray): Same type as A.
    """
    if not isinstance(A, Poly):
        return numpy.around(A, decimals)
    # Round each coefficient array recursively, keeping the Poly structure.
    coeffs = A.A.copy()
    for key in A.keys:
        coeffs[key] = around(coeffs[key], decimals)
    return Poly(coeffs, A.dim, A.shape, A.dtype)
Evenly round to the given number of decimals. Args: A (Poly, numpy.ndarray): Input data. decimals (int): Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns: (Poly, numpy.ndarray): Same type as A. Examples: >>> P = chaospy.prange(3)*2**-numpy.arange(0, 6, 2, float) >>> print(P) [1.0, 0.25q0, 0.0625q0^2] >>> print(chaospy.around(P)) [1.0, 0.0, 0.0] >>> print(chaospy.around(P, 2)) [1.0, 0.25q0, 0.06q0^2]
codesearchnet
def _ParseRecord(self, parser_mediator, file_object, record_offset):
    """Parses an ASL record and produces events.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (file): file-like object.
        record_offset (int): offset of the record relative to the start of
            the file.

    Returns:
        int: next record offset.

    Raises:
        ParseError: if the record cannot be parsed.
    """
    # The string heap sits between the current position and the record.
    record_strings_data_offset = file_object.tell()
    record_strings_data_size = (record_offset - record_strings_data_offset)
    record_strings_data = self._ReadData(file_object, record_strings_data_offset, record_strings_data_size)
    record_map = self._GetDataTypeMap('asl_record')
    try:
        (record, record_data_size) = self._ReadStructureFromFileObject(file_object, record_offset, record_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile('Unable to parse record at offset: 0x{0:08x} with error: {1!s}'.format(record_offset, exception))
    # Strings are referenced by offset into the string heap read above.
    hostname = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.hostname_string_offset)
    sender = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.sender_string_offset)
    facility = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.facility_string_offset)
    message = self._ParseRecordString(record_strings_data, record_strings_data_offset, record.message_string_offset)
    file_offset = (record_offset + record_data_size)
    additional_data_size = ((record.data_size + 6) - record_data_size)
    if ((additional_data_size % 8) != 0):
        # Extra data is expected to be 8-byte aligned.
        raise errors.ParseError('Invalid record additional data size: {0:d}.'.format(additional_data_size))
    additional_data = self._ReadData(file_object, file_offset, additional_data_size)
    extra_fields = {}
    # Extra fields are 16-byte (name offset, value offset) pairs.
    for additional_data_offset in range(0, (additional_data_size - 8), 16):
        record_extra_field = self._ParseRecordExtraField(additional_data[additional_data_offset:], file_offset)
        file_offset += 16
        name = self._ParseRecordString(record_strings_data, record_strings_data_offset, record_extra_field.name_string_offset)
        value = self._ParseRecordString(record_strings_data, record_strings_data_offset, record_extra_field.value_string_offset)
        if (name is not None):
            extra_fields[name] = value
    event_data = ASLEventData()
    event_data.computer_name = hostname
    event_data.extra_information = ', '.join(['{0:s}: {1:s}'.format(name, value) for (name, value) in sorted(extra_fields.items())])
    event_data.facility = facility
    event_data.group_id = record.group_identifier
    event_data.level = record.alert_level
    event_data.message_id = record.message_identifier
    event_data.message = message
    event_data.pid = record.process_identifier
    event_data.read_gid = record.real_group_identifier
    event_data.read_uid = record.real_user_identifier
    event_data.record_position = record_offset
    event_data.sender = sender
    event_data.user_sid = '{0:d}'.format(record.user_identifier)
    # Combine seconds and nanoseconds into a microsecond POSIX timestamp.
    (microseconds, _) = divmod(record.written_time_nanoseconds, 1000)
    timestamp = ((record.written_time * 1000000) + microseconds)
    date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
    return record.next_record_offset
Parses a record and produces events. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (file): file-like object. record_offset (int): offset of the record relative to the start of the file. Returns: int: next record offset. Raises: ParseError: if the record cannot be parsed.
codesearchnet
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applies gradients and tunes hyperparameters with YellowFin.

    Args:
        grads_and_vars: List of (gradient, variable) pairs as returned by
            compute_gradients().
        global_step: Optional Variable to increment by one after the update.
        name: Optional name for the returned operation.

    Returns:
        A grouped op: momentum variable update, YellowFin statistics
        (curvature, variance, distance) ops, lr/mu tuning, and step increment.
    """
    # Drop pairs with no gradient; keep grads and vars in matching order.
    (self._grad, self._vars) = zip(*[(g, t) for (g, t) in grads_and_vars if (g is not None)])
    with tf.variable_scope('apply_updates'):
        if (self._clip_thresh_var is not None):
            # Optionally clip by global norm before applying.
            (self._grad, _) = tf.clip_by_global_norm(self._grad, self._clip_thresh_var)
            apply_grad_op = self._momentum_optimizer.apply_gradients(zip(self._grad, self._vars), global_step=global_step, name=name)
        else:
            apply_grad_op = self._momentum_optimizer.apply_gradients(zip(self._grad, self._vars), global_step=global_step, name=name)
    # Control dependencies enforce: apply -> prepare -> yellowfin -> step.
    with tf.variable_scope('prepare_yellowFin_variables'):
        with tf.control_dependencies([apply_grad_op]):
            prepare_variables_op = self._prepare_variables()
    with tf.variable_scope('yellowfin'):
        with tf.control_dependencies([prepare_variables_op]):
            yellowfin_op = self._yellowfin()
    with tf.control_dependencies([yellowfin_op]):
        self._increment_step_op = tf.assign_add(self._step, 1).op
    return tf.group(apply_grad_op, prepare_variables_op, yellowfin_op, self._increment_step_op)
Applying gradients and tune hyperparams with YellowFin. Args: grads_and_vars: List of (gradient, variable) pairs as returned by compute_gradients(). global_step: Optional Variable to increment by one after the variables have been updated. name: Optional name for the returned operation. Default to the name passed to the Optimizer constructor. Returns: (A group of operations) Variable Update with Momentum ops, YellowFin ops(Curvature, Variance, Distance) ops, SingleStep and lr_mu tuning ops, Step increment ops.
codesearchnet
def _retrieve_info(self, http):
    """Refresh service account info (email, scopes) for invalid credentials.

    Args:
        http: an object to be used to make HTTP requests.
    """
    if not self.invalid:
        return
    info = _metadata.get_service_account_info(
        http, service_account=self.service_account_email or 'default')
    self.invalid = False
    self.service_account_email = info['email']
    self.scopes = info['scopes']
Retrieves service account info for invalid credentials. Args: http: an object to be used to make HTTP requests.
codesearchnet
def attachment_to_multidim_measurement(attachment, name=None):
    """Convert an OpenHTF test record attachment to a multi-dim measurement.

    Best-effort reverse conversion; some data is lost going from a multidim
    measurement to an attachment.

    Args:
        attachment: an `openhtf.test_record.Attachment` from a multi-dim.
        name: optional name for the measurement; falls back to the name in
            the attachment.

    Returns:
        A multi-dim `openhtf.Measurement`.
    """
    data = json.loads(attachment.data)
    name = name or data.get('name')
    attachment_dims = data.get('dimensions', [])
    attachment_values = data.get('value')

    attachment_outcome_str = data.get('outcome')
    if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
        # Old-style attachments may store the numeric enum; map it to a name.
        try:
            attachment_outcome_str = test_runs_pb2.Status.Name(
                int(attachment_outcome_str))
        except ValueError:
            attachment_outcome_str = None
    outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(
        attachment_outcome_str)

    _lazy_load_units_by_code()

    dims = []
    for d in attachment_dims:
        # Unknown unit codes fall back to the NONE unit.
        unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
        description = d.get('name', '')
        dims.append(measurements.Dimension(description=description, unit=unit))
    if attachment_values and len(dims) == len(attachment_values[0]):
        # The last "dimension" actually describes the measured value itself.
        units_ = dims[-1].unit
        dimensions = dims[:-1]
    else:
        units_ = None
        dimensions = dims

    measured_value = measurements.DimensionedMeasuredValue(
        name=name,
        num_dimensions=len(dimensions)
    )
    for row in attachment_values:
        # Each row is (coordinate..., value).
        coordinates = tuple(row[:-1])
        val = row[-1]
        measured_value[coordinates] = val

    measurement = measurements.Measurement(
        name=name,
        units=units_,
        dimensions=tuple(dimensions),
        measured_value=measured_value,
        outcome=outcome
    )
    return measurement
Convert an OpenHTF test record attachment to a multi-dim measurement. This is a best effort attempt to reverse, as some data is lost in converting from a multidim to an attachment. Args: attachment: an `openhtf.test_record.Attachment` from a multi-dim. name: an optional name for the measurement. If not provided will use the name included in the attachment. Returns: An multi-dim `openhtf.Measurement`.
juraj-google-style
def _hash_sequence(self, sighash_type, anyone_can_pay):
    """BIP143 hashSequence implementation.

    Args:
        sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL.
        anyone_can_pay (bool): true if ANYONECANPAY should be set.

    Returns:
        (bytes): the hashSequence, a 32 byte hash.
    """
    if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
        # BIP143: hashSequence is 32 zero bytes for SINGLE / ANYONECANPAY.
        return b'\x00' * 32
    buf = ByteData()
    for txin in self.tx_ins:
        buf += txin.sequence
    return utils.hash256(buf.to_bytes())
BIP143 hashSequence implementation Args: sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL anyone_can_pay (bool): true if ANYONECANPAY should be set Returns: (bytes): the hashSequence, a 32 byte hash
codesearchnet
def _uniform_correlation_like_matrix(num_rows, batch_shape, dtype, seed):
    """Returns uniformly random symmetric matrices with unit diagonal.

    Entries are in [-1, 1]; matrices that are additionally positive
    semi-definite are exactly the correlation matrices.

    Args:
        num_rows: Python `int` dimension of the matrices.
        batch_shape: `Tensor` or tuple of `int`, batch shape to sample.
        dtype: `dtype` of the returned `Tensor`.
        seed: Random seed.

    Returns:
        A `Tensor` of shape `batch_shape + [num_rows, num_rows]`.
    """
    # Number of entries on and below the diagonal.
    num_entries = num_rows * (num_rows + 1) / 2
    bounds = tf.ones(shape=[num_entries], dtype=dtype)
    samples = uniform.Uniform(-bounds, bounds).sample(batch_shape, seed=seed)
    lower = util.fill_triangular(samples)
    # Symmetrize, then overwrite the (doubled) diagonal with ones.
    symmetric = lower + tf.linalg.matrix_transpose(lower)
    unit_diag = tf.ones(
        shape=util.pad(batch_shape, axis=0, back=True, value=num_rows),
        dtype=dtype)
    return tf.linalg.set_diag(symmetric, unit_diag)
Returns a uniformly random `Tensor` of "correlation-like" matrices. A "correlation-like" matrix is a symmetric square matrix with all entries between -1 and 1 (inclusive) and 1s on the main diagonal. Of these, the ones that are positive semi-definite are exactly the correlation matrices. Args: num_rows: Python `int` dimension of the correlation-like matrices. batch_shape: `Tensor` or Python `tuple` of `int` shape of the batch to return. dtype: `dtype` of the `Tensor` to return. seed: Random seed. Returns: matrices: A `Tensor` of shape `batch_shape + [num_rows, num_rows]` and dtype `dtype`. Each entry is in [-1, 1], and each matrix along the bottom two dimensions is symmetric and has 1s on the main diagonal.
codesearchnet
def __setitem__(self, key, value):
    """Set a dictionary item, but route 'resources' through the helper.

    Args:
        key (Any): Key in dictionary.
        value (Any): Value to put in dictionary.

    Returns:
        None
    """
    if key != 'resources':
        return super(Dataset, self).__setitem__(key, value)
    # Resources require special handling (dataset ids must not be clobbered).
    self.add_update_resources(value, ignore_datasetid=True)
Set dictionary items but do not allow setting of resources Args: key (Any): Key in dictionary value (Any): Value to put in dictionary Returns: None
juraj-google-style
def cmap_from_color(color, dark=False):
    """Generate a matplotlib colormap from a single color.

    Args:
        color: hex code, HTML color name, or RGB tuple.
        dark (bool): if True, build from ``color`` to black; otherwise from
            white to ``color`` (the default).

    Returns:
        colormap: A matplotlib colormap.
    """
    palette = sns.dark_palette if dark else sns.light_palette
    return palette(color, as_cmap=True)
Generates a matplotlib colormap from a single color. Colormap will be built, by default, from white to ``color``. Args: color: Can be one of several things: 1. Hex code 2. HTML color name 3. RGB tuple dark (bool): If ``True``, colormap will be built from ``color`` to black. Default is ``False``, which builds a colormap from white to ``color``. Returns: colormap: A matplotlib colormap
juraj-google-style
async def get_participants(self, force_update=False) -> list:
    """Get all participants.

    |methcoro|

    Args:
        force_update (default=False): True to force a refresh from the
            Challonge API.

    Returns:
        list[Participant]:

    Raises:
        APIException
    """
    needs_fetch = force_update or self.participants is None
    if needs_fetch:
        res = await self.connection(
            'GET', 'tournaments/{}/participants'.format(self._id))
        self._refresh_participants_from_json(res)
    return self.participants or []
get all participants |methcoro| Args: force_update (default=False): True to force an update to the Challonge API Returns: list[Participant]: Raises: APIException
codesearchnet
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    """Subtracts `tf.IndexedSlices` from this variable.

    Args:
        sparse_delta: `tf.IndexedSlices` to be subtracted from this variable.
        use_locking: If `True`, use locking during the operation.
        name: the name of the operation.

    Returns:
        The updated variable.

    Raises:
        TypeError: if `sparse_delta` is not an `IndexedSlices`.
    """
    if not isinstance(sparse_delta, indexed_slices.IndexedSlices):
        raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}')
    values = ops.convert_to_tensor(sparse_delta.values, self.dtype)
    scatter_op = gen_resource_variable_ops.resource_scatter_sub(
        self.handle, sparse_delta.indices, values, name=name)
    return self._lazy_read(scatter_op)
Subtracts `tf.IndexedSlices` from this variable. Args: sparse_delta: `tf.IndexedSlices` to be subtracted from this variable. use_locking: If `True`, use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if `sparse_delta` is not an `IndexedSlices`.
github-repos
def asdict(self):
    """Encode this reading into a plain dictionary.

    Returns:
        dict: stream, device timestamp, streamer id, ISO timestamp (or None),
        and value of this reading.
    """
    reading_time = self.reading_time
    iso_timestamp = reading_time.isoformat() if reading_time is not None else None
    return {
        'stream': self.stream,
        'device_timestamp': self.raw_time,
        'streamer_local_id': self.reading_id,
        'timestamp': iso_timestamp,
        'value': self.value,
    }
Encode the data in this reading into a dictionary. Returns: dict: A dictionary containing the information from this reading.
codesearchnet
def flownet2_sd(self, x):
    """FlowNet2-SD: small-displacement flow network (Table 3 of FlowNet 2.0).

    Args:
        x: concatenation of two inputs, of shape [1, 2xC, H, W]

    Returns:
        Predicted flow at quarter resolution, upsampled to input size.
    """
    # Shared conv settings: leaky ReLU, stride-2 downsampling, NCHW layout.
    with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),
                  padding='valid', strides=2, kernel_size=3,
                  data_format='channels_first'), \
            argscope([tf.layers.conv2d_transpose], padding='same',
                     activation=tf.identity,
                     data_format='channels_first',
                     strides=2, kernel_size=4):
        # Contracting part: conv0..conv6_1, halving resolution at each convN.
        x = tf.layers.conv2d(pad(x, 1), 64, name='conv0', strides=1)
        x = tf.layers.conv2d(pad(x, 1), 64, name='conv1')
        conv1 = tf.layers.conv2d(pad(x, 1), 128, name='conv1_1', strides=1)
        x = tf.layers.conv2d(pad(conv1, 1), 128, name='conv2')
        conv2 = tf.layers.conv2d(pad(x, 1), 128, name='conv2_1', strides=1)
        x = tf.layers.conv2d(pad(conv2, 1), 256, name='conv3')
        conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)
        x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')
        conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
        x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
        conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
        x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
        conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)

        # Expanding part: predict flow at each scale, upsample, and refine
        # with skip connections from the contracting part.
        flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
        flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5')
        x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))

        concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
        interconv5 = tf.layers.conv2d(pad(concat5, 1), 512, strides=1, name='inter_conv5', activation=tf.identity)
        flow5 = tf.layers.conv2d(pad(interconv5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
        flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4')
        x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))

        concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
        interconv4 = tf.layers.conv2d(pad(concat4, 1), 256, strides=1, name='inter_conv4', activation=tf.identity)
        flow4 = tf.layers.conv2d(pad(interconv4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
        flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3')
        x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))

        concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')
        interconv3 = tf.layers.conv2d(pad(concat3, 1), 128, strides=1, name='inter_conv3', activation=tf.identity)
        flow3 = tf.layers.conv2d(pad(interconv3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
        flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2')
        x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))

        concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')
        interconv2 = tf.layers.conv2d(pad(concat2, 1), 64, strides=1, name='inter_conv2', activation=tf.identity)
        flow2 = tf.layers.conv2d(pad(interconv2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)

        # Undo the training-time flow scaling and upsample to input size.
        return resize(flow2 / DISP_SCALE, mode='nearest')
Architecture in Table 3 of FlowNet 2.0. Args: x: concatenation of two inputs, of shape [1, 2xC, H, W]
juraj-google-style
def _add_namespace(marc_xml):
    """Add proper XML namespace to the `marc_xml` record.

    Args:
        marc_xml (str): String representation of the XML record.

    Returns:
        str: XML with namespace.
    """
    dom = marc_xml
    if isinstance(dom, basestring):
        dom = dhtmlparser.parseString(marc_xml)
    root = dom.find("root")
    if root:
        # Strip any attributes from the document root.
        root[0].params = {}
    for record in dom.find("record"):
        record.params = {}
    collections = dom.find("collection")
    if not collections:
        # No <collection> wrapper: wrap the first record in the template.
        record = dom.find("record")[0]
        return XML_TEMPLATE.replace("$CONTENT", str(record))
    for col in collections:
        # NOTE(review): these URL literals were truncated at "http:" by the
        # extraction pipeline (the "//..." tails were stripped as comments);
        # restored to the standard MARC21-slim namespace values.
        col.params["xmlns"] = "http://www.loc.gov/MARC21/slim"
        col.params["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance"
        col.params["xsi:schemaLocation"] = (
            "http://www.loc.gov/MARC21/slim "
            "http://www.loc.gov/standards/marcxml/schema/MARC21slim.xsd"
        )
    return str(dom)
Add proper XML namespace to the `marc_xml` record. Args: marc_xml (str): String representation of the XML record. Returns: str: XML with namespace.
juraj-google-style
def iterate_ngrams(text, n):
    """Return all length-``n`` contiguous substrings (ngrams) of ``text``.

    Args:
        text (str): text to window over.
        n (int): size of the sliding window.

    Returns:
        list: every ngram of ``text``, in order.

    Raises:
        ValueError: If ``n`` is not positive.
    """
    if n <= 0:
        raise ValueError("n must be a positive integer")
    last_start = len(text) - n + 1
    return [text[start:start + n] for start in range(last_start)]
Generator to yield ngrams in ``text``. Example: >>> for ngram in iterate_ngrams("example", 4): ... print(ngram) exam xamp ampl mple Args: text (str): text to iterate over n (int): size of window for iteration Returns: Generator expression to yield the next ngram in the text Raises: ValueError: If n is non positive
juraj-google-style
def gnuplot_2d(x, y, filename, title='', x_label='', y_label=''):
    """Produce a general 2D plot of (x, y) as a PNG via gnuplot.

    Args:
        x (list): x points.
        y (list): y points.
        filename (str): output image filename ('.png' appended if missing).
        title (str): plot title (default: none).
        x_label (str): x-axis label.
        y_label (str): y-axis label.
    """
    _, ext = os.path.splitext(filename)
    if ext != '.png':
        filename += '.png'
    gnuplot_cmds = '\n set datafile separator ","\n set term pngcairo size 30cm,25cm\n set out filename\n\n unset key\n set border lw 1.5\n set grid lt -1 lc rgb "gray80"\n\n set title title\n set xlabel x_label\n set ylabel y_label\n\n plot filename_data u 1:2 w lp pt 6 ps 0.5\n '
    script = _GnuplotScriptTemp(gnuplot_cmds)
    data = _GnuplotDataTemp(x, y)
    gnuplot(script.name, {
        'filename': filename,
        'filename_data': data.name,
        'title': title,
        'x_label': x_label,
        'y_label': y_label,
    })
Function to produce a general 2D plot. Args: x (list): x points. y (list): y points. filename (str): Filename of the output image. title (str): Title of the plot. Default is '' (no title). x_label (str): x-axis label. y_label (str): y-axis label.
codesearchnet
def _CreateClassTemplate(cls, data_type_definition):
    """Creates the class template for a data type definition.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Returns:
        str: class template.
    """
    type_name = data_type_definition.name
    type_description = data_type_definition.description or type_name
    # Normalize: descriptions must not end with trailing periods.
    while type_description.endswith('.'):
        type_description = type_description[:-1]
    class_attributes_description = []
    init_arguments = []
    instance_attributes = []
    for member_definition in data_type_definition.members:
        attribute_name = member_definition.name
        description = member_definition.description or attribute_name
        while description.endswith('.'):
            description = description[:-1]
        member_data_type = getattr(member_definition, 'member_data_type', '')
        if isinstance(member_definition, data_types.MemberDataTypeDefinition):
            # Unwrap to the underlying data type definition.
            member_definition = member_definition.member_data_type_definition
        member_type_indicator = member_definition.TYPE_INDICATOR
        if member_type_indicator == definitions.TYPE_INDICATOR_SEQUENCE:
            element_type_indicator = member_definition.element_data_type
            member_type_indicator = 'tuple[{0:s}]'.format(element_type_indicator)
        else:
            # Map to the Python native type name when one is known.
            member_type_indicator = cls._PYTHON_NATIVE_TYPES.get(
                member_type_indicator, member_data_type)
        argument = '{0:s}=None'.format(attribute_name)
        definition = '    self.{0:s} = {0:s}'.format(attribute_name)
        description = '    {0:s} ({1:s}): {2:s}.'.format(
            attribute_name, member_type_indicator, description)
        class_attributes_description.append(description)
        init_arguments.append(argument)
        instance_attributes.append(definition)
    # Attributes are emitted in sorted order for deterministic output.
    class_attributes_description = '\n'.join(
        sorted(class_attributes_description))
    init_arguments = ', '.join(init_arguments)
    instance_attributes = '\n'.join(sorted(instance_attributes))
    template_values = {
        'class_attributes_description': class_attributes_description,
        'init_arguments': init_arguments,
        'instance_attributes': instance_attributes,
        'type_description': type_description,
        'type_name': type_name}
    return cls._CLASS_TEMPLATE.format(**template_values)
Creates the class template. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: str: class template.
juraj-google-style
def conditionally_create_security_groups(env, service_name, service_type):
    """Create security group(s) as needed for a service.

    The names and number of groups created depend on service_type:
      aws_ec2            -> "<env>-<service>-ec2"
      aws_lambda         -> "<env>-<service>-lambda"
      http_service       -> "<env>-<service>-ec2" and "<env>-<service>-elb"
      aws_security_group -> "<env>-<service>"
    Other service types are skipped. Existing groups are left untouched.
    Group creation only happens when CONTEXT.commit is set; otherwise this
    is a dry run.

    Args:
        env: the environment the SG will be created in
        service_name: name of the service in the service registry
        service_type: service registry service type: 'aws_ec2',
            'aws_lambda', 'aws_security_group', or 'http_service'
    """
    if service_type not in SG_SERVICE_TYPES:
        print_if_verbose("not eligible for security group(s); service type: {}".format(service_type))
        return
    target_name = "{}-{}".format(env, service_name)
    if service_type == "aws_ec2":
        sg_names = ["{}-ec2".format(target_name)]
    elif service_type == "aws_lambda":
        sg_names = ["{}-lambda".format(target_name)]
    elif service_type == "http_service":
        sg_names = [
            "{}-ec2".format(target_name),
            "{}-elb".format(target_name)
        ]
    elif service_type == "aws_security_group":
        sg_names = [target_name]
    else:
        # Defensive: SG_SERVICE_TYPES admitted a type with no branch above.
        fail("Unexpected service_type: {} when creating security group for: {}".format(service_type, target_name))
    for sg_name in sg_names:
        # Only create the group if it does not already exist.
        if not AWS_RESOLVER.ec2_security_group_security_group_id(sg_name):
            vpc_name = "vpc-{}".format(env)
            print("Create security group: {} in vpc: {}".format(sg_name, vpc_name))
            vpc = AWS_RESOLVER.ec2_vpc_vpc_id(vpc_name)
            if not vpc:
                fail("Error: could not get VPC by name: {}".format(vpc_name))
            if CONTEXT.commit:
                try:
                    new_sg = CLIENTS["ec2"].create_security_group(GroupName=sg_name, VpcId=vpc, Description=sg_name)
                except:
                    fail("Exception creating security group named: {} in VpcId: {}".format(sg_name, vpc_name), sys.exc_info())
                print(new_sg["GroupId"])
        else:
            print_if_verbose("security group already exists: {}".format(sg_name))
Create security groups as needed; name and number created depend on service_type Args: env: the environment the SG will be created in service_name: name of the service in service registry service_type: service registry service type: 'aws_ec2', 'aws_lambda', 'aws_security_group', or 'http_service'
juraj-google-style
def get_current_human_time():
    """Return the current local time as a human readable string.

    Returns:
        str: the current timestamp in ``MM-DD-YYYY HH:MM:SS `` format
        (note the trailing space, kept for backward compatibility).
    """
    human_readable_format = '%m-%d-%Y %H:%M:%S '
    return time.strftime(human_readable_format)
Returns the current time in human readable format. Returns: The current time stamp in Month-Day-Year Hour:Min:Sec format.
github-repos
def split_key(key, max_keys=0):
    """Split a dotted key, honouring escaped dots inside key names.

    Splitting stops after ``max_keys`` parts have been extracted; the
    unconsumed remainder is re-joined with dots and appended as the final
    element.

    Args:
        key (basestring): the key to split.
        max_keys (int): maximum number of keys to extract; 0 means no limit.

    Returns:
        list: the extracted key parts.
    """
    # SPLIT_REGEX keeps the separator dots as standalone tokens; drop them.
    tokens = [token for token in re.split(SPLIT_REGEX, key) if token != '.']
    extracted = []
    while tokens:
        if max_keys > 0 and len(extracted) == max_keys:
            break
        extracted.append(tokens.pop(0))
    if tokens:
        # Re-assemble whatever was not extracted into one trailing key.
        extracted.append('.'.join(tokens))
    return extracted
Splits a key but allows dots in the key name if they're escaped properly. Splitting this complex key: complex_key = ".dont\.splitme.d\.o\. origen.splitme\.dontsplit.splitme." split_key(complex_key) results in: ['', 'dont\.splitme', 'd\.o\. origen', 'splitme\.dontsplit', 'splitme', ''] Args: key (basestring): The key to be split. max_keys (int): The maximum number of keys to be extracted. 0 means no limit. Returns: A list of keys
codesearchnet
def __init__(self, func, type):
    """Initialize the bound method wrapper.

    Args:
        func (types.FunctionType): the method's underlying function.
        type (type): the class the method belongs to.
    """
    # The two attributes are independent; store them directly.
    self.type = type
    self.func = func
Instantiates a bound method object. Args: func (types.FunctionType): The method's underlying function type (type): The class of the method.
github-repos
def minimize_peak_memory(graph, scheduler_alg):
    """Compute an operation schedule that minimizes peak memory.

    Args:
        graph: an mtf.auto_mtf.graph_interface.GraphInterface.
        scheduler_alg: a string, one of 'NAIVE' or 'LIST'.

    Returns:
        an iterable of integers representing the schedule.

    Raises:
        NotImplementedError: if scheduler_alg is not 'NAIVE' or 'LIST'.
    """
    if scheduler_alg == 'NAIVE':
        return _minimize_peak_memory_naive(graph)
    if scheduler_alg == 'LIST':
        return _minimize_peak_memory_list(graph)
    raise NotImplementedError('{} is not a scheduler algorithm. It should be '
                              'one of NAIVE or LIST.'
                              .format(scheduler_alg))
Computes a schedule to minimize peak memory. Args: graph: an mtf.auto_mtf.graph_interface.GraphInterface. scheduler_alg: a string, one of 'NAIVE' or 'LIST' Returns: an iterable of integers representing the schedule.
juraj-google-style
def set_nsxcontroller_port(self, **kwargs):
    """Set the NSX Controller port on the switch.

    Args:
        port (int): 1 to 65535.
        name: the NSX controller connection name.

    Returns:
        Return value of the configured callback.
    """
    port_args = {
        'name': kwargs.pop('name'),
        'port': str(kwargs.pop('port')),
    }
    # Resolve the generated binding method on the tunnels module.
    nsxcontroller_attr = getattr(
        self._brocade_tunnels, 'nsx_controller_connection_addr_port')
    config = nsxcontroller_attr(**port_args)
    return self._callback(config)
Set Nsx Controller pot on the switch Args: port (int): 1 to 65535. callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
juraj-google-style
def _print_contained_resource(self, contained_resource: message.Message) -> None:
    """Print the set fields of the contained resource.

    When the printer is in ANALYTIC JSON format only the structure
    definition URL of each set field is emitted; otherwise the field value
    is printed in full.

    Args:
        contained_resource: The contained resource to iterate over and print.
    """
    for _, field_value in contained_resource.ListFields():
        if self.json_format != _FhirJsonFormat.ANALYTIC:
            self._print(field_value)
            continue
        url = annotation_utils.get_structure_definition_url(field_value)
        self.generator.push(f'"{url}"')
Prints the set fields of the contained resource. If the _FhirJsonFormat is set to ANALYTIC, this method only prints the url. Args: contained_resource: The contained resource to iterate over and print.
github-repos
def market_if_touched_replace(self, accountID, orderID, **kwargs):
    """Replace a pending MarketIfTouched Order in an Account.

    Args:
        accountID: The ID of the Account.
        orderID: The ID of the MarketIfTouched Order to replace.
        kwargs: The arguments to create a MarketIfTouchedOrderRequest.

    Returns:
        v20.response.Response containing the results from submitting
        the request.
    """
    replacement_order = MarketIfTouchedOrderRequest(**kwargs)
    return self.replace(accountID, orderID, order=replacement_order)
Shortcut to replace a pending MarketIfTouched Order in an Account Args: accountID : The ID of the Account orderID : The ID of the MarketIfTouched Order to replace kwargs : The arguments to create a MarketIfTouchedOrderRequest Returns: v20.response.Response containing the results from submitting the request
codesearchnet
def is_significant(sample1, sample2):
    """Determine whether two samples differ significantly.

    Uses a Student's two-sample, two-tailed t-test at the 95% confidence
    level.

    Args:
        sample1: one sample.
        sample2: the other sample.

    Returns:
        tuple: (significant, t_score) where significant is a bool
        indicating whether the two samples differ significantly and
        t_score is the two-sample t-test score.
    """
    degrees_of_freedom = len(sample1) + len(sample2) - 2
    critical = tdist95conf_level(degrees_of_freedom)
    score = tscore(sample1, sample2)
    significant = abs(score) >= critical
    return (significant, score)
Determine whether two samples differ significantly. This uses a Student's two-sample, two-tailed t-test at the 95% confidence level (alpha=0.05). Returns: (significant, t_score) where significant is a bool indicating whether the two samples differ significantly; t_score is the score from the two-sample T test.
codesearchnet
def covariance_to_correlations(covariance):
    """Transform a stack of covariance matrices into correlation matrices.

    Each (p, p) covariance matrix is divided by the outer product of the
    square roots of its diagonal (the standard deviations). Infinities and
    NaNs arising from zero variances are replaced with zeros and the result
    is clipped to [-1, 1].

    Args:
        covariance (ndarray): array of shape (n, p, p) holding, for n
            problems, a (p, p) covariance matrix each.

    Returns:
        ndarray: the correlation matrices, same shape as the input.
    """
    # Variances per problem: shape (n, p).
    variances = np.diagonal(covariance, axis1=1, axis2=2)
    # Outer product of standard deviations per problem: shape (n, p, p).
    scale = np.sqrt(variances[:, :, None] * variances[:, None, :])
    correlations = covariance / scale
    # Zero variances produce inf/NaN entries; neutralize them.
    correlations[np.isinf(correlations)] = 0
    return np.clip(np.nan_to_num(correlations), -1, 1)
Transform a covariance matrix into a correlations matrix. This can be seen as dividing a covariance matrix by the outer product of the diagonal. As post processing we replace the infinities and the NaNs with zeros and clip the result to [-1, 1]. Args: covariance (ndarray): a matrix of shape (n, p, p) with for n problems the covariance matrix of shape (p, p). Returns: ndarray: the correlations matrix
juraj-google-style
def test_antithetic_sample_paths_mean_2d(self, random_type, seed):
    """Tests path means for the 2-d antithetic variates sampling method.

    Constructs the Ito processes
        dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2
        dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2
    with constant mu_i and s_ij = a_ij * t + b_ij. The expected value of
    X_i at time t is (x_0)_i + 2/3 * mu_i * t^1.5, which the sampled means
    are checked against.

    Args:
        random_type: Random number type defined by tff.math.random.RandomType
            enum.
        seed: Random seed.
    """
    mu = np.array([0.2, 0.7])
    a = np.array([[0.4, 0.1], [0.3, 0.2]])
    b = np.array([[0.33, -0.03], [0.21, 0.5]])

    def drift_fn(t, x):
        del x
        return mu * tf.sqrt(t)

    def vol_fn(t, x):
        del x
        # Volatility is linear in time: s_ij = a_ij * t + b_ij.
        return (a * t + b) * tf.ones([2, 2], dtype=t.dtype)
    times = np.array([0.1, 0.21, 0.32, 0.43, 0.55])
    num_samples = 5000
    x0 = np.array([0.1, -1.1])
    paths = self.evaluate(euler_sampling.sample(dim=2, drift_fn=drift_fn, volatility_fn=vol_fn, times=times, num_samples=num_samples, initial_state=x0, random_type=random_type, time_step=0.01, seed=seed))
    self.assertAllClose(paths.shape, (num_samples, 5, 2), atol=0)
    means = np.mean(paths, axis=0)
    times = np.reshape(times, [-1, 1])
    # Closed-form mean: x_0 + 2/3 * mu * t^1.5.
    expected_means = x0 + 2.0 / 3.0 * mu * np.power(times, 1.5)
    self.assertAllClose(means, expected_means, rtol=0.005, atol=0.005)
Tests path properties for 2-dimentional anthithetic variates method. The same test as above but with `PSEUDO_ANTITHETIC` random type. We construct the following Ito processes. dX_1 = mu_1 sqrt(t) dt + s11 dW_1 + s12 dW_2 dX_2 = mu_2 sqrt(t) dt + s21 dW_1 + s22 dW_2 mu_1, mu_2 are constants. s_ij = a_ij t + b_ij For this process expected value at time t is (x_0)_i + 2/3 * mu_i * t^1.5. Args: random_type: Random number type defined by tff.math.random.RandomType enum. seed: Random seed.
github-repos
def get_metrics_namespace(self) -> str:
    """Return the namespace used for RunInference metrics.

    Returns:
        str: the namespace for metrics collected by the RunInference
        transform.
    """
    namespace = 'BeamML_HuggingFaceModelHandler_KeyedTensor'
    return namespace
Returns: A namespace for metrics collected by the RunInference transform.
github-repos
def get_value(value_proto):
    """Return the Python object equivalent of the given value proto.

    Timestamps are converted to datetime and array values are converted
    recursively into lists. Unknown value types yield None.

    Args:
        value_proto: datastore.Value proto message.

    Returns:
        The corresponding Python object value, or None when the set field
        is not a recognized value type.
    """
    kind = value_proto.WhichOneof('value_type')
    if kind in __native_value_types:
        # Natively-typed fields can be returned directly.
        return getattr(value_proto, kind)
    if kind == 'timestamp_value':
        return from_timestamp(value_proto.timestamp_value)
    if kind == 'array_value':
        return [get_value(element)
                for element in value_proto.array_value.values]
    return None
Gets the python object equivalent for the given value proto. Args: value_proto: datastore.Value proto message. Returns: the corresponding python object value. timestamps are converted to datetime, and datastore.Value is returned for blob_key_value.
codesearchnet
def get_time_series(sdat, var, tstart, tend):
    """Extract or compute and rescale a time series.

    Args:
        sdat (stagpy.stagyydata.StagyyData): a StagyyData instance.
        var (str): time series name, a key of stagpy.phyvars.TIME or
            stagpy.phyvars.TIME_EXTRA.
        tstart (float): starting time of desired series, None for the
            beginning of available data.
        tend (float): ending time of desired series, None for the end of
            available data.

    Returns:
        tuple: (series, time, meta) where series is the requested time
        series, time the time at which it is evaluated (None when it is the
        one of time series output by StagYY), and meta a
        stagpy.phyvars.Vart instance holding metadata of the variable.

    Raises:
        UnknownTimeVarError: if var is in neither TIME nor TIME_EXTRA and
            not a column of the raw time series.
    """
    tseries = sdat.tseries_between(tstart, tend)
    if (var in tseries.columns):
        series = tseries[var]
        time = None
        if (var in phyvars.TIME):
            meta = phyvars.TIME[var]
        else:
            # Column present in the data but not described: fall back to a
            # dimensionless metadata entry.
            meta = phyvars.Vart(var, None, '1')
    elif (var in phyvars.TIME_EXTRA):
        # Extra variables are computed on demand by their description hook.
        meta = phyvars.TIME_EXTRA[var]
        (series, time) = meta.description(sdat, tstart, tend)
        meta = phyvars.Vart(misc.baredoc(meta.description), meta.kind, meta.dim)
    else:
        raise UnknownTimeVarError(var)
    (series, _) = sdat.scale(series, meta.dim)
    if (time is not None):
        (time, _) = sdat.scale(time, 's')
    return (series, time, meta)
Extract or compute and rescale a time series. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. var (str): time series name, a key of :data:`stagpy.phyvars.TIME` or :data:`stagpy.phyvars.TIME_EXTRA`. tstart (float): starting time of desired series. Set to None to start at the beginning of available data. tend (float): ending time of desired series. Set to None to stop at the end of available data. Returns: tuple of :class:`numpy.array` and :class:`stagpy.phyvars.Vart`: series, time, meta series is the requested time series, time the time at which it is evaluated (set to None if it is the one of time series output by StagYY), and meta is a :class:`stagpy.phyvars.Vart` instance holding metadata of the requested variable.
codesearchnet
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
    """Returns a list of input shard states for the input spec.

    Files in the zip are partitioned into contiguous runs of roughly equal
    total (uncompressed) size, one run per shard.

    Args:
      mapper_spec: The MapperSpec for this InputReader. Must contain
          'blob_key' parameter with one blob key.
      _reader: a callable that returns a file-like object for reading blobs.
          Used for dependency injection.

    Returns:
      A list of InputReaders spanning files within the zip.
    """
    params = _get_params(mapper_spec)
    blob_key = params[cls.BLOB_KEY_PARAM]
    zip_input = zipfile.ZipFile(_reader(blob_key))
    zfiles = zip_input.infolist()
    total_size = sum(x.file_size for x in zfiles)
    num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
    # Fix: divide the total size across the shards. Previously
    # size_per_shard was set to total_size, which made the first shard
    # absorb every file and ignored num_shards entirely.
    size_per_shard = total_size
    # shard_start_indexes[i] is the index of the first file of shard i;
    # a shard is closed once it has accumulated size_per_shard bytes.
    shard_start_indexes = [0]
    current_shard_size = 0
    for i, fileinfo in enumerate(zfiles):
        current_shard_size += fileinfo.file_size
        if current_shard_size >= size_per_shard:
            shard_start_indexes.append(i + 1)
            current_shard_size = 0
    # Ensure the trailing partial shard covers the remaining files.
    if shard_start_indexes[-1] != len(zfiles):
        shard_start_indexes.append(len(zfiles))
    return [cls(blob_key, start_index, end_index, _reader)
            for start_index, end_index in zip(shard_start_indexes,
                                              shard_start_indexes[1:])]
Returns a list of input shard states for the input spec. Args: mapper_spec: The MapperSpec for this InputReader. Must contain 'blob_key' parameter with one blob key. _reader: a callable that returns a file-like object for reading blobs. Used for dependency injection. Returns: A list of InputReaders spanning files within the zip.
juraj-google-style
def remove_temp_dirpath(dirpath, strategy):
    """Removes the temp path after writing is finished.

    Args:
        dirpath: Original dirpath that would be used without distribution.
        strategy: The tf.distribute strategy object currently used.
    """
    if strategy is None:
        # Infer the strategy from the current scope, if any.
        strategy = distribute_lib.get_strategy()
    if strategy is None:
        # Not in a distribution strategy: nothing to clean up.
        return
    extended = strategy.extended
    # Only non-checkpointing workers in multi-worker mode used a temp dir.
    if extended._in_multi_worker_mode() and not extended.should_checkpoint:
        file_io.delete_recursively(_get_temp_dir(dirpath, strategy))
Removes the temp path after writing is finished. Args: dirpath: Original dirpath that would be used without distribution. strategy: The tf.distribute strategy object currently used.
github-repos
def _GenerateUniqueRandomInputTensor(self, shape):
    """Generate a random input tensor with no repeated values.

    The integers 0..N-1 (N = product of shape) are shuffled and reshaped,
    so every element of the result is distinct.

    Args:
        shape: Shape of the tensor desired.

    Returns:
        A numpy ndarray with size = shape and dtype = numpy.float32.
    """
    total = 1
    for dim in shape:
        total *= dim
    values = np.arange(total, dtype=np.float32)
    self._PRNG.shuffle(values)
    return values.reshape(shape)
Generate 'unique' random input tensor. 'Unique' means there's no collision values in the tensor, all elements are different. This is done by generating sequence of integers with step of 1 and then randomly shuffle these integers. Args: shape: Shape of the tensor desired. Returns: A numpy ndarray with size = shape and dtype = numpy.float32.
github-repos
def add_other_location(self, location, exact=True, alterror=None, locations=None):
    """Add a location which is not a country or region.

    The value is parsed and compared to existing locations in HDX. If the
    location is already in this object's groups, it is ignored.

    Args:
        location (str): Location to add.
        exact (bool): True for exact matching or False to allow fuzzy
            matching. Defaults to True.
        alterror (Optional[str]): Alternative error message to the builtin
            one if location not found. Defaults to None.
        locations (Optional[List[str]]): Valid locations list. Defaults to
            list downloaded from HDX.

    Returns:
        bool: True if location added or False if location already present.

    Raises:
        HDXError: if the location cannot be found in HDX (or the exact
            match requirement is not met).
    """
    (hdx_code, match) = Locations.get_HDX_code_from_location_partial(location, locations=locations, configuration=self.configuration)
    # Reject when no code was found, or only a fuzzy match was found while
    # exact matching was requested.
    if ((hdx_code is None) or ((exact is True) and (match is False))):
        if (alterror is None):
            raise HDXError(('Location: %s - cannot find in HDX!' % location))
        else:
            raise HDXError(alterror)
    groups = self.data.get('groups', None)
    hdx_code = hdx_code.lower()
    if groups:
        if (hdx_code in [x['name'] for x in groups]):
            # Already present: nothing to do.
            return False
    else:
        groups = list()
    groups.append({'name': hdx_code})
    self.data['groups'] = groups
    return True
Add a location which is not a country or region. Value is parsed and compared to existing locations in HDX. If the location is already added, it is ignored. Args: location (str): Location to add exact (bool): True for exact matching or False to allow fuzzy matching. Defaults to True. alterror (Optional[str]): Alternative error message to builtin if location not found. Defaults to None. locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. Returns: bool: True if location added or False if location already present
codesearchnet
def feat(self, subset):
    """Obtain the feature class value(s) of the specified subset.

    If a feature occurs multiple times, the values are returned in a list.

    Example::

        sense = word.annotation(folia.Sense)
        synset = sense.feat('synset')

    Returns:
        str or list

    Raises:
        NoSuchAnnotation: if no feature with this subset is present.
    """
    found = None
    for element in self:
        if not (isinstance(element, Feature) and element.subset == subset):
            continue
        # NOTE: truthiness (not `is None`) mirrors the original behaviour:
        # a falsy first value gets overwritten rather than collected.
        if not found:
            found = element.cls
        elif isinstance(found, list):
            found.append(element.cls)
        else:
            found = [found, element.cls]
    if found is None:
        raise NoSuchAnnotation
    return found
Obtain the feature class value of the specific subset. If a feature occurs multiple times, the values will be returned in a list. Example:: sense = word.annotation(folia.Sense) synset = sense.feat('synset') Returns: str or list
codesearchnet
def execute_before(self, sensor_graph, scope_stack):
    """Execute statement before children are executed.

    Builds a TriggerScope for this statement's trigger condition and pushes
    it onto the scope stack. Single triggers are attached directly; paired
    triggers are combined ('and'/'or') into a dedicated unbuffered stream,
    with input streams first copied through an intermediate stream so the
    combining node only has realtime inputs.

    Args:
        sensor_graph (SensorGraph): The sensor graph that we are building
            or modifying.
        scope_stack (list(Scope)): A stack of nested scopes that may
            influence how this statement allocates clocks or other stream
            resources.
    """
    parent = scope_stack[-1]
    alloc = parent.allocator

    stream_a, trigger_a = self._convert_trigger(self.trigger_a, parent)
    if self.trigger_b is None:
        # Single condition: scope directly on trigger A.
        new_scope = TriggerScope(sensor_graph, scope_stack, (stream_a, trigger_a))
    else:
        stream_b, trigger_b = self._convert_trigger(self.trigger_b, parent)
        trigger_stream = alloc.allocate_stream(DataStream.UnbufferedType)

        if self.combiner == u'and':
            combiner = '&&'
        else:
            combiner = '||'

        # Input streams cannot feed the combining node directly; copy them
        # into an unbuffered stream first.
        if stream_a.input and not stream_b.input:
            unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
            sensor_graph.add_node(u"({} always) => {} using copy_latest_a".format(stream_a, unbuffered_stream))
            sensor_graph.add_node(u"({} {} {} {} {}) => {} using copy_latest_a".format(unbuffered_stream, trigger_a, combiner, stream_b, trigger_b, trigger_stream))
        elif stream_b.input and not stream_a.input:
            unbuffered_stream = alloc.allocate_stream(DataStream.UnbufferedType, attach=True)
            sensor_graph.add_node(u"({} always) => {} using copy_latest_a".format(stream_b, unbuffered_stream))
            sensor_graph.add_node(u"({} {} {} {} {}) => {} using copy_latest_a".format(stream_a, trigger_a, combiner, unbuffered_stream, trigger_b, trigger_stream))
        else:
            sensor_graph.add_node(u"({} {} {} {} {}) => {} using copy_latest_a".format(stream_a, trigger_a, combiner, stream_b, trigger_b, trigger_stream))

        # The combined stream fires unconditionally once written.
        new_scope = TriggerScope(sensor_graph, scope_stack, (trigger_stream, TrueTrigger()))

    scope_stack.append(new_scope)
Execute statement before children are executed. Args: sensor_graph (SensorGraph): The sensor graph that we are building or modifying scope_stack (list(Scope)): A stack of nested scopes that may influence how this statement allocates clocks or other stream resources.
juraj-google-style
def list_profile(self, args, screen_info=None):
    """Command handler for list_profile.

    Lists per-operation profile information, filtered by the parsed
    command-line flags (node name, file path, op type, time intervals,
    line-number range and device name) and sorted as requested.

    Args:
        args: Command-line arguments, excluding the command prefix, as a
            list of str.
        screen_info: Optional dict input containing screen information such
            as cols.

    Returns:
        Output text lines as a RichTextLines object.
    """
    screen_cols = 80
    if screen_info and 'cols' in screen_info:
        screen_cols = screen_info['cols']
    parsed = self._arg_parsers['list_profile'].parse_args(args)
    # Optional filters: None means "do not filter on this dimension".
    op_time_interval = command_parser.parse_time_interval(parsed.op_time) if parsed.op_time else None
    exec_time_interval = command_parser.parse_time_interval(parsed.execution_time) if parsed.execution_time else None
    node_name_regex = re.compile(parsed.node_name_filter) if parsed.node_name_filter else None
    file_path_regex = re.compile(parsed.file_path_filter) if parsed.file_path_filter else None
    op_type_regex = re.compile(parsed.op_type_filter) if parsed.op_type_filter else None
    output = debugger_cli_common.RichTextLines([''])
    device_name_regex = re.compile(parsed.device_name_filter) if parsed.device_name_filter else None
    data_generator = self._get_profile_data_generator()
    device_count = len(self._run_metadata.step_stats.dev_stats)
    for index in range(device_count):
        device_stats = self._run_metadata.step_stats.dev_stats[index]
        if not device_name_regex or device_name_regex.match(device_stats.device):
            # Filter then sort the profile data for this device.
            profile_data = [datum for datum in data_generator(device_stats) if _list_profile_filter(datum, node_name_regex, file_path_regex, op_type_regex, op_time_interval, exec_time_interval, min_lineno=parsed.min_lineno, max_lineno=parsed.max_lineno)]
            profile_data = sorted(profile_data, key=lambda datum: _list_profile_sort_key(datum, parsed.sort_by), reverse=parsed.reverse)
            output.extend(self._get_list_profile_lines(device_stats.device, index, device_count, profile_data, parsed.sort_by, parsed.reverse, parsed.time_unit, device_name_filter=parsed.device_name_filter, node_name_filter=parsed.node_name_filter, op_type_filter=parsed.op_type_filter, screen_cols=screen_cols))
    return output
Command handler for list_profile. List per-operation profile information. Args: args: Command-line arguments, excluding the command prefix, as a list of str. screen_info: Optional dict input containing screen information such as cols. Returns: Output text lines as a RichTextLines object.
github-repos
def _broadcast_and_set_attrs(self, local_dict):
    """Cast all inputs to common dimensions and set them as attributes.

    Inputs may be arrays of a common maximum length mixed with scalars or
    len-1 arrays; the latter are broadcast (filled) to the maximum length.
    Each entry of local_dict becomes an instance attribute of the same
    name. If every input is a scalar, all attributes become len-1 arrays
    and self.remove_axis is set so the extra axis can be stripped later.

    Args:
        local_dict (dict): name -> value mapping (typically locals() of the
            caller, minus non-parameter entries).

    Raises:
        ValueError: if array inputs have mismatched lengths (neither the
            maximum shape nor len-1/scalar).
    """
    del local_dict['self']
    self.remove_axis = False
    max_length = 0
    # First pass: find the maximum array length among the inputs.
    # TypeError means the value is a scalar (no len()); those are skipped.
    for key in local_dict:
        try:
            length = len(local_dict[key])
            if (length > max_length):
                max_length = length
        except TypeError:
            pass
    if (max_length == 0):
        # All scalars: promote each to a len-1 array and flag the axis
        # for later removal.
        self.remove_axis = True
        for key in local_dict:
            setattr(self, key, np.array([local_dict[key]]))
    else:
        # Second pass: validate that every array is either max-length
        # or len-1 before broadcasting.
        for key in local_dict:
            try:
                if ((len(local_dict[key]) < max_length) and (len(local_dict[key]) > 1)):
                    raise ValueError((('Casting parameters not correct.' + ' Need all at a maximum shape and the rest being') + 'len-1 arrays or scalars'))
            except TypeError:
                pass
        # Third pass: broadcast len-1 arrays and scalars to max_length.
        for key in local_dict:
            try:
                if (len(local_dict[key]) == max_length):
                    setattr(self, key, local_dict[key])
                elif (len(local_dict[key]) == 1):
                    setattr(self, key, np.full((max_length,), local_dict[key][0]))
            except TypeError:
                setattr(self, key, np.full((max_length,), local_dict[key]))
    return
Cast all inputs to correct dimensions. This method fixes inputs that have different lengths, namely when one input is an array and the others are scalars or len-1 arrays. Raises: ValueError: if casting parameters is not possible. All arrays must either have the maximum shape, or be len-1 arrays or scalars.
codesearchnet
def do_state(args):
    """Run the state list or state show command, printing to the console.

    For 'list', fetches state leaves (optionally under a subtree / at a
    given head block) and prints them in the requested format (default
    terminal table, csv, json or yaml). For 'show', fetches and prints a
    single leaf by address.

    Args:
        args: The parsed arguments sent to the command at runtime.

    Raises:
        CliException: if 'show' finds no data at the given address.
        AssertionError: if an output format has no handler.
    """
    rest_client = RestClient(args.url, args.user)
    if (args.subcommand == 'list'):
        response = rest_client.list_state(args.subtree, args.head)
        leaves = response['data']
        head = response['head']
        keys = ('address', 'size', 'data')
        headers = tuple((k.upper() for k in keys))

        def parse_leaf_row(leaf, decode=True):
            # Leaf data arrives base64-encoded; size is the decoded length.
            decoded = b64decode(leaf['data'])
            return (leaf['address'], len(decoded), (str(decoded) if decode else leaf['data']))
        if (args.format == 'default'):
            fmt.print_terminal_table(headers, leaves, parse_leaf_row)
            print('HEAD BLOCK: "{}"'.format(head))
        elif (args.format == 'csv'):
            fmt.print_csv(headers, leaves, parse_leaf_row)
            print('(data for head block: "{}")'.format(head))
        elif ((args.format == 'json') or (args.format == 'yaml')):
            # Structured formats keep the raw (still-encoded) data field.
            state_data = {'head': head, 'data': [{k: d for (k, d) in zip(keys, parse_leaf_row(l, False))} for l in leaves]}
            if (args.format == 'yaml'):
                fmt.print_yaml(state_data)
            elif (args.format == 'json'):
                fmt.print_json(state_data)
            else:
                raise AssertionError('Missing handler: {}'.format(args.format))
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
    if (args.subcommand == 'show'):
        output = rest_client.get_leaf(args.address, args.head)
        if (output is not None):
            print('DATA: "{}"'.format(b64decode(output['data'])))
            print('HEAD: "{}"'.format(output['head']))
        else:
            raise CliException('No data available at {}'.format(args.address))
Runs the batch list or batch show command, printing output to the console Args: args: The parsed arguments sent to the command at runtime
codesearchnet