code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def most_recent_n(self, n):
    """Look up the n most recent commands.

    Args:
        n: Number of most recent commands to look up.

    Returns:
        A list of the n most recent commands (all available commands if n
        exceeds the history size) in chronological order; an empty list
        when n <= 0.
    """
    # Guard n <= 0: self._commands[-0:] would return the WHOLE history,
    # because -0 == 0, which contradicts "n most recent".
    if n <= 0:
        return []
    return self._commands[-n:]
Look up the n most recent commands. Args: n: Number of most recent commands to look up. Returns: A list of n most recent commands, or all available most recent commands, if n exceeds size of the command history, in chronological order.
github-repos
def ResetSection(self, directive):
    """Reset section checking for a preprocessor directive.

    Args:
        directive: preprocessor directive (e.g. "if", "else").
    """
    # Drop back to the initial include-section state.
    self._section = self._INITIAL_SECTION
    self._last_header = ''
    if directive in {'if', 'ifdef', 'ifndef'}:
        # A new conditional branch gets a fresh include list.
        self.include_list.append([])
    elif directive in {'else', 'elif'}:
        # An alternative branch restarts the current include list.
        self.include_list[-1] = []
Reset section checking for preprocessor directive. Args: directive: preprocessor directive (e.g. "if", "else").
juraj-google-style
def get_untranscribed_prefixes_from_file(target_directory: Path) -> List[str]:
    """Fetch untranscribed prefixes listed in "untranscribed_prefixes.txt".

    The file, if placed in the target directory, names prefixes which do
    not have an associated transcription file.

    Args:
        target_directory: Directory that may contain the prefix file.

    Returns:
        A list of untranscribed prefixes, or an empty list if the file
        does not exist.
    """
    prefix_file = target_directory / 'untranscribed_prefixes.txt'
    if not prefix_file.exists():
        # No file means there are no known untranscribed prefixes.
        # (Original had a dead `else: pass` before this return.)
        return []
    with prefix_file.open() as f:
        return [line.strip() for line in f]
The file "untranscribed_prefixes.txt" will specify prefixes which do not have an associated transcription file if placed in the target directory. This will fetch those prefixes from that file and will return an empty list if that file does not exist. See find_untranscribed_wavs function for finding untranscribed prefixes in an experiment directory. Returns: A list of all untranscribed prefixes as specified in the file
codesearchnet
def tokenize(template, def_ldel='{{', def_rdel='}}'):
    """Tokenize a mustache template.

    Tokenizes a mustache template in a generator fashion, using file-like
    objects.  It also accepts a string containing the template.

    Arguments:
        template -- a file-like object, or a string of a mustache template
        def_ldel -- The default left delimiter ("{{" by default)
        def_rdel -- The default right delimiter ("}}" by default)

    Returns:
        A generator of mustache tags in the form of a tuple
        (tag_type, tag_key), where tag_type is one of: literal, section,
        inverted section, end, partial, no escape; and tag_key is either
        the key or, for a literal tag, the literal itself.
    """
    global _CURRENT_LINE, _LAST_TAG_LINE
    _CURRENT_LINE = 1
    _LAST_TAG_LINE = None

    # Accept file-like objects by reading them in up front.
    try:
        template = template.read()
    except AttributeError:
        pass

    standalone = True
    section_stack = []
    left_delim, right_delim = def_ldel, def_rdel

    while template:
        literal, template = grab_literal(template, left_delim)

        # Nothing left after the literal: emit it and stop.
        if not template:
            yield ('literal', literal)
            break

        standalone = l_sa_check(template, literal, standalone)

        tag, template = parse_tag(template, left_delim, right_delim)
        tag_type, tag_key = tag

        if tag_type == 'set delimiter':
            # "{{=<% %>=}}" style delimiter change.
            delims = tag_key.strip().split(' ')
            left_delim, right_delim = delims[0], delims[-1]
        elif tag_type in ('section', 'inverted section'):
            section_stack.append(tag_key)
            _LAST_TAG_LINE = _CURRENT_LINE
        elif tag_type == 'end':
            try:
                last_section = section_stack.pop()
            except IndexError:
                raise ChevronError('Trying to close tag "{0}"\nLooks like it was not opened.\nline {1}'.format(tag_key, (_CURRENT_LINE + 1)))
            if tag_key != last_section:
                raise ChevronError('Trying to close tag "{0}"\nlast open tag is "{1}"\nline {2}'.format(tag_key, last_section, (_CURRENT_LINE + 1)))

        standalone = r_sa_check(template, tag_type, standalone)
        if standalone:
            # Strip the rest of the standalone line.
            template = template.split('\n', 1)[-1]
            if tag_type != 'partial':
                literal = literal.rstrip(' ')

        if literal != '':
            yield ('literal', literal)
        if tag_type not in ('comment', 'set delimiter?'):
            yield (tag_type, tag_key)

    if section_stack:
        raise ChevronError('Unexpected EOF\nthe tag "{0}" was never closed\nwas opened at line {1}'.format(section_stack[-1], _LAST_TAG_LINE))
Tokenize a mustache template Tokenizes a mustache template in a generator fashion, using file-like objects. It also accepts a string containing the template. Arguments: template -- a file-like object, or a string of a mustache template def_ldel -- The default left delimiter ("{{" by default, as in spec compliant mustache) def_rdel -- The default right delimiter ("}}" by default, as in spec compliant mustache) Returns: A generator of mustache tags in the form of a tuple -- (tag_type, tag_key) Where tag_type is one of: * literal * section * inverted section * end * partial * no escape And tag_key is either the key or in the case of a literal tag, the literal itself.
codesearchnet
def get_tag(self, tag_name, **kwargs):
    """Get a tag by name.

    Args:
        tag_name (string): name of tag to get

    Returns:
        dictionary of the response
    """
    endpoint = self._TAG_ENDPOINT_SUFFIX
    return self._get_object_by_name(endpoint, tag_name, **kwargs)
get a tag by name Args: tag_name (string): name of tag to get Returns: dictionary of the response
juraj-google-style
def update(self, roomId, title=None, **request_parameters):
    """Update details for a room, by ID.

    Args:
        roomId(basestring): The room ID.
        title(basestring): A user-friendly name for the room.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Room: A Room object with the updated Webex Teams room details.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    # NOTE(review): the original validated roomId twice in a row; a single
    # check with may_be_none=False subsumes the weaker second check.
    check_type(roomId, basestring, may_be_none=False)

    put_data = dict_from_items_with_values(request_parameters, title=title)

    json_data = self._session.put((API_ENDPOINT + '/') + roomId, json=put_data)
    return self._object_factory(OBJECT_TYPE, json_data)
Update details for a room, by ID. Args: roomId(basestring): The room ID. title(basestring): A user-friendly name for the room. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: Room: A Room object with the updated Webex Teams room details. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
def get_op_traceback(self, op_name):
    """Get the traceback of an op in the latest version of the TF graph.

    Args:
        op_name: Name of the op.

    Returns:
        Creation traceback of the op, in the form of a list of 2-tuples:
        (file_path, lineno).

    Raises:
        ValueError: If the op with the given name cannot be found in the
            latest version of the graph, or if no graph traceback has been
            received yet.
    """
    if not self._graph_traceback:
        raise ValueError('No graph traceback has been received yet.')
    # Linear scan over the logged ops looking for a name match.
    for entry in self._graph_traceback.log_entries:
        if entry.name == op_name:
            return self._code_def_to_traceback_list(entry.code_def)
    raise ValueError('No op named "%s" can be found in the graph of the latest version  (%d).' % (op_name, self._graph_version))
Get the traceback of an op in the latest version of the TF graph. Args: op_name: Name of the op. Returns: Creation traceback of the op, in the form of a list of 2-tuples: (file_path, lineno) Raises: ValueError: If the op with the given name cannot be found in the latest version of the graph that this SourceManager instance has received, or if this SourceManager instance has not received any graph traceback yet.
juraj-google-style
def before_run(self, run_context):
    """Called right before a session is run.

    Args:
        run_context: A session_run_hook.SessionRunContext.  Encapsulates
            information on the run.

    Returns:
        A session_run_hook.SessionRunArgs object.
    """
    # Lazily create the gRPC debug wrapper session on first use.
    if not self._grpc_debug_wrapper_session:
        self._grpc_debug_wrapper_session = grpc_wrapper.GrpcDebugWrapperSession(
            run_context.session,
            self._grpc_debug_server_addresses,
            watch_fn=self._watch_fn,
            thread_name_filter=self._thread_name_filter)

    fetches = run_context.original_args.fetches
    feed_dict = run_context.original_args.feed_dict
    watch_options = self._watch_fn(fetches, feed_dict)

    run_options = config_pb2.RunOptions()
    debug_urls = self._grpc_debug_wrapper_session.prepare_run_debug_urls(
        fetches, feed_dict)
    debug_utils.watch_graph(
        run_options,
        run_context.session.graph,
        debug_urls=debug_urls,
        debug_ops=watch_options.debug_ops,
        node_name_regex_allowlist=watch_options.node_name_regex_allowlist,
        op_type_regex_allowlist=watch_options.op_type_regex_allowlist,
        tensor_dtype_regex_allowlist=watch_options.tensor_dtype_regex_allowlist,
        tolerate_debug_op_creation_failures=(
            watch_options.tolerate_debug_op_creation_failures))

    return session_run_hook.SessionRunArgs(
        None, feed_dict=None, options=run_options)
Called right before a session is run. Args: run_context: A session_run_hook.SessionRunContext. Encapsulates information on the run. Returns: A session_run_hook.SessionRunArgs object.
github-repos
def __init__(self, env):
    """Initialize defaults for data we hold.

    Args:
        env: dictionary of environment variables (typically os.environ)
    """
    # Environment may override the default configuration file location.
    self.config_file = env.get('NSSCACHE_CONFIG', getattr(self, 'NSSCACHE_CONFIG', None)) \
        if 'NSSCACHE_CONFIG' in env else self.NSSCACHE_CONFIG
    self.command = None
    self.help_command = None
    self.maps = []
    self.options = {}
    self.lockfile = None
    self.timestamp_dir = None
    self.log = logging.getLogger(__name__)
Initialize defaults for data we hold. Args: env: dictionary of environment variables (typically os.environ)
github-repos
def save(self, path, verbose=False): path = os.path.realpath(path) if os.path.exists(path): if self.load_path and self.load_path == path: if verbose: print "saving over previous suite..." for context_name in self.context_names: self.context(context_name) shutil.rmtree(path) else: raise SuiteError("Cannot save, path exists: %r" % path) contexts_path = os.path.join(path, "contexts") os.makedirs(contexts_path) data = self.to_dict() filepath = os.path.join(path, "suite.yaml") with open(filepath, "w") as f: f.write(dump_yaml(data)) for context_name in self.context_names: context = self.context(context_name) context._set_parent_suite(path, context_name) filepath = self._context_path(context_name, path) if verbose: print "writing %r..." % filepath context.save(filepath) tools_path = os.path.join(path, "bin") os.makedirs(tools_path) if verbose: print "creating alias wrappers in %r..." % tools_path tools = self.get_tools() for tool_alias, d in tools.iteritems(): tool_name = d["tool_name"] context_name = d["context_name"] data = self._context(context_name) prefix_char = data.get("prefix_char") if verbose: print ("creating %r -> %r (%s context)..." % (tool_alias, tool_name, context_name)) filepath = os.path.join(tools_path, tool_alias) create_forwarding_script(filepath, module="suite", func_name="_FWD__invoke_suite_tool_alias", context_name=context_name, tool_name=tool_name, prefix_char=prefix_char)
Save the suite to disk. Args: path (str): Path to save the suite to. If a suite is already saved at `path`, then it will be overwritten. Otherwise, if `path` exists, an error is raised.
juraj-google-style
def dead_code_elimination(node):
    """Perform a simple form of dead code elimination on a Python AST.

    Performs reaching-definitions analysis on all function definitions,
    then removes definitions of variables that are not used elsewhere.
    Push and pop statements are considered: if a pop statement is removed,
    its accompanying push statement is removed too.  Note that this
    *requires dead code elimination to be performed on the primal and
    adjoint simultaneously*.

    Args:
        node: The AST to optimize.

    Returns:
        The optimized AST.
    """
    # Collect unused definitions, skipping function arguments and for-loop
    # targets which cannot be removed safely.
    to_remove = set()
    for def_ in annotate.unused(node):
        if not isinstance(def_[1], (gast.arguments, gast.For)):
            to_remove.add(def_[1])
    # When a pop is removed, also schedule its accompanying push.
    for candidate in list(to_remove):
        for descendant in gast.walk(candidate):
            if anno.getanno(descendant, 'push', False):
                to_remove.add(anno.getanno(descendant, 'push'))
    transformers.Remove(to_remove).visit(node)
    anno.clearanno(node)
    return node
Perform a simple form of dead code elimination on a Python AST. This method performs reaching definitions analysis on all function definitions. It then looks for the definition of variables that are not used elsewhere and removes those definitions. This function takes into consideration push and pop statements; if a pop statement is removed, it will also try to remove the accompanying push statement. Note that this *requires dead code elimination to be performed on the primal and adjoint simultaneously*. Args: node: The AST to optimize. Returns: The optimized AST.
codesearchnet
def formula_balance(model):
    """Calculate formula compositions for each reaction.

    Calls :func:`reaction_formula` for each reaction and yields
    (reaction, result) pairs, where result has two formula compositions or
    `None`.

    Args:
        model: :class:`psamm.datasource.native.NativeModel`.
    """
    # Build a lookup of parsed, flattened formulas keyed by compound id.
    compound_formula = {}
    for compound in model.compounds:
        if compound.formula is None:
            continue
        try:
            compound_formula[compound.id] = Formula.parse(
                compound.formula).flattened()
        except ParseError as e:
            msg = 'Error parsing formula for compound {}:\n{}\n{}'.format(
                compound.id, e, compound.formula)
            if e.indicator is not None:
                msg += '\n{}'.format(e.indicator)
            logger.warning(msg)

    for reaction in model.reactions:
        yield reaction, reaction_formula(reaction.equation, compound_formula)
Calculate formula compositions for each reaction. Call :func:`reaction_formula` for each reaction. Yield (reaction, result) pairs, where result has two formula compositions or `None`. Args: model: :class:`psamm.datasource.native.NativeModel`.
juraj-google-style
def _check_domain(tokens) -> bool:
    """Check if the email provider should be filtered.

    Args:
        tokens: token sequence; each token exposes ``.text`` and ``.i``
            (assumes ``.i`` indexes into ``tokens`` -- TODO confirm with
            caller).

    Returns:
        Bool: True to keep the email, False to filter it out (no "@" found
        or the provider token is in FILTER_PROVIDER).
    """
    at_index = None
    for token in tokens:
        if token.text == "@":
            at_index = token.i
            break
    # BUG FIX: the original used `if not idx`, which also treated a match
    # at token index 0 as "not found" because `not 0` is True.
    if at_index is None:
        return False
    return tokens[at_index + 1].text not in FILTER_PROVIDER
Check if the email provider should be filtered Args: tokens: Returns: Bool
juraj-google-style
def run(self, args):
    """Runs the firmware command.

    Args:
        self (FirmwareCommand): the ``FirmwareCommand`` instance
        args (Namespace): arguments to parse

    Returns:
        ``None``
    """
    jlink = self.create_jlink(args)
    if args.downgrade:
        if not jlink.firmware_newer():
            print('DLL firmware is not older than J-Link firmware.')
        else:
            # Invalidate first so the DLL accepts the older firmware.
            jlink.invalidate_firmware()
            try:
                jlink.update_firmware()
            except pylink.JLinkException:
                # Updating firmware requires a reconnect.
                jlink = self.create_jlink(args)
            print('Firmware Downgraded: %s' % jlink.firmware_version)
    elif args.upgrade:
        if not jlink.firmware_outdated():
            print('DLL firmware is not newer than J-Link firmware.')
        else:
            try:
                jlink.update_firmware()
            except pylink.JLinkException:
                jlink = self.create_jlink(args)
            print('Firmware Updated: %s' % jlink.firmware_version)
    return None
Runs the firmware command. Args: self (FirmwareCommand): the ``FirmwareCommand`` instance args (Namespace): arguments to parse Returns: ``None``
juraj-google-style
def event(self, name, owner=None, **kwargs):
    """Create the Event TI object.

    Args:
        name: name of the event.
        owner: (optional) owner of the event.
        **kwargs: additional keyword arguments forwarded to ``Event``.

    Return:
        An ``Event`` instance bound to this instance's ``tcex``.
    """
    tcex = self.tcex
    return Event(tcex, name, owner=owner, **kwargs)
Create the Event TI object. Args: name: **kwargs: Return:
juraj-google-style
def get_column(column_name, node, context):
    """Get a column by name from the selectable.

    Args:
        column_name: str, name of the column to retrieve.
        node: SqlNode, the node the column is being retrieved for.
        context: CompilationContext, compilation specific metadata.

    Returns:
        column, the SQLAlchemy column if found.  Raises an AssertionError
        otherwise.
    """
    column = try_get_column(column_name, node, context)
    if column is not None:
        return column
    # Fail loudly with enough detail to debug the missing column.
    selectable = get_node_selectable(node, context)
    raise AssertionError(
        u'Column "{}" not found in selectable "{}". Columns present are {}. '
        u'Context is {}.'.format(column_name, selectable.original,
                                 [col.name for col in selectable.c], context))
Get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: column, the SQLAlchemy column if found. Raises an AssertionError otherwise.
codesearchnet
def get_shifted_center_blocks(x, indices):
    """Get right shifted blocks for masked local attention 2d.

    Args:
        x: A tensor with shape [batch, heads, height, width, depth]
        indices: The indices to gather blocks

    Returns:
        x_shifted: a tensor of extracted blocks, each block right shifted
        along length.
    """
    center_x = gather_blocks_2d(x, indices)

    def _shift_right(tensor):
        # Pad one step in front of the 4th dimension and drop the last
        # step, producing a right shift along that axis.
        padded = tf.pad(tensor, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])
        return padded[:, :, :, :-1, :]

    return _shift_right(center_x)
Get right shifted blocks for masked local attention 2d. Args: x: A tensor with shape [batch, heads, height, width, depth] indices: The indices to gather blocks Returns: x_shifted: a tensor of extracted blocks, each block right shifted along length.
juraj-google-style
def _get_id_token_user(token, issuers, audiences, allowed_client_ids, time_now, cache):
    """Get a User for the given id token, if the token is valid.

    Args:
        token: The id_token to check.
        issuers: dict of Issuers
        audiences: List of audiences that are acceptable, or a mapping of
            issuer key to such a list.
        allowed_client_ids: List of client IDs that are acceptable.
        time_now: The current time as a long (eg. long(time.time())).
        cache: Cache to use (eg. the memcache module).

    Returns:
        A User if the token is valid, None otherwise.
    """
    for issuer_key, issuer in issuers.items():
        issuer_cert_uri = convert_jwks_uri(issuer.jwks_uri)
        try:
            parsed_token = _verify_signed_jwt_with_certs(
                token, time_now, cache, cert_uri=issuer_cert_uri)
        except Exception:
            _logger.debug(
                'id_token verification failed for issuer %s', issuer_key,
                exc_info=True)
            continue

        issuer_values = _listlike_guard(issuer.issuer, 'issuer', log_warning=False)
        # BUG FIX: resolve this issuer's audiences into a LOCAL variable.
        # The original rebound `audiences` itself, so after the first
        # mapping lookup every later iteration saw the wrong audience list.
        if isinstance(audiences, _Mapping):
            issuer_audiences = audiences[issuer_key]
        else:
            issuer_audiences = audiences
        if _verify_parsed_token(
                parsed_token, issuer_values, issuer_audiences,
                allowed_client_ids,
                is_legacy_google_auth=(issuer.issuer == _ISSUERS)):
            email = parsed_token['email']
            return users.User(email)
Get a User for the given id token, if the token is valid. Args: token: The id_token to check. issuers: dict of Issuers audiences: List of audiences that are acceptable. allowed_client_ids: List of client IDs that are acceptable. time_now: The current time as a long (eg. long(time.time())). cache: Cache to use (eg. the memcache module). Returns: A User if the token is valid, None otherwise.
juraj-google-style
def create_asymmetric_key_pair(self, algorithm, length):
    """Create an asymmetric key pair.

    Args:
        algorithm(CryptographicAlgorithm): An enumeration specifying the
            algorithm for which the created keys will be compliant.
        length(int): The length of the keys to be created.  This value
            must be compliant with the constraints of the provided
            algorithm.

    Returns:
        dict: public key data (at least ``value`` and ``format`` fields).
        dict: private key data, identical in structure to the above.

    Raises:
        InvalidField: Raised when the algorithm is unsupported or the
            length is incompatible with the algorithm.
        CryptographicFailure: Raised when the key generation process fails.
    """
    # Membership test directly on the dict; `.keys()` was redundant.
    if algorithm not in self._asymmetric_key_algorithms:
        raise exceptions.InvalidField('The cryptographic algorithm ({0}) is not a supported asymmetric key algorithm.'.format(algorithm))
    engine_method = self._asymmetric_key_algorithms.get(algorithm)
    return engine_method(length)
Create an asymmetric key pair. Args: algorithm(CryptographicAlgorithm): An enumeration specifying the algorithm for which the created keys will be compliant. length(int): The length of the keys to be created. This value must be compliant with the constraints of the provided algorithm. Returns: dict: A dictionary containing the public key data, with at least the following key/value fields: * value - the bytes of the key * format - a KeyFormatType enumeration for the bytes format dict: A dictionary containing the private key data, identical in structure to the one above. Raises: InvalidField: Raised when the algorithm is unsupported or the length is incompatible with the algorithm. CryptographicFailure: Raised when the key generation process fails. Example: >>> engine = CryptographyEngine() >>> key = engine.create_asymmetric_key( ... CryptographicAlgorithm.RSA, 2048)
codesearchnet
def CreateStyleFromConfig(style_config):
    """Create a style dict from the given config.

    Arguments:
        style_config: either a style name or a file name.  The file is
            expected to contain settings.  It can have a special
            BASED_ON_STYLE setting naming the style which it derives from.
            If no such setting is found, it derives from the default style.
            When style_config is None, the _GLOBAL_STYLE_FACTORY config is
            created.

    Returns:
        A style dict.

    Raises:
        StyleConfigError: if an unknown style option was encountered.
    """
    if style_config is None:
        # No config given: return the current style unless it is one of
        # the predefined global styles, in which case rebuild the default.
        is_predefined = any(
            _style == style for style, _ in _DEFAULT_STYLE_TO_FACTORY)
        if not is_predefined:
            return _style
        return _GLOBAL_STYLE_FACTORY()

    if isinstance(style_config, dict):
        config = _CreateConfigParserFromConfigDict(style_config)
    elif isinstance(style_config, str):
        style_factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower())
        if style_factory is not None:
            return style_factory()
        if style_config.startswith('{'):
            # Inline "{key: value, ...}" style definition.
            config = _CreateConfigParserFromConfigString(style_config)
        else:
            config = _CreateConfigParserFromConfigFile(style_config)
    return _CreateStyleFromConfigParser(config)
Create a style dict from the given config. Arguments: style_config: either a style name or a file name. The file is expected to contain settings. It can have a special BASED_ON_STYLE setting naming the style which it derives from. If no such setting is found, it derives from the default style. When style_config is None, the _GLOBAL_STYLE_FACTORY config is created. Returns: A style dict. Raises: StyleConfigError: if an unknown style option was encountered.
github-repos
def mb45(msg):
    """Microburst.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        int: Microburst level.  0=NIL, 1=Light, 2=Moderate, 3=Severe
    """
    bits = hex2bin(data(msg))
    # Status bit: '0' means the microburst field is not available.
    if bits[6] == '0':
        return None
    return bin2int(bits[7:9])
Microburst. Args: msg (String): 28 bytes hexadecimal message string Returns: int: Microburst level. 0=NIL, 1=Light, 2=Moderate, 3=Severe
juraj-google-style
def find_kv(pcoll, regex, keyGroup, valueGroup=0):
    """Returns the matches if a portion of the line matches the Regex.

    Returns the specified groups as the key and value pair.

    Args:
        pcoll: input PCollection of lines.
        regex: the regular expression string or (re.compile) pattern.
        keyGroup: The Regex group to use as the key.  Can be int or str.
        valueGroup: (optional) Regex group to use the value.  Can be int
            or str.  The default value "0" returns entire matched string.
    """
    regex = Regex._regex_compile(regex)

    def _process(element):
        # NOTE(review): finditer() returns an iterator, which is always
        # truthy, so the original `if matches:` guard was a dead check;
        # iterating directly is equivalent.
        for match in regex.finditer(element):
            yield (match.group(keyGroup), match.group(valueGroup))

    return pcoll | FlatMap(_process)
Returns the matches if a portion of the line matches the Regex. Returns the specified groups as the key and value pair. Args: regex: the regular expression string or (re.compile) pattern. keyGroup: The Regex group to use as the key. Can be int or str. valueGroup: (optional) Regex group to use the value. Can be int or str. The default value "0" returns entire matched string.
github-repos
def _frame_advance(self, action):
    """Advance a frame in the emulator with an action.

    Args:
        action (byte): the action to press on the joy-pad

    Returns:
        None
    """
    # Write the action into the first controller buffer, then step the
    # emulator one frame.
    self.controllers[0][:] = action
    _LIB.Step(self._env)
Advance a frame in the emulator with an action. Args: action (byte): the action to press on the joy-pad Returns: None
juraj-google-style
def _CreateClassTemplate(cls, data_type_definition):
    """Creates the class template.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Returns:
        str: class template.
    """
    type_name = data_type_definition.name

    type_description = data_type_definition.description or type_name
    # Strip trailing periods so the template controls its own punctuation.
    while type_description.endswith('.'):
        type_description = type_description[:-1]

    class_attributes_description = []
    init_arguments = []
    instance_attributes = []

    for member_definition in data_type_definition.members:
        attribute_name = member_definition.name

        description = member_definition.description or attribute_name
        while description.endswith('.'):
            description = description[:-1]

        member_data_type = getattr(member_definition, 'member_data_type', '')
        if isinstance(member_definition, data_types.MemberDataTypeDefinition):
            # Unwrap to the underlying data type definition.
            member_definition = member_definition.member_data_type_definition

        member_type_indicator = member_definition.TYPE_INDICATOR
        if member_type_indicator == definitions.TYPE_INDICATOR_SEQUENCE:
            element_type_indicator = member_definition.element_data_type
            member_type_indicator = 'tuple[{0:s}]'.format(element_type_indicator)
        else:
            member_type_indicator = cls._PYTHON_NATIVE_TYPES.get(
                member_type_indicator, member_data_type)

        class_attributes_description.append(
            ' {0:s} ({1:s}): {2:s}.'.format(
                attribute_name, member_type_indicator, description))
        init_arguments.append('{0:s}=None'.format(attribute_name))
        instance_attributes.append(
            ' self.{0:s} = {0:s}'.format(attribute_name))

    template_values = {
        'class_attributes_description': '\n'.join(
            sorted(class_attributes_description)),
        'init_arguments': ', '.join(init_arguments),
        'instance_attributes': '\n'.join(sorted(instance_attributes)),
        'type_description': type_description,
        'type_name': type_name}

    return cls._CLASS_TEMPLATE.format(**template_values)
Creates the class template. Args: data_type_definition (DataTypeDefinition): data type definition. Returns: str: class template.
codesearchnet
def matches(pcoll, regex, group=0):
    """Returns the matches (group 0 by default) if zero or more characters
    at the beginning of string match the regular expression.

    To match the entire string, add "$" sign at the end of regex
    expression.  Group can be integer value or a string value.

    Args:
        regex: the regular expression string or (re.compile) pattern.
        group: (optional) name/number of the group, it can be integer or a
            string value.  Defaults to 0, meaning the entire matched
            string will be returned.
    """
    pattern = Regex._regex_compile(regex)

    def _emit_match(element):
        matched = pattern.match(element)
        if matched:
            yield matched.group(group)

    return pcoll | FlatMap(_emit_match)
Returns the matches (group 0 by default) if zero or more characters at the beginning of string match the regular expression. To match the entire string, add "$" sign at the end of regex expression. Group can be integer value or a string value. Args: regex: the regular expression string or (re.compile) pattern. group: (optional) name/number of the group, it can be integer or a string value. Defaults to 0, meaning the entire matched string will be returned.
github-repos
def usufyToJsonExport(d, fPath):
    """Workaround to export to a json file, merging with any existing
    content.

    Args:
    -----
        d: Data to export (a list).
        fPath: File path for the output file.
    """
    oldData = []
    try:
        with open(fPath) as iF:
            oldText = iF.read()
        if oldText != '':
            oldData = json.loads(oldText)
    except (IOError, OSError, ValueError):
        # Best effort: a missing or malformed file just means there is no
        # previous data to merge.  (Original used a bare `except:` which
        # would also swallow e.g. KeyboardInterrupt.)
        pass
    jsonText = json.dumps(oldData + d, indent=2, sort_keys=True)
    with open(fPath, 'w') as oF:
        oF.write(jsonText)
Workaround to export to a json file. Args: ----- d: Data to export. fPath: File path for the output file.
codesearchnet
def _make_nonce(self):
    """Generate a unique ID for the request, 25 chars in length.

    Returns:
        - str: Cryptographic nonce
    """
    # NOTE(review): random.choice is not cryptographically secure; the
    # `secrets` module would be preferable if this nonce guards anything
    # security-sensitive -- confirm before changing.
    alphabet = string.digits + string.ascii_letters
    nonce = ''.join(random.choice(alphabet) for _ in range(25))
    if self._logging:
        utils.log('nonce created: %s' % nonce)
    return nonce
Generate a unique ID for the request, 25 chars in length Returns: - str: Cryptographic nonce
codesearchnet
def source_file_list(self):
    """Get a list of source files known to the debugger data reader.

    Returns:
        A tuple of `(host_name, file_path)` tuples.
    """
    # Iterating the dict directly yields its keys.
    return tuple(self._host_name_file_path_to_offset)
Get a list of source files known to the debugger data reader. Returns: A tuple of `(host_name, file_path)` tuples.
github-repos
def execute(source, optimize=True, output=sys.stdout, input=sys.stdin, steps=-1):
    """Compiles and runs program, returning the machine used to execute
    the code.

    Args:
        optimize: Whether to optimize the code after parsing it.
        output: Stream which program can write output to.
        input: Stream which program can read input from.
        steps: An optional maximum number of instructions to execute on
            the virtual machine.  Set to -1 for no limit.

    Returns:
        A Machine instance.
    """
    from crianza import compiler
    parsed = parser.parse(source)
    code = compiler.compile(parsed, optimize=optimize)
    vm = Machine(code, output=output, input=input)
    return vm.run(steps)
Compiles and runs program, returning the machine used to execute the code. Args: optimize: Whether to optimize the code after parsing it. output: Stream which program can write output to. input: Stream which program can read input from. steps: An optional maximum number of instructions to execute on the virtual machine. Set to -1 for no limit. Returns: A Machine instance.
juraj-google-style
def ascii_tree(self, no_types: bool = False, val_count: bool = False) -> str:
    """Generate ASCII art representation of the schema tree.

    Args:
        no_types: Suppress output of data type info.
        val_count: Show accumulated validation counts.

    Returns:
        String with the ASCII tree.
    """
    root = self.schema
    return root._ascii_tree("", no_types, val_count)
Generate ASCII art representation of the schema tree. Args: no_types: Suppress output of data type info. val_count: Show accumulated validation counts. Returns: String with the ASCII tree.
juraj-google-style
def _build(self, inputs):
    """Dynamic unroll across input objects.

    Args:
        inputs: tensor (batch x num_objects x feature).  Objects to sort.

    Returns:
        Tensor (batch x num_objects); logits indicating the reference
        objects.
    """
    batch_size = inputs.get_shape()[0]
    initial_state = self._core.initial_state(batch_size, trainable=False)
    output_sequence, _ = tf.nn.dynamic_rnn(
        cell=self._core,
        inputs=inputs,
        time_major=False,
        initial_state=initial_state)
    # Keep only the final timestep's output.
    last_output = snt.BatchFlatten()(output_sequence[:, -1, :])
    features = self._final_mlp(last_output)
    return snt.Linear(self._target_size)(features)
Dynamic unroll across input objects. Args: inputs: tensor (batch x num_objects x feature). Objects to sort. Returns: Tensor (batch x num_objects); logits indicating the reference objects.
juraj-google-style
def group_molecules(self, mol_list):
    """Group molecules by structural equality.

    Args:
        mol_list: List of OpenBabel OBMol or pymatgen objects

    Returns:
        A list of lists of matched molecules

    Assumption: if s1=s2 and s2=s3, then s1=s3.  This may not be true for
    small tolerances.
    """
    # First bucket by a cheap hash; only molecules sharing a hash can
    # possibly match.
    mol_hash = [(i, self._mapper.get_molecule_hash(m))
                for i, m in enumerate(mol_list)]
    mol_hash.sort(key=lambda x: x[1])
    raw_groups = tuple(
        tuple(m[0] for m in g)
        for k, g in itertools.groupby(mol_hash, key=lambda x: x[1]))

    group_indices = []
    for rg in raw_groups:
        # Pairwise fit test within each hash bucket.
        mol_eq_test = [(p[0], p[1], self.fit(mol_list[p[0]], mol_list[p[1]]))
                       for p in itertools.combinations(sorted(rg), 2)]
        mol_eq = set((p[0], p[1]) for p in mol_eq_test if p[2])
        not_alone_mols = set(itertools.chain.from_iterable(mol_eq))
        alone_mols = set(rg) - not_alone_mols
        group_indices.extend([m] for m in alone_mols)
        # Greedily grow connected components of mutually matching molecules.
        while len(not_alone_mols) > 0:
            current_group = {not_alone_mols.pop()}
            while len(not_alone_mols) > 0:
                candidate_pairs = set(
                    tuple(sorted(p))
                    for p in itertools.product(current_group, not_alone_mols))
                mutual_pairs = candidate_pairs & mol_eq
                if len(mutual_pairs) == 0:
                    break
                mutual_mols = set(itertools.chain.from_iterable(mutual_pairs))
                current_group |= mutual_mols
                not_alone_mols -= mutual_mols
            group_indices.append(sorted(current_group))

    group_indices.sort(key=lambda x: (len(x), -x[0]), reverse=True)
    return [[mol_list[i] for i in g] for g in group_indices]
Group molecules by structural equality. Args: mol_list: List of OpenBabel OBMol or pymatgen objects Returns: A list of lists of matched molecules Assumption: if s1=s2 and s2=s3, then s1=s3 This may not be true for small tolerances.
juraj-google-style
def docx_table_from_xml_node(table_node: ElementTree.Element,
                             level: int,
                             config: TextProcessingConfig) -> str:
    """Converts an XML node representing a DOCX table into a textual
    representation.

    Args:
        table_node: XML node
        level: current level in XML hierarchy (used for recursion; start
            level is 0)
        config: :class:`TextProcessingConfig` control object

    Returns:
        string representation
    """
    table = CustomDocxTable()
    for row_node in table_node:
        if row_node.tag != DOCX_TABLE_ROW:
            continue
        table.new_row()
        for cell_node in row_node:
            if cell_node.tag != DOCX_TABLE_CELL:
                continue
            table.new_cell()
            for para_node in cell_node:
                paragraph = docx_text_from_xml_node(para_node, level, config)
                if paragraph:
                    table.add_paragraph(paragraph)
    return docx_process_table(table, config)
Converts an XML node representing a DOCX table into a textual representation. Args: table_node: XML node level: current level in XML hierarchy (used for recursion; start level is 0) config: :class:`TextProcessingConfig` control object Returns: string representation
codesearchnet
def section(self, section_title):
    """Plain text section content.

    Args:
        section_title (str): Name of the section to pull

    Returns:
        str: The content of the section

    Note:
        Returns **None** if section title is not found; only text between
        title and next section or sub-section title is returned.

    Note:
        Side effect is to also pull the content which can be slow.  This
        is a parsing operation and not part of the standard API.
    """
    heading = '== {0} =='.format(section_title)
    try:
        content = self.content
        start = content.index(heading) + len(heading)
        # Skip over any extra '=' characters (sub-section markers).
        while True:
            if content[start + 1] == '=':
                start += 1
            else:
                break
    except ValueError:
        # Section title not present in the content.
        return None
    except IndexError:
        # Heading sits at the very end of the content.
        pass
    try:
        end = self.content.index('==', start)
    except ValueError:
        end = len(self.content)
    return self.content[start:end].lstrip('=').strip()
Plain text section content Args: section_title (str): Name of the section to pull Returns: str: The content of the section Note: Returns **None** if section title is not found; only text \ between title and next section or sub-section title is returned Note: Side effect is to also pull the content which can be slow Note: This is a parsing operation and not part of the standard API
codesearchnet
def atomic_write_string_to_file(filename, contents, overwrite=True):
    """Writes to `filename` atomically.

    This means that when `filename` appears in the filesystem, it will
    contain all of `contents`.  With write_string_to_file, it is possible
    for the file to appear in the filesystem with `contents` only
    partially written.

    Accomplished by writing to a temp file and then renaming it.

    Args:
        filename: string, pathname for a file
        contents: string, contents that need to be written to the file
        overwrite: boolean, if false it's an error for `filename` to be
            occupied by an existing file.
    """
    if not has_atomic_move(filename):
        # Filesystem cannot rename atomically: fall back to a plain write.
        write_string_to_file(filename, contents)
        return
    temp_pathname = filename + '.tmp' + uuid.uuid4().hex
    write_string_to_file(temp_pathname, contents)
    try:
        rename(temp_pathname, filename, overwrite)
    except errors.OpError:
        # Clean up the temp file before propagating the failure.
        delete_file(temp_pathname)
        raise
Writes to `filename` atomically. This means that when `filename` appears in the filesystem, it will contain all of `contents`. With write_string_to_file, it is possible for the file to appear in the filesystem with `contents` only partially written. Accomplished by writing to a temp file and then renaming it. Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for `filename` to be occupied by an existing file.
github-repos
def baseline_optimizer_arguments(self, states, internals, reward):
    """Returns the baseline optimizer arguments including the time, the
    list of variables to optimize, and various functions which the
    optimizer might require to perform an update step.

    Args:
        states: Dict of state tensors.
        internals: List of prior internal state tensors.
        reward: Reward tensor.

    Returns:
        Baseline optimizer arguments as dict.
    """
    inner_arguments = dict(
        states=states,
        internals=internals,
        reward=reward,
        update=tf.constant(value=True),
    )
    arguments = dict(
        time=self.global_timestep,
        variables=self.baseline.get_variables(),
        arguments=inner_arguments,
        fn_reference=self.baseline.reference,
        fn_loss=self.fn_baseline_loss,
    )
    # In distributed mode also pass the global model's baseline variables.
    if self.global_model is not None:
        arguments['global_variables'] = self.global_model.baseline.get_variables()
    return arguments
Returns the baseline optimizer arguments including the time, the list of variables to optimize, and various functions which the optimizer might require to perform an update step. Args: states: Dict of state tensors. internals: List of prior internal state tensors. reward: Reward tensor. Returns: Baseline optimizer arguments as dict.
juraj-google-style
def resetAndRejoin(self, timeout): print '%s call resetAndRejoin' % self.port print timeout try: self._sendline('reset') self.isPowerDown = True time.sleep(timeout) if self.deviceRole == Thread_Device_Role.SED: self.setPollingRate(self.sedPollingRate) self.__startOpenThread() time.sleep(3) if self.__sendCommand('state')[0] == 'disabled': print '[FAIL] reset and rejoin' return False return True except Exception, e: ModuleHelper.WriteIntoDebugLogger("resetAndRejoin() Error: " + str(e))
reset and join back Thread Network with a given timeout delay Args: timeout: a timeout interval before rejoin Thread Network Returns: True: successful to reset and rejoin Thread Network False: fail to reset and rejoin the Thread Network
juraj-google-style
def get_torque_state(self):
    """get the torque state of motor

    Returns:
        bool: True if torque is enabled, else False

    Raises:
        HerkulexError: could not communicate with motors
    """
    # RAM read request for the torque-control register.
    packet = [9, self.servoid, RAM_READ_REQ, TORQUE_CONTROL_RAM, BYTE2]
    send_data(packet)
    try:
        rxdata = SERPORT.read(13)
        # Byte 9 of the response carries the torque-control value.
        return bool(ord(rxdata[9]))
    except HerkulexError:
        raise HerkulexError('could not communicate with motors')
get the torque state of motor Returns: bool: True if torque is enabled, else False
codesearchnet
def teredo(self):
    """Tuple of embedded teredo IPs.

    Returns:
        Tuple of the (server, client) IPs or None if the address doesn't
        appear to be a teredo address (doesn't start with 2001::/32)
    """
    # 536936448 == 0x20010000, i.e. the top 32 bits of 2001::/32.
    if (self._ip >> 96) != 536936448:
        return None
    server = IPv4Address((self._ip >> 64) & 4294967295)
    client = IPv4Address(~self._ip & 4294967295)
    return (server, client)
Tuple of embedded teredo IPs. Returns: Tuple of the (server, client) IPs or None if the address doesn't appear to be a teredo address (doesn't start with 2001::/32)
codesearchnet
def set_presence(self, state=None, status=None, priority=None):
    """Change the presence broadcast by the client.

    If the client is currently connected, the new presence is broadcast
    immediately.

    Args:
        state(aioxmpp.PresenceState, optional): New presence state to
            broadcast (Default value = None)
        status(dict or str, optional): New status information to broadcast
            (Default value = None)
        priority (int, optional): New priority for the resource (Default
            value = None)
    """
    # Fall back to the currently stored values for any omitted argument.
    new_state = self.state if state is None else state
    new_status = self.status if status is None else status
    new_priority = self.priority if priority is None else priority
    self.presenceserver.set_presence(new_state, new_status, new_priority)
Change the presence broadcast by the client. If the client is currently connected, the new presence is broadcast immediately. Args: state(aioxmpp.PresenceState, optional): New presence state to broadcast (Default value = None) status(dict or str, optional): New status information to broadcast (Default value = None) priority (int, optional): New priority for the resource (Default value = None)
juraj-google-style
def _mac(model, obs, h):
    """Forward pass of the multi-agent controller.

    Arguments:
        model: TorchModel class
        obs: Tensor of shape [B, n_agents, obs_size]
        h: List of tensors of shape [B, n_agents, h_size]

    Returns:
        q_vals: Tensor of shape [B, n_agents, n_actions]
        h: Tensor of shape [B, n_agents, h_size]
    """
    B, n_agents = obs.size(0), obs.size(1)
    # Fold the agent dimension into the batch dimension for the model call.
    obs_flat = obs.reshape([B * n_agents, -1])
    h_flat = [state.reshape([B * n_agents, -1]) for state in h]
    q_flat, _, _, h_out = model.forward({'obs': obs_flat}, h_flat)
    # Restore the [B, n_agents, ...] layout.
    q_vals = q_flat.reshape([B, n_agents, -1])
    h_restored = [state.reshape([B, n_agents, -1]) for state in h_out]
    return (q_vals, h_restored)
Forward pass of the multi-agent controller. Arguments: model: TorchModel class obs: Tensor of shape [B, n_agents, obs_size] h: List of tensors of shape [B, n_agents, h_size] Returns: q_vals: Tensor of shape [B, n_agents, n_actions] h: Tensor of shape [B, n_agents, h_size]
codesearchnet
def authenticate(self, code: str) -> 'Preston':
    """Authenticates using the code from the EVE SSO.

    A new Preston object is returned; this object is not modified.  The
    intended usage is:

        auth = preston.authenticate('some_code_here')

    Args:
        code: SSO code

    Returns:
        new Preston, authenticated
    """
    headers = self._get_authorization_headers()
    data = {'grant_type': 'authorization_code', 'code': code}
    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
    if r.status_code != 200:
        # Typo fixed in the message ("repsonse" -> "response").
        raise Exception(f'Could not authenticate, got response code {r.status_code}')
    new_kwargs = dict(self._kwargs)
    response_data = r.json()
    new_kwargs['access_token'] = response_data['access_token']
    new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
    new_kwargs['refresh_token'] = response_data['refresh_token']
    return Preston(**new_kwargs)
Authenticates using the code from the EVE SSO. A new Preston object is returned; this object is not modified. The intended usage is: auth = preston.authenticate('some_code_here') Args: code: SSO code Returns: new Preston, authenticated
codesearchnet
def create(self, path, mime_type='application/octet-stream',
           compression_type=CompressionTypes.AUTO) -> BinaryIO:
    """Returns a write channel for the given file path.

    Args:
        path: string path of the file object to be written to the system
        mime_type: MIME type to specify the type of content in the file
            object
        compression_type: Type of compression to be used for this object

    Returns:
        file handle with a close function for the user to use
    """
    # Abstract operation: concrete file systems must override this.
    raise NotImplementedError
Returns a write channel for the given file path. Args: path: string path of the file object to be written to the system mime_type: MIME type to specify the type of content in the file object compression_type: Type of compression to be used for this object Returns: file handle with a close function for the user to use
github-repos
def sign(x):
    """Returns a tensor with the signs of the elements of `x`.

    Args:
        x: Input tensor.

    Returns:
        Output tensor of same shape as `x`.
    """
    # Graph-construction mode: defer to the op's symbolic call.
    if any_symbolic_tensors((x,)):
        return Sign().symbolic_call(x)
    # Eager mode: delegate directly to the backend implementation.
    return backend.numpy.sign(x)
Returns a tensor with the signs of the elements of `x`. Args: x: Input tensor. Returns: Output tensor of same shape as `x`.
github-repos
def report_schema(headers):
    """Helper to determine the schema of a given set of report headers.

    Using a match table generated from the DCM proto, each report header is
    matched to its type and a schema is assembled. If not found, defaults to
    STRING.

    Args:
        headers: (list) First row of a report.

    Returns:
        JSON schema definition (list of column dicts).
    """
    schema = []
    for raw_header in headers:
        sanitized = column_header_sanitize(raw_header)
        field_type = DCM_Field_Lookup.get(sanitized)
        if field_type is None:
            # Fall back to suffix matching against known DCM field names.
            for known_name, known_type in DCM_Field_Lookup.items():
                if sanitized.endswith('_' + known_name):
                    field_type = known_type
                    break
        schema.append({
            'name': sanitized,
            'type': field_type if field_type is not None else 'STRING',
            'mode': 'NULLABLE',
        })
    return schema
Helper to determine the schema of a given set of report headers. Using a match table generated from the DCM proto, each report header is matched to its type and a schema is assembled. If not found defaults to STRING. Usage example: ``` filename, report = report_file(...) rows = report_to_rows(report) rows = report_clean(rows) schema = report_schema(next(rows)) ``` Args: * headers: (list) First row of a report. Returns: * JSON schema definition.
github-repos
def uninstalled(name):
    '''
    Ensure that the named package is not installed.

    Args:
        name (str): The flatpak package.

    Returns:
        dict: The ``result`` and ``output``.
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    old = __salt__['flatpak.is_installed'](name)
    if not old:
        # Nothing to do: the package is already absent.
        ret['comment'] = 'Package {0} is not installed'.format(name)
        ret['result'] = True
        return ret

    if __opts__['test']:
        # Dry-run mode: report the would-be change without acting.
        ret['comment'] = 'Package {0} would have been uninstalled'.format(name)
        ret['changes']['old'] = old[0]['version']
        ret['changes']['new'] = None
        ret['result'] = None
        return ret

    __salt__['flatpak.uninstall'](name)
    if not __salt__['flatpak.is_installed'](name):
        ret['comment'] = 'Package {0} uninstalled'.format(name)
        ret['changes']['old'] = old[0]['version']
        ret['changes']['new'] = None
        ret['result'] = True
        return ret

    # Bug fix: the original fell through and implicitly returned None when
    # the package was still installed after the uninstall attempt; report
    # an explicit failure instead.
    ret['comment'] = 'Package {0} failed to uninstall'.format(name)
    ret['result'] = False
    return ret
Ensure that the named package is not installed.

Args:
    name (str): The flatpak package.

Returns:
    dict: The ``result`` and ``output``.

Example:

.. code-block:: yaml

    uninstall_package:
        flatpak.uninstalled:
            - name: gimp
codesearchnet
def save_state_regularly(self, fname, frequency=600):
    """Save the state of the node to `fname` with a given regularity.

    Args:
        fname: File name to save regularly to.
        frequency: Interval in seconds between saves. Defaults to 600
            (10 minutes).
    """
    self.save_state(fname)
    # Re-schedule this method on the running event loop so saving repeats
    # indefinitely; the handle is kept so it can be cancelled elsewhere.
    loop = asyncio.get_event_loop()
    self.save_state_loop = loop.call_later(frequency, self.save_state_regularly, fname, frequency)
Save the state of node with a given regularity to the given filename. Args: fname: File name to save retularly to frequency: Frequency in seconds that the state should be saved. By default, 10 minutes.
codesearchnet
def __init__(self, context):
    """Instantiates DistributionsPlugin via TensorBoard core.

    Args:
        context: A base_plugin.TBContext instance.
    """
    # Distribution data is served by reusing the histograms plugin.
    self._histograms_plugin = histograms_plugin.HistogramsPlugin(context)
    self._multiplexer = context.multiplexer
Instantiates DistributionsPlugin via TensorBoard core. Args: context: A base_plugin.TBContext instance.
juraj-google-style
def _is_every_steps(self, phase_step, batch, every): if (not every): return False covered_steps = range(phase_step, (phase_step + batch)) return any(((((step + 1) % every) == 0) for step in covered_steps))
Determine whether a periodic event should happen at this step. Args: phase_step: The incrementing step. batch: The number of steps progressed at once. every: The interval of the period. Returns: Boolean of whether the event should happen.
codesearchnet
def _response_good(self, respond):
    """Check an HTTP response and stash its parsed body on ``self.result``.

    Args:
        respond: HTTP response object (requests-style).

    Returns:
        bool: True if the response is good, else False.

    Raises:
        ApiError: response isn't formatted properly, or has a non-retryable
            error status code.
    """
    if respond.status_code != requests.codes.ok:
        log.warning('Got a {} code response to {}: {}'.format(
            respond.status_code, respond.url, respond.text))
        if respond.status_code in self.errorsNotRetry:
            # Non-retryable status codes abort immediately.
            raise ApiError(usr_msg='Got a {} code response to {}: {}'.format(
                respond.status_code, respond.url, respond.text))
        else:
            # Retryable errors are delegated to the generic parser.
            return self._parse_response(respond)
    try:
        # HTML bodies are kept verbatim; everything else is parsed as JSON.
        if (str(respond.headers['content-type']).startswith("text/html;")):
            self.result = respond.text
            return True
        else:
            self.result = respond.json()
    except (json.JSONDecodeError, ValueError):
        usr_msg = 'device server returned unexpected http response'
        dev_msg = usr_msg + ': ' + respond.text
        raise ApiError(usr_msg=usr_msg, dev_msg=dev_msg)
    if not isinstance(self.result, (list, dict)):
        msg = ('JSON not a list or dict: url: {0},'
               'status: {1}, reason: {2}, text: {3}')
        raise ApiError(
            usr_msg=msg.format(respond.url,
                               respond.status_code,
                               respond.reason,
                               respond.text))
    # A JSON body carrying {"error": {"status": 400, ...}} is treated as a
    # soft failure; anything else counts as success.
    if ('error' not in self.result or
            ('status' not in self.result['error'] or
             self.result['error']['status'] != 400)):
        return True
    else:
        log.warning("Got a 400 code JSON response to %s", respond.url)
        return False
check response Args: respond (str): HTTP response. Returns: bool: True if the response is good, else False. Raises: ApiError: response isn't formatted properly.
juraj-google-style
def get_cols_to_keep(gctoo, cid=None, col_bool=None, cidx=None, exclude_cid=None):
    """Figure out, based on the possible column inputs, which columns to keep.

    Exactly one of `cid`, `col_bool`, or `cidx` is honored, checked in that
    order; `exclude_cid` is applied afterwards as a subtractive filter.

    Args:
        gctoo (GCToo object):
        cid (list of strings): explicit column ids to keep.
        col_bool (boolean array): mask over gctoo.data_df columns.
        cidx (list of integers): positional column indices.
        exclude_cid (list of strings): column ids to drop from the result.

    Returns:
        cols_to_keep (list of strings): col ids to be kept.
    """
    if (cid is not None):
        assert (type(cid) == list), 'cid must be a list. cid: {}'.format(cid)
        # Preserve the data_df column order, not the order given in cid.
        cols_to_keep = [gctoo_col for gctoo_col in gctoo.data_df.columns if (gctoo_col in cid)]
        num_missing_cids = (len(cid) - len(cols_to_keep))
        if (num_missing_cids != 0):
            logger.info('{} cids were not found in the GCT.'.format(num_missing_cids))
    elif (col_bool is not None):
        assert (len(col_bool) == gctoo.data_df.shape[1]), ('col_bool must have length equal to gctoo.data_df.shape[1]. ' + 'len(col_bool): {}, gctoo.data_df.shape[1]: {}'.format(len(col_bool), gctoo.data_df.shape[1]))
        cols_to_keep = gctoo.data_df.columns[col_bool].values
    elif (cidx is not None):
        assert (type(cidx[0]) is int), ('cidx must be a list of integers. cidx[0]: {}, ' + 'type(cidx[0]): {}').format(cidx[0], type(cidx[0]))
        assert (max(cidx) <= gctoo.data_df.shape[1]), ('cidx contains an integer larger than the number of columns in ' + 'the GCToo. max(cidx): {}, gctoo.data_df.shape[1]: {}').format(max(cidx), gctoo.data_df.shape[1])
        cols_to_keep = gctoo.data_df.columns[cidx].values
    else:
        # No selector given: keep everything.
        cols_to_keep = gctoo.data_df.columns.values
    if (exclude_cid is not None):
        cols_to_keep = [col_to_keep for col_to_keep in cols_to_keep if (col_to_keep not in exclude_cid)]
    return cols_to_keep
Figure out based on the possible columns inputs which columns to keep. Args: gctoo (GCToo object): cid (list of strings): col_bool (boolean array): cidx (list of integers): exclude_cid (list of strings): Returns: cols_to_keep (list of strings): col ids to be kept
codesearchnet
def Environ(variable, default):
    """A wrapper for `os.environ.get` that works the same way in both Pythons.

    Args:
        variable: A name of the variable to get the value of.
        default: A default value to return in case no value for the given
            variable is set.

    Returns:
        An environment value of the given variable.
    """
    precondition.AssertType(variable, Text)
    value = os.environ.get(variable, default)
    if value is None:
        return default
    if PY2:
        # Python 2's os.environ holds byte strings; normalize to unicode.
        value = value.decode("utf-8")
    return value
A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable.
juraj-google-style
def update_state(self, state_arr, action_arr):
    """Update state. Override.

    Args:
        state_arr: `np.ndarray` of state in `self.t`.
            NOTE(review): not referenced in this implementation — confirm
            the override contract requires it only for interface parity.
        action_arr: `np.ndarray` of action in `self.t`.

    Returns:
        `np.ndarray` of state in `self.t+1`.
    """
    # The last channel of the action array one-hot encodes the agent's
    # chosen cell; recover its (x, y) coordinates.
    x, y = np.where(action_arr[-1] == 1)
    self.__agent_pos = (x[0], y[0])
    # Track both the bounded short-term route and the deduplicated
    # long-term set of visited cells.
    self.__route_memory_list.append((x[0], y[0]))
    self.__route_long_memory_list.append((x[0], y[0]))
    self.__route_long_memory_list = list(set(self.__route_long_memory_list))
    # Trim the short-term memory to the configured window size.
    while len(self.__route_memory_list) > self.__memory_num:
        self.__route_memory_list = self.__route_memory_list[1:]
    return self.extract_now_state()
Update state. Override. Args: state_arr: `np.ndarray` of state in `self.t`. action_arr: `np.ndarray` of action in `self.t`. Returns: `np.ndarray` of state in `self.t+1`.
juraj-google-style
class NotebookTrainingTracker(NotebookProgressBar):
    """An object tracking the updates of an ongoing training with progress bars
    and a nice table reporting metrics.

    Args:
        num_steps (`int`): The number of steps during training.
        column_names (`List[str]`, *optional*):
            The list of column names for the metrics table (will be inferred
            from the first call to `write_line` if not set).
    """

    def __init__(self, num_steps, column_names=None):
        super().__init__(num_steps)
        # `inner_table` stores the metrics table; row 0 is the header row.
        self.inner_table = None if column_names is None else [column_names]
        self.child_bar = None

    def display(self):
        """Render the progress bar, metrics table and child bar as HTML."""
        self.html_code = html_progress_bar(self.value, self.total, self.prefix, self.label, self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            self.output = disp.display(disp.HTML(self.html_code), display_id=True)
        else:
            self.output.update(disp.HTML(self.html_code))

    def write_line(self, values):
        """Write a row of metric values into the inner table.

        Args:
            values (dict): Mapping of column name to value for this row.
        """
        if self.inner_table is None:
            self.inner_table = [list(values.keys()), list(values.values())]
        else:
            columns = self.inner_table[0]
            # Extend the header with any metric names not seen before.
            for key in values.keys():
                if key not in columns:
                    columns.append(key)
            self.inner_table[0] = columns
            if len(self.inner_table) > 1:
                last_values = self.inner_table[-1]
                first_column = self.inner_table[0][0]
                if last_values[0] != values[first_column]:
                    # New step: append a fresh row, padding missing columns.
                    self.inner_table.append([values[c] if c in values else 'No Log' for c in columns])
                else:
                    # Same step as the last row: merge the new values in,
                    # keeping previous values for columns not updated.
                    new_values = values
                    for c in columns:
                        if c not in new_values.keys():
                            new_values[c] = last_values[columns.index(c)]
                    self.inner_table[-1] = [new_values[c] for c in columns]
            else:
                self.inner_table.append([values[c] for c in columns])

    def add_child(self, total, prefix=None, width=300):
        """Add a child progress bar displayed under the metrics table.

        Args:
            total (`int`): The number of iterations for the child bar.
            prefix (`str`, *optional*): A prefix shown left of the bar.
            width (`int`, *optional*, defaults to 300): Bar width in pixels.

        Returns:
            The child progress bar.
        """
        self.child_bar = NotebookProgressBar(total, prefix=prefix, parent=self, width=width)
        return self.child_bar

    def remove_child(self):
        """Close the child progress bar and refresh the display."""
        self.child_bar = None
        self.display()
An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics. Args: num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*): The list of column names for the metrics table (will be inferred from the first call to [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
github-repos
def setMood(self, mood):
    """Update the activity message for the current user.

    Args:
        mood (str): new mood message; a falsy value clears the mood.
    """
    # Partial profile update; an empty string clears the mood server-side.
    self.conn('POST', '{0}/users/{1}/profile/partial'.format(SkypeConnection.API_USER, self.userId), auth=SkypeConnection.Auth.SkypeToken, json={'payload': {'mood': (mood or '')}})
    # Mirror the change on the cached local user object.
    self.user.mood = (SkypeUser.Mood(plain=mood) if mood else None)
Update the activity message for the current user. Args: mood (str): new mood message
codesearchnet
def node_filter(self, name, **kwargs):
    """Returns a decorator function for adding a node filter.

    Args:
        name (str): The name of the filter.
        **kwargs: Variable keyword arguments for the filter.

    Returns:
        Callable: A decorator that registers the decorated function as a
        node filter and returns it unchanged.
    """
    def decorator(func):
        self.filters[name] = NodeFilter(name, func, **kwargs)
        # Bug fix: return the function so the decorated name keeps its
        # value instead of being rebound to None.
        return func
    return decorator
Returns a decorator function for adding a node filter. Args: name (str): The name of the filter. **kwargs: Variable keyword arguments for the filter. Returns: Callable[[Callable[[Element, Any], bool]]]: A decorator function for adding a node filter.
juraj-google-style
def forward(self, inputs, expert_size):
    """Forward pass of the GraniteMoeSharedParallelExperts module.

    Splits the (pre-grouped) inputs per expert, applies each expert's
    linear projection, and concatenates the results back together.

    Args:
        inputs (Tensor): Input tensor, rows grouped by expert.
        expert_size: Per-expert row counts used to split `inputs`.

    Returns:
        Tensor: Output tensor with the same row grouping as `inputs`.
    """
    expert_inputs = inputs.split(expert_size, dim=0)
    expert_outputs = [
        F.linear(expert_inputs[idx], self.weight[idx])
        for idx in range(self.num_experts)
    ]
    return torch.cat(expert_outputs, dim=0)
Forward pass of the GraniteMoeSharedParallelExperts module. Args: inputs (Tensor): Input tensor. expert_size: Expert size information. Returns: Tensor: Output tensor.
github-repos
def _sync_call(self, func):
    """__call__ helper for regular synchronous functions.

    Wraps `func` so that the configured start message is printed before the
    call and the end message after it, then marks the wrapper as annotated.

    Args:
        func: The annotated function.

    Returns:
        The wrapping function (same signature as `func`).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if self._start_msg:
            self._start_print()
        result = func(*args, **kwargs)
        if self._end_msg:
            print(self._end_msg)
        return result
    setattr(wrapper, ANNOTATED, True)
    # Bug fix: the wrapper must be handed back to the caller; the original
    # fell through and implicitly returned None, destroying the decorated
    # function.
    return wrapper
__call__ function for regular synchronous functions. Args: func: The annotated function. args: Arguments for func. kwargs: Keyword arguments for func.
juraj-google-style
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
    """Loads xml excel format files.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy (currently ignored by this loader).

    Returns:
        A list with one sheet yielder per sheet in the workbook.
    """
    if file_contents:
        xml_file = BytesIO(file_contents)
    else:
        xml_file = file_name
    book = xmlparse.ParseExcelXMLFile(xml_file)
    # Builds one row (list of cell values) from sheet `s` at row index `r`.
    row_builder = lambda s, r: list(s.row_values(r))
    return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
Loads xml excel format files. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy (will be ignored).
juraj-google-style
def GenerateLibSig(short_name):
    """Generates a library signature suitable for a user agent field.

    Args:
        short_name: The short, product-specific string name for the library.

    Returns:
        A library signature string to append to user-supplied user-agent value.
    """
    with _UTILITY_LOCK:
        utilities_used = ', '.join([utility for utility in sorted(_utility_registry)])
        # Reading the registry consumes it: utilities are reported only once.
        _utility_registry.Clear()
    if utilities_used:
        return ' (%s, %s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION, utilities_used)
    else:
        return ' (%s, %s, %s)' % (short_name, _COMMON_LIB_SIG, _PYTHON_VERSION)
Generates a library signature suitable for a user agent field. Args: short_name: The short, product-specific string name for the library. Returns: A library signature string to append to user-supplied user-agent value.
juraj-google-style
def merge(self, other_cluster):
    """Combine two clusters into a single cluster.

    Args:
        other_cluster (Cluster): The second cluster to combine.

    Returns:
        (Cluster): The combination of both clusters.
    """
    new_cluster = Cluster((self.sites | other_cluster.sites))
    # Merged neighbours are the union of both neighbour sets, minus any
    # site that is now internal to the merged cluster.
    new_cluster.neighbours = (self.neighbours | other_cluster.neighbours).difference(new_cluster.sites)
    return new_cluster
Combine two clusters into a single cluster. Args: other_cluster (Cluster): The second cluster to combine. Returns: (Cluster): The combination of both clusters.
codesearchnet
def ndtri(p, name='ndtri'):
    """The inverse of the CDF of the Normal distribution function.

    Returns x such that the area under the pdf from minus infinity to x is
    equal to p. A piece-wise rational approximation is done for the function
    (a port of the implementation in netlib).

    Args:
        p: `Tensor` of type `float32`, `float64`.
        name: Python string. A name for the operation (default="ndtri").

    Returns:
        x: `Tensor` with `dtype=p.dtype`.

    Raises:
        TypeError: if `p` is not floating-type.
    """
    with tf.name_scope(name):
        p = tf.convert_to_tensor(value=p, name='p')
        # Only float32/float64 are supported by the rational approximation.
        if (dtype_util.as_numpy_dtype(p.dtype) not in [np.float32, np.float64]):
            raise TypeError(('p.dtype=%s is not handled, see docstring for supported types.' % p.dtype))
        return _ndtri(p)
The inverse of the CDF of the Normal distribution function. Returns x such that the area under the pdf from minus infinity to x is equal to p. A piece-wise rational approximation is done for the function. This is a port of the implementation in netlib. Args: p: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="ndtri"). Returns: x: `Tensor` with `dtype=p.dtype`. Raises: TypeError: if `p` is not floating-type.
codesearchnet
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a Firefox cache file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    filename = parser_mediator.GetFilename()
    # Accept files matching the cache naming pattern or the map file prefix.
    if (not self._CACHE_FILENAME_RE.match(filename) and
        not filename.startswith('_CACHE_00')):
        raise errors.UnableToParseFile('Not a Firefox cache1 file.')

    display_name = parser_mediator.GetDisplayName()
    firefox_config = self._GetFirefoxConfig(file_object, display_name)

    file_object.seek(firefox_config.first_record_offset)

    # Walk cache entries until the end of file; individual malformed
    # records are logged and skipped rather than aborting the parse.
    while file_object.get_offset() < file_object.get_size():
        try:
            self._ParseCacheEntry(
                parser_mediator, file_object, display_name,
                firefox_config.block_size)
        except IOError:
            file_offset = file_object.get_offset() - self._MINIMUM_BLOCK_SIZE
            logger.debug((
                '[{0:s}] Invalid cache record in file: {1:s} at offset: '
                '{2:d}.').format(self.NAME, display_name, file_offset))
Parses a Firefox cache file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
juraj-google-style
async def set(self, name, valu, init=False):
    """Set a property on the node.

    Args:
        name (str): The name of the property.
        valu (obj): The value of the property.
        init (bool): Set to True to disable read-only enforcement.

    Returns:
        (bool): True if the property was changed.
    """
    # EditAtom batches the edits so the buid cache stays consistent when
    # several dependent properties change together.
    with s_editatom.EditAtom(self.snap.core.bldgbuids) as editatom:
        retn = await self._setops(name, valu, editatom, init)
        if not retn:
            return False
        await editatom.commit(self.snap)
        return True
Set a property on the node. Args: name (str): The name of the property. valu (obj): The value of the property. init (bool): Set to True to disable read-only enforcement Returns: (bool): True if the property was changed.
juraj-google-style
def enable_napps(cls, napps):
    """Enable a list of NApps.

    Args:
        napps (list): List of NApps, each an id tuple accepted by
            NAppsManager.set_napp.
    """
    mgr = NAppsManager()
    for napp in napps:
        # Point the manager at this NApp before enabling it.
        mgr.set_napp(*napp)
        LOG.info('NApp %s:', mgr.napp_id)
        cls.enable_napp(mgr)
Enable a list of NApps. Args: napps (list): List of NApps.
juraj-google-style
def parse_longitude(longitude, hemisphere):
    """Parse a NMEA-formatted longitude pair.

    Args:
        longitude (str): Longitude in DDDMM.MMMM.
        hemisphere (str): 'E' for East or 'W' for West.

    Returns:
        float: Decimal representation of longitude.

    Raises:
        ValueError: if `hemisphere` is neither 'E' nor 'W'.
    """
    # NMEA packs whole degrees in the first three characters and decimal
    # minutes in the remainder.
    longitude = int(longitude[:3]) + float(longitude[3:]) / 60
    if hemisphere == 'W':
        longitude = -longitude
    elif hemisphere != 'E':
        # Bug fix: the message previously said "North/South", which is the
        # latitude wording; longitude hemispheres are East/West.
        raise ValueError('Incorrect East/West value %r' % hemisphere)
    return longitude
Parse a NMEA-formatted longitude pair. Args: longitude (str): Longitude in DDDMM.MMMM hemisphere (str): East or West Returns: float: Decimal representation of longitude
juraj-google-style
def usergroups_disable(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """Disable an existing User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to disable.
            e.g. 'S0604QSJC'
    """
    # This endpoint requires a user (xoxp) token rather than a bot token.
    self._validate_xoxp_token()
    kwargs.update({"usergroup": usergroup})
    return self.api_call("usergroups.disable", json=kwargs)
Disable an existing User Group Args: usergroup (str): The encoded ID of the User Group to disable. e.g. 'S0604QSJC'
juraj-google-style
def write_fasta_file(seq_records, outname, outdir=None, outext='.faa', force_rerun=False):
    """Write a FASTA file for a SeqRecord or a list of SeqRecord objects.

    Args:
        seq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects.
        outname: Name of the output file which will have outext appended to it.
        outdir: Path to directory to output sequences to.
        outext: Extension of FASTA file, default ".faa".
        force_rerun: If file should be overwritten if it exists.

    Returns:
        str: Path to output FASTA file.
    """
    if not outdir:
        outdir = ''
    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)
    # Only write when forced or when the file does not already exist.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        SeqIO.write(seq_records, outfile, "fasta")
    return outfile
Write a FASTA file for a SeqRecord or a list of SeqRecord objects. Args: seq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects outname: Name of the output file which will have outext appended to it outdir: Path to directory to output sequences to outext: Extension of FASTA file, default ".faa" force_rerun: If file should be overwritten if it exists Returns: str: Path to output FASTA file.
juraj-google-style
def _instruction_list(self, filters): return '\n\n'.join(([self.INSTRUCTIONS.strip(), '*Supported methods:*', 'If you send "@{}: help" to me I reply with these instructions.'.format(self.user), 'If you send "@{}: version" to me I reply with my current version.'.format(self.user)] + [filter.description() for filter in filters]))
Generates the instructions for a bot and its filters. Note: The guidance for each filter is generated by combining the docstrings of the predicate filter and resulting dispatch function with a single space between. The class's :py:attr:`INSTRUCTIONS` and the default help command are added. Arguments: filters (:py:class:`list`): The filters to apply to incoming messages. Returns: :py:class:`str`: The bot's instructions.
codesearchnet
def append_flag_values(self, flag_values):
    """Appends flags registered in another FlagValues instance.

    Args:
        flag_values: FlagValues, the FlagValues instance from which to copy
            flags.
    """
    for flag_name, flag in six.iteritems(flag_values._flags()):
        # A flag with a short name appears in _flags() under both names;
        # only copy the canonical entry to avoid double registration.
        if flag_name == flag.name:
            try:
                self[flag_name] = flag
            except _exceptions.DuplicateFlagError:
                raise _exceptions.DuplicateFlagError.from_flag(
                    flag_name, self, other_flag_values=flag_values)
Appends flags registered in another FlagValues instance. Args: flag_values: FlagValues, the FlagValues instance from which to copy flags.
juraj-google-style
def listTemplates(data=None):
    """Fetch existing Templates details.

    Args:
        data: optional dict containing the `page` number and `per_page`
            value. Defaults to no paging parameters.

    Returns:
        Dictionary containing paging_info and command_templates details.
    """
    # Bug fix: avoid the mutable default argument `data={}`; None is the
    # idiomatic sentinel and is backward-compatible for all callers.
    if data is None:
        data = {}
    conn = Qubole.agent()
    url_path = Template.rest_entity_path
    page_attr = []
    # Build the query string from whichever paging keys are present.
    for key in ('page', 'per_page'):
        if data.get(key) is not None:
            page_attr.append('%s=%s' % (key, data[key]))
    if page_attr:
        url_path = '%s?%s' % (url_path, '&'.join(page_attr))
    return conn.get(url_path)
Fetch existing Templates details. Args: `data`: dictionary containing the value of page number and per-page value Returns: Dictionary containing paging_info and command_templates details
codesearchnet
def clear_errors():
    """Clears the errors register of all Herkulex servos.

    Broadcasts a RAM write that zeroes the status-error register.
    """
    # Frame layout: packet size, broadcast servo id, RAM write command,
    # status-error register address, length marker, then two zeroed bytes.
    packet = [11, BROADCAST_ID, RAM_WRITE_REQ, STATUS_ERROR_RAM, BYTE2, 0, 0]
    send_data(packet)
Clears the errors register of all Herkulex servos Args: none
codesearchnet
def s3_download(url, dst):
    """Download a file from S3.

    Args:
        url (str): the s3 url of the file.
        dst (str): the destination where the file will be saved.

    Raises:
        ValueError: if `url` does not use the ``s3`` scheme.
    """
    url = parse.urlparse(url)
    if (url.scheme != 's3'):
        raise ValueError(("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url)))
    (bucket, key) = (url.netloc, url.path.lstrip('/'))
    # Region resolution: AWS_REGION wins, then the SDK-specific env var.
    region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
    s3 = boto3.resource('s3', region_name=region)
    s3.Bucket(bucket).download_file(key, dst)
Download a file from S3. Args: url (str): the s3 url of the file. dst (str): the destination where the file will be saved.
codesearchnet
def __init__(self, scope, parent, name):
    """Constructor for control flow structures.

    Args:
        scope (CodeEntity): The program scope where this object belongs.
        parent (CodeEntity): This object's parent in the program tree.
        name (str): The name of the control flow statement in the program.
    """
    CodeStatement.__init__(self, scope, parent)
    self.name = name
    # Condition defaults to True; concrete parsers overwrite it.
    self.condition = True
    # Implicit body block owned by this statement.
    self.body = CodeBlock(scope, self, explicit=False)
Constructor for control flow structures. Args: scope (CodeEntity): The program scope where this object belongs. parent (CodeEntity): This object's parent in the program tree. name (str): The name of the control flow statement in the program.
juraj-google-style
def _update_libdata(self, line):
    """Update the library meta data from the current line being parsed.

    Drives a small state machine: metadata/compound fields are harvested
    until a peak section starts, at which point the accumulated records are
    stored and the collectors reset.

    Args:
        line (str): The current line of the file being parsed.
    """
    # Comment lines can embed quoted metadata/compound annotations.
    if re.match('^Comment.*$', line, re.IGNORECASE):
        comments = re.findall('"([^"]*)"', line)
        for c in comments:
            self._parse_meta_info(c)
            self._parse_compound_info(c)

    self._parse_meta_info(line)
    self._parse_compound_info(line)

    # A peak/annotation header terminates the current record: flush the
    # collected metadata and reset all collectors for the next record.
    if self.collect_meta and (re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE) or re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE)):
        self._store_compound_info()
        self._store_meta_info()
        # Reset the dictionaries that store the data
        self.meta_info = get_blank_dict(self.meta_regex)
        self.compound_info = get_blank_dict(self.compound_regex)
        self.other_names = []
        self.collect_meta = False

    # The plain m/z|int|rel.int peak table carries no extra columns.
    if re.match('^PK\$PEAK: m/z int\. rel\.int\.$', line, re.IGNORECASE):
        self.ignore_additional_spectra_info = True

    if re.match('^Num Peaks(.*)$', line, re.IGNORECASE) or re.match('^PK\$PEAK:(.*)', line, re.IGNORECASE):
        self.start_spectra = True
        return
    elif re.match('^PK\$ANNOTATION(.*)', line, re.IGNORECASE):
        self.start_spectra_annotation = True
        # Record the column order declared on the annotation header so the
        # per-line parser can index fields by name.
        match = re.match('^PK\$ANNOTATION:(.*)', line, re.IGNORECASE)
        columns = match.group(1)
        cl = columns.split()
        self.spectra_annotation_indexes = {i: cl.index(i) for i in cl}
        return

    if self.start_spectra_annotation:
        self._parse_spectra_annotation(line)

    if self.start_spectra:
        self._parse_spectra(line)
Update the library meta data from the current line being parsed Args: line (str): The current line of the of the file being parsed
juraj-google-style
def add_time(data):
    """Add a friendly update time to the supplied data.

    Arguments:
        data (dict): The response data and its update time.

    Returns:
        dict: The data with a friendly `last_updated` string.
    """
    payload = data['data']
    updated = data['updated'].date()
    today = date.today()
    # Bucket the age into today / yesterday-ish / this week / older.
    if updated == today:
        stamp = data['updated'].strftime('today at %H:%M:%S')
    elif updated >= today - timedelta(days=1):
        stamp = 'yesterday'
    elif updated >= today - timedelta(days=7):
        stamp = updated.strftime('on %A')
    else:
        stamp = updated.strftime('%Y-%m-%d')
    payload['last_updated'] = stamp
    return payload
Add a friendly update time to the supplied data.

Arguments:
    data (:py:class:`dict`): The response data and its update time.

Returns:
    :py:class:`dict`: The data with a friendly update time.
juraj-google-style
def _UpdateUserGroups(self, user, groups):
    """Update group membership for a Linux user.

    Args:
        user: string, the name of the Linux user account.
        groups: list, the group names to add the user as a member.

    Returns:
        bool, True if user update succeeded.
    """
    groups = ','.join(groups)
    self.logger.debug('Updating user %s with groups %s.', user, groups)
    command = self.usermod_cmd.format(user=user, groups=groups)
    try:
        # NOTE(review): splitting on single spaces assumes usermod_cmd
        # contains no quoted arguments with embedded spaces — confirm.
        subprocess.check_call(command.split(' '))
    except subprocess.CalledProcessError as e:
        self.logger.warning('Could not update user %s. %s.', user, str(e))
        return False
    else:
        self.logger.debug('Updated user account %s.', user)
        return True
Update group membership for a Linux user. Args: user: string, the name of the Linux user account. groups: list, the group names to add the user as a member. Returns: bool, True if user update succeeded.
codesearchnet
def record(self, flat_outputs, inference_args, input_tangents):
    """Record the function call operation.

    For backprop, indicates the backward function to use and which new
    Tensors must be watched. For forwardprop from eager, the function call
    itself will have produced tangents which need to be recorded.

    Args:
        flat_outputs: The result of running `forward`.
        inference_args: A flat list of Tensors with inference inputs to the
            operation.
        input_tangents: A flat list of Tensors with input tangents consumed
            by the operation.
    """
    backward_function, to_record = self._wrap_backward_function(self._forward_graph, self._backward, flat_outputs)
    if self._forwardprop_output_indices:
        # Forwardprop already produced output tangents: record backprop and
        # forwardprop bookkeeping separately.
        record.record_operation_backprop_only(self._forward.cached_definition.signature.name, to_record, inference_args, backward_function)
        record.record_operation_forwardprop_only(self._forward.cached_definition.signature.name, flat_outputs, inference_args + input_tangents, backward_function, self._forwardprop_output_indices)
    else:
        record.record_operation(self._forward.cached_definition.signature.name, to_record, inference_args + input_tangents, backward_function)
Record the function call operation. For backprop, indicates the backward function to use and which new Tensors must be watched. For forwardprop from eager, the function call itself will have produced tangents which need to be recorded. Args: flat_outputs: The result of running `forward`. inference_args: A flat list of Tensors with inference inputs to the operation. input_tangents: A flat list of Tensors with input tangents consumed by the operation.
github-repos
def list_vm_images_sub(access_token, subscription_id):
    """List VM images in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of a list of VM images.
    """
    endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/images', '?api-version=', COMP_API])
    # do_get_next presumably aggregates paged results — see that helper.
    return do_get_next(endpoint, access_token)
List VM images in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM images.
juraj-google-style
def __init__(self, sid=None):
    """Initialization function.

    Args:
        sid (int): The state identifier.

    Returns:
        None
    """
    # Whether this state is accepting.
    self.final = False
    # Whether this state is the automaton's initial state.
    self.initial = False
    self.stateid = sid
    # Outgoing transitions.
    self.arcs = []
Initialization function Args: sid (int): The state identifier Returns: None
juraj-google-style
def get_first(self, status):
    """Get the first item in the queue that has the given status.

    Args:
        status (str): return the first item with this status.

    Returns:
        The first queue item with the given status, or None if there is
        no item with that status.
    """
    items = self.get_all(status)
    if items:
        # Improvement: take the first value directly instead of
        # materializing the entire items() list (O(1) instead of O(n)).
        return next(iter(items.values()))
    return None
Get the first item in the queue that has the given status. Args: status (str): return the first item with this status. Returns: :class:`nyawc.QueueItem`: The first queue item with the given status.
juraj-google-style
def _maxSizeCheck(cls, obj):
    """Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE.

    Args:
        obj (numbers.Number or collection): a frame count (compared by
            value) or a sized collection (compared by length).

    Raises:
        :class:`fileseq.exceptions.MaxSizeException`:
    """
    fail = False
    size = 0
    if isinstance(obj, numbers.Number):
        # Numbers are compared by value, not by length.
        if obj > constants.MAX_FRAME_SIZE:
            fail = True
            size = obj
    elif hasattr(obj, '__len__'):
        size = len(obj)
        fail = size > constants.MAX_FRAME_SIZE
    # Objects that are neither numbers nor sized pass silently.
    if fail:
        raise MaxSizeException('Frame size %s > %s (MAX_FRAME_SIZE)' \
            % (size, constants.MAX_FRAME_SIZE))
Raise a MaxSizeException if ``obj`` exceeds MAX_FRAME_SIZE Args: obj (numbers.Number or collection): Raises: :class:`fileseq.exceptions.MaxSizeException`:
juraj-google-style
def accountValues(self, account: str = '') -> List[AccountValue]:
    """List of account values for the given account, or of all accounts if
    account is left blank.

    Args:
        account: If specified, filter for this account name.
    """
    values = self.wrapper.accountValues.values()
    if not account:
        return list(values)
    return [value for value in values if value.account == account]
List of account values for the given account, or of all accounts if account is left blank. Args: account: If specified, filter for this account name.
juraj-google-style
def generate(self, model, outfolder, *, exclude=None):
    """Generate model code.

    Args:
        model: The meta-model to generate code for.
        outfolder: Path to the directory that will contain the generated
            code.
        exclude: List of referenced resources for which code was already
            generated (to prevent regeneration).
    """
    with pythonic_names():
        super().generate(model, outfolder)
    # Only follow cross-resource references when enabled and the model is
    # actually backed by a resource.
    check_dependency = (self.with_dependencies and model.eResource)
    if check_dependency:
        if (exclude is None):
            exclude = set()
        resource = model.eResource
        # Mark the current resource as handled to avoid regenerating it.
        exclude.add(resource)
        rset = resource.resource_set
        direct_resources = {r for r in rset.resources.values() if (r not in exclude)}
        for resource in direct_resources:
            self.generate(resource.contents[0], outfolder, exclude=exclude)
Generate model code.

Args:
    model: The meta-model to generate code for.
    outfolder: Path to the directory that will contain the generated code.
    exclude: List of referenced resources for which code was already
        generated (to prevent regeneration).
codesearchnet
def from_steps(step1, step2, normalization_els):
    """Creates a ConversionVoltagePair from two steps in the element profile
    from a PD analysis.

    Args:
        step1: Starting step.
        step2: Ending step.
        normalization_els: Elements to normalize the reaction by. To ensure
            correct capacities.
    """
    working_ion_entry = step1["element_reference"]
    working_ion = working_ion_entry.composition.elements[0].symbol
    working_ion_valence = max(Element(working_ion).oxidation_states)
    # Voltage from the chemical-potential difference, per working-ion charge.
    voltage = (-step1["chempot"] + working_ion_entry.energy_per_atom)/working_ion_valence
    # Capacity (mAh) from the change in working-ion content between steps.
    mAh = (step2["evolution"] - step1["evolution"]) \
        * Charge(1, "e").to("C") * Time(1, "s").to("h") * N_A * 1000*working_ion_valence
    licomp = Composition(working_ion)
    prev_rxn = step1["reaction"]
    reactants = {comp: abs(prev_rxn.get_coeff(comp))
                 for comp in prev_rxn.products if comp != licomp}
    curr_rxn = step2["reaction"]
    products = {comp: abs(curr_rxn.get_coeff(comp))
                for comp in curr_rxn.products if comp != licomp}
    # The working ion consumed equals the evolution difference.
    reactants[licomp] = (step2["evolution"] - step1["evolution"])
    rxn = BalancedReaction(reactants, products)
    # Normalize to the first requested element actually present in the
    # reaction (amount above tolerance).
    for el, amt in normalization_els.items():
        if rxn.get_el_amount(el) > 1e-6:
            rxn.normalize_to_element(el, amt)
            break
    prev_mass_dischg = sum([prev_rxn.all_comp[i].weight
                            * abs(prev_rxn.coeffs[i])
                            for i in range(len(prev_rxn.all_comp))]) / 2
    vol_charge = sum([abs(prev_rxn.get_coeff(e.composition))
                      * e.structure.volume
                      for e in step1["entries"]
                      if e.composition.reduced_formula != working_ion])
    mass_discharge = sum([curr_rxn.all_comp[i].weight
                          * abs(curr_rxn.coeffs[i])
                          for i in range(len(curr_rxn.all_comp))]) / 2
    mass_charge = prev_mass_dischg
    # NOTE(review): the next assignment is a no-op kept from the original.
    mass_discharge = mass_discharge
    vol_discharge = sum([abs(curr_rxn.get_coeff(e.composition))
                         * e.structure.volume
                         for e in step2["entries"]
                         if e.composition.reduced_formula != working_ion])
    # Atomic fraction of the working ion on the charged side.
    totalcomp = Composition({})
    for comp in prev_rxn.products:
        if comp.reduced_formula != working_ion:
            totalcomp += comp * abs(prev_rxn.get_coeff(comp))
    frac_charge = totalcomp.get_atomic_fraction(Element(working_ion))
    # Atomic fraction of the working ion on the discharged side.
    totalcomp = Composition({})
    for comp in curr_rxn.products:
        if comp.reduced_formula != working_ion:
            totalcomp += comp * abs(curr_rxn.get_coeff(comp))
    frac_discharge = totalcomp.get_atomic_fraction(Element(working_ion))
    # NOTE(review): another no-op assignment kept from the original.
    rxn = rxn
    entries_charge = step2["entries"]
    entries_discharge = step1["entries"]
    return ConversionVoltagePair(rxn, voltage, mAh, vol_charge,
                                 vol_discharge, mass_charge,
                                 mass_discharge,
                                 frac_charge, frac_discharge,
                                 entries_charge, entries_discharge,
                                 working_ion_entry)
Creates a ConversionVoltagePair from two steps in the element profile from a PD analysis. Args: step1: Starting step step2: Ending step normalization_els: Elements to normalize the reaction by. To ensure correct capacities.
juraj-google-style
def download_data_impl(self, run, tag, response_format):
    """Provides a response for downloading scalars data for a data series.

    Args:
        run: The run.
        tag: The specific tag.
        response_format: A string. One of the values of the OutputFormat
            enum of the scalar plugin.

    Raises:
        ValueError: If the scalars plugin is not registered.

    Returns:
        2 entities:
            - A JSON object response body.
            - A mime type (string) for the response.
    """
    scalars_plugin_instance = self._get_scalars_plugin()
    if (not scalars_plugin_instance):
        raise ValueError('Failed to respond to request for /download_data. The scalars plugin is oddly not registered.')
    # Delegate the actual data extraction to the scalars plugin.
    (body, mime_type) = scalars_plugin_instance.scalars_impl(tag, run, None, response_format)
    return (body, mime_type)
Provides a response for downloading scalars data for a data series. Args: run: The run. tag: The specific tag. response_format: A string. One of the values of the OutputFormat enum of the scalar plugin. Raises: ValueError: If the scalars plugin is not registered. Returns: 2 entities: - A JSON object response body. - A mime type (string) for the response.
codesearchnet
def search_stack_for_localvar(varname):
    """Finds a local variable somewhere in the stack and returns the value.

    Args:
        varname (str): variable name.

    Returns:
        None if varname is not found else its value.
    """
    curr_frame = inspect.currentframe()
    print((' * Searching parent frames for: ' + six.text_type(varname)))
    frame_no = 0
    # Walk outward from the current frame towards the outermost caller.
    while (curr_frame.f_back is not None):
        if (varname in curr_frame.f_locals.keys()):
            print((' * Found in frame: ' + six.text_type(frame_no)))
            return curr_frame.f_locals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    print((('... Found nothing in all ' + six.text_type(frame_no)) + ' frames.'))
    return None
Finds a local variable somewhere in the stack and returns the value

Args:
    varname (str): variable name

Returns:
    None if varname is not found else its value
codesearchnet
def apply(self, parent_environ=None):
    """Apply the context to the current python session.

    Note that this updates os.environ and possibly sys.path, if
    `parent_environ` is not provided.

    Args:
        parent_environ: Environment to interpret the context within,
            defaults to os.environ if None.
    """
    # Interpret the context against the live process environment...
    interpreter = Python(target_environ=os.environ)
    executor = self._create_executor(interpreter, parent_environ)
    self._execute(executor)
    # ...then write the resulting environment back into this session.
    interpreter.apply_environ()
Apply the context to the current python session. Note that this updates os.environ and possibly sys.path, if `parent_environ` is not provided. Args: parent_environ: Environment to interpret the context within, defaults to os.environ if None.
juraj-google-style
def remove_triple(self, p, o, auto_refresh=True):
    """Remove triple by supplying p, o.

    Args:
        p (rdflib.term.URIRef): predicate.
        o (): object.
        auto_refresh (bool): whether or not to update object-like
            self.rdf.triples.

    Returns:
        None: removes triple from self.rdf.graph.
    """
    # The subject is always this resource's own URI.
    self.rdf.graph.remove((self.uri, p, self._handle_object(o)))
    # Optionally rebuild the parsed triple view after the mutation.
    self._handle_triple_refresh(auto_refresh)
remove triple by supplying p,o Args: p (rdflib.term.URIRef): predicate o (): object auto_refresh (bool): whether or not to update object-like self.rdf.triples Returns: None: removes triple from self.rdf.graph
juraj-google-style
class Pooling1D(Layer):
    """Pooling layer for arbitrary pooling functions, for 1D inputs.

    This class only exists for code reuse. It will never be an exposed API.

    Args:
        pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`.
        pool_size: An integer or tuple/list of a single integer, representing
            the size of the pooling window.
        strides: An integer or tuple/list of a single integer, specifying the
            strides of the pooling operation.
        padding: A string. The padding method, either 'valid' or 'same'.
            Case-insensitive.
        data_format: A string, one of `channels_last` (default) or
            `channels_first`.
        name: A string, the name of the layer.
    """

    def __init__(self, pool_function, pool_size, strides, padding='valid', data_format='channels_last', name=None, **kwargs):
        super(Pooling1D, self).__init__(name=name, **kwargs)
        if data_format is None:
            data_format = backend.image_data_format()
        if strides is None:
            strides = pool_size
        self.pool_function = pool_function
        self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
        self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.input_spec = InputSpec(ndim=3)

    def call(self, inputs):
        # Pool as a degenerate 2D op: insert a dummy spatial axis, pool with
        # window/stride 1 along it, then drop it again.
        if self.data_format == 'channels_last':
            dummy_axis = 2
        else:
            dummy_axis = 3
        expanded = array_ops.expand_dims(inputs, dummy_axis)
        pooled = self.pool_function(
            expanded,
            self.pool_size + (1,),
            strides=self.strides + (1,),
            padding=self.padding,
            data_format=self.data_format)
        return array_ops.squeeze(pooled, dummy_axis)

    def compute_output_shape(self, input_shape):
        dims = tensor_shape.TensorShape(input_shape).as_list()
        channels_first = self.data_format == 'channels_first'
        steps = dims[2] if channels_first else dims[1]
        features = dims[1] if channels_first else dims[2]
        length = conv_utils.conv_output_length(steps, self.pool_size[0], self.padding, self.strides[0])
        if channels_first:
            out_dims = [dims[0], features, length]
        else:
            out_dims = [dims[0], length, features]
        return tensor_shape.TensorShape(out_dims)

    def get_config(self):
        config = {
            'strides': self.strides,
            'pool_size': self.pool_size,
            'padding': self.padding,
            'data_format': self.data_format,
        }
        base_config = super(Pooling1D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
Pooling layer for arbitrary pooling functions, for 1D inputs. This class only exists for code reuse. It will never be an exposed API. Args: pool_function: The pooling function to apply, e.g. `tf.nn.max_pool2d`. pool_size: An integer or tuple/list of a single integer, representing the size of the pooling window. strides: An integer or tuple/list of a single integer, specifying the strides of the pooling operation. padding: A string. The padding method, either 'valid' or 'same'. Case-insensitive. data_format: A string, one of `channels_last` (default) or `channels_first`. The ordering of the dimensions in the inputs. `channels_last` corresponds to inputs with shape `(batch, steps, features)` while `channels_first` corresponds to inputs with shape `(batch, features, steps)`. name: A string, the name of the layer.
github-repos
def _create_position_ids_from_inputs_embeds(inputs_embeds: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int]) -> tf.Tensor:
    """Generate sequential position ids for directly-provided embeddings.

    We are given embeddings directly and cannot infer which positions are
    padding, so positions are simply sequential, starting just past
    ``padding_idx`` and shifted by ``past_key_values_length``.

    Args:
        inputs_embeds: tf.Tensor of embeddings; only its leading dims are used.
        past_key_values_length: Offset added to every position id.
        padding_idx: Index reserved for padding; ids start at padding_idx + 1.

    Returns:
        tf.Tensor of position ids broadcast to the input's batch shape.
    """
    batch_shape = shape_list(inputs_embeds)[:-1]
    seq_len = batch_shape[1]
    start = padding_idx + 1
    positions = tf.range(start, seq_len + start, dtype=tf.int64)
    broadcast = tf.broadcast_to(tf.expand_dims(positions, axis=0), batch_shape)
    return broadcast + past_key_values_length
We are provided embeddings directly, so we cannot infer which positions are
padded and just generate sequential position ids.

Args:
    inputs_embeds: tf.Tensor

Returns:
    tf.Tensor
github-repos
def path_fraction_id_offset(points, fraction, relative_offset=False):
    """Find the segment matching a fraction of the piecewise-linear path length.

    Args:
        points: an iterable of indexable objects with indices 0, 1, 2
            corresponding to 3D cartesian coordinates
        fraction: path length fraction (0.0 <= fraction <= 1.0)
        relative_offset: return absolute or relative segment distance

    Returns:
        (segment ID, segment offset) pair.
    """
    if not 0. <= fraction <= 1.0:
        raise ValueError("Invalid fraction: %.3f" % fraction)
    xyz = np.array(points)[:, COLS.XYZ]
    seg_lengths = np.linalg.norm(np.diff(xyz, axis=0), axis=1)
    cum = np.cumsum(seg_lengths)
    offset = cum[-1] * fraction
    # First segment whose cumulative length reaches the target offset.
    seg_id = np.argmin(cum < offset)
    if seg_id > 0:
        offset -= cum[seg_id - 1]
    if relative_offset:
        offset /= seg_lengths[seg_id]
    return seg_id, offset
Find the segment which corresponds to the fraction of the path length along the piecewise linear curve which is constructed from the set of points.

Args:
    points: an iterable of indexable objects with indices 0, 1, 2 corresponding to 3D cartesian coordinates
    fraction: path length fraction (0.0 <= fraction <= 1.0)
    relative_offset: return absolute or relative segment distance

Returns:
    (segment ID, segment offset) pair.
juraj-google-style
def __logical_source__(self, map_iri):
    """Build a SimpleNamespace for the triple map's rml:logicalSource.

    Args:
        map_iri: URIRef of the triple map.

    Returns:
        SimpleNamespace with source, reference_formulations and iterator
        (plus query, or json_query/json_key when present), or None when the
        map has no logicalSource.
    """
    def value_of(subject, predicate):
        # why: every single-value lookup below shares this rml.value shape
        return self.rml.value(subject=subject, predicate=predicate)

    logical_source = SimpleNamespace()
    src_bnode = value_of(map_iri, NS_MGR.rml.logicalSource.rdflib)
    if src_bnode is None:
        return
    logical_source.source = value_of(src_bnode, NS_MGR.rml.source.rdflib)
    logical_source.reference_formulations = list(
        self.rml.objects(subject=src_bnode,
                         predicate=NS_MGR.rml.referenceFormulation.rdflib))
    logical_source.iterator = value_of(src_bnode, NS_MGR.rml.iterator.rdflib)
    query = value_of(src_bnode, NS_MGR.rml.query.rdflib)
    json_query = value_of(src_bnode, NS_MGR.rml.reference.rdflib)
    json_key = value_of(src_bnode, NS_MGR.rml.key.rdflib)
    if query is not None:
        logical_source.query = query
    if json_query is not None:
        # Presence of an rml:reference switches this mapper into JSON-query
        # mode globally as well as for this source.
        self.use_json_qry = True
        self.default_use_json_qry = True
        logical_source.json_query = json_query
        logical_source.json_key = json_key
    return logical_source
Creates a SimpleNamespace for the TripelMap's logicalSource Args: ----- map_iri: URIRef
juraj-google-style
def run_inference_on_image(image):
    """Runs inference on an image and prints the top predictions.

    Args:
        image: Image file name.

    Returns:
        Nothing
    """
    if not tf.gfile.Exists(image):
        tf.logging.fatal('File does not exist %s', image)
    image_data = tf.gfile.FastGFile(image, 'rb').read()

    # Build the inference graph once, then run the softmax over the image.
    create_graph()
    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
        raw = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        scores = np.squeeze(raw)

        lookup = NodeLookup()
        # Indices of the top-k scores, highest first.
        top_k = scores.argsort()[-FLAGS.num_top_predictions:][::-1]
        for node_id in top_k:
            label = lookup.id_to_string(node_id)
            print('%s (score = %.5f)' % (label, scores[node_id]))
Runs inference on an image. Args: image: Image file name. Returns: Nothing
juraj-google-style
def sum(x, axis=None, keepdims=False):
    """Reduction along axes with sum operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which the sum
            is calculated. Passing the default value `None` will reduce all
            dimensions.
        keepdims (bool): Flag whether the reduced axes are kept as a
            dimension with 1 element.

    Returns:
        ~nnabla.Variable: N-D array.
    """
    from .function_bases import sum as sum_base
    if axis is None:
        reduce_axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        reduce_axes = axis
    else:
        reduce_axes = [axis]
    return sum_base(x, reduce_axes, keepdims)
Reduction along axes with sum operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which the sum is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array.
juraj-google-style
def read_tree(input, schema):
    """Read a tree from a string or file.

    Args:
        input (str): Either a tree string, a path to a tree file (plain-text
            or gzipped), or a DendroPy Tree object
        schema (str): The schema of ``input`` (DendroPy, Newick, NeXML, or
            Nexus)

    Returns:
        The parsed tree(s); the exact shape depends on the schema (a single
        ``Tree``, a ``list`` of trees, or a ``dict`` of name -> ``Tree``).
    """
    handlers = {
        'dendropy': read_tree_dendropy,
        'newick': read_tree_newick,
        'nexml': read_tree_nexml,
        'nexus': read_tree_nexus,
    }
    key = schema.lower()
    if key not in handlers:
        raise ValueError('Invalid schema: %s (valid options: %s)' % (schema, ', '.join(sorted(handlers.keys()))))
    return handlers[key](input)
Read a tree from a string or file Args: ``input`` (``str``): Either a tree string, a path to a tree file (plain-text or gzipped), or a DendroPy Tree object ``schema`` (``str``): The schema of ``input`` (DendroPy, Newick, NeXML, or Nexus) Returns: * If the input is Newick, either a ``Tree`` object if ``input`` contains a single tree, or a ``list`` of ``Tree`` objects if ``input`` contains multiple trees (one per line) * If the input is NeXML or Nexus, a ``dict`` of trees represented by ``input``, where keys are tree names (``str``) and values are ``Tree`` objects
codesearchnet
def __init__(self, instruments=None, scripts=None, name=None, settings=None, log_function=None, data_path=None):
    """Example of a script wrapper.

    Args:
        instruments (optional): instruments for this script (unused here)
        scripts (optional): sub-scripts for this script (unused here)
        name (optional): name of script, if empty same as class name
        settings (optional): settings for this script, if empty same as
            default settings
        log_function (optional): function used for logging
        data_path (optional): path where script data is saved
    """
    # Bug fix: the bound super().__init__ call already supplies `self`;
    # passing it again shifted every positional argument by one (e.g. the
    # instance object was received as `name` by the base class).
    super(ScriptDummyWrapper, self).__init__(name, settings,
                                             log_function=log_function,
                                             data_path=data_path)
Example of a script Args: name (optional): name of script, if empty same as class name settings (optional): settings for this script, if empty same as default settings
juraj-google-style
def add_key_value(self, key, value):
    """Store a data field, special-casing the unique id.

    Args:
        key: Name of the field; 'unique_id' is stored (stringified) on
            self._unique_id instead of the data dict.
        value: Value to store for the field.
    """
    if key == 'unique_id':
        self._unique_id = str(value)
        return
    self._data[key] = value
Converts the value and adds it as a data field.

Args:
    key: Name of the field; 'unique_id' is stored separately as a string.
    value: Value to store for the field.
juraj-google-style
def create_line_plot(df):
    """Create a metricsgraphics (mg) line plot.

    Args:
        df (pandas.DataFrame): data to plot

    Returns:
        LineChart: the configured timeseries chart.
    """
    figure = Figure("/mg/line_plot/", "mg_line_plot")
    figure.graphics.transition_on_update(True)
    figure.graphics.animate_on_load()
    figure.layout.set_size(width=450, height=200)
    figure.layout.set_margin(left=40, right=40)
    chart = LineChart(df, figure, "Date", ["value"],
                      init_params={"Data": "Steps"},
                      timeseries=True)
    return chart
create a mg line plot Args: df (pandas.DataFrame): data to plot
juraj-google-style
def _gen_save_and_restore_functions(checkpoint_factory_map: object_identity.ObjectIdentityDictionary) -> object_identity.ObjectIdentityDictionary:
    """Generates save/restore concrete functions for checkpointed objects.

    Intended to run on the output of
    `save_util_v1.get_checkpoint_factories_and_keys(object_names)`.

    Args:
        checkpoint_factory_map: A dictionary mapping trackable objects to a
            list of `_CheckpointFactoryData`.

    Returns:
        An ObjectIdentityDictionary mapping obj -> factory name ->
        traced (save, restore) functions.
    """
    saveable_fn_map = object_identity.ObjectIdentityDictionary()
    for obj, factory_data_list in checkpoint_factory_map.items():
        # NOTE(review): resource variables and objects with no factory data
        # are skipped here — presumably handled by a separate path; confirm.
        if resource_variable_ops.is_resource_variable(obj) or not factory_data_list:
            continue
        if factory_data_list[0].name == trackable_utils.SERIALIZE_TO_TENSORS_NAME:
            # An object using the serialize-to-tensors protocol registers
            # exactly one factory entry; trace it directly.
            assert len(factory_data_list) == 1
            saveable_fn_map[obj] = {trackable_utils.SERIALIZE_TO_TENSORS_NAME: tracing_utils.trace_save_and_restore(obj)}
        else:
            # Otherwise trace a per-factory save/restore function map.
            saveable_fn_map[obj] = trace_saveable_util.trace_save_restore_function_map(obj, factory_data_list)
    return saveable_fn_map
Generates global and individual save/restore concrete functions.

The global functions record the ops to save and restore the entire object
to a file prefix, while the individual functions save and restore value
tensors for resources.

This function is intended to run on the output of
`save_util_v1.get_checkpoint_factories_and_keys(object_names)`, which
returns a generated map of `_CheckpointFactoryData`.

Args:
    checkpoint_factory_map: A dictionary mapping trackable objects to a
        list of `_CheckpointFactoryData`.

Returns:
    Tuple of (
        saveable_fn_map: Maps obj -> factory name -> (concrete save, restore)
    )
github-repos