code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def get_transaction(self, transaction_id):
    """Return a Transaction object from the block store by its id.

    Args:
        transaction_id (str): The header_signature of the desired txn.

    Returns:
        Transaction: The specified transaction.

    Raises:
        ValueError: The transaction is not in the block store.
    """
    raw_payload = self._get_data_by_id(
        transaction_id, 'commit_store_get_transaction')
    transaction = Transaction()
    transaction.ParseFromString(raw_payload)
    return transaction
Returns a Transaction object from the block store by its id. Params: transaction_id (str): The header_signature of the desired txn Returns: Transaction: The specified transaction Raises: ValueError: The transaction is not in the block store
juraj-google-style
def keyword_args_only(func):
    """Decorator marking a function as accepting keyword arguments only.

    Raises a `ValueError` if the wrapped `func` is called with any
    positional args, preventing callers from passing arguments in the
    wrong order.

    Args:
        func: The function or method to be decorated.

    Returns:
        Decorated function or method.

    Raises:
        ValueError: If `func` is not callable.
    """
    decorator_utils.validate_callable(func, 'keyword_args_only')

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Reject any positional arguments outright.
        if args:
            raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. Received the following positional arguments: {args}')
        return func(**kwargs)
    return wrapper
Decorator for marking specific function accepting keyword args only. This decorator raises a `ValueError` if the input `func` is called with any non-keyword args. This prevents the caller from providing the arguments in wrong order. Args: func: The function or method needed to be decorated. Returns: Decorated function or method. Raises: ValueError: If `func` is not callable.
github-repos
def get_frequency_shift(self,
                        grid_points,
                        temperatures=None,
                        epsilons=None,
                        output_filename=None):
    """Calculate frequency shifts from the lowest order diagram.

    Args:
        grid_points: Grid point indices at which shifts are computed.
        temperatures: Temperatures (K). Defaults to 0..1000 in steps of 10.
        epsilons (list of float): Values to avoid divergence. Defaults
            to [0.1]. When multiple values are given, frequency shifts
            for each value are produced.
        output_filename: Optional filename passed to the writer.
    """
    # FIX: the original used a mutable ndarray default argument
    # (np.arange(...)), which is shared across calls; build it lazily.
    if temperatures is None:
        temperatures = np.arange(0, 1001, 10, dtype='double')
    if self._interaction is None:
        self.set_phph_interaction()
    _epsilons = [0.1] if epsilons is None else epsilons
    self._grid_points = grid_points
    # Delegates to the module-level get_frequency_shift function.
    get_frequency_shift(self._interaction,
                        self._grid_points,
                        self._band_indices,
                        _epsilons,
                        temperatures,
                        output_filename=output_filename,
                        log_level=self._log_level)
Frequency shift from lowest order diagram is calculated. Args: epsilons (list of float): The value to avoid divergence. When multiple values are given frequency shifts for those values are returned.
codesearchnet
def set(self, name, value):
    """Set an option value.

    Args:
        name (str): The name of the option.
        value: The value to set the option to.

    Raises:
        AttributeError: If the name is not registered.
        TypeError: If the value is not a string or appropriate native type.
        ValueError: If the value is a string but cannot be coerced.
    """
    if name in self._options:
        # Delegate validation/coercion to the option descriptor.
        return self._options[name].__set__(self, value)
    raise AttributeError("Option {0} does not exist.".format(name))
Set an option value. Args: name (str): The name of the option. value: The value to set the option to. Raises: AttributeError: If the name is not registered. TypeError: If the value is not a string or appropriate native type. ValueError: If the value is a string but cannot be coerced.
juraj-google-style
# Initializes a Flask app object for the extension (args: app (Flask)).
# NOTE(review): the line below was mangled by extraction -- the
# FEDORA_BASE_URL default was a URL string ('http://...') whose tail,
# including the closing quote, was stripped, leaving an unterminated
# literal. Restore the full default URL from the upstream project before
# using this code.
def init_app(self, app): app.config.setdefault('FEDORA_BASE_URL', 'http: if hasattr(app, 'teardown_appcontext'): app.teardown_appcontext(self.teardown) else: app.teardown_request(self.teardown)
Initializes a Flask app object for the extension. Args: app(Flask): Flask app
juraj-google-style
def get_site_spd_dos(self, site):
    """Get orbital-projected Dos of a particular site.

    Args:
        site: Site in Structure associated with CompleteDos.

    Returns:
        dict of {orbital: Dos}, e.g. {"s": Dos object, ...}.
    """
    summed = {}
    for orb, pdos in self.pdos[site].items():
        orbital_type = _get_orb_type(orb)
        if orbital_type not in summed:
            summed[orbital_type] = pdos
        else:
            # Accumulate densities of the same orbital type.
            summed[orbital_type] = add_densities(summed[orbital_type], pdos)
    return {orb: Dos(self.efermi, self.energies, densities)
            for orb, densities in summed.items()}
Get orbital projected Dos of a particular site Args: site: Site in Structure associated with CompleteDos. Returns: dict of {orbital: Dos}, e.g. {"s": Dos object, ...}
codesearchnet
def label_count(self, label_list_ids=None):
    """Count how often every label-value occurs in this utterance.

    Args:
        label_list_ids (list): If not None, only labels from label-lists
            whose idx is contained in this list are considered.

    Returns:
        dict: Number of occurrences keyed by label-value.
    """
    totals = collections.defaultdict(int)
    for label_list in self.label_lists.values():
        # Skip lists filtered out by label_list_ids.
        if label_list_ids is not None and label_list.idx not in label_list_ids:
            continue
        for value, occurrences in label_list.label_count().items():
            totals[value] += occurrences
    return totals
Return a dictionary containing the number of times every label-value in this utterance occurs. Args: label_list_ids (list): If not None, only labels from label-lists with an id contained in this list are considered. Returns: dict: A dictionary containing the number of occurrences with the label-value as key.
juraj-google-style
def maybe_scheduled_sampling(self, features, logits, losses):
    """Scheduled sampling: re-run forward inference with sampled targets.

    Performs forward inference again with the "targets" feature replaced
    by values sampled from the model. Identity unless
    hparams.scheduled_sampling_prob > 0. NOTE: tokens for timestep t are
    conditioned on gold tokens 1...t-1, not previously sampled ones, so
    this is not a faithful scheduled-sampling implementation.

    Args:
        features: {str: Tensor}. Features sharded along batch dimension.
        logits: Tensor. Logits for each shard of data.
        losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor).

    Returns:
        (new_logits, new_losses) tuple.
    """
    hparams = self.hparams
    problem_hparams = self._problem_hparams
    # Scheduled sampling disabled: pass results through unchanged.
    if (hparams.scheduled_sampling_prob == 0.0):
        return (logits, losses)
    modality = problem_hparams.modality['targets']
    if (modality != modalities.ModalityType.SYMBOL):
        assert (hparams.scheduled_sampling_prob == 0), 'Scheduled sampling only applies to ModalityType.SYMBOL. Set hparams.scheduled_sampling_prob == 0.0.'
        return (logits, losses)
    is_training = (hparams.mode == tf.estimator.ModeKeys.TRAIN)
    if (not is_training):
        tf.logging.info('Running in %s mode. Not using scheduled sampling.', hparams.mode)
        return (logits, losses)
    vocab_size = problem_hparams.vocab_size['targets']
    assert (vocab_size is not None)
    assert (hparams.vocab_divisor == 1)

    def sample(x):
        'Multinomial sampling from a n-dimensional tensor.'
        samples = tf.multinomial(tf.reshape(x, [(- 1), vocab_size]), 1)
        reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:(- 1)])
        return tf.to_int32(reshaped_samples)

    def mix_gold_sampled(gold_targets, sampled_targets, mixin_prob):
        'Interleave sampled and gold tokens randomly.'
        return tf.where(tf.less(tf.random_uniform(common_layers.shape_list(sampled_targets)), mixin_prob), sampled_targets, gold_targets)

    def sampled_results(features, logits, mixin_prob):
        'Generate scheduled sampling results.'
        sampled_targets = sample(logits)
        new_targets = mix_gold_sampled(features['targets'], sampled_targets, mixin_prob)
        # Treat the mixed targets as given inputs (no gradient through them).
        new_targets = tf.stop_gradient(new_targets)
        new_features = copy.copy(features)
        new_features['targets'] = new_targets
        # NOTE(review): block structure below reconstructed from flattened
        # source; scoping follows the upstream tensor2tensor layout -- verify
        # against the original repository.
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
            new_transformed_features = self.bottom(new_features)
            with tf.variable_scope('body'):
                (new_body_outputs, new_losses) = self._normalize_body_output(self.body(new_transformed_features))
            assert ('training' not in new_losses)
            new_logits = self.top(new_body_outputs, new_features)
            # Loss is computed against the ORIGINAL (gold) features.
            if ((hparams.mode != tf.estimator.ModeKeys.PREDICT) and (hparams.mode != 'attack')):
                new_losses['training'] = self.loss(new_logits, features)
            else:
                new_losses['training'] = 0.0
        return (new_logits, new_losses)

    tf.logging.info('Using scheduled sampling.')
    assert (hparams.scheduled_sampling_prob == 1.0), 'hparams.scheduled_sampling_prob must be 0 or 1.'
    # The gold/sampled mixing probability warms up over warmup_steps.
    mixin_prob = (hparams.scheduled_sampling_gold_mixin_prob * common_layers.inverse_exp_decay(hparams.scheduled_sampling_warmup_steps, min_value=0.001))
    scheduled_sampling_num_passes = getattr(hparams, 'scheduled_sampling_num_passes', 1)
    assert (scheduled_sampling_num_passes > 0), 'hparams.scheduled_sampling_num_passes must be > 0 if hparams.scheduled_sampling_prob > 0.0'
    new_logits = logits
    new_losses = losses
    # Each pass feeds the previous pass's logits back in as sampling input.
    for _ in range(scheduled_sampling_num_passes):
        (new_logits, new_losses) = sampled_results(features, new_logits, mixin_prob)
    return (new_logits, new_losses)
Scheduled sampling. Performs forward inference again with "targets" feature replaced with values sampled from the model. This is the identity unless self.hparams.scheduled_sampling_prob > 0 (default). **WARNING**: This is not a faithful implementation of scheduled sampling. This implementation samples tokens for timestep t conditioned on gold tokens 1...t-1. A proper implementation must condition on a mix of gold and sampled tokens. Doing so is not efficient for models such as the Transformer. Args: features: {str: Tensor}. Features sharded along batch dimension. logits: Tensor. Logits for each shard of data. losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor Returns: new_logits: Tensor. new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or (ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a weighted average.
codesearchnet
def shutdown(self, message=None):
    """Disconnect all servers with a message.

    Args:
        message (str): Quit message to use on each connection.
    """
    for server in self.servers.values():
        server.quit(message)
Disconnect all servers with a message. Args: message (str): Quit message to use on each connection.
juraj-google-style
def find_file(search_dir, file_pattern):
    """Search a directory tree for a file and return the first match.

    Args:
        search_dir: The root directory to search in.
        file_pattern: A unix-style wildcard pattern for the file to find.

    Returns:
        The path to the file if it was found, otherwise an empty string.
    """
    for root, _dirs, filenames in os.walk(search_dir):
        matches = [f for f in filenames if fnmatch.fnmatch(f, file_pattern)]
        if matches:
            return os.path.join(root, matches[0])
    return ""
Search for a file in a directory, and return the first match. If the file is not found return an empty string Args: search_dir: The root directory to search in file_pattern: A unix-style wildcard pattern representing the file to find Returns: The path to the file if it was found, otherwise an empty string
juraj-google-style
def path_new_using_map(m: tcod.map.Map, dcost: float=1.41) -> tcod.path.AStar:
    """Return a new AStar pathfinder using the given Map.

    Args:
        m (Map): A Map instance.
        dcost (float): The path-finding cost of diagonal movement.
            Can be set to 0 to disable diagonal movement.

    Returns:
        AStar: A new AStar instance.
    """
    astar = tcod.path.AStar(m, dcost)
    return astar
Return a new AStar using the given Map. Args: m (Map): A Map instance. dcost (float): The path-finding cost of diagonal movement. Can be set to 0 to disable diagonal movement. Returns: AStar: A new AStar instance.
codesearchnet
def display_as(self, name_type):
    """Display this name using the syntax of the given name type.

    :requires-ext:`rfc6680`

    Args:
        name_type (OID): the NameType to use to display the given name.

    Returns:
        str: the displayed name.

    Raises:
        NotImplementedError: if the GSSAPI implementation does not
            support the RFC 6680 naming extensions.
    """
    if rname_rfc6680 is None:
        raise NotImplementedError('Your GSSAPI implementation does not support RFC 6680 (the GSSAPI naming extensions)')
    displayed = rname_rfc6680.display_name_ext(self, name_type)
    return displayed.decode(_utils._get_encoding())
Display this name as the given name type. This method attempts to display the current :class:`Name` using the syntax of the given :class:`NameType`, if possible. Warning: In MIT krb5 versions below 1.13.3, this method can segfault if the name was not *originally* created with a `name_type` that was not ``None`` (even in cases when a ``name_type`` is later "added", such as via :meth:`canonicalize`). **Do not use this method unless you are sure the above conditions can never happen in your code.** Warning: In addition to the above warning, current versions of MIT krb5 do not actually fully implement this method, and it may return incorrect results in the case of canonicalized names. :requires-ext:`rfc6680` Args: name_type (OID): the :class:`NameType` to use to display the given name Returns: str: the displayed name Raises: OperationUnavailableError
codesearchnet
def take_indexed_slices_grad(self, num_required, name=None):
    """Attempt to extract the average gradient from the accumulator.

    Blocks until a sufficient number of gradients have been applied.

    Args:
        num_required: Number of gradients that needs to have been
            aggregated.
        name: Optional name for the operation.

    Returns:
        An `IndexedSlices` holding the value of the average gradient.

    Raises:
        InvalidArgumentError: If `num_required` < 1.
    """
    taken = gen_data_flow_ops.sparse_accumulator_take_gradient(
        self._accumulator_ref, num_required, dtype=self._dtype, name=name)
    return indexed_slices.IndexedSlices(
        indices=taken.indices,
        values=taken.values,
        dense_shape=taken.shape)
Attempts to extract the average gradient from the accumulator. The operation blocks until sufficient number of gradients have been successfully applied to the accumulator. Once successful, the following actions are also triggered: - Counter of accumulated gradients is reset to 0. - Aggregated gradient is reset to 0 tensor. - Accumulator's internal time step is incremented by 1. Args: num_required: Number of gradients that needs to have been aggregated name: Optional name for the operation Returns: An `IndexedSlices` holding the value of the average gradient. Raises: InvalidArgumentError: If `num_required` < 1
github-repos
def l1_distance(t1, t2, name=None):
    """l1 distance between t1 and t2.

    Args:
        t1: A tensor.
        t2: A tensor that is the same size as t1.
        name: Optional name for this op.

    Returns:
        The l1 distance between t1 and t2.
    """
    with tf.name_scope(name, 'l1_distance', [t1, t2]) as scope:
        difference = tf.subtract(tf.convert_to_tensor(t1, name='t1'),
                                 tf.convert_to_tensor(t2, name='t2'))
        # Reduce over the last dimension only.
        reduction_dim = _last_index(difference, 1)
        return tf.reduce_sum(tf.abs(difference), reduction_dim, name=scope)
l1 distance between t1 and t2. Args: t1: A tensor. t2: A tensor that is the same size as t1. name: Optional name for this op. Returns: The l1 distance between t1 and t2.
juraj-google-style
def _append_commands(dct, module_name, commands ): for command in commands: entry_point = '{command}{subcommand} = {module}{callable}'.format( command=command.command, subcommand=(':{}'.format(command.subcommand) if command.subcommand else ''), module=module_name, callable=(':{}'.format(command.callable) if command.callable else ''), ) dct.setdefault(command.command, set()).add(entry_point)
Append entry point strings representing the given Command objects. Args: dct: The dictionary to append with entry point strings. Each key will be a primary command with a value containing a list of entry point strings representing a Command. module_name: The name of the module in which the command object resides. commands: A list of Command objects to convert to entry point strings.
juraj-google-style
def rename(source_file_names, destination_file_names):
    """Rename the files at the source list to the destination list.

    Source and destination lists should be of the same size.

    Args:
        source_file_names: List of file paths that need to be moved.
        destination_file_names: List of destination paths for the files.

    Raises:
        ``BeamIOError``: if any of the rename operations fail.
    """
    if not source_file_names:
        return
    # All paths are assumed to live on the same filesystem as the first.
    filesystem = FileSystems.get_filesystem(source_file_names[0])
    return filesystem.rename(source_file_names, destination_file_names)
Rename the files at the source list to the destination list. Source and destination lists should be of the same size. Args: source_file_names: List of file paths that need to be moved destination_file_names: List of destination_file_names for the files Raises: ``BeamIOError``: if any of the rename operations fail
github-repos
def list_inputs(self):
    """Return a string listing all the Step's input names and their types.

    String types are wrapped in single quotes so the result is
    copy/pastable.

    Returns:
        str containing all input names and types.
    """
    lines = []
    for name, input_type in self.input_types.items():
        if isinstance(input_type, six.string_types):
            input_type = "'{}'".format(input_type)
        lines.append('{}: {}'.format(name, input_type))
    return '\n'.join(lines)
Return a string listing all the Step's input names and their types. The types are returned in a copy/pastable format, so if the type is `string`, `'string'` (with single quotes) is returned. Returns: str containing all input names and types.
codesearchnet
class GaussianNoise(layers.Layer):
    """Apply additive zero-centered Gaussian noise.

    Useful to mitigate overfitting (a form of random data augmentation).
    As a regularization layer, it is only active at training time.

    Args:
        stddev: Float, standard deviation of the noise distribution.
        seed: Integer, optional random seed for deterministic behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean; noise is added only in training mode.
    """

    def __init__(self, stddev, seed=None, **kwargs):
        super().__init__(**kwargs)
        # stddev must lie in [0, 1].
        if not 0 <= stddev <= 1:
            raise ValueError(f'Invalid value received for argument `stddev`. Expected a float value between 0 and 1. Received: stddev={stddev}')
        self.stddev = stddev
        self.seed = seed
        # A seed generator is only needed when noise will actually be drawn.
        if stddev > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True
        self._build_at_init()

    def call(self, inputs, training=False):
        # Add noise only in training mode and when stddev is positive.
        if training and self.stddev > 0:
            return inputs + backend.random.normal(shape=ops.shape(inputs), mean=0.0, stddev=self.stddev, dtype=self.compute_dtype, seed=self.seed_generator)
        return inputs

    def compute_output_shape(self, input_shape):
        # Additive noise does not change the shape.
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {'stddev': self.stddev, 'seed': self.seed}
        return {**base_config, **config}
Apply additive zero-centered Gaussian noise. This is useful to mitigate overfitting (you could see it as a form of random data augmentation). Gaussian Noise (GS) is a natural choice as corruption process for real valued inputs. As it is a regularization layer, it is only active at training time. Args: stddev: Float, standard deviation of the noise distribution. seed: Integer, optional random seed to enable deterministic behavior. Call arguments: inputs: Input tensor (of any rank). training: Python boolean indicating whether the layer should behave in training mode (adding noise) or in inference mode (doing nothing).
github-repos
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create token type ids for a sequence or sequence pair.

    BlenderbotSmall does not make use of token type ids, therefore a list
    of zeros is returned, sized to the full special-token layout.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of IDs
            for sequence pairs.

    Returns:
        `List[int]`: List of zeros.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        total = len(cls) + len(token_ids_0) + len(sep)
    else:
        # Pair layout: cls ids_0 sep sep ids_1 sep
        total = (len(cls) + len(token_ids_0) + 2 * len(sep)
                 + len(token_ids_1) + len(sep))
    return [0] * total
Create a mask from the two sequences passed to be used in a sequence-pair classification task. BlenderbotSmall does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
def copy(src, dst):
    """Copy a source file to a destination file or directory.

    Equivalent to "shutil.copy". Source and destination can also be
    binary opened file-like objects.

    Args:
        src (path-like object or file-like object): Source file.
        dst (path-like object or file-like object): Destination file or
            directory.

    Raises:
        IOError: Destination directory not found.
    """
    src, src_is_storage = format_and_is_storage(src)
    dst, dst_is_storage = format_and_is_storage(dst)
    # Purely local copy: defer to shutil.
    if not src_is_storage and not dst_is_storage:
        return shutil_copy(src, dst)
    with handle_os_exceptions():
        if not hasattr(dst, 'read'):
            try:
                if isdir(dst):
                    # Destination is a directory: copy into it.
                    dst = join(dst, basename(src))
                elif not isdir(dirname(dst)):
                    raise IOError("No such file or directory: '%s'" % dst)
            except ObjectPermissionError:
                # Cannot inspect destination layout; assume the given
                # path is valid and proceed.
                pass
        _copy(src, dst, src_is_storage, dst_is_storage)
Copies a source file to a destination file or directory. Equivalent to "shutil.copy". Source and destination can also be binary opened file-like objects. Args: src (path-like object or file-like object): Source file. dst (path-like object or file-like object): Destination file or directory. Raises: IOError: Destination directory not found.
codesearchnet
def ng(self, wavelength):
    """The group index with respect to wavelength.

    Args:
        wavelength (float, list, None): The wavelength(s) the group index
            will be evaluated at (presumably in nm, given the 1e-9
            scaling -- TODO confirm against callers).

    Returns:
        float, list: The group index at the target wavelength(s).
    """
    dispersion_term = (wavelength * 1.e-9) * self.nDer1(wavelength)
    return self.n(wavelength) - dispersion_term
The group index with respect to wavelength. Args: wavelength (float, list, None): The wavelength(s) the group index will be evaluated at. Returns: float, list: The group index at the target wavelength(s).
juraj-google-style
def _parse_redistribution(self, config): redistributions = list() regexp = r'redistribute .*' matches = re.findall(regexp, config) for line in matches: ospf_redist = line.split() if len(ospf_redist) == 2: protocol = ospf_redist[1] redistributions.append(dict(protocol=protocol)) if len(ospf_redist) == 4: protocol = ospf_redist[1] route_map_name = ospf_redist[3] redistributions.append(dict(protocol=protocol, route_map=route_map_name)) return dict(redistributions=redistributions)
Parses the running configuration for OSPF route redistribution entries. Args: config (str): Running configuration Returns: dict: with key 'redistributions' mapping to a list of dicts with keys: protocol (str) route_map (optional) (str)
juraj-google-style
def resolve_one_of(tags, at_least_one):
    """Search tags for a combination containing every entity in at_least_one.

    Args:
        tags (list): List of tags with Entities to search for Entities.
        at_least_one (list): List of Entities to find in tags.

    Returns:
        dict: Mapping of entity type to the matched tags, or None if no
        combination could be fully resolved.
    """
    if len(tags) < len(at_least_one):
        return None
    for possible_resolution in choose_1_from_each(at_least_one):
        resolution = {}
        pr = possible_resolution[:]
        for entity_type in pr:
            last_end_index = -1
            if entity_type in resolution:
                # BUG FIX: original read `resolution.get[entity_type]`,
                # which subscripts the bound dict.get method and raises
                # TypeError; index the dict itself.
                last_end_index = resolution[entity_type][-1].get('end_token')
            tag, value, c = find_first_tag(tags, entity_type,
                                           after_index=last_end_index)
            if not tag:
                break
            else:
                if entity_type not in resolution:
                    resolution[entity_type] = []
                resolution[entity_type].append(tag)
        # Only a fully-populated resolution counts as a match.
        if len(resolution) == len(possible_resolution):
            return resolution
    return None
This searches tags for Entites in at_least_one and returns any match Args: tags(list): List of tags with Entities to search for Entities at_least_one(list): List of Entities to find in tags Returns: object: returns None if no match is found but returns any match as an object
juraj-google-style
def Process(self, parser_mediator, registry_key, **kwargs):
    """Processes a Windows Registry key or value.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key.

    Raises:
        ValueError: If the Windows Registry key is not set.
    """
    if registry_key is None:
        raise ValueError('Windows Registry key is not set.')
    # Note: the base Process is invoked without the registry key.
    super(WindowsRegistryPlugin, self).Process(parser_mediator, **kwargs)
    self.ExtractEvents(parser_mediator, registry_key, **kwargs)
Processes a Windows Registry key or value. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key. Raises: ValueError: If the Windows Registry key is not set.
codesearchnet
# Constructs a MecabTokenizer (do_lower_case, never_split, normalize_text,
# mecab_dic selecting ipadic/unidic_lite/unidic, and a raw mecab_option
# string passed to the MeCab constructor).
# NOTE(review): the line below was mangled by extraction -- every error
# message string containing a URL ("See https://...") was truncated at
# 'https:' with its closing quote stripped, so string boundaries (and the
# code that follows each) cannot be reconstructed reliably. Restore this
# method from the upstream transformers BertJapanese tokenizer before use.
def __init__(self, do_lower_case=False, never_split=None, normalize_text=True, mecab_dic: Optional[str]='unidic_lite', mecab_option: Optional[str]=None): self.do_lower_case = do_lower_case self.never_split = never_split if never_split is not None else [] self.normalize_text = normalize_text try: import fugashi except ModuleNotFoundError as error: raise error.__class__('You need to install fugashi to use MecabTokenizer. See https: mecab_option = mecab_option or '' if mecab_dic is not None: if mecab_dic == 'ipadic': try: import ipadic except ModuleNotFoundError as error: raise error.__class__('The ipadic dictionary is not installed. See https: dic_dir = ipadic.DICDIR elif mecab_dic == 'unidic_lite': try: import unidic_lite except ModuleNotFoundError as error: raise error.__class__('The unidic_lite dictionary is not installed. See https: dic_dir = unidic_lite.DICDIR elif mecab_dic == 'unidic': try: import unidic except ModuleNotFoundError as error: raise error.__class__('The unidic dictionary is not installed. See https: dic_dir = unidic.DICDIR if not os.path.isdir(dic_dir): raise RuntimeError('The unidic dictionary itself is not found. See https: else: raise ValueError('Invalid mecab_dic is specified.') mecabrc = os.path.join(dic_dir, 'mecabrc') mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option self.mecab = fugashi.GenericTagger(mecab_option)
Constructs a MecabTokenizer. Args: **do_lower_case**: (*optional*) boolean (default True) Whether to lowercase the input. **never_split**: (*optional*) list of str Kept for backward compatibility purposes. Now implemented directly at the base class level (see [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. **normalize_text**: (*optional*) boolean (default True) Whether to apply unicode normalization to text before tokenization. **mecab_dic**: (*optional*) string (default "ipadic") Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary, set this option to `None` and modify *mecab_option*. **mecab_option**: (*optional*) string String passed to MeCab constructor.
github-repos
async def populate_projects(self, force=False):
    """Download the ``projects.yml`` file and populate ``self.projects``.

    This only sets it once, unless ``force`` is set.

    Args:
        force (bool, optional): Re-run the download, even if
            ``self.projects`` is already defined. Defaults to False.
    """
    if self.projects and not force:
        return
    with tempfile.TemporaryDirectory() as tmpdirname:
        target = os.path.join(tmpdirname, 'projects.yml')
        self.projects = await load_json_or_yaml_from_url(
            self, self.config['project_configuration_url'], target)
Download the ``projects.yml`` file and populate ``self.projects``. This only sets it once, unless ``force`` is set. Args: force (bool, optional): Re-run the download, even if ``self.projects`` is already defined. Defaults to False.
codesearchnet
def _get_bond_data(line):
    """Extract bond label, site indices, and length from a LOBSTER header line.

    The site indices are zero-based, so they can be easily used with a
    Structure object.

    Example header line: No.4:Fe1->Fe9(2.4524893531900283)
    Example orbital-resolved line: No.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456...)

    Args:
        line: line in the COHPCAR header describing the bond.

    Returns:
        Dict with the bond length, a tuple of the site indices, a tuple
        of the orbitals (if orbital-resolved) and a label for them.
    """
    # Orbital labels indexed to match pymatgen's Orbital enum ordering.
    orb_labs = ['s', 'p_y', 'p_z', 'p_x', 'd_xy', 'd_yz', 'd_z^2', 'd_xz', 'd_x^2-y^2', 'f_y(3x^2-y^2)', 'f_xyz', 'f_yz^2', 'f_z^3', 'f_xz^2', 'f_z(x^2-y^2)', 'f_x(x^2-3y^2)']
    # The bond length is the parenthesized trailing value.
    line = line.rsplit('(', 1)
    length = float(line[(- 1)][:(- 1)])
    # "Fe1->Fe9" becomes the two site tokens.
    sites = line[0].replace('->', ':').split(':')[1:3]
    # Convert 1-based site numbers to 0-based indices.
    site_indices = tuple(((int(re.split('\\D+', site)[1]) - 1) for site in sites))
    if ('[' in sites[0]):
        # Orbital-resolved: pull "[3p_x]"-style bracketed specs.
        orbs = [re.findall('\\[(.*)\\]', site)[0] for site in sites]
        orbitals = [tuple((int(orb[0]), Orbital(orb_labs.index(orb[1:])))) for orb in orbs]
        orb_label = ('%d%s-%d%s' % (orbitals[0][0], orbitals[0][1].name, orbitals[1][0], orbitals[1][1].name))
    else:
        orbitals = None
        orb_label = None
    bond_data = {'length': length, 'sites': site_indices, 'orbitals': orbitals, 'orb_label': orb_label}
    return bond_data
Subroutine to extract bond label, site indices, and length from a LOBSTER header line. The site indices are zero-based, so they can be easily used with a Structure object. Example header line: No.4:Fe1->Fe9(2.4524893531900283) Example header line for orbital-resolved COHP: No.1:Fe1[3p_x]->Fe2[3d_x^2-y^2](2.456180552772262) Args: line: line in the COHPCAR header describing the bond. Returns: Dict with the bond label, the bond length, a tuple of the site indices, a tuple containing the orbitals (if orbital-resolved), and a label for the orbitals (if orbital-resolved).
codesearchnet
def MakeSuiteFromDict(d, name=''):
    """Make a normalized Suite from a map of values to probabilities.

    Args:
        d: dictionary that maps values to probabilities.
        name: string name for this suite.

    Returns:
        Suite object.
    """
    new_suite = Suite(name=name)
    new_suite.SetDict(d)
    new_suite.Normalize()
    return new_suite
Makes a suite from a map from values to probabilities. Args: d: dictionary that maps values to probabilities name: string name for this suite Returns: Suite object
juraj-google-style
def convert_clip(params, w_name, scope_name, inputs, layers, weights, names):
    """Convert a clip operation to a Keras layer.

    Args:
        params: dictionary with layer parameters.
        w_name: name prefix in state_dict.
        scope_name: pytorch scope name.
        inputs: pytorch node inputs.
        layers: dictionary with keras tensors.
        weights: pytorch state_dict.
        names: use short names for keras layers.
    """
    print('Converting clip ...')
    vmin, vmax = params['min'], params['max']
    if vmin == 0:
        # clip(0, max) is exactly ReLU with a max_value cap.
        print("using ReLU({0})".format(vmax))
        layer = keras.layers.ReLU(max_value=vmax)
    else:
        # Bind bounds as defaults so the lambda is serialization-safe.
        def target_layer(x, vmin=vmin, vmax=vmax):
            import tensorflow as tf
            return tf.clip_by_value(x, vmin, vmax)
        layer = keras.layers.Lambda(target_layer)
    layers[scope_name] = layer(layers[inputs[0]])
Convert clip operation. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def __init__(self, data_type, default=None, **kwargs):
    """Constructor.

    Args:
        data_type: underlying data type as class.
        default: default value for the property; deep copied for each
            model instance.
        **kwargs: remaining arguments forwarded to the base property.
    """
    merged_kwargs = dict(kwargs, default=default)
    super(JsonProperty, self).__init__(**merged_kwargs)
    self.data_type = data_type
Constructor. Args: data_type: underlying data type as class. default: default value for the property. The value is deep copied fore each model instance. **kwargs: remaining arguments.
juraj-google-style
def returnListOfConfigurationValues(util):
    """Recover the configuration values for the given utility.

    Reads general.cfg from the app config path, creating it from the
    bundled default file on first run.

    Args:
        util: Any of the utils contained in the framework: domainfy,
            entify, mailfy, phonefy, searchfy, usufy.

    Returns:
        A dictionary containing the configuration for that utility.
    """
    VALUES = {}
    configPath = os.path.join(getConfigPath()['appPath'], 'general.cfg')
    # Bootstrap: copy the default config if the user copy does not exist.
    if (not os.path.exists(configPath)):
        defaultConfigPath = os.path.join(getConfigPath()['appPathDefaults'], 'general.cfg')
        try:
            with open(defaultConfigPath) as iF:
                cont = iF.read()
                with open(configPath, 'w') as oF:
                    oF.write(cont)
        except Exception as e:
            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath)
    # NOTE(review): ConfigParser (capitalized module) implies Python 2.
    config = ConfigParser.ConfigParser()
    config.read(configPath)
    # Parameters whose values are space-separated lists.
    LISTS = ['tlds', 'domains', 'platforms', 'extension', 'exclude_platforms', 'exclude_domains']
    for section in config.sections():
        incomplete = False  # NOTE(review): assigned but never used.
        if (section.lower() == util.lower()):
            for (param, value) in config.items(section):
                if (value == ''):
                    # Empty values become [] for list params, '' otherwise.
                    if (param in LISTS):
                        value = []
                    else:
                        value = ''
                elif (param in LISTS):
                    value = value.split(' ')
                elif (param == 'threads'):
                    try:
                        value = int(value)
                    except Exception as err:
                        raise errors.ConfigurationParameterNotValidError(configPath, section, param, value)
                elif (param == 'debug'):
                    try:
                        # Any non-zero integer enables debug.
                        if (int(value) == 0):
                            value = False
                        else:
                            value = True
                    except Exception as err:
                        # Invalid debug value: reset config to defaults.
                        print('Something happened when processing this debug option. Resetting to default.')
                        defaultConfigPath = os.path.join(getConfigPath()['appPathDefaults'], 'general.cfg')
                        try:
                            with open(defaultConfigPath) as iF:
                                cont = iF.read()
                                with open(configPath, 'w') as oF:
                                    oF.write(cont)
                        except Exception as e:
                            raise errors.DefaultConfigurationFileNotFoundError(configPath, defaultConfigPath)
                VALUES[param] = value
            # Only one section can match the utility name.
            break
    return VALUES
Method that recovers the configuration information about each program TODO: Grab the default file from the package data instead of storing it in the main folder. Args: ----- util: Any of the utils that are contained in the framework: domainfy, entify, mailfy, phonefy, searchfy, usufy. Returns: -------- A dictionary containing the default configuration.
codesearchnet
def ias53(msg):
    """Decode indicated airspeed from a BDS 5,3 message.

    Args:
        msg (String): 28 bytes hexadecimal message.

    Returns:
        int: indicated airspeed in knots, or None when the status bit
        (bit 13) is not set.
    """
    bits = hex2bin(data(msg))
    if bits[12] == '0':
        return None
    return bin2int(bits[13:23])
Indicated airspeed, BDS 5,3 message Args: msg (String): 28 bytes hexadecimal message Returns: int: indicated airspeed in knots
codesearchnet
def _FilterOutPathInfoDuplicates(path_infos): pi_dict = {} for pi in path_infos: path_key = (pi.path_type, pi.GetPathID()) pi_dict.setdefault(path_key, []).append(pi) def _SortKey(pi): return (pi.stat_entry.st_ctime, pi.stat_entry.st_mtime, pi.stat_entry.st_atime, pi.stat_entry.st_ino) for pi_values in pi_dict.values(): if (len(pi_values) > 1): pi_values.sort(key=_SortKey, reverse=True) return [v[0] for v in pi_dict.values()]
Filters out duplicates from passed PathInfo objects. Args: path_infos: An iterable with PathInfo objects. Returns: A list of PathInfo objects with duplicates removed. Duplicates are removed following this logic: they're sorted by (ctime, mtime, atime, inode number) in the descending order and then the first one is taken and the others are dropped.
codesearchnet
def tmybasename(usaf):
    """Basename for USAF base.

    Args:
        usaf (str): USAF code.

    Returns:
        (str): first CSV field of the first matching line, or None if the
        code is not found.
    """
    # FIX: the original compared `line.find(usaf) is not -1`, an identity
    # comparison that only works by CPython small-int caching; use `!=`.
    # Also close the file deterministically with a context manager.
    with open(env.SRC_PATH + '/tmy3.csv') as url_file:
        for line in url_file:
            if line.find(usaf) != -1:
                return line.rstrip().partition(',')[0]
Basename for USAF base. Args: usaf (str): USAF code Returns: (str)
codesearchnet
def log_optimal_transport(scores: torch.Tensor, reg_param: torch.Tensor, iterations: int) -> torch.Tensor:
    """Perform differentiable optimal transport in log-space for stability.

    Args:
        scores (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`):
            Cost matrix.
        reg_param (`torch.Tensor` of shape `(batch_size, 1, 1)`):
            Regularization parameter.
        iterations (`int`): Number of Sinkhorn iterations.

    Returns:
        `torch.Tensor` of shape `(batch_size, num_rows, num_columns)`:
        Logarithm of the optimal transport matrix.
    """
    batch_size, num_rows, num_columns = scores.shape
    one_tensor = scores.new_tensor(1)
    num_rows_tensor, num_columns_tensor = ((num_rows * one_tensor).to(scores), (num_columns * one_tensor).to(scores))
    # Augment the cost matrix with a dustbin row and column filled with
    # the regularization parameter.
    source_reg_param = reg_param.expand(batch_size, num_rows, 1)
    target_reg_param = reg_param.expand(batch_size, 1, num_columns)
    reg_param = reg_param.expand(batch_size, 1, 1)
    couplings = torch.cat([torch.cat([scores, source_reg_param], -1), torch.cat([target_reg_param, reg_param], -1)], 1)
    # Uniform log-marginals; the dustbin entries absorb the excess mass.
    log_normalization = -(num_rows_tensor + num_columns_tensor).log()
    log_source_distribution = torch.cat([log_normalization.expand(num_rows), num_columns_tensor.log()[None] + log_normalization])
    log_target_distribution = torch.cat([log_normalization.expand(num_columns), num_rows_tensor.log()[None] + log_normalization])
    log_source_distribution, log_target_distribution = (log_source_distribution[None].expand(batch_size, -1), log_target_distribution[None].expand(batch_size, -1))
    # Run Sinkhorn in log space, then undo the normalization shift.
    log_optimal_transport_matrix = log_sinkhorn_iterations(couplings, log_source_distribution, log_target_distribution, num_iterations=iterations)
    log_optimal_transport_matrix = log_optimal_transport_matrix - log_normalization
    return log_optimal_transport_matrix
Perform Differentiable Optimal Transport in Log-space for stability Args: scores: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Cost matrix. reg_param: (`torch.Tensor` of shape `(batch_size, 1, 1)`): Regularization parameter. iterations: (`int`): Number of Sinkhorn iterations. Returns: log_optimal_transport_matrix: (`torch.Tensor` of shape `(batch_size, num_rows, num_columns)`): Logarithm of the optimal transport matrix.
github-repos
def preprocess(self, images: ImageInput, return_tensors: Optional[Union[str, TensorType]]='pt') -> BatchFeature:
    """Preprocess an image or batch of images with the timm val transforms.

    Args:
        images (`ImageInput`): Image to preprocess. Expects a single or
            batch of images.
        return_tensors (`str` or `TensorType`, *optional*): The type of
            tensors to return; must be 'pt'.

    Returns:
        BatchFeature holding the transformed 'pixel_values'.
    """
    if return_tensors != 'pt':
        raise ValueError(f"return_tensors for TimmWrapperImageProcessor must be 'pt', but got {return_tensors}")
    # Some transforms cannot take tensors directly; round-trip via numpy.
    if self._not_supports_tensor_input and isinstance(images, torch.Tensor):
        images = images.cpu().numpy()
    if isinstance(images, torch.Tensor):
        transformed = self.val_transforms(images)
        # Add a batch dimension for a single 3D image tensor.
        pixel_values = transformed.unsqueeze(0) if transformed.ndim == 3 else transformed
    else:
        pil_images = [to_pil_image(img) for img in make_list_of_images(images)]
        pixel_values = torch.stack([self.val_transforms(img) for img in pil_images])
    return BatchFeature({'pixel_values': pixel_values}, tensor_type=return_tensors)
Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. Expects a single or batch of images return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return.
github-repos
def rewrite_bytes(self, in_bytes: List[str], reverse=False) -> List[str]:
    """Rewrite a sequence of bytes using the hash tree (longest-prefix match).

    Args:
        in_bytes (`List[str]`): A list of bytes to be rewritten.
        reverse (`bool`): If True, decoding is performed with the reverse
            hash tree.

    Returns:
        `List[str]`: The rewritten byte sequence.
    """
    out_bytes = []
    b_start = 0
    b_end = 0
    while b_start < len(in_bytes):
        tree_pointer = self.hash_tree if not reverse else self.reverse_hash_tree
        for j in range(b_start, len(in_bytes)):
            b = in_bytes[j]
            if b in tree_pointer:
                tree_pointer = tree_pointer[b]
            elif j == b_start:
                # First byte has no mapping at all: emit it unchanged.
                cur_leaf = [b]
                b_end = j
                break
            else:
                break
            # FIX: this leaf check must run after every successful descent
            # so the longest match seen so far is remembered (maximal
            # munch). The flattened original placed it ambiguously; if it
            # ran only after the loop, the byte that terminated the match
            # would be silently consumed.
            if self.LEAF in tree_pointer:
                cur_leaf = tree_pointer[self.LEAF]
                b_end = j
        out_bytes.extend(cur_leaf)
        b_start = b_end + 1
    return out_bytes
Rewrite a sequence of bytes using the hash tree. Args: in_bytes (`List[str]`): A list of bytes to be rewritten. reverse (`bool`): If True, decoding is performed with the reverse hash tree. Returns: `List[str]`: The rewritten byte sequence.
github-repos
def smooth(self, noise, strategy=INVERSE_STRATEGY):
    """In-place smoothing. See smooth_segment function.

    Args:
        noise (float): Noise expected.
        strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY,
            smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY.

    Returns:
        :obj:`Segment`: self, for chaining.
    """
    # Identity comparison deliberately matches the module constants.
    if strategy is INVERSE_STRATEGY:
        self.points = with_inverse(self.points, noise)
    elif strategy is EXTRAPOLATE_STRATEGY:
        self.points = with_extrapolation(self.points, noise, 30)
    elif strategy is NO_STRATEGY:
        self.points = with_no_strategy(self.points, noise)
    return self
In-place smoothing See smooth_segment function Args: noise (float): Noise expected strategy (int): Strategy to use. Either smooth.INVERSE_STRATEGY or smooth.EXTRAPOLATE_STRATEGY Returns: :obj:`Segment`
codesearchnet
def retry_loop(self, context, step_method):
    """Run step inside a retry loop.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
            mutate - after method execution will contain the new updated
            context.
        step_method: (method/function) Executed on every loop iteration.
            Signature is: function(context).
    """
    logger.debug("starting")
    context['retryCounter'] = 0
    sleep = context.get_formatted_as_type(self.sleep, out_type=float)
    # Avoid shadowing the builtin `max` used by the original.
    if self.max:
        max_attempts = context.get_formatted_as_type(self.max, out_type=int)
        logger.info(f"retry decorator will try {max_attempts} times at "
                    f"{sleep}s intervals.")
    else:
        max_attempts = None
        logger.info(f"retry decorator will try indefinitely at {sleep}s "
                    "intervals.")
    retry_runner = poll.while_until_true(
        interval=sleep, max_attempts=max_attempts)(self.exec_iteration)
    if retry_runner(context=context, step_method=step_method):
        logger.debug("retry loop complete, reporting success.")
    logger.debug("retry loop done")
    logger.debug("done")
Run step inside a retry loop. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context)
juraj-google-style
def GetPlasoTimestamp(self):
    """Retrieves a timestamp that is compatible with plaso.

    Returns:
        int: a POSIX timestamp in microseconds, or None if no timestamp
            is available.
    """
    timestamp = self._GetNormalizedTimestamp()
    if timestamp is None:
        return None
    # Scale seconds to microseconds, then round half-up to an integer.
    microseconds = timestamp * definitions.MICROSECONDS_PER_SECOND
    rounded = microseconds.quantize(1, rounding=decimal.ROUND_HALF_UP)
    return int(rounded)
Retrieves a timestamp that is compatible with plaso. Returns: int: a POSIX timestamp in microseconds or None if no timestamp is available.
codesearchnet
def projector(state, flatten=False):
    """Map a pure state vector to its density (state) matrix.

    Args:
        state (ndarray): State vector of length 2**num_qubits.
        flatten (bool): If True, return the matrix flattened
            column-major (Fortran order).

    Returns:
        ndarray: (2**num, 2**num) matrix if ``flatten`` is False,
            otherwise the (4**num,) column-stacked vector.
    """
    # |psi><psi| as an outer product of the conjugated vector with itself.
    rho = np.outer(state.conjugate(), state)
    return rho.flatten(order='F') if flatten else rho
maps a pure state to a state matrix Args: state (ndarray): the number of qubits flatten (bool): determine if state matrix of column work Returns: ndarray: state_mat(2**num, 2**num) if flatten is false ndarray: state_mat(4**num) if flatten is true stacked on by the column
codesearchnet
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:
    """Calculate the word error rate of a sequence against a reference.

    Args:
        ref: The gold-standard reference sequence.
        hyp: The hypothesis to be evaluated against the reference.

    Returns:
        The word error rate (as a percentage) of the hypothesis with
        respect to the reference.

    Raises:
        EmptyReferenceException: If the reference sequence is empty.
    """
    if not ref:
        raise EmptyReferenceException(
            "Cannot calculating word error rate against a length 0 "
            "reference sequence.")
    return 100 * float(min_edit_distance(ref, hyp)) / len(ref)
Calculate the word error rate of a sequence against a reference. Args: ref: The gold-standard reference sequence hyp: The hypothesis to be evaluated against the reference. Returns: The word error rate of the supplied hypothesis with respect to the reference string. Raises: persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
juraj-google-style
def WaitUntilNoFlowsToProcess(self, timeout=None):
    """Waits until the flow processing thread is done processing flows.

    Args:
        timeout: If specified, the maximum number of seconds to wait.

    Raises:
        TimeOutWhileWaitingForFlowsToBeProcessedError: if the timeout is
            reached before flow processing finishes.
    """
    t = self.flow_handler_thread
    if not t:
        return
    start_time = time.time()
    while True:
        with self.lock:
            # Done when the handler thread has died, or when there are no
            # requests ready for processing and none currently in flight.
            if (not t.isAlive() or
                (not self._GetFlowRequestsReadyForProcessing() and
                 not self.flow_handler_num_being_processed)):
                return
        # Poll at a fixed 200ms interval.
        time.sleep(0.2)
        if timeout and time.time() - start_time > timeout:
            raise TimeOutWhileWaitingForFlowsToBeProcessedError(
                "Flow processing didn't finish in time.")
Waits until flow processing thread is done processing flows. Args: timeout: If specified, is a max number of seconds to spend waiting. Raises: TimeOutWhileWaitingForFlowsToBeProcessedError: if timeout is reached.
juraj-google-style
def take(x, indices, axis=None):
    """Take elements from a tensor along an axis.

    Args:
        x: Source tensor.
        indices: The indices of the values to extract.
        axis: The axis over which to select values. By default, the
            flattened input tensor is used.

    Returns:
        The corresponding tensor of values.
    """
    # Defer to the symbolic op when any input is a symbolic tensor.
    if any_symbolic_tensors((x, indices)):
        return Take(axis=axis).symbolic_call(x, indices)
    return backend.numpy.take(x, indices, axis=axis)
Take elements from a tensor along an axis. Args: x: Source tensor. indices: The indices of the values to extract. axis: The axis over which to select values. By default, the flattened input tensor is used. Returns: The corresponding tensor of values.
github-repos
def get_summary(result):
    """Extract a summary dict from a test result.

    Args:
        result (instance): HtmlTestResult() instance.

    Returns:
        dict: summary extracted from result, of shape
            {"success": bool, "stat": {}, "time": {}, "records": []}.
    """
    stat = {
        'total': result.testsRun,
        'failures': len(result.failures),
        'errors': len(result.errors),
        'skipped': len(result.skipped),
        'expectedFailures': len(result.expectedFailures),
        'unexpectedSuccesses': len(result.unexpectedSuccesses)
    }
    # Successes are whatever is left after all other outcomes.
    stat['successes'] = stat['total'] - sum(
        stat[key] for key in ('failures', 'errors', 'skipped',
                              'expectedFailures', 'unexpectedSuccesses'))
    return {
        "success": result.wasSuccessful(),
        "stat": stat,
        "time": {
            'start_at': result.start_at,
            'duration': result.duration
        },
        "records": result.records
    }
get summary from test result Args: result (instance): HtmlTestResult() instance Returns: dict: summary extracted from result. { "success": True, "stat": {}, "time": {}, "records": [] }
juraj-google-style
def transform_coords(self, width, height):
    """Return the touch event's absolute coordinates transformed to
    screen coordinates.

    Only valid for :attr:`~libinput.constant.EventType.TOUCH_DOWN` and
    :attr:`~libinput.constant.EventType.TOUCH_MOTION` events; any other
    event type raises :exc:`AttributeError`.

    Args:
        width (int): The current output screen width.
        height (int): The current output screen height.

    Returns:
        (float, float): The (x, y) coordinates transformed to screen
            coordinates.
    """
    if self.type not in {EventType.TOUCH_DOWN, EventType.TOUCH_MOTION}:
        raise AttributeError(_wrong_meth.format(self.type))
    return (
        self._libinput.libinput_event_touch_get_x_transformed(
            self._handle, width),
        self._libinput.libinput_event_touch_get_y_transformed(
            self._handle, height))
Return the current absolute coordinates of the touch event, transformed to screen coordinates. For events not of type :attr:`~libinput.constant.EventType.TOUCH_DOWN`, :attr:`~libinput.constant.EventType.TOUCH_MOTION`, this method raises :exc:`AttributeError`. Args: width (int): The current output screen width. height (int): The current output screen height. Returns: (float, float): The current absolute (x, y) coordinates transformed to screen coordinates.
codesearchnet
class ErrorHandlingConfig(NamedTuple):
    """Specifies whether and where to output error rows.

    Args:
        output (str): Name to use for the output error collection.
    """

    output: str
This option specifies whether and where to output error rows. Args: output (str): Name to use for the output error collection
github-repos
def _GetIdentifierFromPath(self, parser_mediator):
    """Extracts a container or a graph ID from a JSON file's path.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.

    Returns:
        str: container or graph identifier.
    """
    file_entry = parser_mediator.GetFileEntry()
    file_system = file_entry.GetFileSystem()
    segments = file_system.SplitPath(file_entry.path_spec.location)
    # The identifier is the name of the directory containing the file.
    return segments[-2]
Extracts a container or a graph ID from a JSON file's path. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. Returns: str: container or graph identifier.
codesearchnet
def external_ids(self, **kwargs):
    """Get the external ids for this movie id.

    Args:
        language: (optional) ISO 639-1 code.
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('external_ids'), kwargs)
    # Mirror the response fields onto this instance's attributes.
    self._set_attrs_to_values(response)
    return response
Get the external ids for a specific movie id. Args: language: (optional) ISO 639-1 code. append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def purview(repertoire):
    """The purview of the repertoire.

    Args:
        repertoire (np.ndarray): A repertoire, or None.

    Returns:
        tuple[int]: The purview that the repertoire was computed over
            (the indices of dimensions of size 2), or None if no
            repertoire is given.
    """
    if repertoire is None:
        return None
    shape = repertoire.shape
    return tuple(axis for axis in range(len(shape)) if shape[axis] == 2)
The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over.
codesearchnet
def setup(self, reason, grr_server_url, grr_username, grr_password, approvers=None, verify=True):
    """Initializes a GRR hunt result collector.

    Args:
        reason: justification for GRR access.
        grr_server_url: GRR server URL.
        grr_username: GRR username.
        grr_password: GRR password.
        approvers: optional comma-separated string of GRR approval
            recipients.
        verify: boolean, whether to verify the GRR server's x509
            certificate.
    """
    grr_auth = (grr_username, grr_password)
    # Split the comma-separated approvers string into a clean list.
    self.approvers = []
    if approvers:
        self.approvers = [item.strip() for item in approvers.strip().split(',')]
    self.grr_api = grr_api.InitHttp(api_endpoint=grr_server_url, auth=grr_auth, verify=verify)
    # Results are staged in a fresh temporary directory.
    self.output_path = tempfile.mkdtemp()
    self.reason = reason
Initializes a GRR hunt result collector. Args: reason: justification for GRR access. grr_server_url: GRR server URL. grr_username: GRR username. grr_password: GRR password. approvers: list of GRR approval recipients. verify: boolean, whether to verify the GRR server's x509 certificate.
codesearchnet
def _send_success_response(self, response, start_response):
    """Sends an HTTP 200 json success response.

    This calls start_response and returns the response body.

    Args:
        response: A string containing the response body to return.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        A string, the response body.
    """
    json_headers = [('Content-Type', 'application/json; charset=UTF-8')]
    return util.send_wsgi_response(
        '200 OK', json_headers, response, start_response)
Sends an HTTP 200 json success response. This calls start_response and returns the response body. Args: response: A string containing the response body to return. start_response: A function with semantics defined in PEP-333. Returns: A string, the response body.
codesearchnet
async def _open_connection_https(self, location):
    """Creates an async TLS-wrapped socket and returns it.

    Args:
        location (tuple(str, int)): A tuple of net location (eg
            '127.0.0.1' or 'example.org') and port (eg 443 or 25000).

    Returns:
        The connected socket object with TLS started.
    """
    # Falls back to a default SSLContext when none was configured.
    sock = await connect_tcp(location[0], location[1], ssl_context=self.ssl_context or ssl.SSLContext(), bind_host=self.source_address, autostart_tls=True)
    sock._active = True
    return sock
Creates an async SSL socket, returns it. Args: location (tuple(str, int)): A tuple of net location (eg '127.0.0.1' or 'example.org') and port (eg 80 or 25000).
juraj-google-style
def et2roc(et_fo, roc_fo):
    """ET to ROC conversion.

    NOTE(review): several string literals in this function appear to have
    been truncated at a '#' character by the extraction process (the bare
    `roc_fo.write("` lines and the comment-skipping condition below) —
    restore them from the original source before use.

    Args:
        et_fo (file): File object for the ET file.
        roc_fo (file): File object for the ROC file.

    Raises:
        ValueError: On malformed interval bounds (via int()).
    """
    # One counter dict per mapping-quality value.
    stats_dicts = [
        {
            "q": q,
            "M": 0,
            "w": 0,
            "m": 0,
            "P": 0,
            "U": 0,
            "u": 0,
            "T": 0,
            "t": 0,
            "x": 0
        } for q in range(rnftools.lavender.MAXIMAL_MAPPING_QUALITY + 1)
    ]

    for line in et_fo:
        line = line.strip()
        # NOTE(review): truncated literal — presumably skips comment lines.
        if line != "" and line[0] != "
            (read_tuple_name, tab, info_categories) = line.partition("\t")
            intervals = info_categories.split(",")
            for interval in intervals:
                # Each interval is "<category>:<left>-<right>"; bump the
                # category counter for every quality in the range.
                category = interval[0]
                (left, colon, right) = interval[2:].partition("-")
                for q in range(int(left), int(right) + 1):
                    stats_dicts[q][category] += 1

    # NOTE(review): truncated literals — presumably a ROC header block.
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("
    roc_fo.write("

    # Only emit rows whose counters differ from the previous row.
    l_numbers = []
    for line in stats_dicts:
        numbers = [
            line["M"], line["w"], line["m"], line["P"], line["U"], line["u"], line["T"], line["t"],
            line["x"]
        ]
        if numbers != l_numbers:
            roc_fo.write("\t".join([str(line["q"])] + list(map(str, numbers)) + [str(sum(numbers))]) + os.linesep)
        l_numbers = numbers
ET to ROC conversion. Args: et_fo (file): File object for the ET file. roc_fo (file): File object for the ROC file. raises: ValueError
juraj-google-style
def _flatten_multiplicand_list(kernels):
    """Flatten a list of kernels which may contain _ProductKernel instances.

    Args:
        kernels: Python list of `PositiveSemidefiniteKernel` instances.

    Returns:
        Python list containing the elements of kernels, with any
        _ProductKernel instances replaced by their `kernels` property
        contents.
    """
    result = []
    for kernel in kernels:
        if isinstance(kernel, _ProductKernel):
            result.extend(kernel.kernels)
        else:
            result.append(kernel)
    return result
Flatten a list of kernels which may contain _ProductKernel instances. Args: kernels: Python list of `PositiveSemidefiniteKernel` instances Returns: Python list containing the elements of kernels, with any _ProductKernel instances replaced by their `kernels` property contents.
juraj-google-style
def __directory_list_descriptor(self, configs):
    """Builds a directory list for an API.

    NOTE(review): the 'kind' literal below appears truncated at a '#'
    by the extraction process (likely 'discovery#directoryList') —
    restore from the original source before use.

    Args:
        configs: List of dicts containing the service configurations to
            list.

    Returns:
        A dictionary that can be deserialized into JSON in discovery
        list format.
    """
    descriptor = {
        'kind': 'discovery
        'discoveryVersion': 'v1',
    }

    items = []
    for config in configs:
        item_descriptor = self.__item_descriptor(config)
        if item_descriptor:
            items.append(item_descriptor)

    # Only include the 'items' key when there is at least one item.
    if items:
        descriptor['items'] = items

    return descriptor
Builds a directory list for an API. Args: configs: List of dicts containing the service configurations to list. Returns: A dictionary that can be deserialized into JSON in discovery list format. Raises: ApiConfigurationError: If there's something wrong with the API configuration, such as a multiclass API decorated with different API descriptors (see the docstring for api()), or a repeated method signature.
juraj-google-style
def find_files(base_dir, extensions, exclude_dirs=None):
    """Find all files matching the given extensions.

    Args:
        base_dir (str): Path of base directory to search in.
        extensions (list): A list of file extensions to search for.
        exclude_dirs (list): A list of directories to exclude from
            search. Defaults to no exclusions.

    Returns:
        list: Paths that match the search.
    """
    # None sentinel instead of a mutable default argument.
    if exclude_dirs is None:
        exclude_dirs = ()
    result = []
    for root, _dir_names, file_names in os.walk(base_dir):
        for filename in file_names:
            candidate = os.path.join(root, filename)
            if should_include_file_in_search(candidate, extensions,
                                             exclude_dirs):
                result.append(candidate)
    return result
Find all files matching the given extensions. Args: base_dir (str): Path of base directory to search in. extensions (list): A list of file extensions to search for. exclude_dirs (list): A list of directories to exclude from search. Returns: list of paths that match the search
codesearchnet
def list_container_services_sub(access_token, subscription_id):
    """List the container services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON model.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/providers/Microsoft.ContainerService/ContainerServices'
        + '?api-version=' + ACS_API)
    return do_get(endpoint, access_token)
List the container services in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON model.
codesearchnet
def timezone(self, value=0.0):
    """Corresponds to IDD Field `timezone`: time relative to GMT.

    Args:
        value (float): value for IDD Field `timezone` in hours.
            Default value: 0.0; must satisfy -12.0 <= value <= 12.0.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    # None means "missing value": store it unchecked.
    if value is None:
        self._timezone = None
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `timezone`'.format(value))
    if value < -12.0:
        raise ValueError('value need to be greater or equal -12.0 '
                         'for field `timezone`')
    if value > 12.0:
        raise ValueError('value need to be smaller 12.0 '
                         'for field `timezone`')
    self._timezone = value
Corresponds to IDD Field `timezone` Time relative to GMT. Args: value (float): value for IDD Field `timezone` Unit: hr - not on standard units list??? Default value: 0.0 value >= -12.0 value <= 12.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def get_source_url(obj):
    """Get the source url for a Trust object.

    Args:
        obj (ChainOfTrust or LinkOfTrust): the trust object to inspect.

    Raises:
        CoTError: if repo and source are defined and don't match.

    Returns:
        str: the source url.
    """
    source_env_prefix = obj.context.config['source_env_prefix']
    task = obj.task
    log.debug("Getting source url for {} {}...".format(obj.name, obj.task_id))
    repo = get_repo(obj.task, source_env_prefix=source_env_prefix)
    source = task['metadata']['source']
    # If the task declares a repo, it must agree with metadata.source.
    if repo and not verify_repo_matches_url(repo, source):
        raise CoTError("{name} {task_id}: {source_env_prefix} {repo} doesn't match source {source}!".format(
            name=obj.name,
            task_id=obj.task_id,
            source_env_prefix=source_env_prefix,
            repo=repo,
            source=source
        ))
    log.info("{} {}: found {}".format(obj.name, obj.task_id, source))
    return source
Get the source url for a Trust object. Args: obj (ChainOfTrust or LinkOfTrust): the trust object to inspect Raises: CoTError: if repo and source are defined and don't match Returns: str: the source url.
juraj-google-style
def ForceRemoveFileObject(self, path_spec):
    """Forces the removal of a file-like object based on a path specification.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        bool: True if the file-like object was cached.
    """
    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)
    if not cache_value:
        return False

    # Repeatedly close the cached object until the cache value reports
    # it is no longer referenced.
    while not cache_value.IsDereferenced():
        cache_value.vfs_object.close()

    return True
Forces the removal of a file-like object based on a path specification. Args: path_spec (PathSpec): path specification. Returns: bool: True if the file-like object was cached.
juraj-google-style
def merge_with(self, other):
    """Returns a `TensorShape` combining the information in `self` and `other`.

    The dimensions in `self` and `other` are merged elementwise,
    according to the rules defined for `Dimension.merge_with()`.

    Args:
        other: Another `TensorShape`.

    Returns:
        A `TensorShape` containing the combined information of `self`
        and `other`.

    Raises:
        ValueError: If `self` and `other` are not convertible.
    """
    other = as_shape(other)
    # An unknown shape merges to whatever the other shape is.
    if self._dims is None:
        return other
    try:
        self.assert_same_rank(other)
        merged = [dim.merge_with(other[i])
                  for i, dim in enumerate(self._dims)]
        return TensorShape(merged)
    except ValueError:
        raise ValueError(('Shapes %s and %s are not convertible' % (self, other)))
Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not convertible.
codesearchnet
def get_dG_at_T(seq, temp):
    """Predict dG at temperature T, using the Dill or Oobatake method.

    Args:
        seq (str, Seq, SeqRecord): Amino acid sequence.
        temp (float): Temperature in degrees C.

    Returns:
        (tuple): tuple containing:
            dG (float): Free energy of unfolding (cal/mol).
            keq (float): Equilibrium constant Keq.
            method (str): Method used to calculate ('Dill' or 'Oobatake').
    """
    # Gas constant in cal/(mol*K).
    r_cal = scipy.constants.R / scipy.constants.calorie
    seq = ssbio.protein.sequence.utils.cast_to_str(seq)
    # Oobatake predictions over the 20-50 C range.
    oobatake = {}
    for t in range(20, 51):
        oobatake[t] = calculate_oobatake_dG(seq, t)
    stable = [i for i in oobatake.values() if i > 0]
    if len(stable) == 0:
        # Oobatake never predicts stability: fall back to Dill.
        # 0.238846 presumably converts J to cal — TODO confirm.
        dG = 0.238846 * calculate_dill_dG(len(seq), temp)
        method='Dill'
    else:
        # NOTE(review): assumes `temp` is an integer key in 20..50 —
        # confirm against callers.
        dG = oobatake[temp]
        method='Oobatake'
    # Keq = exp(-dG / RT), with T converted to Kelvin.
    keq = math.exp(-1 * dG / (r_cal * (temp + 273.15)))
    return dG, keq, method
Predict dG at temperature T, using best predictions from Dill or Oobatake methods. Args: seq (str, Seq, SeqRecord): Amino acid sequence temp (float): Temperature in degrees C Returns: (tuple): tuple containing: dG (float) Free energy of unfolding dG (cal/mol) keq (float): Equilibrium constant Keq method (str): Method used to calculate
juraj-google-style
def _get_user_agent():
    """Construct the user-agent header from the package info, Python
    version and OS version.

    Returns:
        The user agent string, e.g.
        'Python/3.6.7 slack/2.0.0 Darwin/17.7.0'.
    """
    package = __name__.split('.')[0]
    client = f'{package}/{ver.__version__}'
    v = sys.version_info
    python_version = f'Python/{v.major}.{v.minor}.{v.micro}'
    system_info = f'{platform.system()}/{platform.release()}'
    return ' '.join([python_version, client, system_info])
Construct the user-agent header with the package info, Python version and OS version. Returns: The user agent string. e.g. 'Python/3.6.7 slack/2.0.0 Darwin/17.7.0'
codesearchnet
def squeeze(x, axis):
    """Removes a 1-dimension from the tensor at index "axis".

    Args:
        x: A tensor or variable.
        axis: Axis to drop.

    Returns:
        A tensor with the same data as `x` but reduced dimensions.
    """
    return array_ops.squeeze(x, [axis])
Removes a 1-dimension from the tensor at index "axis". Args: x: A tensor or variable. axis: Axis to drop. Returns: A tensor with the same data as `x` but reduced dimensions.
github-repos
def run(self, *args):
    """Invoke the wrapped script.

    Returns:
        Return code of the command, or 0 if the command is not run.
    """
    prefix_char = self.prefix_char
    if prefix_char is None:
        prefix_char = config.suite_alias_prefix_char
    # An empty prefix means the script takes no prefixed arguments.
    if prefix_char == '':
        return self._run_no_args(args)
    return self._run(prefix_char, args)
Invoke the wrapped script. Returns: Return code of the command, or 0 if the command is not run.
codesearchnet
def get_data_path(module_id: str) -> Path:
    """Get the path for persistent storage of a module.

    This method creates the queried path if it does not already exist.

    Args:
        module_id (str): Module ID.

    Returns:
        The data path of the indicated module.
    """
    profile = coordinator.profile
    data_path = get_base_path() / 'profiles' / profile / module_id
    # exist_ok avoids the race between an exists() check and mkdir().
    data_path.mkdir(parents=True, exist_ok=True)
    return data_path
Get the path for persistent storage of a module. This method creates the queried path if not existing. Args: module_id (str): Module ID Returns: The data path of indicated module.
juraj-google-style
def add_condition(self, observed_arr):
    """Add condition.

    Draws a condition batch from the true-image sampler and concatenates
    it onto the observed samples along axis 1.

    Args:
        observed_arr: `np.ndarray` of samples.

    Returns:
        `np.ndarray` of samples with the condition concatenated.
    """
    condition_arr = self.__image_true_sampler.draw()
    return np.concatenate((observed_arr, condition_arr), axis=1)
Add condtion. Args: observed_arr: `np.ndarray` of samples. Returns: `np.ndarray` of samples.
juraj-google-style
def _instantiate_exception(self, node, exc_type):
    """Instantiate an exception type.

    Args:
        node: The current node.
        exc_type: A cfg.Variable of the exception type.

    Returns:
        A tuple of a cfg.Variable of the instantiated type and a list of
        the flattened exception types in the data of exc_type. None takes
        the place of invalid types.
    """
    value = self.ctx.program.NewVariable()
    types = []
    # Worklist of candidate exception-type values to flatten.
    stack = list(exc_type.data)
    while stack:
        e = stack.pop()
        if isinstance(e, abstract.Tuple):
            # A concrete tuple of exception types: recurse into each member.
            for sub_exc_type in e.pyval:
                sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)
                value.PasteVariable(sub_value)
                types.extend(sub_types)
        elif isinstance(e, abstract.Instance) and e.cls.full_name == 'builtins.tuple':
            # An abstract tuple: recurse into its element type parameter.
            sub_exc_type = e.get_instance_type_parameter(abstract_utils.T)
            sub_value, sub_types = self._instantiate_exception(node, sub_exc_type)
            value.PasteVariable(sub_value)
            types.extend(sub_types)
        elif isinstance(e, abstract.Class) and any((base.full_name == 'builtins.BaseException' or isinstance(base, abstract.AMBIGUOUS_OR_EMPTY) for base in e.mro)):
            # A proper exception class: instantiate it.
            value.PasteVariable(self.init_class(node, e))
            types.append(e)
        elif isinstance(e, abstract.Union):
            stack.extend(e.options)
        else:
            # Invalid exception type: report an error (unless ambiguous)
            # and record None in its place.
            if not isinstance(e, abstract.AMBIGUOUS_OR_EMPTY):
                if isinstance(e, abstract.Class):
                    mro_seqs = [e.mro] if isinstance(e, abstract.Class) else []
                    msg = f'{e.name} does not inherit from BaseException'
                else:
                    mro_seqs = []
                    msg = 'Not a class'
                self.ctx.errorlog.mro_error(self.frames, e.name, mro_seqs, details=msg)
            value.AddBinding(self.ctx.convert.unsolvable, [], node)
            types.append(None)
    return (value, types)
Instantiate an exception type. Args: node: The current node. exc_type: A cfg.Variable of the exception type. Returns: A tuple of a cfg.Variable of the instantiated type and a list of the flattened exception types in the data of exc_type. None takes the place of invalid types.
github-repos
def _validate_version(connection, dsn):
    """Performs on-the-fly schema updates based on the models version.

    Args:
        connection: Open database connection.
        dsn: Data source name of the database.

    Raises:
        DatabaseError: if user uses old sqlite database.
    """
    try:
        version = get_stored_version(connection)
    except VersionIsNotStored:
        # No version recorded yet: treat this as a brand new database
        # and stamp it with the current schema version.
        logger.debug('Version not stored in the db: assuming new database creation.')
        version = SCHEMA_VERSION
        _update_version(connection, version)
    assert isinstance(version, int)
    # Versions in (10, 100) identify the legacy SQLite schema range.
    if ((version > 10) and (version < 100)):
        raise DatabaseError('You are trying to open an old SQLite database.')
    if _migration_required(connection):
        migrate(connection, dsn)
Performs on-the-fly schema updates based on the models version. Raises: DatabaseError: if user uses old sqlite database.
codesearchnet
def signUserCsr(self, xcsr, signas, outp=None):
    """Signs a user CSR with a CA keypair.

    Args:
        xcsr (OpenSSL.crypto.X509Req): The certificate signing request.
        signas (str): The CA keypair name to sign the CSR with.
        outp (synapse.lib.output.Output): The output buffer.

    Examples:
        cdir.signUserCsr(mycsr, 'myca')

    Returns:
        ((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing
            the public key and certificate objects.
    """
    pkey = xcsr.get_pubkey()
    name = xcsr.get_subject().CN
    return self.genUserCert(name, csr=pkey, signas=signas, outp=outp)
Signs a user CSR with a CA keypair. Args: cert (OpenSSL.crypto.X509Req): The certificate signing request. signas (str): The CA keypair name to sign the CSR with. outp (synapse.lib.output.Output): The output buffer. Examples: cdir.signUserCsr(mycsr, 'myca') Returns: ((OpenSSL.crypto.PKey, OpenSSL.crypto.X509)): Tuple containing the public key and certificate objects.
juraj-google-style
def text_editor(file='', background=False, return_cmd=False):
    """Starts the default graphical text editor.

    Start the user's preferred graphical text editor, optionally with a
    file.

    Args:
        file (str): The file to be opened with the editor. Defaults to
            an empty string (i.e. no file).
        background (bool): Runs the editor in the background, instead of
            waiting for completion. Defaults to ``False``.
        return_cmd (bool): Returns the command (str) to run the editor
            instead of running it. Defaults to ``False``.

    Returns:
        str: Only if ``return_cmd``, the command to run the editor is
            returned. Else returns nothing.
    """
    desktop_env = system.get_name()
    # Resolve the default editor command per platform.
    if (desktop_env == 'windows'):
        editor_cmd_str = system.get_cmd_out(['ftype', 'textfile']).split('=', 1)[1]
    elif (desktop_env == 'mac'):
        editor_cmd_str = ('open -a' + system.get_cmd_out(['def', 'read', 'com.apple.LaunchServices', 'LSHandlers-array{LSHandlerContentType=public.plain-text;}']))
    else:
        # Linux/BSD: ask xdg-mime for the text/plain handler.
        editor_cmd_str = system.get_cmd_out(['xdg-mime', 'query', 'default', 'text/plain'])
        if ('\n' in editor_cmd_str):
            # Multiple handlers returned: keep only the first.
            editor_cmd_str = editor_cmd_str.split('\n')[0]
        if editor_cmd_str.endswith('.desktop'):
            # Resolve the .desktop entry to its Exec command line.
            editor_cmd_str = desktopfile.parse(desktopfile.locate(editor_cmd_str)[0])['Exec']
            # Strip desktop-entry field codes (%f, %u, ...) and flags.
            for i in editor_cmd_str.split():
                if i.startswith('%'):
                    editor_cmd_str = editor_cmd_str.replace(i, '')
                if (i == '--new-document'):
                    editor_cmd_str = editor_cmd_str.replace(i, '')
    if file:
        editor_cmd_str += ' {}'.format(shlex.quote(file))
    if return_cmd:
        return editor_cmd_str
    text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
    if (not background):
        text_editor_proc.wait()
Starts the default graphical text editor. Start the user's preferred graphical text editor, optionally with a file. Args: file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file). background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``. return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``. Returns: str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
codesearchnet
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the KeyWrappingSpecification struct and
    decode it into its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: If the wrapping method attribute is missing.
    """
    super(KeyWrappingSpecification, self).read(
        input_stream,
        kmip_version=kmip_version
    )
    # Limit reads to this struct's encoded length.
    local_stream = BytearrayStream(input_stream.read(self.length))

    # The wrapping method is required.
    if self.is_tag_next(enums.Tags.WRAPPING_METHOD, local_stream):
        self._wrapping_method = primitives.Enumeration(
            enum=enums.WrappingMethod,
            tag=enums.Tags.WRAPPING_METHOD
        )
        self._wrapping_method.read(
            local_stream,
            kmip_version=kmip_version
        )
    else:
        raise ValueError(
            "Invalid struct missing the wrapping method attribute."
        )

    # The remaining fields are optional.
    if self.is_tag_next(
            enums.Tags.ENCRYPTION_KEY_INFORMATION,
            local_stream
    ):
        self._encryption_key_information = EncryptionKeyInformation()
        self._encryption_key_information.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(
            enums.Tags.MAC_SIGNATURE_KEY_INFORMATION,
            local_stream
    ):
        self._mac_signature_key_information = MACSignatureKeyInformation()
        self._mac_signature_key_information.read(
            local_stream,
            kmip_version=kmip_version
        )

    # Zero or more attribute names may follow.
    attribute_names = []
    while self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_stream):
        attribute_name = primitives.TextString(
            tag=enums.Tags.ATTRIBUTE_NAME
        )
        attribute_name.read(local_stream, kmip_version=kmip_version)
        attribute_names.append(attribute_name)
    self._attribute_names = attribute_names

    if self.is_tag_next(enums.Tags.ENCODING_OPTION, local_stream):
        self._encoding_option = primitives.Enumeration(
            enum=enums.EncodingOption,
            tag=enums.Tags.ENCODING_OPTION
        )
        self._encoding_option.read(
            local_stream,
            kmip_version=kmip_version
        )

    self.is_oversized(local_stream)
Read the data encoding the KeyWrappingSpecification struct and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def get_dense_tensor(self, transformation_cache, state_manager):
    """Returns a `Tensor` to be used by model-builder functions.

    Abstract hook: the body is intentionally empty here and subclasses
    are expected to provide the implementation.

    Args:
        transformation_cache: A `FeatureTransformationCache` object to
            access features.
        state_manager: A `StateManager` to create / access resources
            such as lookup tables.

    Returns:
        `Tensor` of shape [batch_size] + `variable_shape`.
    """
    pass
Returns a `Tensor`. The output of this function will be used by model-builder-functions. For example the pseudo code of `input_layer` will be like: ```python def input_layer(features, feature_columns, ...): outputs = [fc.get_dense_tensor(...) for fc in feature_columns] return tf.concat(outputs) ``` Args: transformation_cache: A `FeatureTransformationCache` object to access features. state_manager: A `StateManager` to create / access resources such as lookup tables. Returns: `Tensor` of shape [batch_size] + `variable_shape`.
github-repos
def read(self, filename):
    """Reads the file specified by filename.

    This method will load the eapi.conf file specified by filename into
    the instance object. It will also add the default connection
    localhost if it was not defined in the eapi.conf file.

    Args:
        filename (str): The full path to the file to load.
    """
    try:
        SafeConfigParser.read(self, filename)
    except SafeConfigParserError as exc:
        # A parse error is logged but not fatal; defaults still apply.
        msg = ('%s: parsing error in eapi conf file: %s' %
               (type(exc).__name__, filename))
        debug(msg)

    self._add_default_connection()

    # Connection sections without an explicit host use the connection
    # name itself (the part after 'connection:') as the host.
    for name in self.sections():
        if (name.startswith('connection:') and
                ('host' not in dict(self.items(name)))):
            self.set(name, 'host', name.split(':')[1])
    self.generate_tags()
Reads the file specified by filename This method will load the eapi.conf file specified by filename into the instance object. It will also add the default connection localhost if it was not defined in the eapi.conf file Args: filename (str): The full path to the file to load
codesearchnet
def to_ising(self):
    """Converts a binary quadratic model to Ising format.

    If the binary quadratic model's vartype is not
    :class:`.Vartype.SPIN`, values are converted.

    Returns:
        tuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where
            `linear` is a dict of linear biases, `quadratic` is a dict
            of quadratic biases, and `offset` is a number that
            represents the constant offset of the binary quadratic
            model.
    """
    spin = self.spin
    # Copy the bias mappings so callers cannot mutate internal state.
    linear = dict(spin.linear)
    quadratic = dict(spin.quadratic)
    return (linear, quadratic, spin.offset)
Converts a binary quadratic model to Ising format. If the binary quadratic model's vartype is not :class:`.Vartype.SPIN`, values are converted. Returns: tuple: 3-tuple of form (`linear`, `quadratic`, `offset`), where `linear` is a dict of linear biases, `quadratic` is a dict of quadratic biases, and `offset` is a number that represents the constant offset of the binary quadratic model. Examples: This example converts a binary quadratic model to an Ising problem. >>> import dimod >>> model = dimod.BinaryQuadraticModel({0: 1, 1: -1, 2: .5}, ... {(0, 1): .5, (1, 2): 1.5}, ... 1.4, ... dimod.SPIN) >>> model.to_ising() # doctest: +SKIP ({0: 1, 1: -1, 2: 0.5}, {(0, 1): 0.5, (1, 2): 1.5}, 1.4)
codesearchnet
def use(self, middleware, path=None):
    """Call the provided middleware upon requests matching the path.

    If path is not provided or None, all requests will match.

    Args:
        middleware (callable): Callable with the signature
            ``(res, req) -> None``.
        path (Optional[str or regex]): a specific path the request must
            match for the middleware to be called.

    Returns:
        This router.
    """
    self.log.info(" Using middleware {}", middleware)
    pattern = MiddlewareChain.ROOT_PATTERN if path is None else path
    self.add(HTTPMethod.ALL, pattern, middleware)
    return self
Call the provided middleware upon requests matching the path. If path is not provided or None, all requests will match. Args: middleware (callable): Callable with the signature ``(res, req) -> None`` path (Optional[str or regex]): a specific path the request must match for the middleware to be called. Returns: This router
juraj-google-style
def __init__(self, server, port, project_key=None, run_asyncore_thread=True):
    """Initialize the object.

    Args:
        server (str): Address of the ZEO server. See :attr:`server`.
        port (int): Port of the ZEO server. See :attr:`port`.
        project_key (str, default None): See :attr:`project_key`. If not
            set, the root of the database is used (this may cause
            performance issues).
        run_asyncore_thread (bool, default True): Run external asyncore
            thread, which handles connections to database? Default True.
    """
    self.server = server
    self.port = port

    super(ZEOWrapper, self).__init__(
        project_key=project_key,
        run_asyncore_thread=run_asyncore_thread,
    )
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performace issues). run_asyncore_thread (bool, default True): Run external asyncore thread, which handles connections to database? Default True.
juraj-google-style
def update_labels(self, node_name: str, labels: dict):
    """Update labels of a node.

    Args:
        node_name (string): Name of the node.
        labels (dict): Labels to add to the node.

    Raises:
        RuntimeError: If this node is not a Swarm manager.
    """
    # Only the manager node is allowed to change node details.
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can update '
                           'node details.')
    spec = {
        'Availability': 'active',
        'Name': node_name,
        'Role': 'manager',
        'Labels': labels,
    }
    self._client.nodes.get(node_name).update(spec)
Update label of a node. Args: node_name (string): Name of the node. labels (dict): Label to add to the node
juraj-google-style
def attrname_to_colname_dict(cls) -> Dict[str, str]:
    """Asks an SQLAlchemy class how its attribute names correspond to
    database column names.

    Args:
        cls: SQLAlchemy ORM class.

    Returns:
        a dictionary mapping attribute names to database column names.
    """
    return {attrname: column.name
            for attrname, column in gen_columns(cls)}
Asks an SQLAlchemy class how its attribute names correspond to database column names. Args: cls: SQLAlchemy ORM class Returns: a dictionary mapping attribute names to database column names
juraj-google-style
def write_info_file(tensorboard_info):
    """Write TensorBoardInfo to the current process's info file.

    This should be called by `main` once the server is ready. When the
    server shuts down, `remove_info_file` should be called.

    Args:
        tensorboard_info: A valid `TensorBoardInfo` object.

    Raises:
        ValueError: If any field on the info object is not of the
            correct type (presumably raised by `_info_to_string` —
            confirm against its implementation).
    """
    payload = "%s\n" % _info_to_string(tensorboard_info)
    with open(_get_info_file_path(), "w") as outfile:
        outfile.write(payload)
Write TensorBoardInfo to the current process's info file. This should be called by `main` once the server is ready. When the server shuts down, `remove_info_file` should be called. Args: tensorboard_info: A valid `TensorBoardInfo` object. Raises: ValueError: If any field on `info` is not of the correct type.
juraj-google-style
def starts_when(iterable, condition):
    """Start yielding items when a condition arises.

    Args:
        iterable: the iterable to filter.
        condition: if the callable returns True once, start yielding
            items. If it's not a callable, it will be converted to one
            as `lambda condition: condition == item`.

    Example:
        >>> list(starts_when(range(10), lambda x: x > 5))
        [6, 7, 8, 9]
        >>> list(starts_when(range(10), 7))
        [7, 8, 9]
    """
    if callable(condition):
        predicate = condition
    else:
        sentinel = condition
        predicate = lambda item: item == sentinel
    # Drop items until the predicate first holds, then yield the rest.
    return itertools.dropwhile(lambda item: not predicate(item), iterable)
Start yielding items when a condition arise. Args: iterable: the iterable to filter. condition: if the callable returns True once, start yielding items. If it's not a callable, it will be converted to one as `lambda condition: condition == item`. Example: >>> list(starts_when(range(10), lambda x: x > 5)) [6, 7, 8, 9] >>> list(starts_when(range(10), 7)) [7, 8, 9]
juraj-google-style
def _run_submission(self, metadata):
    """Runs submission inside a Docker container.

    Args:
        metadata: dictionary with submission metadata.

    Returns:
        True if status code of Docker command was success (i.e. zero),
        False otherwise.
    """
    # Pick the GPU-enabled runtime and image when requested.
    if self._use_gpu:
        docker_binary = 'nvidia-docker'
        container_name = metadata['container_gpu']
    else:
        docker_binary = 'docker'
        container_name = metadata['container']
    if (metadata['type'] == 'defense'):
        # Defenses read images and write a CSV of classifications.
        cmd = [docker_binary, 'run',
               '--network=none',
               '-m=24g',
               '-v', '{0}:/input_images:ro'.format(self._sample_input_dir),
               '-v', '{0}:/output_data'.format(self._sample_output_dir),
               '-v', '{0}:/code'.format(self._extracted_submission_dir),
               '-w', '/code',
               container_name,
               ('./' + metadata['entry_point']),
               '/input_images',
               '/output_data/result.csv']
    else:
        # Attacks additionally receive a randomly chosen epsilon.
        epsilon = np.random.choice(ALLOWED_EPS)
        cmd = [docker_binary, 'run',
               '--network=none',
               '-m=24g',
               '-v', '{0}:/input_images:ro'.format(self._sample_input_dir),
               '-v', '{0}:/output_images'.format(self._sample_output_dir),
               '-v', '{0}:/code'.format(self._extracted_submission_dir),
               '-w', '/code',
               container_name,
               ('./' + metadata['entry_point']),
               '/input_images',
               '/output_images',
               str(epsilon)]
    logging.info('Command to run submission: %s', ' '.join(cmd))
    return shell_call(cmd)
Runs submission inside Docker container. Args: metadata: dictionary with submission metadata Returns: True if status code of Docker command was success (i.e. zero), False otherwise.
codesearchnet
def get_encoder_from_vocab(vocab_filepath):
    """Get encoder from vocab file.

    Args:
        vocab_filepath: path to vocab, either local or cns.

    Returns:
        A SubwordTextEncoder vocabulary object.

    Raises:
        ValueError: If the vocab file does not exist.
    """
    if (not tf.gfile.Exists(vocab_filepath)):
        raise ValueError('Vocab file does not exist: {}.'.format(vocab_filepath))
    tf.logging.info('Found vocab file: %s', vocab_filepath)
    encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
    return encoder
Get encoder from vocab file. If vocab is not found in output dir, it will be copied there by copy_vocab_to_output_dir to clarify the vocab used to generate the data. Args: vocab_filepath: path to vocab, either local or cns Returns: A SubwordTextEncoder vocabulary object. None if the output_parallel_text is set.
codesearchnet
def decode_datetime(encoded_datetime):
    """Decode a DateTimeField parameter from a string to a python datetime.

    Args:
        encoded_datetime: A string in RFC 3339 format.

    Returns:
        A datetime object with the date and time specified in
        encoded_datetime.

    Raises:
        ValueError: If the string is not in a recognized format.
    """
    # Split off any trailing time-zone designator before parsing.
    time_zone_match = _TIME_ZONE_RE.search(encoded_datetime)
    if time_zone_match:
        time_string = encoded_datetime[:time_zone_match.start(1)].upper()
    else:
        time_string = encoded_datetime.upper()
    # Fractional seconds determine which strptime format applies.
    if ('.' in time_string):
        format_string = '%Y-%m-%dT%H:%M:%S.%f'
    else:
        format_string = '%Y-%m-%dT%H:%M:%S'
    decoded_datetime = datetime.datetime.strptime(time_string, format_string)
    if (not time_zone_match):
        return decoded_datetime
    # 'Z' means UTC; otherwise compute the signed offset in minutes.
    if time_zone_match.group('z'):
        offset_minutes = 0
    else:
        sign = time_zone_match.group('sign')
        (hours, minutes) = [int(value) for value in time_zone_match.group('hours', 'minutes')]
        offset_minutes = ((hours * 60) + minutes)
        if (sign == '-'):
            offset_minutes *= (- 1)
    # Rebuild the datetime with an explicit tzinfo attached.
    return datetime.datetime(decoded_datetime.year, decoded_datetime.month, decoded_datetime.day, decoded_datetime.hour, decoded_datetime.minute, decoded_datetime.second, decoded_datetime.microsecond, TimeZoneOffset(offset_minutes))
Decode a DateTimeField parameter from a string to a python datetime. Args: encoded_datetime: A string in RFC 3339 format. Returns: A datetime object with the date and time specified in encoded_datetime. Raises: ValueError: If the string is not in a recognized format.
codesearchnet
def auth_ping(self):
    """Test that the application can authenticate to Crowd.

    Attempts to authenticate the application user against the Crowd
    server. In order for user authentication to work, an application
    must be able to authenticate.

    Returns:
        bool: True if the application authentication succeeded.
    """
    response = self._get(self.rest_url + '/non-existent/location')
    # 404 means we authenticated but the resource does not exist;
    # 401 (or anything else) means authentication failed.
    return response.status_code == 404
Test that application can authenticate to Crowd. Attempts to authenticate the application user against the Crowd server. In order for user authentication to work, an application must be able to authenticate. Returns: bool: True if the application authentication succeeded.
codesearchnet
def fermi_fourier_trans_inverse_conjugate_4(qubits):
    """Complex conjugate of the inverse fermionic Fourier transform.

    We will need to map the momentum states in the reversed order for
    spin-down states to the position picture. This transformation can be
    simply implemented as the complex conjugate of the former one; we
    only need to change the S gate to S* = S ** 3.

    Args:
        qubits: list of four qubits.
    """
    # Bug fix: the original first yield had a trailing comma, which made
    # it yield a 1-tuple instead of the operation itself.
    yield fswap(qubits[1], qubits[2])
    yield fermi_fourier_trans_2(qubits[0], qubits[1])
    yield fermi_fourier_trans_2(qubits[2], qubits[3])
    yield fswap(qubits[1], qubits[2])
    yield fermi_fourier_trans_2(qubits[0], qubits[1])
    yield cirq.S(qubits[2]) ** 3
    yield fermi_fourier_trans_2(qubits[2], qubits[3])
    yield fswap(qubits[1], qubits[2])
We will need to map the momentum states in the reversed order for spin-down states to the position picture. This transformation can be simply implemented the complex conjugate of the former one. We only need to change the S gate to S* = S ** 3. Args: qubits: list of four qubits
juraj-google-style
def trim(self, len_):
    """Return a copy of the version, possibly with fewer tokens.

    Args:
        len_ (int): New version length. If >= current length, an
            unchanged copy of the version is returned.

    Returns:
        Version: The (possibly shortened) copy.
    """
    shortened = Version(None)
    shortened.tokens = self.tokens[:len_]
    # A version with k tokens carries k - 1 separators.
    shortened.seps = self.seps[:len_ - 1]
    return shortened
Return a copy of the version, possibly with less tokens. Args: len_ (int): New version length. If >= current length, an unchanged copy of the version is returned.
codesearchnet
def translate_sites(self, indices=None, vector=None):
    """Translate specific sites by some vector.

    Args:
        indices (list): List of site indices on which to perform the
            translation. Defaults to all sites.
        vector (3x1 array): Translation vector for sites. Defaults to
            the zero vector (no translation).
    """
    if indices is None:
        indices = range(len(self))
    if vector is None:
        # BUG FIX: this previously read 'vector == [0, 0, 0]' — a no-op
        # comparison — so a missing vector crashed in the addition below.
        vector = [0, 0, 0]
    for i in indices:
        site = self._sites[i]
        # Rebuild the site rather than mutating in place so the new
        # object carries the translated coords with properties intact.
        new_site = Site(site.species, site.coords + vector,
                        properties=site.properties)
        self._sites[i] = new_site
Translate specific sites by some vector, keeping the sites within the unit cell. Args: indices (list): List of site indices on which to perform the translation. vector (3x1 array): Translation vector for sites.
juraj-google-style
def create_server(self, server_name, *args, **kwargs):
    """Create an IRC server connection slot.

    The server is only actually connected to when
    :meth:`girc.client.ServerConnection.connect` is called later.

    Args:
        server_name (str): Name of the server, used to access the
            server later through the reactor.

    Returns:
        girc.client.ServerConnection: A not-yet-connected server.
    """
    server = ServerConnection(name=server_name, reactor=self)
    # Extra positional/keyword arguments are treated as connect info.
    if args or kwargs:
        server.set_connect_info(*args, **kwargs)
    # Replay every handler already registered on the reactor onto the
    # new server so it observes the same events as existing servers.
    for verb, infos in self._event_handlers.items():
        for info in infos:
            server.register_event(info['direction'], verb,
                                  info['handler'],
                                  priority=info['priority'])
    self.servers[server_name] = server
    return server
Create an IRC server connection slot. The server will actually be connected to when :meth:`girc.client.ServerConnection.connect` is called later. Args: server_name (str): Name of the server, to be used for functions and accessing the server later through the reactor. Returns: server (girc.client.ServerConnection): A not-yet-connected server.
codesearchnet
def check_completion(task, mark_incomplete=False, clear=False, return_stats=False):
    """Recursively check whether a task and all its requirements are complete.

    Args:
        task (derived from luigi.Task): Task to check completion for; checks
            everything 'downstream' from that task.
        mark_incomplete (bool): If True, set any task as incomplete for which
            a requirement is found (recursively) to be incomplete. Works only
            for tasks derived from :class:`ORMTask`.
        clear (bool): If True, also clear such tasks (implies
            ``mark_incomplete``). Works only for :class:`ORMTask` instances.
        return_stats (bool): If True, return task-checking statistics in
            addition to the completion status.

    Returns:
        bool, or (bool, dict) when ``return_stats`` is True: True if the task
        and (recursively) all its requirements are complete.

    Raises:
        RuntimeError: If a full pass over the pending tasks clears nothing,
            i.e. the dependency structure prevents further clearing.
    """
    # Tasks that need clearing, keyed by task id; each entry tracks which
    # other pending tasks still require it (filled in by _check_completion).
    to_clear = dict()
    (is_complete, stats) = _check_completion(task, mark_incomplete=mark_incomplete, clear=clear, stats={}, visited=dict(), to_clear=to_clear)
    # Clear in reverse-dependency order: a task is only cleared once no
    # other pending task requires it.
    while to_clear:
        found_clearable_task = False
        for task_id in list(to_clear.keys()):
            v = to_clear[task_id]
            if (not v['required_by']):
                found_clearable_task = True
                task = v['task']
                if isinstance(task, ORMTask):
                    task.mark_incomplete()
                    task.clear()
                    _increment_stats(stats, 'Cleared')
                    config.logger.info(('Cleared task: ' + task_id))
                else:
                    # Non-ORMTask tasks cannot be cleared; drop them anyway
                    # so they stop blocking their dependents.
                    config.logger.info(('Cannot clear task, not an ORMTask: ' + task_id))
                del to_clear[task_id]
                # This task no longer blocks anything that required it.
                for w in to_clear.values():
                    w['required_by'].discard(task_id)
        if (not found_clearable_task):
            # Nothing became clearable in a full pass: the remaining
            # requirements form a cycle (or the bookkeeping is broken).
            raise RuntimeError('Error in recursive task clearing, no clearable task found')
    config.logger.info(('Task completion checking, summary:\n' + str(stats)))
    if return_stats:
        return (is_complete, stats)
    else:
        return is_complete
Recursively check if a task and all its requirements are complete Args: task (derived from luigi.Task): Task to check completion for; check everything 'downstream' from that task. mark_incomplete (bool): If ``True`` set any task as incomplete for which a requirement is found to be incomplete (checked recursively). This works only for tasks derived from :class:`ORMTask`. clear (bool): If ``True``, call the :func:`clear()` method of any task for which a requirement is found to be incomplete (checked recursively). This implies ``mark_incomplete = True``. This works only for tasks derived from :class:`ORMTask`. return_stats (bool): If ``True``, return task checking statistics in addition to completion status Returns: bool: ``True`` if the task, all its requirements and (recursively) all their requirements are complete, ``False`` otherwise.
codesearchnet
def setup(import_roots, zip_safe):
    """Initialize subpar run-time support.

    Args:
        import_roots (list): Subdirectories inside the .par file to add
            to the module import path at runtime.
        zip_safe (bool): If False, extract the .par file contents to a
            temporary directory and import everything from there.

    Returns:
        bool: True if setup was successful, else False.
    """
    archive_path = _find_archive()
    if not archive_path:
        warnings.warn('Failed to initialize .par file runtime support',
                      UserWarning)
        return False
    # The archive must be the first sys.path entry, otherwise imports
    # would resolve somewhere else entirely.
    if os.path.abspath(sys.path[0]) != os.path.abspath(archive_path):
        warnings.warn(
            'Failed to initialize .par file runtime support. '
            'archive_path was %r, sys.path was %r' % (archive_path, sys.path),
            UserWarning)
        return False
    if zip_safe:
        import_prefix = archive_path
    else:
        # Unpack the archive and import from the extracted tree instead.
        extract_dir = _extract_files(archive_path)
        sys.path[0] = extract_dir
        import_prefix = extract_dir
    _initialize_import_path(import_roots, import_prefix)
    # Patch both the stand-alone and the pip-vendored copies of
    # pkg_resources so they understand the archive layout.
    _setup_pkg_resources('pkg_resources')
    _setup_pkg_resources('pip._vendor.pkg_resources')
    return True
Initialize subpar run-time support Args: import_roots (list): subdirs inside .par file to add to the module import path at runtime. zip_safe (bool): If False, extract the .par file contents to a temporary directory, and import everything from that directory. Returns: True if setup was successful, else False
github-repos
def append_dims_and_file_extension(fname, data_df):
    """Append matrix dimensions and the .gct extension to a filename.

    N.B. Dimensions are reported as cols x rows.

    Args:
        fname (string): Output filename, with or without a .gct suffix.
        data_df (pandas df): Data whose shape is encoded in the name.

    Returns:
        string: Output filename of the form ``<base>_n<cols>x<rows>.gct``.
    """
    n_rows, n_cols = data_df.shape
    # Strip an existing .gct extension so it is not duplicated.
    if fname.endswith('.gct'):
        base = os.path.splitext(fname)[0]
    else:
        base = fname
    return '{0}_n{1}x{2}.gct'.format(base, n_cols, n_rows)
Append dimensions and file extension to output filename. N.B. Dimensions are cols x rows. Args: fname (string): output filename data_df (pandas df) Returns: out_fname (string): output filename with matrix dims and .gct appended
codesearchnet
def normalize(a, new_min=0.0, new_max=1.0):
    """From ``bruges``: normalize an array to an arbitrary range.

    Args:
        a (ndarray)
        new_min (float): the new min, default 0.
        new_max (float): the new max, default 1.

    Returns:
        ndarray. The normalized array.
    """
    shifted = a - np.amin(a)
    # Scale to [0, 1] first, then stretch/shift into the target range.
    unit = shifted / np.amax(shifted)
    return unit * (new_max - new_min) + new_min
From ``bruges`` Normalize an array to [0,1] or to arbitrary new min and max. Args: a (ndarray) new_min (float): the new min, default 0. new_max (float): the new max, default 1. Returns: ndarray. The normalized array.
juraj-google-style
def _escape_token(token, alphabet): token = token.replace(u'\\', u'\\\\').replace(u'_', u'\\u') ret = [(c if ((c in alphabet) and (c != u'\n')) else ('\\%d;' % ord(c))) for c in token] return (u''.join(ret) + '_')
r"""Replace characters that aren't in the alphabet and append "_" to token. Apply three transformations to the token: 1. Replace underline character "_" with "\u", and backslash "\" with "\\". 2. Replace characters outside of the alphabet with "\###;", where ### is the character's Unicode code point. 3. Appends "_" to mark the end of a token. Args: token: unicode string to be escaped alphabet: list of all known characters Returns: escaped string
codesearchnet
def update(self, jump):
    """Update the lattice state by accepting a specific jump.

    Moves the jumping atom from its initial site to its final site and
    accumulates the atom's hop count and displacement statistics.

    Args:
        jump (Jump): The jump that has been accepted.

    Returns:
        None.
    """
    atom = jump.initial_site.atom
    displacement = jump.dr(self.cell_lengths)
    # Occupy the destination site ...
    jump.final_site.occupation = atom.number
    jump.final_site.atom = atom
    jump.final_site.is_occupied = True
    # ... and vacate the origin site.
    jump.initial_site.occupation = 0
    jump.initial_site.atom = None
    jump.initial_site.is_occupied = False
    # Bookkeeping on the atom: current site, hop count, displacement
    # vector, and summed squared displacement.
    atom.site = jump.final_site
    atom.number_of_hops += 1
    atom.dr += displacement
    atom.summed_dr2 += np.dot(displacement, displacement)
Update the lattice state by accepting a specific jump Args: jump (Jump): The jump that has been accepted. Returns: None.
codesearchnet
def _AddCredentialConfiguration(self, path_spec, credential_type, credential_data):
    """Adds a credential configuration.

    Args:
        path_spec (dfvfs.PathSpec): path specification.
        credential_type (str): credential type.
        credential_data (bytes): credential data.
    """
    # Wrap the pieces in a CredentialConfiguration and record it.
    self._credential_configurations.append(
        configurations.CredentialConfiguration(
            credential_data=credential_data,
            credential_type=credential_type,
            path_spec=path_spec))
Adds a credential configuration. Args: path_spec (dfvfs.PathSpec): path specification. credential_type (str): credential type. credential_data (bytes): credential data.
codesearchnet
def generate_output_newline(self, line='0', colorize=True):
    """Generate a CLI output new line.

    Args:
        line (:obj:`str`): The line number (0-4). Determines
            indentation. Defaults to '0'.
        colorize (:obj:`bool`): Colorize the console output with ANSI
            colors. Defaults to True.

    Returns:
        str: The generated output.
    """
    # A new line is just an empty parent entry at the requested depth.
    return generate_output(line=line, is_parent=True, colorize=colorize)
The function for generating a CLI output new line. Args: line (:obj:`str`): The line number (0-4). Determines indentation. Defaults to '0'. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
codesearchnet
def relocate(self, destination):
    """Configure the virtual environment for another path.

    Note:
        This does not actually move the virtual environment. It only
        rewrites the metadata required to support a move.

    Args:
        destination (str): The target path of the virtual environment.
    """
    # Point every activation script at the new location.
    for activate in self.bin.activates:
        activate.vpath = destination
    # Rewrite the shebang of every python/pypy script to point at the
    # relocated interpreter. NOTE(review): the original assignment line
    # was garbled in the source; reconstructed as the conventional
    # '#!<venv>/bin/python' rewrite — confirm against upstream.
    for binfile in self.bin.files:
        if binfile.shebang and (
                'python' in binfile.shebang or 'pypy' in binfile.shebang):
            binfile.shebang = '#!' + os.path.join(destination, 'bin', 'python')
Configure the virtual environment for another path. Args: destination (str): The target path of the virtual environment. Note: This does not actually move the virtual environment. It only rewrites the metadata required to support a move.
juraj-google-style
def pythonify_logs(logs):
    """Flatten nested log dicts and convert values to Python-native types.

    Nested dict values are flattened into the top level (their outer key
    is dropped). Each leaf value is converted with ``float(value)``;
    values that cannot be converted are kept unchanged.

    Args:
        logs: A dict containing log values, or None.

    Returns:
        A flat dict with values converted to float where possible.
    """
    logs = logs or {}
    result = {}
    for key, value in sorted(logs.items()):
        if isinstance(value, dict):
            # Recurse and merge the nested entries into the flat result.
            result.update(pythonify_logs(value))
        else:
            try:
                value = float(value)
            except (TypeError, ValueError):
                # BUG FIX: previously a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit. Non-numeric values are kept
                # unchanged on purpose.
                pass
            result[key] = value
    return result
Flatten and convert log values to Python-native types. This function attempts to convert dict value by `float(value)` and skips the conversion if it fails. Args: logs: A dict containing log values. Returns: A flattened dict with values converted to Python-native types if possible.
github-repos