code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def GetGroupMap(self, since=None):
    """Return the group map from this source.

    Args:
        since: Get data only changed since this timestamp (inclusive) or
            None for all data.

    Returns:
        instance of group.GroupMap
    """
    client = self._GetClient()
    getter = GroupUpdateGetter()
    return getter.GetUpdates(client, self.conf['bucket'],
                             self.conf['group_object'], since)
Return the group map from this source. Args: since: Get data only changed since this timestamp (inclusive) or None for all data. Returns: instance of group.GroupMap
github-repos
def code_challenge(verifier):
    """Create a PKCE 'code_challenge' as described in RFC 7636, section 4.2.

    The challenge is the urlsafe-base64 encoding of the SHA-256 digest of
    the verifier, with trailing '=' padding stripped.

    Args:
        verifier: bytestring, a code_verifier as generated by
            code_verifier().

    Returns:
        Bytestring, urlsafe base64-encoded SHA-256 digest without '='
        padding.
    """
    hashed = hashlib.sha256(verifier).digest()
    encoded = base64.urlsafe_b64encode(hashed)
    return encoded.rstrip(b'=')
Creates a 'code_challenge' as described in section 4.2 of RFC 7636 by taking the sha256 hash of the verifier and then urlsafe base64-encoding it. Args: verifier: bytestring, representing a code_verifier as generated by code_verifier(). Returns: Bytestring, representing a urlsafe base64-encoded sha256 hash digest, without '=' padding.
codesearchnet
def sparse_dense_cwise_add(sp_t, dense_t):
    """Adds up a SparseTensor and a dense Tensor elementwise.

    Only the dense values pointed to by the indices of the SparseTensor
    participate in the addition; the result keeps the sparse operand's
    indices and shape.

    Args:
        sp_t: the SparseTensor operand.
        dense_t: the dense Tensor operand; must have the same dtype and a
            broadcast-compatible shape as `sp_t`.

    Returns:
        The SparseTensor output.
    """
    summed_values = gen_sparse_ops.sparse_dense_cwise_add(
        sp_t.indices, sp_t.values, sp_t.dense_shape, dense_t)
    return sparse_tensor.SparseTensor(sp_t.indices, summed_values,
                                      sp_t.dense_shape)
Adds up a SparseTensor and a dense Tensor, using these special rules: (1) Broadcasts the dense side to have the same shape as the sparse side, if eligible; (2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition. By the rules, the result is a logical SparseTensor with exactly the same indices and shape, but possibly with different non-zero values. The output of this Op is the resultant non-zero values. Args: sp_t: the SparseTensor operand. dense_t: the dense Tensor operand; must have the same dtype and a broadcast-compatible shape as `sp_t`. Returns: output: the SparseTensor output.
github-repos
def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, past_key_values_length=0, training=False):
    """Applies embedding based on inputs tensor.

    Combines word, position and token-type embeddings, then applies
    LayerNorm and dropout.

    Args:
        input_ids: int Tensor of token ids; exactly one of input_ids /
            inputs_embeds must be provided.
        position_ids: optional int Tensor of position indices; derived when
            omitted.
        token_type_ids: optional int Tensor of segment ids; defaults to
            all zeros.
        inputs_embeds: optional pre-computed word embeddings.
        past_key_values_length: offset applied to position ids during
            cached/incremental decoding.
        training: whether dropout is active.

    Returns:
        final_embeddings (`tf.Tensor`): output embedding tensor.
    """
    # Exactly one of input_ids / inputs_embeds must be given.
    assert not (input_ids is None and inputs_embeds is None)
    if input_ids is not None:
        check_embeddings_within_bounds(input_ids, self.config.vocab_size)
        inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
    input_shape = shape_list(inputs_embeds)[:-1]
    if token_type_ids is None:
        token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64)
    if position_ids is None:
        if input_ids is not None:
            # Positions computed from the ids themselves (skips padding
            # tokens, offset by past length).
            position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids, past_key_values_length=past_key_values_length)
        else:
            # No ids available: plain range offset past the padding index.
            position_ids = tf.expand_dims(tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64), axis=0)
    position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
    token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
    # Sum the three embedding components, then normalize and regularize.
    final_embeddings = inputs_embeds + position_embeds + token_type_embeds
    final_embeddings = self.LayerNorm(inputs=final_embeddings)
    final_embeddings = self.dropout(inputs=final_embeddings, training=training)
    return final_embeddings
Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor.
github-repos
def get_cartesian(self):
    """Return the molecule in cartesian coordinates.

    Raises an :class:`~exceptions.InvalidReference` exception, if the
    reference of the i-th atom is undefined.

    Returns:
        Cartesian: Reindexed version of the zmatrix.
    """
    def create_cartesian(positions, row):
        # Build a Cartesian from the first `row` successfully computed
        # positions (used both for success and partial-failure reporting).
        xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'], index=self.index[:row], dtype='f8')
        xyz_frame['atom'] = self.loc[xyz_frame.index, 'atom']
        xyz_frame.loc[:, ['x', 'y', 'z']] = positions[:row]
        from chemcoord.cartesian_coordinates.cartesian_class_main \
            import Cartesian
        cartesian = Cartesian(xyz_frame, metadata=self.metadata)
        return cartesian
    # Translate symbolic references (b, a, d) into 0-based positional
    # indices for the low-level transformation routine.
    c_table = self.loc[:, ['b', 'a', 'd']]
    c_table = c_table.replace(constants.int_label)
    c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)})
    c_table = c_table.values.astype('i8').T
    C = self.loc[:, ['bond', 'angle', 'dihedral']].values.T
    # Angles and dihedrals are stored in degrees; get_X expects radians.
    C[[1, 2], :] = np.radians(C[[1, 2], :])
    err, row, positions = transformation.get_X(C, c_table)
    positions = positions.T
    if err == ERR_CODE_InvalidReference:
        # Map the failing positional row back to the original index and
        # attach the partially built molecule to the exception.
        rename = dict(enumerate(self.index))
        i = rename[row]
        b, a, d = self.loc[i, ['b', 'a', 'd']]
        cartesian = create_cartesian(positions, row)
        raise InvalidReference(i=i, b=b, a=a, d=d, already_built_cartesian=cartesian)
    elif err == ERR_CODE_OK:
        return create_cartesian(positions, row + 1)
Return the molecule in cartesian coordinates. Raises an :class:`~exceptions.InvalidReference` exception, if the reference of the i-th atom is undefined. Args: None Returns: Cartesian: Reindexed version of the zmatrix.
juraj-google-style
def account_id(self, value):
    """Set the current account id.

    Args:
        value: current account id (string)

    Raises:
        TypeError: If value is not a string.

    Returns:
        None
    """
    # isinstance is the idiomatic type check; the original error message
    # said "commit value", a copy/paste mistake fixed here.
    if not isinstance(value, str):
        raise TypeError('account_id value must be string')
    self._account_id = value
Sets the current account id Args: value: current account id (string) Returns: None
codesearchnet
def format_variant(variant, variant_type='snv'):
    """Convert variant information to a VCF formatted string.

    Args:
        variant(dict): variant information with keys 'chrom', 'start',
            'ref', 'alt' (and 'pos_left'/'pos_right'/'sv_type' for SVs).
        variant_type(str): 'snv' or 'sv'.

    Returns:
        str: a tab-separated VCF variant line.
    """
    chrom = variant.get('chrom')
    pos = variant.get('start')
    ref = variant.get('ref')
    alt = variant.get('alt')
    if variant_type == 'sv':
        # Represent a structural variant by the midpoint of its breakpoints
        # with a symbolic ALT allele.
        pos = int((variant['pos_left'] + variant['pos_right']) / 2)
        ref = 'N'
        alt = f"<{variant['sv_type']}>"
    # The original assigned `info = None` and immediately overwrote it;
    # the dead assignment is removed.
    info = format_info(variant, variant_type=variant_type)
    return f"{chrom}\t{pos}\t.\t{ref}\t{alt}\t.\t.\t{info}"
Convert variant information to a VCF formatted string Args: variant(dict) variant_type(str) Returns: vcf_variant(str)
juraj-google-style
def connect_tcp(cls, host, port, echo=False):
    """Set up a :class:`TCPClientSocketChannel` and create a
    :class:`Flow` instance for it.

    Args:
        host(str): The hostname or IP address to connect to.
        port(int): The port number to connect to.
        echo(bool): Whether to echo read/written data to stdout by default.

    Returns:
        :class:`Flow`: A Flow instance initialised with the TCP socket
        channel.
    """
    channel = TCPClientSocketChannel(host, port)
    return cls(channel, echo=echo)
Set up a :class:`TCPClientSocketChannel` and create a :class:`Flow` instance for it. Args: host(str): The hostname or IP address to connect to. port(int): The port number to connect to. echo(bool): Whether to echo read/written data to stdout by default. Returns: :class:`Flow`: A Flow instance initialised with the TCP socket channel.
juraj-google-style
def to_tensor(self):
    """Packs the dates into a single Tensor.

    The Tensor has shape `date_tensor.shape() + (3,)`, where the last
    dimension represents years, months and days, in this order.

    Returns:
        A Tensor of shape `date_tensor.shape() + (3,)`.
    """
    components = (self.year(), self.month(), self.day())
    return tf.stack(components, axis=-1)
Packs the dates into a single Tensor. The Tensor has shape `date_tensor.shape() + (3,)`, where the last dimension represents years, months and days, in this order. This can be convenient when the dates are the final result of a computation in the graph mode: a `tf.function` can return `date_tensor.to_tensor()`, or, if one uses `tf.compat.v1.Session`, they can call `session.run(date_tensor.to_tensor())`. Returns: A Tensor of shape `date_tensor.shape() + (3,)`. #### Example ```python dates = tff.datetime.dates_from_tuples([(2019, 1, 25), (2020, 3, 2)]) dates.to_tensor() # tf.Tensor with contents [[2019, 1, 25], [2020, 3, 2]]. ```
github-repos
def validate(self, data):
    """Validate the provided data.

    Returns:
        dict: The validated data, with the confirmation's email address
        added under 'email'.

    Raises:
        serializers.ValidationError: If the provided password is invalid.
    """
    user = self._confirmation.email.user
    # Only check the password when the app is configured to require it.
    if app_settings.EMAIL_VERIFICATION_PASSWORD_REQUIRED:
        if not user.check_password(data['password']):
            raise serializers.ValidationError(
                _('The provided password is invalid.'))
    data['email'] = self._confirmation.email.email
    return data
Validate the provided data. Returns: dict: The validated data. Raises: serializers.ValidationError: If the provided password is invalid.
codesearchnet
def _keys(self, pattern):
    """Execute the KEYS command on all Redis shards.

    Args:
        pattern: The KEYS pattern to query.

    Returns:
        The concatenated list of results from all shards.
    """
    matched = []
    for shard in self.redis_clients:
        matched.extend(shard.scan_iter(match=pattern))
    return matched
Execute the KEYS command on all Redis shards. Args: pattern: The KEYS pattern to query. Returns: The concatenated list of results from all shards.
juraj-google-style
def index(self, name=None):
    """Return the index number of the supplied column name.

    Args:
        name: string of column name.

    Raises:
        TableError: If name not found.

    Returns:
        Index of the specified header entry.
    """
    try:
        position = self.header.index(name)
    except ValueError:
        raise TableError('Unknown index name %s.' % name)
    return position
Returns index number of supplied column name. Args: name: string of column name. Raises: TableError: If name not found. Returns: Index of the specified header entry.
juraj-google-style
def load_map_coordinates(map_file):
    """Load map coordinates from a netCDF or pickle file created by
    util.makeMapGrids.

    Args:
        map_file: Filename for the file containing coordinate information.
            Files ending in '.pkl' are unpickled; anything else is opened
            as a netCDF Dataset.

    Returns:
        Tuple of (longitude, latitude) grids as numpy arrays.
    """
    if map_file.endswith('.pkl'):
        # Fix: pickle data must be read in binary mode on Python 3, and the
        # original leaked the file handle; use a context manager.
        with open(map_file, 'rb') as handle:
            map_data = pickle.load(handle)
        lon = map_data['lon']
        lat = map_data['lat']
    else:
        map_data = Dataset(map_file)
        if 'lon' in map_data.variables.keys():
            lon = map_data.variables['lon'][:]
            lat = map_data.variables['lat'][:]
        else:
            # WRF-style files store 2-D grids under XLONG/XLAT.
            lon = map_data.variables['XLONG'][0]
            lat = map_data.variables['XLAT'][0]
    return (lon, lat)
Loads map coordinates from netCDF or pickle file created by util.makeMapGrids. Args: map_file: Filename for the file containing coordinate information. Returns: Latitude and longitude grids as numpy arrays.
codesearchnet
def Export(self):
    """Export this KeyPair's private key in WIF format.

    Returns:
        str: The key in WIF format.
    """
    payload = bytearray(38)
    payload[0] = 0x80                       # WIF prefix byte (128)
    payload[1:33] = self.PrivateKey[0:32]   # 32-byte private key
    payload[33] = 0x01                      # compressed-pubkey flag
    checksum = Crypto.Default().Hash256(payload[0:34])
    payload[34:38] = checksum[0:4]
    return base58.b58encode(bytes(payload)).decode('utf-8')
Export this KeyPair's private key in WIF format. Returns: str: The key in wif format
codesearchnet
def multi(self, **kwargs):
    """Search the movie, tv show and person collections with a single query.

    Args:
        query: CGI escaped string.
        page: (optional) Minimum value of 1. Expected value is an integer.
        language: (optional) ISO 639-1 code.
        include_adult: (optional) Toggle the inclusion of adult titles.
            Expected value is True or False.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('multi'), kwargs)
    self._set_attrs_to_values(response)
    return response
Search the movie, tv show and person collections with a single query. Args: query: CGI escaped string. page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. include_adult: (optional) Toggle the inclusion of adult titles. Expected value is True or False. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def compare_checkpoints(self, attr_mean):
    """Compare a checkpoint against the current best by attr_mean.

    Greater-than is used by default; when the configured comparison is
    "min" (self._cmp_greater is False), less-than is used instead.

    Args:
        attr_mean: mean of the attribute value for the current checkpoint.

    Returns:
        True when attr_mean beats the stored best value under the selected
        comparison function, False otherwise.
    """
    best = self.best_checkpoint_attr_value
    if self._cmp_greater:
        return attr_mean > best
    return attr_mean < best
Compares two checkpoints based on the attribute attr_mean param. Greater than is used by default. If command-line parameter checkpoint_score_attr starts with "min-" less than is used. Arguments: attr_mean: mean of attribute value for the current checkpoint Returns: True: when attr_mean is greater than previous checkpoint attr_mean and greater than function is selected when attr_mean is less than previous checkpoint attr_mean and less than function is selected False: when attr_mean is not in alignment with selected cmp fn
codesearchnet
def get_username(self, userid):
    """Resolve a Slack userid to a human-friendly username.

    Falls back to refreshing the user map from the API, and finally to the
    userid itself when no name can be found.

    Args:
        userid (string): Slack userid to look up.

    Returns:
        string: Human-friendly name of the user (or the userid when
        unresolvable).
    """
    cached = self.user_map.get(userid)
    if cached:
        return cached
    # Cache miss: refresh the id -> name map from the API.
    users = self.get_users()
    if users:
        members = {m['id']: m['name']
                   for m in users.get('members', [{}])
                   if m.get('id') and m.get('name')}
        if members:
            self.user_map.update(members)
    return self.user_map.get(userid, userid)
Perform a lookup of users to resolve a userid to a username Args: userid (string): Slack userid to lookup. Returns: string: Human-friendly name of the user
codesearchnet
def remove_species(self, species):
    """Remove all occurrences of a species from a molecule.

    Args:
        species: Species to remove.
    """
    to_remove = [get_el_sp(sp) for sp in species]
    kept_sites = []
    for site in self._sites:
        # Strip the targeted species from the site's occupancy; drop the
        # site entirely when nothing remains.
        occupancy = {sp: amt for sp, amt in site.species.items()
                     if sp not in to_remove}
        if occupancy:
            kept_sites.append(Site(occupancy, site.coords,
                                   properties=site.properties))
    self._sites = kept_sites
Remove all occurrences of a species from a molecule. Args: species: Species to remove.
juraj-google-style
def probabilistic_collocation(order, dist, subset=.1):
    """Probabilistic collocation method.

    Args:
        order (int, numpy.ndarray): Quadrature order along each axis.
        dist (Dist): Distribution to generate samples from.
        subset (float): Rate at which to remove samples.

    Returns:
        Tuple of (abscissas, weights) with low-likelihood nodes randomly
        dropped.
    """
    abscissas, weights = chaospy.quad.collection.golub_welsch(order, dist)
    likelihood = dist.pdf(abscissas)
    # Randomly keep nodes with probability proportional to likelihood.
    draws = numpy.random.random(len(weights))
    keep = likelihood > draws * subset * numpy.max(likelihood)
    return abscissas.T[keep].T, weights[keep]
Probabilistic collocation method. Args: order (int, numpy.ndarray) : Quadrature order along each axis. dist (Dist) : Distribution to generate samples from. subset (float) : Rate of which to removed samples.
juraj-google-style
def create(cls, name, config=None, kind="spark"):
    """Create a new app.

    Args:
        `name`: the name of the app
        `config`: a dictionary of key-value pairs
        `kind`: kind of the app (default=spark)
    """
    payload = {'name': name, 'config': config, 'kind': kind}
    conn = Qubole.agent()
    return conn.post(cls.rest_entity_path, data=payload)
Create a new app. Args: `name`: the name of the app `config`: a dictionary of key-value pairs `kind`: kind of the app (default=spark)
juraj-google-style
def get_col_info(table_name, col_name, meta_file):
    """Return the content and metadata of a given column.

    Args:
        table_name(str): Name of the table.
        col_name(str): Name of the column.
        meta_file(str): Path to the meta.json file.

    Returns:
        tuple(pandas.Series, dict)

    Raises:
        ValueError: If col_name is not described in the table metadata.
    """
    with open(meta_file, 'r') as f:
        meta = json.load(f)
    data_table, table = load_data_table(table_name, meta_file, meta)
    # The original fell through with a NameError when the column was not
    # found; raise an explicit error instead, and stop at the first match.
    col_meta = None
    for field in table['fields']:
        if field['name'] == col_name:
            col_meta = field
            break
    if col_meta is None:
        raise ValueError(
            'Column %r not found in table %r' % (col_name, table_name))
    return (data_table[col_name], col_meta)
Return the content and metadata of a given column. Args: table_name(str): Name of the table. col_name(str): Name of the column. meta_file(str): Path to the meta.json file. Returns: tuple(pandas.Series, dict)
juraj-google-style
def _kl_categorical_categorical(a, b, name=None):
    """Calculate the batched KL divergence KL(a || b) with a, b
    OneHotCategorical.

    Args:
        a: instance of a OneHotCategorical distribution object.
        b: instance of a OneHotCategorical distribution object.
        name: (optional) Name to use for created operations. Default is
            "kl_categorical_categorical".

    Returns:
        Batchwise KL(a || b)
    """
    with tf.name_scope(name or "kl_categorical_categorical"):
        probs_a = tf.nn.softmax(a.logits)
        log_ratio = tf.nn.log_softmax(a.logits) - tf.nn.log_softmax(b.logits)
        return tf.reduce_sum(input_tensor=probs_a * log_ratio, axis=-1)
Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical. Args: a: instance of a OneHotCategorical distribution object. b: instance of a OneHotCategorical distribution object. name: (optional) Name to use for created operations. default is "kl_categorical_categorical". Returns: Batchwise KL(a || b)
juraj-google-style
def _init_project_service(self, version):
    """Initialize the Project Service from the config data.

    Args:
        version (string): Version of Boss API to use.

    Returns:
        None

    Raises:
        KeyError: if the project config section is missing required keys.
    """
    project_cfg = self._load_config_section(CONFIG_PROJECT_SECTION)
    self._token_project = project_cfg[CONFIG_TOKEN]
    service = ProjectService(project_cfg[CONFIG_HOST], version)
    service.base_protocol = project_cfg[CONFIG_PROTOCOL]
    service.set_auth(self._token_project)
    self._project = service
Method to initialize the Project Service from the config data Args: version (string): Version of Boss API to use. Returns: None Raises: (KeyError): if given invalid version.
juraj-google-style
def _get_test_methods(self, test_names):
    """Resolves test method names to bound test methods.

    Args:
        test_names: A list of strings, each string is a test method name or
            a regex for matching test names.

    Returns:
        A list of tuples of (string, function). String is the test method
        name, function is the actual python method implementing its logic.

    Raises:
        Error: The test name does not follow naming convention 'test_*'.
            This can only be caused by user input.
    """
    test_methods = []
    for test_name in test_names:
        # Regex selectors expand to all matching existing test names.
        if test_name.startswith(TEST_SELECTOR_REGEX_PREFIX):
            regex_matching_methods = self._get_regex_matching_test_methods(test_name.removeprefix(TEST_SELECTOR_REGEX_PREFIX))
            test_methods += regex_matching_methods
            continue
        self._assert_valid_test_name(test_name)
        if test_name not in self.get_existing_test_names():
            raise Error(f'{self.TAG} does not have test method {test_name}.')
        # Prefer a real attribute; otherwise look up the generated-test
        # table (tests created dynamically rather than defined on the class).
        if hasattr(self, test_name):
            test_method = getattr(self, test_name)
        elif test_name in self._generated_test_table:
            test_method = self._generated_test_table[test_name]
        test_methods.append((test_name, test_method))
    return test_methods
Resolves test method names to bound test methods. Args: test_names: A list of strings, each string is a test method name or a regex for matching test names. Returns: A list of tuples of (string, function). String is the test method name, function is the actual python method implementing its logic. Raises: Error: The test name does not follow naming convention 'test_*'. This can only be caused by user input.
github-repos
class TvltProcessor(ProcessorMixin):
    """Constructs a TVLT processor which wraps a TVLT image processor and a
    TVLT feature extractor into a single processor.

    Offers all the functionalities of `TvltImageProcessor` and
    `TvltFeatureExtractor`. See the docstring of `__call__` for more
    information.

    Args:
        image_processor (`TvltImageProcessor`): required image processor.
        feature_extractor (`TvltFeatureExtractor`): required feature
            extractor.
    """
    attributes = ['image_processor', 'feature_extractor']
    image_processor_class = 'TvltImageProcessor'
    feature_extractor_class = 'TvltFeatureExtractor'

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """Forward `images` (and `images_mixed`) to the image processor and
        `audio` to the feature extractor, merging the resulting dicts
        (audio keys first, then image keys, then mixed-image keys).

        Raises:
            ValueError: if neither `images` nor `audio` is provided.
        """
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.')
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, *args, mask_pixel=mask_pixel, **kwargs)
            if images_mixed is not None:
                # Mixed images are processed with is_mixed=True so their
                # outputs are keyed distinctly from the primary images.
                images_mixed_dict = self.image_processor(images_mixed, *args, is_mixed=True, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, deduplicated while
        # preserving order (dict.fromkeys keeps first occurrence).
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
Constructs a TVLT processor which wraps a TVLT image processor and TVLT feature extractor into a single processor. [`TvltProcessor`] offers all the functionalities of [`TvltImageProcessor`] and [`TvltFeatureExtractor`]. See the docstring of [`~TvltProcessor.__call__`] for more information. Args: image_processor (`TvltImageProcessor`): An instance of [`TvltImageProcessor`]. The image processor is a required input. feature_extractor (`TvltFeatureExtractor`): An instance of [`TvltFeatureExtractor`]. The feature extractor is a required input.
github-repos
def item_at(self, row, column):
    """Return the TableItem instance at (row, column) coordinates.

    Args:
        row (int): zero based index
        column (int): zero based index
    """
    row_widget = self.children[str(row)]
    return row_widget.children[str(column)]
Returns the TableItem instance at row, column cordinates Args: row (int): zero based index column (int): zero based index
codesearchnet
def upload_benchmark_run(self, dataset_name, table_name, run_id):
    """Upload benchmark run information to Bigquery.

    Args:
        dataset_name: string, the name of the bigquery dataset where the
            data will be uploaded.
        table_name: string, the name of the bigquery table under the
            dataset where the data will be uploaded.
        run_id: string, a unique ID that will be attached to the data,
            usually a UUID4.
    """
    expected_file = os.path.join(self._logging_dir, logger.BENCHMARK_RUN_LOG_FILE_NAME)
    with tf.gfile.GFile(expected_file) as f:
        benchmark_json = json.load(f)
    # Tag the record with the run id before inserting.
    benchmark_json['model_id'] = run_id
    table_ref = self._bq_client.dataset(dataset_name).table(table_name)
    errors = self._bq_client.insert_rows_json(table_ref, [benchmark_json])
    if errors:
        # Best-effort upload: failures are logged, not raised.
        tf.logging.error('Failed to upload benchmark info to bigquery: {}'.format(errors))
Upload benchmark run information to Bigquery. Args: dataset_name: string, the name of bigquery dataset where the data will be uploaded. table_name: string, the name of bigquery table under the dataset where the data will be uploaded. run_id: string, a unique ID that will be attached to the data, usually this is a UUID4 format.
codesearchnet
def get_module_object_and_name(globals_dict):
    """Returns the module that defines a global environment, and its name.

    Args:
        globals_dict: A dictionary that should correspond to an environment
            providing the values of the globals.

    Returns:
        _ModuleObjectAndName - pair of module object & module name.
        Returns (None, None) if the module could not be identified.
    """
    module_name = globals_dict.get('__name__', None)
    module = sys.modules.get(module_name, None)
    # For the main script, report argv[0] instead of '__main__'.
    reported_name = sys.argv[0] if module_name == '__main__' else module_name
    return _ModuleObjectAndName(module, reported_name)
Returns the module that defines a global environment, and its name. Args: globals_dict: A dictionary that should correspond to an environment providing the values of the globals. Returns: _ModuleObjectAndName - pair of module object & module name. Returns (None, None) if the module could not be identified.
codesearchnet
def from_config(cls, config):
    """Creates TFGPT2Tokenizer from configurations.

    Args:
        config (Dict): Dictionary with keys such as stated in `get_config`.
    """
    kwargs = dict(config)
    return cls(**kwargs)
Creates TFGPT2Tokenizer from configurations Args: config (Dict): Dictionary with keys such as stated in `get_config`.
github-repos
def data_to_unicode(self, data):
    """Recursively convert a list or dictionary to unicode.

    Args:
        data: The data to be unicoded.

    Returns:
        Unicoded data.
    """
    if isinstance(data, dict):
        # dict.items() replaces the Python-2-only iteritems(), which
        # raises AttributeError on Python 3.
        return {self.to_unicode(k): self.to_unicode(v)
                for k, v in data.items()}
    if isinstance(data, list):
        return [self.to_unicode(item) for item in data]
    return self.to_unicode(data)
Recursively convert a list or dictionary to unicode. Args: data: The data to be unicoded. Returns: Unicoded data.
codesearchnet
def add_case(self, case_obj):
    """Add a case obj with individuals to the adapter.

    Args:
        case_obj (puzzle.models.Case)
    """
    for individual in case_obj.individuals:
        self._add_individual(individual)
    logger.debug("Adding case {0} to plugin".format(case_obj.case_id))
    self.case_objs.append(case_obj)
    # A tabix index means range queries are possible on this case.
    if case_obj.tabix_index:
        logger.debug("Setting filters.can_filter_range to True")
        self.filters.can_filter_range = True
Add a case obj with individuals to adapter Args: case_obj (puzzle.models.Case)
juraj-google-style
def __init__(self, agent, environment, repeat_actions=1, history=None, id_=0):
    """Initialize a single Runner object (one Agent/one Environment).

    Args:
        agent: The Agent to execute.
        environment: The Environment the agent acts in.
        repeat_actions (int): How often each chosen action is repeated
            (passed through to the base Runner).
        history: Run history passed to the base Runner
            (presumably for resuming statistics -- confirm with base class).
        id_ (int): The ID of this Runner (for distributed TF runs).
    """
    super(Runner, self).__init__(agent, environment, repeat_actions, history)
    self.id = id_
    # Set by the run loop; None until the first timestep executes.
    self.current_timestep = None
Initialize a single Runner object (one Agent/one Environment). Args: id_ (int): The ID of this Runner (for distributed TF runs).
juraj-google-style
def __parse_hgvs_syntax(self, aa_hgvs):
    """Convert HGVS syntax for an amino acid change into attributes.

    Specific details of the mutation are stored in attributes like
    self.initial (residue prior to mutation), self.pos (mutation position),
    self.mutated (resulting residue(s)), and self.stop_pos (position of a
    stop codon, if any). Classification flags such as self.is_valid and
    self.is_synonymous are updated as well.

    Args:
        aa_hgvs (str): amino acid string following HGVS syntax
    """
    self.is_valid = True
    self.is_synonymous = False
    if self.unknown_effect or self.is_no_protein:
        # Nothing parseable; just clear the position.
        self.pos = None
    elif self.is_lost_stop:
        self.initial = aa_hgvs[0]
        self.mutated = re.findall(r'([A-Z?*]+)$', aa_hgvs)[0]
        self.pos = int(re.findall(r'^\*(\d+)', aa_hgvs)[0])
        self.stop_pos = None
    elif self.is_lost_start:
        self.initial = aa_hgvs[0]
        self.mutated = aa_hgvs[-1]
        self.pos = int(aa_hgvs[1:-1])
    elif self.is_missense:
        self.initial = aa_hgvs[0]
        self.mutated = aa_hgvs[-1]
        self.pos = int(aa_hgvs[1:-1])
        self.stop_pos = None
        if self.initial == self.mutated:
            self.is_synonymous = True
            self.is_non_silent = False
        elif self.mutated == '*':
            self.is_nonsense_mutation = True
    elif self.is_indel:
        if self.is_insertion:
            if not self.is_missing_info:
                self.initial = re.findall(r'([A-Z])\d+', aa_hgvs)[:2]
                self.pos = tuple(map(int, re.findall(r'[A-Z](\d+)', aa_hgvs)[:2]))
                self.mutated = re.findall(r'(?<=INS)[A-Z0-9?*]+', aa_hgvs)[0]
                self.mutated = self.mutated.strip('?')
            else:
                self.initial = ''
                self.pos = tuple()
                self.mutated = ''
        elif self.is_deletion:
            if not self.is_missing_info:
                self.initial = re.findall(r'([A-Z])\d+', aa_hgvs)
                self.pos = tuple(map(int, re.findall(r'[A-Z](\d+)', aa_hgvs)))
                self.mutated = re.findall(r'(?<=DEL)[A-Z]*', aa_hgvs)[0]
            else:
                self.initial = ''
                self.pos = tuple()
                self.mutated = ''
    elif self.is_frame_shift:
        self.initial = aa_hgvs[0]
        self.mutated = ''
        try:
            self.pos = int(re.findall(r'[A-Z*](\d+)', aa_hgvs)[0])
            if self.is_premature_stop_codon:
                self.stop_pos = int(re.findall(r'\*>?(\d+)$', aa_hgvs)[0])
            else:
                self.stop_pos = None
        except IndexError:
            self.logger.debug('(Parsing-Problem) frame shift hgvs string: "%s"' % aa_hgvs)
            self.pos = None
            self.stop_pos = None
            self.is_missing_info = True
    elif self.is_nonsense_mutation:
        self.initial = aa_hgvs[0]
        self.mutated = '*'
        self.stop_pos = 0
        try:
            self.pos = int(aa_hgvs[1:-1])
        except ValueError:
            self.is_valid = False
            self.pos = None
            # Fix: this log string was corrupted (broken mid-literal) in the
            # original source; reconstructed from the matching message below.
            self.logger.debug('(Parsing-Problem) Invalid HGVS Amino Acid '
                              'syntax: ' + aa_hgvs)
        if self.initial == self.mutated:
            self.is_synonymous = True
            self.is_non_silent = False
    else:
        self.is_valid = False
        self.logger.debug('(Parsing-Problem) Invalid HGVS Amino Acid '
                          'syntax: ' + aa_hgvs)
Convert HGVS syntax for amino acid change into attributes. Specific details of the mutation are stored in attributes like self.initial (prior to mutation), self.pos (mutation position), self.mutated (mutation), and self.stop_pos (position of stop codon, if any). Args: aa_hgvs (str): amino acid string following HGVS syntax
juraj-google-style
def command(task: Task, command: str) -> Result:
    """Executes a command locally.

    Arguments:
        command: command to execute

    Returns:
        Result object with the following attributes set:
          * result (``str``): stderr or stdout
          * stdout (``str``): stdout
          * stderr (``str``): stderr

    Raises:
        :obj:`nornir.core.exceptions.CommandError`: when there is a
        command error
    """
    proc = subprocess.Popen(shlex.split(command),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            shell=False)
    raw_out, raw_err = proc.communicate()
    stdout = raw_out.decode()
    stderr = raw_err.decode()
    # Non-zero exit status means the command failed.
    if proc.poll():
        raise CommandError(command, proc.returncode, stdout, stderr)
    return Result(result=stderr if stderr else stdout,
                  host=task.host, stderr=stderr, stdout=stdout)
Executes a command locally Arguments: command: command to execute Returns: Result object with the following attributes set: * result (``str``): stderr or stdout * stdout (``str``): stdout * stderr (``str``): stderr Raises: :obj:`nornir.core.exceptions.CommandError`: when there is a command error
codesearchnet
def universal_transformer_depthwise_attention(layer_inputs, step, hparams,
                                              ffn_unit, attention_unit):
    """universal_transformer with depth-wise attention.

    It uses an attention mechanism -- flipped vertically -- over all the
    states from previous steps to generate the new_state.

    Args:
        layer_inputs:
            - state: state
            - inputs: original inputs (passed through unchanged)
            - memory: contains states from all the previous steps.
        step: indicating number of steps taken so far
        hparams: model hyper-parameters.
        ffn_unit: feed-forward unit
        attention_unit: multi-head attention unit

    Returns:
        layer_output:
            new_state: new state
            inputs: unchanged inputs
            memory: contains states from all the previous steps.
    """
    _, inputs, memory = layer_inputs
    all_states = memory
    if hparams.depth_embedding:
        all_states = add_depth_embedding(all_states)
    # Fix: the original had invalid index syntax `all_states[(:step, ...)]`;
    # select the states produced up to (excluding) the current step.
    states_so_far = all_states[:step, :, :, :]
    states_so_far_weights = tf.nn.softmax(
        common_layers.dense(
            states_so_far,
            hparams.hidden_size if hparams.dwa_elements else 1,
            activation=None,
            use_bias=True),
        axis=-1)
    # Weighted sum over the depth axis collapses history into one state.
    state_to_be_transformed = tf.reduce_sum(
        states_so_far * states_so_far_weights, axis=0)
    new_state = step_preprocess(state_to_be_transformed, step, hparams)
    for i in range(hparams.num_inrecurrence_layers):
        with tf.variable_scope('rec_layer_%d' % i):
            new_state = ffn_unit(attention_unit(new_state))
    # Store the new state in the memory slot for the next step.
    memory = fill_memory_slot(memory, new_state, step + 1)
    return (new_state, inputs, memory)
universal_transformer with depth-wise attention. It uses an attention mechanism-flipped vertically- over all the states from previous steps to generate the new_state. Args: layer_inputs: - state: state - memory: contains states from all the previous steps. step: indicating number of steps take so far hparams: model hyper-parameters. ffn_unit: feed-forward unit attention_unit: multi-head attention unit Returns: layer_output: new_state: new state memory: contains states from all the previous steps.
codesearchnet
def get_reversed_statuses(context):
    """Return a mapping of exit codes to status strings.

    Args:
        context (scriptworker.context.Context): the scriptworker context

    Returns:
        dict: the mapping of exit codes to status strings.
    """
    reversed_statuses = {code: status for status, code in STATUSES.items()}
    # Config-level overrides win over the built-in mapping.
    reversed_statuses.update(dict(context.config['reversed_statuses']))
    return reversed_statuses
Return a mapping of exit codes to status strings. Args: context (scriptworker.context.Context): the scriptworker context Returns: dict: the mapping of exit codes to status strings.
juraj-google-style
def build_mount_env(source, mounts):
    """Return a multi-line string with export statements for the variables.

    Arguments:
        source: Folder with the data. For example /mnt/data
        mounts: a list of MountParam

    Returns:
        a multi-line string with a shell script that sets environment
        variables corresponding to the mounts.
    """
    base = source.rstrip('/')
    exports = []
    for var in mounts:
        exports.append('export {0}={1}/{2}'.format(
            var.name, base, var.docker_path.rstrip('/')))
    return '\n'.join(exports)
Return a multi-line string with export statements for the variables. Arguments: source: Folder with the data. For example /mnt/data mounts: a list of MountParam Returns: a multi-line string with a shell script that sets environment variables corresponding to the mounts.
juraj-google-style
def parse(lines, root=None):
    """Parse ls output lines into Directory entries keyed by path.

    Args:
        lines (list): A list of lines generated by ls.
        root (str): The directory name used for ls output stanzas that
            don't have a header of their own.

    Returns:
        A dictionary representing the ls output, keyed by the path
        containing each ls stanza.
    """
    doc = {}
    entries = []
    name = None
    total = None
    for line in lines:
        line = line.strip()
        if (not line):
            continue
        # A line like "/some/path:" starts a new stanza.
        if (line and (line[0] == '/') and (line[(- 1)] == ':')):
            if (name is None):
                name = line[:(- 1)]
                if entries:
                    # NOTE(review): entries seen before the first header are
                    # filed under `root` -- confirm this matches callers.
                    d = Directory(name, (total or len(entries)), entries)
                    doc[root] = d
                    total = None
                    entries = []
            else:
                # Close out the previous stanza, then start the new one.
                d = Directory(name, (total or len(entries)), entries)
                doc[(name or root)] = d
                total = None
                entries = []
                name = line[:(- 1)]
            continue
        # "total N" lines carry the entry count reported by ls.
        if line.startswith('total'):
            total = int(line.split(None, 1)[1])
            continue
        entries.append(line)
    # Flush the final stanza.
    name = (name or root)
    doc[name] = Directory(name, (total or len(entries)), entries)
    return doc
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza.
codesearchnet
def recipe_dbm(config, auth_read, report, delete):
    """Create a DV360 report.

    Args:
        auth_read (authentication) - Credentials used for reading data.
        report (json) - Report body and filters.
        delete (boolean) - If report exists, delete it before creating a
            new one.
    """
    task = {'auth': auth_read, 'report': report, 'delete': delete}
    dbm(config, task)
Create a DV360 report. Args: auth_read (authentication) - Credentials used for reading data. report (json) - Report body and filters. delete (boolean) - If report exists, delete it before creating a new one.
github-repos
def output(self, _filename):
    """Print, per contract, each function together with its modifiers.

    Args:
        _filename(string): unused; kept for interface compatibility.
    """
    for contract in self.slither.contracts_derived:
        txt = "\nContract %s" % contract.name
        table = PrettyTable(["Function", "Modifiers"])
        for function in contract.functions:
            # Fix: copy the list. The original aliased function.modifiers,
            # so `modifiers += ...` mutated the function's own modifier
            # list in place.
            modifiers = list(function.modifiers)
            for call in function.all_internal_calls():
                if isinstance(call, Function):
                    modifiers += call.modifiers
            for (_, call) in function.all_library_calls():
                if isinstance(call, Function):
                    modifiers += call.modifiers
            table.add_row([function.name, [m.name for m in set(modifiers)]])
        txt += "\n" + str(table)
        self.info(txt)
_filename is not used Args: _filename(string)
juraj-google-style
def _ProduceSingleContent(self, mod, showprivate=False, showinh=False):
    """An internal helper to create a page for a single module.

    This will automatically generate the needed RST to document the module
    and save the module to its own page in its appropriate location.

    Args:
        mod (tuple): (name, module) pair for the module to document.
        showprivate (bool): A flag for whether or not to display private
            members.
        showinh (bool): Whether to document inherited members of classes.

    Returns:
        str: The file name ready to be appended to a toctree.
    """
    # A module without __all__ cannot be documented deterministically.
    try:
        all = mod[1].__all__
    except AttributeError:
        raise RuntimeError(('Module (%s) MUST have `__all__` defined.' % mod[1].__name__))
    # Prefer an explicit display name when the module provides one.
    try:
        name = mod[1].__displayname__
    except AttributeError:
        name = mod[0]
    # Track per-category usage counts for modules that declare a category.
    try:
        category = mod[1].__category__
        self.__categories.setdefault(category, 0)
        self.__categories[category] += 1
    except AttributeError:
        pass
    feats = inspect.getmembers(mod[1])
    # Output path mirrors the dotted module path under content/.
    fname = (('content/' + mod[1].__name__.replace('.', '/').replace(' ', '-')) + '.rst')
    # Keep only members listed in __all__; drop private names unless asked.
    feats = [f for f in feats if ((f[0] in all) and (showprivate or (not (f[0][0:1] == '_'))))]
    with open(fname, 'w') as fid:
        fid.write(Classifier.GetModuleText(name, mod[1].__name__, showprivate=showprivate))
        for f in feats:
            if (inspect.isclass(f[1]) or inspect.isfunction(f[1])):
                try:
                    featname = f[1].__displayname__
                except AttributeError:
                    featname = f[1].__name__
                # Same category bookkeeping for individual members.
                try:
                    category = f[1].__category__
                    self.__categories.setdefault(category, 0)
                    self.__categories[category] += 1
                except AttributeError:
                    pass
                if inspect.isclass(f[1]):
                    fid.write(Classifier.GetClassText(featname, ('%s.%s' % (mod[1].__name__, f[1].__name__)), showprivate=showprivate, showinh=showinh))
                elif inspect.isfunction(f[1]):
                    fid.write(Classifier.GetFunctionText(featname, ('%s.%s' % (mod[1].__name__, f[1].__name__))))
        fid.close()
    return ('\n %s' % fname.split('/')[(- 1)])
An internal helper to create a page for a single module. This will automatically generate the needed RST to document the module and save the module to its own page in its appropriate location. Args: mod (module): The single module to document as its own page showprivate (bool): A flag for whether or not to display private members Returns: str: The file name ready to be appended to a toctree
codesearchnet
def open_street_map_geoloc_link(data):
    """Get a link to OpenStreetMap pointing at this IP's geolocation.

    Args:
        data (str/tuple): IP address or (latitude, longitude).

    Returns:
        str: a link to OpenStreetMap pointing at the geolocation, or an
        empty string when the IP cannot be geolocated.
    """
    if isinstance(data, str):
        lat_lon = ip_geoloc(data)
        if lat_lon is None:
            return ''
        lat, lon = lat_lon
    else:
        lat, lon = data
    # Fix: the original return statement was truncated at 'https:';
    # rebuilt using OSM's marker URL parameters (mlat/mlon place a marker).
    return 'https://www.openstreetmap.org/?mlat=%s&mlon=%s' % (lat, lon)
Get a link to open street map pointing on this IP's geolocation. Args: data (str/tuple): IP address or (latitude, longitude). Returns: str: a link to open street map pointing on this IP's geolocation.
codesearchnet
def image(array, domain=None, width=None, format='png', **kwargs):
    """Display an image.

    Args:
        array: NumPy array representing the image
        format: Image format e.g. png, jpeg
        domain: Domain of pixel values, inferred from min & max values
            if None
        width: width of output image, scaled using nearest neighbor
            interpolation. size unchanged if None
    """
    data = serialize_array(array, fmt=format, domain=domain)
    displayable = IPython.display.Image(data=data, format=format, width=width)
    IPython.display.display(displayable)
Display an image. Args: array: NumPy array representing the image fmt: Image format e.g. png, jpeg domain: Domain of pixel values, inferred from min & max values if None w: width of output image, scaled using nearest neighbor interpolation. size unchanged if None
juraj-google-style
def uniquelines(q):
    """Given all the facets, convert it into a set of unique lines.

    Specifically used for converting convex hull facets into line pairs of
    coordinates.

    Args:
        q: A 2-dim sequence, where each row represents a facet. E.g.,
            [[1,2,3],[3,6,7],...]

    Returns:
        setoflines: A set of tuples of lines. E.g., ((1,2), (1,3), (2,3),
        ....)
    """
    return {tuple(sorted(pair))
            for facet in q
            for pair in itertools.combinations(facet, 2)}
Given all the facets, convert it into a set of unique lines. Specifically used for converting convex hull facets into line pairs of coordinates. Args: q: A 2-dim sequence, where each row represents a facet. E.g., [[1,2,3],[3,6,7],...] Returns: setoflines: A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
codesearchnet
def import_image_from_url(self, url, repository=None, tag=None, changes=None):
    """Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
    only supports importing from a URL.

    Args:
        url (str): A URL pointing to a tar file.
        repository (str): The repository to create
        tag (str): The tag to apply
    """
    kwargs = {'src': url, 'repository': repository, 'tag': tag,
              'changes': changes}
    return self.import_image(**kwargs)
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only supports importing from a URL. Args: url (str): A URL pointing to a tar file. repository (str): The repository to create tag (str): The tag to apply
juraj-google-style
def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):
    """This substitutor uses the substitution probability class to find
    good substitutions for a given chemistry or structure.

    Args:
        threshold: probability threshold for predictions
        symprec: symmetry precision to determine if two structures are
            duplicates
        kwargs: kwargs for the SubstitutionProbability object
            (lambda_table, alpha)
    """
    # Keep the raw kwargs so the configuration can be serialized/replayed.
    self._kwargs = kwargs
    self._sp = SubstitutionProbability(**kwargs)
    self._threshold = threshold
    self._symprec = symprec
This substitutor uses the substitution probability class to find good substitutions for a given chemistry or structure. Args: threshold: probability threshold for predictions symprec: symmetry precision to determine if two structures are duplicates kwargs: kwargs for the SubstitutionProbability object lambda_table, alpha
juraj-google-style
def _VerifyValues(self,
                  input_sizes=None,
                  filter_sizes=None,
                  out_backprop_sizes=None,
                  strides=None,
                  dilations=None,
                  padding=None,
                  data_format_src='NHWC',
                  data_format_dst='NHWC',
                  expected=None):
    """Tests that gen_nn_ops.conv2d_backprop_input produces the expected output.

    Args:
        input_sizes: Input tensor dimensions in
            [batch, input_rows, input_cols, input_depth].
        filter_sizes: Filter tensor dimensions in
            [kernel_rows, kernel_cols, input_depth, output_depth].
        out_backprop_sizes: Output gradients tensor dimensions.
        strides: Spatial strides (batch/depth dims are added here).
        dilations: Spatial dilations, or None.
        padding: Padding type.
        data_format_src: Data format the inputs are given in.
        data_format_dst: Data format the op runs in; inputs are converted.
        expected: Expected output values (flat; reshaped to input_sizes).
    """
    total_size_1 = np.prod(filter_sizes)
    total_size_2 = np.prod(out_backprop_sizes)
    # Deterministic ramp inputs (1..N) so expected values are reproducible.
    x1 = np.arange(1, total_size_1 + 1, dtype=np.float32).reshape(filter_sizes)
    x2 = np.arange(1, total_size_2 + 1, dtype=np.float32).reshape(out_backprop_sizes)
    # Strides/dilations are given for spatial dims only; add batch and depth
    # dims in NHWC order before any layout permutation below.
    strides = [1] + strides + [1]
    if dilations is not None:
        dilations = [1] + dilations + [1]
    expected = np.reshape(expected, input_sizes)
    # Convert data and attributes from the source layout to the layout under test.
    expected = test_utils.ConvertBetweenDataFormats(expected, data_format_src, data_format_dst)
    x2 = test_utils.ConvertBetweenDataFormats(x2, data_format_src, data_format_dst)
    input_sizes = test_utils.PermuteDimsBetweenDataFormats(input_sizes, data_format_src, data_format_dst)
    out_backprop_sizes = test_utils.PermuteDimsBetweenDataFormats(out_backprop_sizes, data_format_src, data_format_dst)
    strides = test_utils.PermuteDimsBetweenDataFormats(strides, data_format_src, data_format_dst)
    if dilations is not None:
        dilations = test_utils.PermuteDimsBetweenDataFormats(dilations, data_format_src, data_format_dst)
    with self.session() as sess:
        t1 = array_ops.placeholder(dtypes.float32, shape=filter_sizes)
        t2 = array_ops.placeholder(dtypes.float32, shape=out_backprop_sizes)
        with self.test_scope():
            out = gen_nn_ops.conv2d_backprop_input(input_sizes=input_sizes, filter=t1, out_backprop=t2, strides=strides, dilations=dilations, padding=padding, data_format=data_format_dst)
        value = sess.run(out, {t1: x1, t2: x2})
        self.assertAllEqual(input_sizes, value.shape)
        self.assertAllClose(expected, value, 0.001)
Tests that gen_nn_ops.conv2d_backprop_input produces the expected output. Args: input_sizes: Input tensor dimensions in [batch, input_rows, input_cols, input_depth]. filter_sizes: Filter tensor dimensions in [kernel_rows, kernel_cols, input_depth, output_depth]. out_backprop_sizes: Output gradients tensor dimensions. strides: Strides. dilations: Dilations. padding: Padding type. data_format_src: Data format input is in. data_format_dst: Data format verification will run and input is converted to. expected: Expected output.
github-repos
def key_changes(self, from_token, to_token):
    """List users who updated their device identity keys in a token window.

    Args:
        from_token (str): Start of the window; the ``next_batch`` field from
            an earlier ``/sync`` response.
        to_token (str): End of the window; the ``next_batch`` field from a
            recent ``/sync`` response.
    """
    return self._send(
        'GET',
        '/keys/changes',
        query_params={'from': from_token, 'to': to_token},
    )
Gets a list of users who have updated their device identity keys. Args: from_token (str): The desired start point of the list. Should be the next_batch field from a response to an earlier call to /sync. to_token (str): The desired end point of the list. Should be the next_batch field from a recent call to /sync - typically the most recent such call.
codesearchnet
def get_unit_by_id(self, unit_id: str) -> typing.Optional['BaseUnit']:
    """Look up a unit by its id.

    Args:
        unit_id: Unit id to look up.

    Returns:
        The matching unit, or ``None`` when no unit carries that id.
    """
    VALID_POSITIVE_INT.validate(unit_id, 'get_unit_by_id')
    return next((unit for unit in self.units if unit.unit_id == unit_id), None)
Gets a unit from its ID Args: unit_id: unit id Returns: Unit
codesearchnet
def _Check3DImage(image, require_static=True):
    """Assert that we are working with a properly shaped image.

    Args:
        image: 3-D Tensor of shape [height, width, channels].
        require_static: If `True`, requires that all dimensions of `image`
            are known and non-zero.

    Raises:
        ValueError: If image.shape is not a [3] vector, is not fully
            defined (when `require_static`), or has a zero dimension.
    """
    try:
        shape = image.get_shape().with_rank(3)
    except ValueError:
        raise ValueError("'image' must be three-dimensional.")
    if require_static and not shape.is_fully_defined():
        raise ValueError("'image' must be fully defined.")
    if any(dim == 0 for dim in shape):
        raise ValueError("all dims of 'image.shape' must be > 0: %s" % shape)
Assert that we are working with properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [3] vector.
juraj-google-style
def nice_join(seq, sep=', ', conjunction='or'):
    """Join strings into an English-friendly phrase.

    The last two items are joined with *conjunction*; everything before them
    is joined with *sep*.

    Args:
        seq (seq[str]): A sequence of strings to nicely join (items are
            coerced with ``str``).
        sep (str, optional): A sequence delimiter to use (default: ", ").
        conjunction (str or None, optional): A conjunction to use for the
            last two items, or None to reproduce basic join behavior
            (default: "or").

    Returns:
        str: A joined string.

    Examples:
        >>> nice_join(["a", "b", "c"])
        'a, b or c'
    """
    items = [str(item) for item in seq]
    if conjunction is None or len(items) < 2:
        return sep.join(items)
    return '{} {} {}'.format(sep.join(items[:-1]), conjunction, items[-1])
Join together sequences of strings into English-friendly phrases using a conjunction when appropriate. Args: seq (seq[str]) : a sequence of strings to nicely join sep (str, optional) : a sequence delimiter to use (default: ", ") conjunction (str or None, optional) : a conjunction to use for the last two items, or None to reproduce basic join behavior (default: "or") Returns: a joined string Examples: >>> nice_join(["a", "b", "c"]) 'a, b or c'
codesearchnet
def _compute_nfps_real(counts, sizes):
    """Tabulate expected false positives for every size sub-interval.

    Args:
        counts: The complete distribution of set sizes.
        sizes: The complete domain of set sizes.

    Returns:
        np.array: 2-D array where entry [l, u] (l on axis 0, u on axis 1) is
        the expected number of false positives for the [l, u] interval;
        entries with u < l stay zero.
    """
    num = len(sizes)
    nfps = np.zeros((num, num))
    for lower in range(num):
        for upper in range(lower, num):
            nfps[lower, upper] = _compute_nfp_real(lower, upper, counts, sizes)
    return nfps
Computes the matrix of expected false positives for all possible sub-intervals of the complete domain of set sizes. Args: counts: the complete distribution of set sizes. sizes: the complete domain of set sizes. Return (np.array): the 2-D array of expected number of false positives for every pair of [l, u] interval, where l is axis-0 and u is axis-1.
codesearchnet
def ParseOptions(cls, options, configuration_object):
    """Parses and validates the preferred-language option.

    Args:
        options (argparse.Namespace): Parser options.
        configuration_object (CLITool): Object to be configured by the
            argument helper.

    Raises:
        BadConfigObject: When the configuration object is of the wrong type.
    """
    if not isinstance(configuration_object, tools.CLITool):
        raise errors.BadConfigObject(
            'Configuration object is not an instance of CLITool')
    language = cls._ParseStringOption(
        options, 'preferred_language', default_value='en-US')
    setattr(configuration_object, '_preferred_language', language)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
juraj-google-style
def _sequence_like(instance, args):
    """Converts the sequence `args` to the same type as `instance`.

    Thin delegation to `nest_util.sequence_like`.

    Args:
        instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
            `collections.OrderedDict`, or `composite_tensor.Composite_Tensor`
            or `type_spec.TypeSpec`.
        args: items to be converted to the `instance` type.

    Returns:
        `args` with the type of `instance`.
    """
    return nest_util.sequence_like(instance, args)
Converts the sequence `args` to the same type as `instance`. Args: instance: an instance of `tuple`, `list`, `namedtuple`, `dict`, `collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or `type_spec.TypeSpec`. args: items to be converted to the `instance` type. Returns: `args` with the type of `instance`.
github-repos
def assertAllGreaterEqual(self, a, comparison_target):
    """Assert element values are all greater than or equal to a target value.

    Args:
        a: The numpy `ndarray`, or anything that can be converted into a
            numpy `ndarray` (including Tensor).
        comparison_target: The target value of comparison.
    """
    # Evaluate tensors (if any) first, then compare only the minimum
    # element: min(a) >= target implies every element is.
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    a = self._GetNdArray(a)
    self.assertGreaterEqual(np.min(a), comparison_target)
Assert element values are all greater than or equal to a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison.
github-repos
def l1_regularizer(weight=1.0, scope=None):
    """Define a L1 regularizer.

    Args:
        weight: Scale the loss by this factor.
        scope: Optional scope for name_scope.

    Returns:
        A regularizer function mapping a tensor to its weighted L1 loss.
    """
    def regularizer(tensor):
        with tf.name_scope(scope, 'L1Regularizer', [tensor]):
            scale = tf.convert_to_tensor(weight,
                                         dtype=tensor.dtype.base_dtype,
                                         name='weight')
            return tf.multiply(scale,
                               tf.reduce_sum(tf.abs(tensor)),
                               name='value')
    return regularizer
Define a L1 regularizer. Args: weight: scale the loss by this factor. scope: Optional scope for name_scope. Returns: a regularizer function.
juraj-google-style
def get_registered_object(name, custom_objects=None, module_objects=None):
    """Returns the class associated with `name` if it is registered with Keras.

    Lookup order: the global custom-object registry, then `custom_objects`
    (generally user-provided), then `module_objects` (generally provided by
    mid-level library implementers).

    Args:
        name: The name to look up.
        custom_objects: A dictionary of custom objects to look the name up in.
        module_objects: A dictionary of custom objects to look the name up in.

    Returns:
        An instantiable class associated with `name`, or None if no such
        class exists.
    """
    for registry in (_GLOBAL_CUSTOM_OBJECTS, custom_objects, module_objects):
        if registry and name in registry:
            return registry[name]
    return None
Returns the class associated with `name` if it is registered with Keras. This function is part of the Keras serialization and deserialization framework. It maps strings to the objects associated with them for serialization/deserialization. Example: ``` def from_config(cls, config, custom_objects=None): if 'my_custom_object_name' in config: config['hidden_cls'] = tf.keras.utils.get_registered_object( config['my_custom_object_name'], custom_objects=custom_objects) ``` Args: name: The name to look up. custom_objects: A dictionary of custom objects to look the name up in. Generally, custom_objects is provided by the user. module_objects: A dictionary of custom objects to look the name up in. Generally, module_objects is provided by midlevel library implementers. Returns: An instantiable class associated with 'name', or None if no such class exists.
github-repos
def run_attack_work(self, work_id):
    """Runs one attack work piece.

    Downloads the attack submission, runs it against the dataset batch,
    enforces the epsilon constraint, uploads the resulting adversarial
    images and records the batch in the datastore.

    Args:
        work_id: ID of the piece of work to run.

    Returns:
        elapsed_time_sec, submission_id - elapsed time and id of the
        submission.

    Raises:
        WorkerError: if an error occurred during execution.
    """
    adv_batch_id = (
        self.attack_work.work[work_id]['output_adversarial_batch_id'])
    adv_batch = self.adv_batches[adv_batch_id]
    dataset_batch_id = adv_batch['dataset_batch_id']
    submission_id = adv_batch['submission_id']
    epsilon = self.dataset_batches[dataset_batch_id]['epsilon']
    logging.info('Attack work piece: '
                 'dataset_batch_id="%s" submission_id="%s" '
                 'epsilon=%d', dataset_batch_id, submission_id, epsilon)
    if submission_id in self.blacklisted_submissions:
        raise WorkerError('Blacklisted submission')
    # Download the submission payload before running it.
    attack = AttackSubmission(submission_id, self.submissions,
                              self.storage_bucket)
    attack.download()
    input_dir = os.path.join(LOCAL_DATASET_DIR, dataset_batch_id)
    if attack.type == TYPE_TARGETED:
        # Prepare target classes for the targeted attack.
        target_class_filename = os.path.join(input_dir, 'target_class.csv')
        self.dataset_meta.save_target_classes_for_batch(target_class_filename,
                                                        self.dataset_batches,
                                                        dataset_batch_id)
    # Recreate the working directories from scratch; the raw output dir may
    # contain root-owned files from the container, hence sudo removal.
    if os.path.exists(LOCAL_OUTPUT_DIR):
        sudo_remove_dirtree(LOCAL_OUTPUT_DIR)
    os.mkdir(LOCAL_OUTPUT_DIR)
    if os.path.exists(LOCAL_PROCESSED_OUTPUT_DIR):
        shutil.rmtree(LOCAL_PROCESSED_OUTPUT_DIR)
    os.mkdir(LOCAL_PROCESSED_OUTPUT_DIR)
    if os.path.exists(LOCAL_ZIPPED_OUTPUT_DIR):
        shutil.rmtree(LOCAL_ZIPPED_OUTPUT_DIR)
    os.mkdir(LOCAL_ZIPPED_OUTPUT_DIR)
    elapsed_time_sec = attack.run(input_dir, LOCAL_OUTPUT_DIR, epsilon)
    if attack.type == TYPE_TARGETED:
        os.remove(target_class_filename)
    # Clip generated images to the epsilon ball and hash them.
    image_hashes = eval_lib.enforce_epsilon_and_compute_hash(
        input_dir, LOCAL_OUTPUT_DIR, LOCAL_PROCESSED_OUTPUT_DIR, epsilon)
    if not image_hashes:
        logging.warning('No images saved by the attack.')
        return elapsed_time_sec, submission_id
    # Rename images to globally unique ids and record their metadata.
    for clean_image_id, hash_val in iteritems(image_hashes):
        adv_img_id = adv_batch_id + '_' + clean_image_id
        os.rename(
            os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, clean_image_id + '.png'),
            os.path.join(LOCAL_PROCESSED_OUTPUT_DIR, adv_img_id + '.png'))
        image_path = '{0}/adversarial_images/{1}/{1}.zip/{2}.png'.format(
            self.round_name, adv_batch_id, adv_img_id)
        adv_batch['images'][adv_img_id] = {
            'clean_image_id': u'' + str(clean_image_id),
            'image_path': u'' + str(image_path),
            'image_hash': u'' + str(hash_val),
        }
    zipped_images_filename = os.path.join(LOCAL_ZIPPED_OUTPUT_DIR,
                                          adv_batch_id + '.zip')
    try:
        logging.debug('Compressing adversarial images to %s',
                      zipped_images_filename)
        shell_call([
            'zip', '-j', '-r', zipped_images_filename,
            LOCAL_PROCESSED_OUTPUT_DIR])
    except subprocess.CalledProcessError as e:
        # Fixed message: was "Can''t ... iamges" (adjacent string literals
        # concatenating to "Cant", plus a typo).
        raise WorkerError("Can't make archive from adversarial images", e)
    dst_filename = '{0}/adversarial_images/{1}/{1}.zip'.format(
        self.round_name, adv_batch_id)
    logging.debug(
        'Copying archive with adversarial images to %s', dst_filename)
    self.storage_client.new_blob(dst_filename).upload_from_filename(
        zipped_images_filename)
    logging.debug('Writing adversarial batch to datastore')
    self.adv_batches.write_single_batch_images_to_datastore(adv_batch_id)
    return elapsed_time_sec, submission_id
Runs one attack work. Args: work_id: ID of the piece of work to run Returns: elapsed_time_sec, submission_id - elapsed time and id of the submission Raises: WorkerError: if error occurred during execution.
juraj-google-style
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
    """Gets good old csv data from a file.

    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        encoding: Loads the file with the specified cell encoding.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.

    Returns:
        A single-element list wrapping the table: either a fully realized
        list of rows, or (with ``on_demand``) a lazy generator of rows.
    """
    def yield_csv(csv_contents, csv_file):
        # Generator over rows that closes the underlying file once
        # iteration finishes or is abandoned; close is best-effort.
        try:
            for line in csv_contents:
                yield line
        finally:
            try:
                csv_file.close()
            except:
                pass

    def process_csv(csv_contents, csv_file):
        # Eagerly drain the generator so the file is closed before return.
        return [line for line in yield_csv(csv_contents, csv_file)]

    if file_contents:
        csv_file = BytesIO(file_contents)
    else:
        csv_file = open(file_name, 'rb')
    # NOTE(review): csv.reader is passed an `encoding` kwarg, so `csv`
    # here is presumably the `unicodecsv` package, not the stdlib module —
    # confirm against the file's imports.
    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
    if on_demand:
        table = yield_csv(reader, csv_file)
    else:
        table = process_csv(reader, csv_file)
    return [table]
Gets good old csv data from a file. Args: file_name: The name of the local file, or the holder for the extension type when the file_contents are supplied. encoding: Loads the file with the specified cell encoding. file_contents: The file-like object holding contents of file_name. If left as None, then file_name is directly loaded. on_demand: Requests that a yielder be used in place of a full data copy.
codesearchnet
def calc_attribute_statistics(self, statistic_name):
    """Calculates summary statistics over the domains of each attribute.

    Args:
        statistic_name (string): Name of a numpy statistic method, such as
            ``mean``, ``std``, ``max`` or ``min``.

    Returns:
        dict: Statistic computed from each attribute grid, keyed by
        attribute name.
    """
    stats = {}
    for name, grids in self.attributes.items():
        if len(grids) > 1:
            # Reduce each time step over its own mask, then reduce the
            # per-step values with the same statistic.
            per_step = [
                getattr(np.ma.array(grid, mask=self.masks[step] == 0),
                        statistic_name)()
                for step, grid in enumerate(grids)
            ]
            stats[name] = getattr(np.array(per_step), statistic_name)()
        else:
            masked = np.ma.array(grids[0], mask=self.masks[0] == 0)
            stats[name] = getattr(masked, statistic_name)()
    return stats
Calculates summary statistics over the domains of each attribute. Args: statistic_name (string): numpy statistic, such as mean, std, max, min Returns: dict of statistics from each attribute grid.
juraj-google-style
def apply_transformations(collection, transformations, select=None):
    """Apply all transformations to the variables in the collection.

    Args:
        collection: The variable collection to transform.
        transformations (list): List of transformation specs to apply; each
            spec carries a 'name', an optional 'input' column list, and
            keyword arguments for the transformation.
        select (list): Optional list of names of variables to retain after
            all transformations are applied.

    Returns:
        The transformed collection.
    """
    for spec in transformations:
        kwargs = dict(spec)
        func = kwargs.pop('name')
        cols = kwargs.pop('input', None)
        if isinstance(func, string_types):
            # 'and'/'or' clash with Python keywords; the implementations
            # are exposed with a trailing underscore.
            if func in ('and', 'or'):
                func += '_'
            if not hasattr(transform, func):
                raise ValueError("No transformation '%s' found!" % func)
            func = getattr(transform, func)
        func(collection, cols, **kwargs)
    if select is not None:
        transform.Select(collection, select)
    return collection
Apply all transformations to the variables in the collection. Args: transformations (list): List of transformations to apply. select (list): Optional list of names of variables to retain after all transformations are applied.
codesearchnet
def __call__(self, inputs: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_input_ids: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, freeze_feature_encoder: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None):
    """Run the speech encoder-decoder module on raw speech inputs.

    Defaults missing masks/positions, casts inputs to the expected dtypes
    and applies the underlying Flax module.
    """
    # Fall back to the model config for any unspecified output options.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    if attention_mask is None:
        # Attend to every input frame by default.
        attention_mask = jnp.ones_like(inputs, dtype='i4')
    if decoder_input_ids is None:
        raise ValueError('`decoder_input_ids` cannot be `None`. For sequence to sequence training, `decoder_position_ids` must be specified as an input argument.')
    if decoder_attention_mask is None:
        decoder_attention_mask = jnp.ones_like(decoder_input_ids)
    if decoder_position_ids is None:
        # Default positions 0..seq_len-1, broadcast over the batch.
        batch_size, sequence_length = decoder_input_ids.shape
        decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
    # Thread a dropout PRNG only when one was supplied.
    rngs = {'dropout': dropout_rng} if dropout_rng is not None else {}
    return self.module.apply({'params': params or self.params}, inputs=jnp.array(inputs, dtype='f4'), attention_mask=jnp.array(attention_mask, dtype='i4'), decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, freeze_feature_encoder=freeze_feature_encoder, rngs=rngs)
Returns: Examples: ```python >>> from transformers import FlaxSpeechEncoderDecoderModel, AutoTokenizer >>> # load a fine-tuned wav2vec2-2-bart model >>> model = FlaxSpeechEncoderDecoderModel.from_pretrained("patrickvonplaten/wav2vec2-2-bart-large") >>> # load output tokenizer >>> tokenizer_output = AutoTokenizer.from_pretrained("facebook/bart-large") >>> inputs = jnp.ones((2, 5000), dtype=jnp.float32) >>> # use bart's special bos, pad and eos tokens >>> model.config.decoder_start_token_id = model.decoder.config.bos_token_id >>> model.config.pad_token_id = model.decoder.config.pad_token_id >>> model.config.eos_token_id = model.decoder.config.eos_token_id >>> outputs = model.generate(inputs) # Assert something? More interesting input? dtype correct? ```
github-repos
def __init__(self, initial_learning_rate, decay_steps, decay_rate, staircase=False, name=None):
    """Applies exponential decay to the learning rate.

    Args:
        initial_learning_rate: A scalar `float32` or `float64` `Tensor` or
            a Python number. The initial learning rate.
        decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
            number. Must be positive.
        decay_rate: A scalar `float32` or `float64` `Tensor` or a Python
            number. The decay rate.
        staircase: Boolean. If `True`, decay the learning rate at discrete
            intervals.
        name: String. Optional name of the operation. Defaults to
            'ExponentialDecay'.
    """
    super(ExponentialDecay, self).__init__()
    (self.initial_learning_rate,
     self.decay_steps,
     self.decay_rate) = (initial_learning_rate, decay_steps, decay_rate)
    self.staircase = staircase
    self.name = name
Applies exponential decay to the learning rate. Args: initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The initial learning rate. decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Must be positive. See the decay computation above. decay_rate: A scalar `float32` or `float64` `Tensor` or a Python number. The decay rate. staircase: Boolean. If `True` decay the learning rate at discrete intervals name: String. Optional name of the operation. Defaults to 'ExponentialDecay'.
github-repos
def parse_config(data: dict) -> dict:
    """Parse MIP config file.

    Args:
        data (dict): Raw YAML input from a MIP analysis config file.

    Returns:
        dict: Parsed data with normalized keys.
    """
    return {
        'email': data.get('email'),
        'family': data['family_id'],
        'samples': [
            {'id': sample_id, 'type': analysis_type}
            for sample_id, analysis_type in data['analysis_type'].items()
        ],
        'config_path': data['config_file_analysis'],
        # `dry_run_all` is only present for dry runs; the membership test
        # already yields a bool ("True if ... else False" was redundant).
        'is_dryrun': 'dry_run_all' in data,
        'log_path': data['log_file'],
        'out_dir': data['outdata_dir'],
        'priority': data['slurm_quality_of_service'],
        'sampleinfo_path': data['sample_info_file'],
    }
Parse MIP config file. Args: data (dict): raw YAML input from MIP analysis config file Returns: dict: parsed data
juraj-google-style
def disable_eager_op_as_function(unused_msg: str) -> Callable[[_F], _F]:
    """Decorator for a function in a with_eager_op_as_function enabled test class.

    Blocks the function from being run with eager_op_as_function enabled.

    Args:
        unused_msg: Reason for disabling (documentation only; not used).

    Returns:
        The wrapped function with _disable_eager_op_as_function attr set to
        True.
    """
    return _disable_test(execute_func=False)
Decorator for a function in a with_eager_op_as_function enabled test class. Blocks the function from being run with eager_op_as_function enabled. Args: unused_msg: Reason for disabling. Returns: The wrapped function with _disable_eager_op_as_function attr set to True.
github-repos
def collection(self, **kwargs):
    """Search for collections by name.

    Args:
        query: CGI escaped string.
        page: (optional) Minimum value of 1. Expected value is an integer.
        language: (optional) ISO 639-1 code.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('collection'), kwargs)
    self._set_attrs_to_values(response)
    return response
Search for collections by name. Args: query: CGI escpaed string. page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. Returns: A dict respresentation of the JSON returned from the API.
juraj-google-style
def _process_origin(self, req, resp, origin): if self._cors_config['allow_all_origins']: if self.supports_credentials: self._set_allow_origin(resp, origin) else: self._set_allow_origin(resp, '*') return True if (origin in self._cors_config['allow_origins_list']): self._set_allow_origin(resp, origin) return True regex = self._cors_config['allow_origins_regex'] if (regex is not None): if regex.match(origin): self._set_allow_origin(resp, origin) return True return False
Inspects the request and adds the Access-Control-Allow-Origin header if the requested origin is allowed. Returns: ``True`` if the header was added and the requested origin is allowed, ``False`` if the origin is not allowed and the header has not been added.
codesearchnet
def get_asn_whois(self, retry_count=3):
    """Retrieve raw ASN data for an IP address from Cymru via 43/tcp (WHOIS).

    Args:
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.

    Returns:
        str: The raw ASN data.

    Raises:
        ASNLookupError: The ASN lookup failed.
    """
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.settimeout(self.timeout)
        log.debug('ASN query for {0}'.format(self.address_str))
        conn.connect((CYMRU_WHOIS, 43))
        conn.send((
            ' -r -a -c -p -f {0}{1}'.format(
                self.address_str, '\r\n')
        ).encode())
        data = ''
        while True:
            d = conn.recv(4096).decode()
            data += d
            if not d:
                break
        conn.close()
        return str(data)
    except (socket.timeout, socket.error) as e:
        log.debug('ASN query socket error: {0}'.format(e))
        if retry_count > 0:
            log.debug('ASN query retrying (count: {0})'.format(
                str(retry_count)))
            return self.get_asn_whois(retry_count - 1)
        else:
            raise ASNLookupError(
                'ASN lookup failed for {0}.'.format(self.address_str)
            )
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; `Exception` keeps the catch-all intent without
        # masking interpreter-level exits.
        raise ASNLookupError(
            'ASN lookup failed for {0}.'.format(self.address_str)
        )
The function for retrieving ASN information for an IP address from Cymru via port 43/tcp (WHOIS). Args: retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. Returns: str: The raw ASN data. Raises: ASNLookupError: The ASN lookup failed.
juraj-google-style
def _flat_types(self):
    """Returns a list of `tf.DType`s for the element tensor representation.

    Delegates to `structure.get_flat_tensor_types` on this dataset's
    `element_spec`.

    Returns:
        A list of `tf.DType`s for the element tensor representation.
    """
    return structure.get_flat_tensor_types(self.element_spec)
Returns a list `tf.DType`s for the element tensor representation. Returns: A list `tf.DType`s for the element tensor representation.
github-repos
def default_peek(python_type, exposes):
    """Autoserializer factory. Works best in Python 3.

    Probes how instances of `python_type` can be constructed and returns a
    matching deserializer.

    Arguments:
        python_type (type): type constructor.
        exposes (iterable): sequence of attributes.

    Returns:
        callable: deserializer (`peek` routine).
    """
    with_args = False
    make = python_type
    try:
        make()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        # No-arg construction failed; fall back to bare __new__.
        make = (lambda : python_type.__new__(python_type))
    try:
        make()
    except (SystemExit, KeyboardInterrupt):
        raise
    except:
        # Bare __new__ failed too; assume __new__ takes the stored state.
        make = (lambda args: python_type.__new__(python_type, *args))
        with_args = True
    def missing(attr):
        # Uniform error for attributes that refuse assignment.
        return AttributeError("can't set attribute '{}' ({})".format(attr, python_type))
    if with_args:
        # State is passed positionally to __new__, in `exposes` order;
        # absent attributes become None placeholders.
        def peek(store, container, _stack=None):
            state = []
            for attr in exposes:
                if (attr in container):
                    state.append(store.peek(attr, container, _stack=_stack))
                else:
                    state.append(None)
            return make(state)
    elif ('__dict__' in exposes):
        # Whole-dict exposure: restore every attribute found in the store.
        def peek(store, container, _stack=None):
            obj = make()
            for attr in container:
                val = store.peek(attr, container, _stack=_stack)
                try:
                    setattr(obj, attr, val)
                except AttributeError:
                    raise missing(attr)
            return obj
    else:
        # Fixed attribute list: missing entries are set to None.
        def peek(store, container, _stack=None):
            obj = make()
            for attr in exposes:
                if (attr in container):
                    val = store.peek(attr, container, _stack=_stack)
                else:
                    val = None
                try:
                    setattr(obj, attr, val)
                except AttributeError:
                    raise missing(attr)
            return obj
    return peek
Autoserializer factory. Works best in Python 3. Arguments: python_type (type): type constructor. exposes (iterable): sequence of attributes. Returns: callable: deserializer (`peek` routine).
codesearchnet
def peek_step(self, val: ArrayValue, sn: "DataNode") -> Tuple[ObjectValue, "DataNode"]:
    """Return the entry addressed by the receiver plus its schema node.

    Args:
        val: Current value (array).
        sn: Current schema node.

    Returns:
        Tuple of the matching entry (or ``None``) and the schema node.
    """
    keys = self.parse_keys(sn)
    for entry in val:
        try:
            if all(entry[k] == v for k, v in keys.items()):
                return (entry, sn)
        except KeyError:
            # Entry lacks one of the key leaves, so it cannot match.
            continue
    return (None, sn)
Return the entry addressed by the receiver + its schema node. Args: val: Current value (array). sn: Current schema node.
juraj-google-style
def long_id(self, sample):
    """Return the longitude corresponding to a sample number.

    Args:
        sample (int): Sample number on a line.

    Returns:
        Corresponding longitude in degrees.
    """
    if self.grid == 'WAC':
        # WAC: offset scaled by the local radius; intermediate value is in
        # radians and converted to degrees on return.
        offset = (sample - self.SAMPLE_PROJECTION_OFFSET) - 1
        lon = self.CENTER_LONGITUDE + (
            (offset * self.MAP_SCALE * 0.001)
            / (self.A_AXIS_RADIUS * np.cos((self.CENTER_LATITUDE * np.pi) / 180.0)))
        return (lon * 180) / np.pi
    # Other grids: simple linear mapping already in degrees.
    offset = (sample - float(self.SAMPLE_PROJECTION_OFFSET)) - 1
    return float(self.CENTER_LONGITUDE) + offset / float(self.MAP_RESOLUTION)
Return the corresponding longitude

Args:
    sample (int): sample number on a line

Returns:
    Corresponding longitude in degrees
codesearchnet
def _forward_and_backward_functions(self, inference_args, input_tangents):
    """Shortcut for when only first-order gradients are required.

    The returned backward function does not accept gradients with respect
    to side outputs of the forward function, which is fine as long as
    second-order tape gradients cannot be requested (e.g. a single
    non-persistent GradientTape). Skipping side outputs avoids potentially
    slow graph building.

    Args:
        inference_args: A flat list of Tensors, arguments to the inference
            function.
        input_tangents: A flat list of Tensors, jvps associated with
            `inference_args`.

    Returns:
        A tuple of (forward_function, backward_function).
    """
    # Restrict to the inference outputs only; the extra outputs beyond
    # _num_inference_outputs exist solely for higher-order gradients.
    outputs = self._func_graph.outputs[:self._num_inference_outputs]
    return self._build_functions_for_outputs(outputs, inference_args, input_tangents)
Shortcut for when only first-order gradients are required. The returned backward function does not accept gradients with respect to side output of forward_function. This is fine as long as the user can't possibly request second order tape gradients, as when they've used a single non-persistent GradientTape. Since we don't need the backward function to take gradients with respect to side outputs, we can skip some potentially slow graph building. Args: inference_args: A flat list of Tensors, arguments to the inference function. input_tangents: A flat list of Tensors, jvps associated with `inference_args`. Returns: A tuple of (forward_function, backward_function): forward_function: Takes the same inputs as the inference function, but returns side outputs used by backward_function in addition to the inference function's outputs. backward_function: Takes side outputs from forward_function and gradients with respect to the "real" outputs of forward_function and returns gradients with respect to the inputs.
github-repos
def _write_json_blob(encoded_value, pipeline_id=None):
    """Writes a JSON encoded value to a Cloud Storage File.

    This function will store the blob in a GCS file in the default bucket
    under the appengine_pipeline directory. Optionally using another
    directory level specified by pipeline_id.

    Args:
        encoded_value: The encoded JSON string.
        pipeline_id: A pipeline id to segment files in Cloud Storage; if
            None, the file will be created under appengine_pipeline.

    Returns:
        The blobstore.BlobKey for the file that was created.
    """
    default_bucket = app_identity.get_default_gcs_bucket_name()
    if (default_bucket is None):
        # NOTE(review): the URL in this message appears truncated in the
        # source; confirm the intended documentation link.
        raise Exception('No default cloud storage bucket has been set for this application. This app was likely created before v1.9.0, please see: https:
    path_components = ['/', default_bucket, 'appengine_pipeline']
    if pipeline_id:
        path_components.append(pipeline_id)
    # Random leaf name avoids collisions between blobs of the same pipeline.
    path_components.append(uuid.uuid4().hex)
    file_name = posixpath.join(*path_components)
    with cloudstorage.open(file_name, 'w', content_type='application/json') as f:
        # Write in _MAX_JSON_SIZE slices rather than one oversized write.
        for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
            end_index = (start_index + _MAX_JSON_SIZE)
            f.write(encoded_value[start_index:end_index])
    key_str = blobstore.create_gs_key(('/gs' + file_name))
    logging.debug('Created blob for filename = %s gs_key = %s', file_name, key_str)
    return blobstore.BlobKey(key_str)
Writes a JSON encoded value to a Cloud Storage File. This function will store the blob in a GCS file in the default bucket under the appengine_pipeline directory. Optionally using another directory level specified by pipeline_id Args: encoded_value: The encoded JSON string. pipeline_id: A pipeline id to segment files in Cloud Storage, if none, the file will be created under appengine_pipeline Returns: The blobstore.BlobKey for the file that was created.
codesearchnet
def gmove(pattern, destination):
    """Move all files found by ``glob.glob(pattern)`` to a directory.

    Args:
        pattern (str): Glob pattern.
        destination (str): Path to the destination directory.

    Returns:
        bool: True if every move succeeds, False as soon as one fails.
    """
    # all() short-circuits on the first failed move, matching the original
    # early-return behavior; an empty match set yields True.
    return all(move(match, destination) for match in glob.glob(pattern))
Move all file found by glob.glob(pattern) to destination directory. Args: pattern (str): Glob pattern destination (str): Path to the destination directory. Returns: bool: True if the operation is successful, False otherwise.
juraj-google-style
def add_direct(self, target, var_id, var_type, data):
    """Directly add a config variable.

    This method is meant to be called from emulation scenarios that want
    to directly set config database entries from python.

    Args:
        target (SlotIdentifier): The target slot for this config variable.
        var_id (int): The config variable ID.
        var_type (str): The config variable type.
        data (bytes or int or str): The data that will be encoded
            according to var_type.

    Raises:
        DataError: If there is not enough space left in the config
            database for the new entry.
    """
    # Entries are stored as a little-endian u16 variable id followed by
    # the encoded payload.
    data = struct.pack('<H', var_id) + _convert_to_bytes(var_type, data)
    if (self.data_size - self.data_index) < len(data):
        # Fixed message typo: "conig" -> "config".
        raise DataError('Not enough space for data in new config entry',
                        needed_space=len(data),
                        actual_space=(self.data_size - self.data_index))
    new_entry = ConfigEntry(target, var_id, data)
    # A new entry shadows any previous entry for the same (target, var_id).
    for entry in self.entries:
        if entry.target == new_entry.target and entry.var_id == new_entry.var_id:
            entry.valid = False
    self.entries.append(new_entry)
    self.data_index += new_entry.data_space()
Directly add a config variable.

This method is meant to be called from emulation scenarios that want
to directly set config database entries from python.

Args:
    target (SlotIdentifier): The target slot for this config variable.
    var_id (int): The config variable ID
    var_type (str): The config variable type
    data (bytes or int or str): The data that will be encoded according
        to var_type.
codesearchnet
def setErrorHandler(self, errorhandler):
    """Sets a new error handler.

    Args:
        errorhandler: The object handling AMPL errors and warnings.
    """
    class ErrorHandlerWrapper(ErrorHandler):
        # Adapts the user handler: converts low-level amplpython exceptions
        # to AMPLException, and records any exception the user handler
        # raises so it can be re-raised later via check().
        def __init__(self, errorhandler):
            self.errorhandler = errorhandler
            self.last_exception = None

        def error(self, exception):
            if isinstance(exception, amplpython.AMPLException):
                exception = AMPLException(exception)
            try:
                self.errorhandler.error(exception)
            except Exception as e:
                # Stash rather than propagate; presumably exceptions cannot
                # cross the amplpython callback boundary — confirm.
                self.last_exception = e

        def warning(self, exception):
            if isinstance(exception, amplpython.AMPLException):
                exception = AMPLException(exception)
            try:
                self.errorhandler.warning(exception)
            except Exception as e:
                self.last_exception = e

        def check(self):
            # Re-raise (and clear) the last stashed exception, if any.
            if self.last_exception is not None:
                e, self.last_exception = self.last_exception, None
                raise e

    errorhandler_wrapper = ErrorHandlerWrapper(errorhandler)

    class InnerErrorHandler(amplpython.ErrorHandler):
        # Thin shim handed to amplpython; delegates to the wrapper above.
        def error(self, exception):
            errorhandler_wrapper.error(exception)
        def warning(self, exception):
            errorhandler_wrapper.warning(exception)

    # Keep references on self — presumably so the handler objects outlive
    # this call while registered with the underlying implementation.
    self._errorhandler = errorhandler
    self._errorhandler_inner = InnerErrorHandler()
    self._errorhandler_wrapper = errorhandler_wrapper
    lock_and_call(
        lambda: self._impl.setErrorHandler(self._errorhandler_inner),
        self._lock
    )
Sets a new error handler. Args: errorhandler: The object handling AMPL errors and warnings.
juraj-google-style
def verify_binary(flag_name, process_args=None):
    """Exits the program if the binary from the given flag doesn't run.

    Args:
        flag_name: Name of the flag that should be the path to the binary.
        process_args: Args to pass to the binary to do nothing but verify
            that it's working correctly (something like "--version" is
            good). Optional. Defaults to no args.

    Raises:
        SystemExit: With error if the process did not work.
    """
    if process_args is None:
        process_args = []
    path = getattr(FLAGS, flag_name)
    if not path:
        logging.error('Flag %r not set' % flag_name)
        sys.exit(1)
    with open(os.devnull, 'w') as dev_null:
        try:
            subprocess.check_call(
                [path] + process_args, stdout=dev_null, stderr=subprocess.STDOUT)
        except Exception:
            # Was a bare `except:`, which also caught KeyboardInterrupt and
            # SystemExit; `Exception` keeps the catch-all intent without
            # masking interpreter-level exits.
            logging.exception('--%s binary at path %r does not work',
                              flag_name, path)
            sys.exit(1)
Exits the program if the binary from the given flag doesn't run. Args: flag_name: Name of the flag that should be the path to the binary. process_args: Args to pass to the binary to do nothing but verify that it's working correctly (something like "--version") is good. Optional. Defaults to no args. Raises: SystemExit with error if the process did not work.
juraj-google-style
def malloc(self, key, shape, dtype):
    """Allocate (or reuse) a shared-memory block and return its numpy view.

    Args:
        key (str): The key to identify the block.
        shape (list of int): The shape of the numpy array to allocate.
        dtype (type): The numpy data type (e.g. np.float32).

    Returns:
        np.ndarray: The numpy array that is positioned on the shared
        memory.
    """
    existing = self._memory.get(key)
    if existing is None or existing.shape != shape or existing.dtype != dtype:
        # Replace any stale block whose geometry no longer matches.
        self._memory[key] = Shmem(key, shape, dtype, self._uuid)
    return self._memory[key].np_array
Allocates a block of shared memory, and returns a numpy array whose data corresponds with that block. Args: key (str): The key to identify the block. shape (list of int): The shape of the numpy array to allocate. dtype (type): The numpy data type (e.g. np.float32). Returns: np.ndarray: The numpy array that is positioned on the shared memory.
codesearchnet
def usergroups_users_list(self, *, usergroup: str, **kwargs) -> SlackResponse:
    """List all users in a User Group.

    Args:
        usergroup (str): The encoded ID of the User Group to query,
            e.g. 'S0604QSJC'.
    """
    self._validate_xoxp_token()
    kwargs["usergroup"] = usergroup
    return self.api_call("usergroups.users.list", http_verb="GET", params=kwargs)
List all users in a User Group Args: usergroup (str): The encoded ID of the User Group to update. e.g. 'S0604QSJC'
juraj-google-style
def cross_product_compare(start, candidate1, candidate2):
    """Compare two relative changes by their cross-product.

    This is meant to be a way to determine which vector is more "inside"
    relative to ``start`` (helper for a simple convex hull routine).

    Args:
        start (numpy.ndarray): The start vector (1D, 2 elements).
        candidate1 (numpy.ndarray): The first candidate vector (1D, 2
            elements).
        candidate2 (numpy.ndarray): The second candidate vector (1D, 2
            elements).

    Returns:
        float: The cross product of the two differences.
    """
    return cross_product(candidate1 - start, candidate2 - start)
Compare two relative changes by their cross-product. This is meant to be a way to determine which vector is more "inside" relative to ``start``. .. note:: This is a helper for :func:`_simple_convex_hull`. Args: start (numpy.ndarray): The start vector (as 1D NumPy array with 2 elements). candidate1 (numpy.ndarray): The first candidate vector (as 1D NumPy array with 2 elements). candidate2 (numpy.ndarray): The second candidate vector (as 1D NumPy array with 2 elements). Returns: float: The cross product of the two differences.
codesearchnet
def _find_methods(cls, *names, **kwds): reverse = kwds.pop('reverse', False) assert not kwds, repr(kwds) cache = cls.__dict__.get('_find_methods_cache') if cache: hit = cache.get(names) if hit is not None: return hit else: cls._find_methods_cache = cache = {} methods = [] for c in cls.__mro__: for name in names: method = c.__dict__.get(name) if method is not None: methods.append(method) if reverse: methods.reverse() cache[names] = methods return methods
Compute a list of composable methods. Because this is a common operation and the class hierarchy is static, the outcome is cached (assuming that for a particular list of names the reversed flag is either always on, or always off). Args: *names: One or more method names. reverse: Optional flag, default False; if True, the list is reversed. Returns: A list of callable class method objects.
juraj-google-style
def get_parameter_vector(self, include_frozen=False):
    """Get an array of the parameter values in the correct order.

    Args:
        include_frozen (Optional[bool]): Should the frozen parameters be
            included in the returned value? (default: ``False``)
    """
    vector = self.parameter_vector
    if include_frozen:
        return vector
    # Drop frozen entries via the boolean mask of unfrozen parameters.
    return vector[self.unfrozen_mask]
Get an array of the parameter values in the correct order Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
juraj-google-style
def __init__(self, title='sdl2', x=lib.SDL_WINDOWPOS_CENTERED, y=lib.SDL_WINDOWPOS_CENTERED, w=640, h=480, flags=frozenset()):
    """Create a window with the specified position, dimensions, and flags.

    Args:
        title (str): The title of the window.
        x (int): The x position of the window.
        y (int): The y position of the window.
        w (int): The width of the window.
        h (int): The height of the window.
        flags (Set[WindowFlags]): The flags for the window.

    Raises:
        SDLError: If the window could not be created.
    """
    # Collapse the flag set into the SDL bitmask before creating the window.
    flag_mask = enumtools.get_mask(flags)
    window_ptr = lib.SDL_CreateWindow(
        title.encode('utf-8'), x, y, w, h, flag_mask)
    # check_ptr_err raises on a NULL pointer (creation failure).
    self._ptr = check_ptr_err(window_ptr)
Create a window with the specified position, dimensions, and flags. Args: title (str): The title of the window. x (int): The x postion of the window. y (int): The y position of the window. w (int): The width of the window. h (int): The height of the window. flags (Set[WindowFlags]): The flags for the window. Raises: SDLError: If the window could not be created.
juraj-google-style
def softmax(input_, labels=None, name=PROVIDED, loss_weight=None, per_example_weights=None):
    """Applies softmax and, if labels is not None, also adds a loss.

    Args:
        input_: A rank 2 Tensor or a Pretty Tensor holding the logits.
        labels: The target labels to learn as a float tensor. Use None to
            not include a training loss.
        name: The optional name.
        loss_weight: A scalar multiplier for the loss.
        per_example_weights: A Tensor with a weight per example.

    Returns:
        A tuple of a handle to the softmax and a handle to the loss
        tensor (None when no labels were given).

    Raises:
        ValueError: If the datatype is wrong.
    """
    if labels is None:
        # No labels: just the activation, no training loss.
        return SoftmaxResult(input_.softmax_activation(), None)
    full = input_.as_layer()
    activation = input_.softmax_activation()
    loss = full.cross_entropy(
        labels,
        name=name,
        loss_weight=loss_weight,
        per_example_weights=per_example_weights)
    return SoftmaxResult(activation, loss)
Applies softmax and if labels is not None, then it also adds a loss. Args: input_: A rank 2 Tensor or a Pretty Tensor holding the logits. labels: The target labels to learn as a float tensor. Use None to not include a training loss. name: The optional name. loss_weight: A scalar multiplier for the loss. per_example_weights: A Tensor with a weight per example. Returns: A tuple of the a handle to softmax and a handle to the loss tensor. Raises: ValueError: If the datatype is wrong.
codesearchnet
def _block_orth(self, projection_matrix):
    """Construct an orthogonal-kernel building block.

    Args:
        projection_matrix: A symmetric projection matrix of size n x n.

    Returns:
        dict: ``{0: projection_matrix, 1: identity - projection_matrix}``.
    """
    size = projection_matrix.shape.as_list()[0]
    identity = linalg_ops_impl.eye(size, dtype=self.dtype)
    return {0: projection_matrix, 1: identity - projection_matrix}
Construct a kernel. Used to construct an orthogonal kernel. Args: projection_matrix: A symmetric projection matrix of size n x n. Returns: [projection_matrix, (1 - projection_matrix)].
github-repos
def stream_realtime(self, stream, value):
    """Stream a realtime value as an IndividualReadingReport.

    If the streaming interface of the VirtualInterface this VirtualDevice
    is attached to is not opened, the realtime reading is silently dropped.

    Args:
        stream (int): The stream id to send.
        value (int): The stream value to send.
    """
    if not self.stream_iface_open:
        # Streaming interface closed: drop the reading.
        return
    reading = IOTileReading(0, stream, value)
    self.stream(IndividualReadingReport.FromReadings(self.iotile_id, [reading]))
Stream a realtime value as an IndividualReadingReport. If the streaming interface of the VirtualInterface this VirtualDevice is attached to is not opened, the realtime reading may be dropped. Args: stream (int): The stream id to send value (int): The stream value to send
codesearchnet
def idxmin(self, **kwargs):
    """Returns the first occurrence of the minimum over the requested axis.

    Returns:
        A new QueryCompiler object containing the index of the minimum of
        each column or row.
    """
    if self._is_transposed:
        # Flip the axis and delegate to the non-transposed representation.
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().idxmin(**kwargs)
    axis = kwargs.get("axis", 0)
    labels = self.index if axis == 0 else self.columns

    def idxmin_builder(df, **inner_kwargs):
        # Restore the real labels so idxmin returns label values rather
        # than positional defaults.
        if axis == 0:
            df.index = labels
        else:
            df.columns = labels
        return df.idxmin(**inner_kwargs)

    reduce_func = self._build_mapreduce_func(idxmin_builder, **kwargs)
    return self._full_axis_reduce(axis, reduce_func)
Returns the first occurrence of the minimum over requested axis. Returns: A new QueryCompiler object containing the minimum of each column or axis.
codesearchnet
def make_initializable_iterator(self, shared_name=None) -> iterator_ops.Iterator:
    """Creates an (uninitialized) iterator for elements of this dataset.

    The caller must run the iterator's initializer operation before using
    it.

    Args:
        shared_name: (Optional.) If non-empty, the returned iterator will
            be shared under the given name across multiple sessions that
            share the same devices.

    Returns:
        A `tf.data.Iterator` for elements of this dataset.
    """
    # Delegate to the private hook so subclasses can override behavior.
    return self._make_initializable_iterator(shared_name)
Creates an iterator for elements of this dataset. Note: The returned iterator will be in an uninitialized state, and you must run the `iterator.initializer` operation before using it: ```python # Building graph ... dataset = ... iterator = dataset.make_initializable_iterator() next_value = iterator.get_next() # This is a Tensor. # ... from within a session ... sess.run(iterator.initializer) try: while True: value = sess.run(next_value) ... except tf.errors.OutOfRangeError: pass ``` Args: shared_name: (Optional.) If non-empty, the returned iterator will be shared under the given name across multiple sessions that share the same devices (e.g. when using a remote server). Returns: A `tf.data.Iterator` for elements of this dataset. Raises: RuntimeError: If eager execution is enabled.
github-repos
def _ReadLine(self, file_object): if len(self._buffer) < self._buffer_size: content = file_object.read(self._buffer_size) content = content.decode(self._encoding) self._buffer = ''.join([self._buffer, content]) line, new_line, self._buffer = self._buffer.partition('\n') if not line and not new_line: line = self._buffer self._buffer = '' self._current_offset += len(line) if line.endswith('\r'): line = line[:-len('\r')] if new_line: line = ''.join([line, '\n']) self._current_offset += len('\n') return line
Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object.
juraj-google-style
def filter_by_moys(self, moys):
    """Filter the Data Collection based on a list of minutes of the year.

    Args:
        moys: A list of minutes of the year [0..8759 * 60].

    Returns:
        A new Data Collection with filtered data.
    """
    filtered_values, filtered_datetimes = self._filter_by_moys_slow(moys)
    new_collection = HourlyDiscontinuousCollection(
        self.header.duplicate(), filtered_values, filtered_datetimes)
    # Preserve the validation state of the source collection.
    new_collection._validated_a_period = self._validated_a_period
    return new_collection
Filter the Data Collection based on a list of minutes of the year. Args: moys: A List of minutes of the year [0..8759 * 60] Return: A new Data Collection with filtered data
juraj-google-style
def _count_righthand_zero_bits(number, bits): if number == 0: return bits return min(bits, _compat_bit_length(~number & (number - 1)))
Count the number of zero bits on the right hand side. Args: number: an integer. bits: maximum number of bits to count. Returns: The number of zero bits on the right hand side of the number.
juraj-google-style
def convert_elementwise_mul(
    params, w_name, scope_name, inputs, layers, weights, names
):
    """Convert elementwise multiplication.

    Args:
        params: dictionary with layer parameters
        w_name: name prefix in state_dict
        scope_name: pytorch scope name
        inputs: pytorch node inputs
        layers: dictionary with keras tensors
        weights: pytorch state_dict
        names: use short names for keras layers
    """
    print('Converting elementwise_mul ...')

    operand_a = layers[inputs[0]]
    operand_b = layers[inputs[1]]

    # Choose the keras layer name according to the naming policy.
    if names == 'short':
        tf_name = 'M' + random_string(7)
    elif names == 'keep':
        tf_name = w_name
    else:
        tf_name = w_name + str(random.random())

    def target_layer(pair):
        return tf.multiply(pair[0], pair[1])

    mul_layer = keras.layers.Lambda(target_layer, name=tf_name)
    layers[scope_name] = mul_layer([operand_a, operand_b])
Convert elementwise multiplication. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
juraj-google-style
def wait_for_other_workers(self):
    """Waits for other workers to reach the same call to this method.

    A no-op when no worker barrier was configured.

    Raises:
        ValueError: if `worker_barrier` is not passed to the __init__
            method.
    """
    if self._worker_barrier:
        self._worker_barrier.wait()
Waits for other workers to reach the same call to this method. Raises: ValueError: if `worker_barrier` is not passed to the __init__ method.
github-repos
def _LoadDataIntoCache(self, file_object, minimum_offset, read_all_data=False):
    """Reads and decompresses the data in the member.

    This function loads as much data as possible into the cache, up to
    UNCOMPRESSED_DATA_CACHE_SIZE bytes, discarding decompressed bytes that
    precede minimum_offset.

    Args:
        file_object (FileIO): file-like object.
        minimum_offset (int): offset into this member's uncompressed data
            at which the cache should start.
        read_all_data (bool): True if all the compressed data should be
            read from the member.
    """
    # The decompressor can only continue forward; if data before the current
    # uncompressed offset is requested, restart from the beginning.
    if minimum_offset < self._decompressor_state.uncompressed_offset:
        self._ResetDecompressorState()

    while not self.IsCacheFull() or read_all_data:
        decompressed_data = self._decompressor_state.Read(file_object)
        # No more decompressed data: end of the member reached.
        if not decompressed_data:
            break

        decompressed_data_length = len(decompressed_data)
        decompressed_end_offset = self._decompressor_state.uncompressed_offset
        decompressed_start_offset = (
            decompressed_end_offset - decompressed_data_length)

        data_to_add = decompressed_data
        added_data_start_offset = decompressed_start_offset

        # Chunks entirely before the requested minimum offset are skipped.
        if decompressed_start_offset < minimum_offset:
            data_to_add = None

        # For a chunk straddling the minimum offset, keep only its tail.
        if decompressed_start_offset < minimum_offset < decompressed_end_offset:
            data_add_offset = decompressed_end_offset - minimum_offset
            # BUGFIX: this must be a slice; indexing with a negative integer
            # returned a single byte value (an int on Python 3) instead of
            # the trailing data, breaking the b''.join below.
            data_to_add = decompressed_data[-data_add_offset:]
            added_data_start_offset = decompressed_end_offset - data_add_offset

        if not self.IsCacheFull() and data_to_add:
            self._cache = b''.join([self._cache, data_to_add])
            if self._cache_start_offset is None:
                self._cache_start_offset = added_data_start_offset
            if self._cache_end_offset is None:
                self._cache_end_offset = (
                    self._cache_start_offset + len(data_to_add))
            else:
                self._cache_end_offset += len(data_to_add)

        # Unused data means the member's compressed stream has ended; rewind
        # the file to just past the member and reset for the next member.
        unused_data = self._decompressor_state.GetUnusedData()
        if unused_data:
            seek_offset = -len(unused_data)
            file_object.seek(seek_offset, os.SEEK_CUR)
            self._ResetDecompressorState()
            break
Reads and decompresses the data in the member. This function loads as much data as possible into the cache, up to UNCOMPRESSED_DATA_CACHE_SIZE bytes. Args: file_object (FileIO): file-like object. minimum_offset (int): offset into this member's uncompressed data at which the cache should start. read_all_data (bool): True if all the compressed data should be read from the member.
codesearchnet
def ExpandRecursiveGlobs(cls, path, path_separator):
    """Expands recursive globs present in an artifact path.

    If a path ends in '**', with up to two optional digits such as '**10',
    the '**' will recursively match all files and zero or more directories
    from the specified path. The optional digits indicate the recursion
    depth; by default the recursion depth is 10 directories. If the glob
    is followed by the specified path segment separator, only directories
    and subdirectories will be matched.

    Args:
        path (str): path to be expanded.
        path_separator (str): path segment separator.

    Returns:
        list[str]: string path expanded for each glob.
    """
    escaped_separator = re.escape(path_separator)
    glob_regex = '(.*)?{0:s}\\*\\*(\\d{{1,2}})?({0:s})?$'.format(escaped_separator)
    match = re.search(glob_regex, path)
    if not match:
        # No recursive glob present: return the path unchanged.
        return [path]

    # A trailing separator restricts matching to directories.
    skip_first = bool(match.group(3))

    depth_digits = match.group(2)
    if depth_digits:
        iterations = int(depth_digits)
    else:
        iterations = cls._RECURSIVE_GLOB_LIMIT
        logger.warning(
            'Path "{0:s}" contains fully recursive glob, '
            'limiting to 10 levels'.format(path))

    return cls.AppendPathEntries(
        match.group(1), path_separator, iterations, skip_first)
Expands recursive like globs present in an artifact path. If a path ends in '**', with up to two optional digits such as '**10', the '**' will recursively match all files and zero or more directories from the specified path. The optional digits indicate the recursion depth. By default recursion depth is 10 directories. If the glob is followed by the specified path segment separator, only directories and subdirectories will be matched. Args: path (str): path to be expanded. path_separator (str): path segment separator. Returns: list[str]: String path expanded for each glob.
codesearchnet
class custom_gradient:
    """Decorator for custom gradients.

    Wraps a forward function so that calls are dispatched through
    ``CustomGradientFunction.apply``, which supplies the custom backward
    pass.

    Args:
        forward_fn: Forward pass function.
    """

    def __init__(self, forward_fn):
        # Keep a reference to the wrapped forward computation.
        self.forward_fn = forward_fn

    def __call__(self, *args, **kwargs):
        # NOTE(review): CustomGradientFunction appears to be an
        # autograd-style Function defined elsewhere — confirm its
        # ``apply`` signature expects the forward function first.
        return CustomGradientFunction.apply(self.forward_fn, *args, **kwargs)
Decorator for custom gradients. Args: forward_fn: Forward pass function.
github-repos
def _set_control_flow_context(self, ctx) -> None:
    """Sets the current control flow context.

    Args:
        ctx: a context object.
    """
    self._control_flow_context = ctx
Sets the current control flow context. Args: ctx: a context object.
github-repos
def Acf(poly, dist, N=None, **kws):
    """Auto-correlation function.

    Args:
        poly (Poly): Polynomial of interest. Must have ``len(poly) > N``.
        dist (Dist): Defines the space the correlation is taken on.
        N (int): The number of time steps apart included. If omitted, set
            to ``len(poly) // 2 + 1``.

    Returns:
        (numpy.ndarray): Auto-correlation of ``poly`` with shape ``(N,)``.
            Note that by definition ``Q[0] == 1``.

    Examples:
        >>> poly = chaospy.prange(10)[1:]
        >>> Z = chaospy.Uniform()
        >>> print(numpy.around(chaospy.Acf(poly, Z, 5), 4))
        [1.     0.9915 0.9722 0.9457 0.9127]
    """
    if N is None:
        # BUGFIX: use integer division; on Python 3, ``len(poly) / 2``
        # yields a float, which is not a valid argument for ``numpy.empty``
        # or ``range``.
        N = len(poly) // 2 + 1

    corr = Corr(poly, dist, **kws)
    out = numpy.empty(N)
    for n in range(N):
        # Average the n-th diagonal of the correlation matrix.
        out[n] = numpy.mean(corr.diagonal(n), 0)
    return out
Auto-correlation function. Args: poly (Poly): Polynomial of interest. Must have ``len(poly) > N``. dist (Dist): Defines the space the correlation is taken on. N (int): The number of time steps apart included. If omitted, set to ``len(poly)/2+1``. Returns: (numpy.ndarray) : Auto-correlation of ``poly`` with shape ``(N,)``. Note that by definition ``Q[0]=1``. Examples: >>> poly = chaospy.prange(10)[1:] >>> Z = chaospy.Uniform() >>> print(numpy.around(chaospy.Acf(poly, Z, 5), 4)) [1. 0.9915 0.9722 0.9457 0.9127]
codesearchnet
def save_q_df(self, state_key, action_key, q_value):
    """Insert or update a Q-Value in ``self.q_df``.

    Args:
        state_key: State.
        action_key: Action.
        q_value: Q-Value.

    Raises:
        TypeError: If the type of ``q_value`` is not float.
    """
    if not isinstance(q_value, float):
        raise TypeError('The type of q_value must be float.')

    new_row = pd.DataFrame(
        [(state_key, action_key, q_value)],
        columns=['state_key', 'action_key', 'q_value'])
    if self.q_df is None:
        self.q_df = new_row
    else:
        # Prepend the new row so drop_duplicates (keep='first') retains the
        # freshest value for an existing (state, action) pair.
        merged = pd.concat([new_row, self.q_df])
        self.q_df = merged.drop_duplicates(['state_key', 'action_key'])
Insert or update Q-Value in `self.q_df`. Args: state_key: State. action_key: Action. q_value: Q-Value. Exceptions: TypeError: If the type of `q_value` is not float.
codesearchnet