code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def write(self, output_buffer, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Write the data encoding the Locate response payload to a buffer.

    Args:
        output_buffer (stream): A data buffer in which to encode object
            data, supporting a write method.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
            defaults to KMIP 1.0.
    """
    # Encode the payload fields into a scratch buffer first so the total
    # payload length is known before the header is written.
    local_buffer = utils.BytearrayStream()

    if self._located_items:
        self._located_items.write(local_buffer, kmip_version=kmip_version)
    if self._unique_identifiers:
        for unique_identifier in self._unique_identifiers:
            unique_identifier.write(local_buffer, kmip_version=kmip_version)

    self.length = local_buffer.length()
    # Write the TTLV header, then the buffered field encodings.
    super(LocateResponsePayload, self).write(output_buffer, kmip_version=kmip_version)
    output_buffer.write(local_buffer.buffer)
Write the data encoding the Locate response payload to a buffer. Args: output_buffer (stream): A data buffer in which to encode object data, supporting a write method. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0.
codesearchnet
def clean(self, force: bool=False):
    """Clean all closed connections.

    Args:
        force: Clean connected and idle connections too.

    Coroutine (pre-async/await generator style).
    """
    assert (not self._closed)
    with (yield from self._host_pools_lock):
        # Iterate over a snapshot since entries may be deleted below.
        for (key, pool) in tuple(self._host_pools.items()):
            (yield from pool.clean(force=force))
            # Drop a host pool once it is empty and nobody is waiting on it.
            if ((not self._host_pool_waiters[key]) and pool.empty()):
                del self._host_pools[key]
                del self._host_pool_waiters[key]
Clean all closed connections. Args: force: Clean connected and idle connections too. Coroutine.
codesearchnet
def orthologize(self, species_id: str) -> 'BEL':
    """Orthologize BEL AST to given species_id.

    Will return original entity (ns:value) if no ortholog found.

    Args:
        species_id (str): species id to convert genes/rna/proteins into

    Returns:
        BEL: returns self
    """
    if (not self.ast):
        return self
    # Collect orthologs lazily on first use.
    # NOTE(review): rebinding ``self`` assumes collect_orthologs returns
    # the same (or an equivalent) BEL object -- confirm.
    if (not self.ast.collected_orthologs):
        self = self.collect_orthologs([species_id])
    self.ast.species = set()
    self.ast = bel_utils.orthologize(self.ast, self, species_id)
    return self
Orthologize BEL AST to given species_id Will return original entity (ns:value) if no ortholog found. Args: species_id (str): species id to convert genes/rna/proteins into Returns: BEL: returns self
codesearchnet
def tersoff_input(self, structure, periodic=False, uc=True, *keywords):
    """Gets a GULP input with Tersoff potential for an oxide structure.

    Args:
        structure: pymatgen.core.structure.Structure
        periodic (Default=False): Flag denoting whether periodic
            boundary conditions are used
        uc (Default=True): Unit Cell Flag.
        keywords: GULP first line keywords.
    """
    gin = self.keyword_line(*keywords)
    # Cell and fractional-coordinate flags follow periodicity; shells are
    # disabled for both anions and cations with the Tersoff potential.
    gin += self.structure_lines(
        structure,
        cell_flg=periodic,
        frac_flg=periodic,
        anion_shell_flg=False,
        cation_shell_flg=False,
        symm_flg=not uc
    )
    gin += self.tersoff_potential(structure)
    return gin
Gets a GULP input with Tersoff potential for an oxide structure Args: structure: pymatgen.core.structure.Structure periodic (Default=False): Flag denoting whether periodic boundary conditions are used library (Default=None): File containing the species and potential. uc (Default=True): Unit Cell Flag. keywords: GULP first line keywords.
juraj-google-style
def get_dm_channel(self, userid):
    """Resolve a Slack user id to that user's direct-message channel id.

    Args:
        userid (string): Slack userid to look up.

    Returns:
        string: DM channel ID of the user.
    """
    response = self.slack_client.api_call('im.open', user=userid)
    channel_info = response['channel']
    return channel_info['id']
Perform a lookup of users to resolve a userid to a DM channel Args: userid (string): Slack userid to lookup. Returns: string: DM channel ID of user
juraj-google-style
def is_mobile(user_agent):
    """Checks if the user browser from the given user agent is mobile.

    Args:
        user_agent: A given user agent string (may be None/empty).

    Returns:
        Truthy (a regex match object) if the browser appears mobile,
        False otherwise.
    """
    if user_agent:
        # reg_b matches anywhere in the agent string; reg_v only inspects
        # the first four characters (vendor-prefix style check).
        b = reg_b.search(user_agent)
        v = reg_v.search(user_agent[0:4])
        # NOTE(review): returns a match object rather than a strict bool;
        # truthy/falsy for callers, but not ``is True``.
        return (b or v)
    return False
Checks if the user browser from the given user agent is mobile. Args: user_agent: A given user agent. Returns: True if the browser from the user agent is mobile.
codesearchnet
def date_to_datetime(self, time_input, tz=None):
    """Convert ISO 8601 and other date strings to datetime.datetime type.

    Args:
        time_input (string): The time input string.
        tz (string): The time zone name for the returned datetime.

    Returns:
        (datetime.datetime): Python datetime.datetime object, or None if
            the input could not be parsed at all.
    """
    dt = None
    try:
        dt = parser.parse(time_input)
        if tz is not None and tz != dt.tzname():
            # Attach the default timezone before converting naive values.
            if dt.tzinfo is None:
                dt = self._replace_timezone(dt)
            dt = dt.astimezone(timezone(tz))
    # One handler replaces the three identical silent except blocks;
    # a best-effort parse failure leaves dt as-is (possibly None).
    except (IndexError, TypeError, ValueError):
        pass
    return dt
Convert ISO 8601 and other date strings to datetime.datetime type. Args: time_input (string): The time input string (see formats above). tz (string): The time zone for the returned data. Returns: (datetime.datetime): Python datetime.datetime object.
juraj-google-style
def add_api_key(key, value):
    """Adds a key to the bot's data.

    Args:
        key: The name of the key to add
        value: The value for the key
    """
    # Reject empty keys/values instead of only logging and continuing:
    # the original fell through and stored the bad entry anyway.
    if key is None or key == '':
        logger.error('Key cannot be empty')
        return
    if value is None or value == '':
        logger.error('Value cannot be empty')
        return

    from .. import datatools
    data = datatools.get_data()

    if 'keys' not in data['discord']:
        data['discord']['keys'] = {}

    is_key_new = key not in data['discord']['keys']
    if not is_key_new and data['discord']['keys'][key] == value:
        # Nothing to do; avoid a redundant write.
        logger.info("API key '{}' already has value '{}'".format(key, value))
        return

    data['discord']['keys'][key] = value
    datatools.write_data(data)

    key_text = 'added' if is_key_new else 'updated'
    logger.info("API key '{}' {} with value '{}'".format(key, key_text, value))
Adds a key to the bot's data Args: key: The name of the key to add value: The value for the key
codesearchnet
def dump(collection: BioCCollection, fp, pretty_print: bool = True):
    """Serialize ``collection`` as a BioC formatted stream to ``fp``.

    Args:
        collection: the BioC collection
        fp: a ``.write()``-supporting file-like object
        pretty_print: enables formatted XML
    """
    serialized = dumps(collection, pretty_print)
    fp.write(serialized)
Serialize ``collection`` as a BioC formatted stream to ``fp``. Args: collection: the BioC collection fp: a ``.write()``-supporting file-like object pretty_print: enables formatted XML
juraj-google-style
def tile_and_concat(image, latent, concat_latent=True):
    """Tile latent and concatenate to image across depth.

    Args:
        image: 4-D Tensor, (batch_size X height X width X channels)
        latent: 2-D Tensor, (batch_size X latent_dims)
        concat_latent: If set to False, the image is returned as is.

    Returns:
        concat_latent: 4-D Tensor, (batch_size X height X width X
            channels+1) latent tiled and concatenated to the image across
            the channels.
    """
    if not concat_latent:
        return image
    image_shape = common_layers.shape_list(image)
    latent_shape = common_layers.shape_list(latent)
    height, width = image_shape[1], image_shape[2]
    latent_dims = latent_shape[1]
    # NOTE(review): the source line was corrupted ("height_multiples =
    # height"); integer division is required so the tiled latent fits the
    # image height -- confirm against upstream tensor2tensor.
    height_multiples = height // latent_dims
    pad = height - (height_multiples * latent_dims)
    latent = tf.reshape(latent, (-1, latent_dims, 1, 1))
    latent = tf.tile(latent, (1, height_multiples, width, 1))
    # Pad the tiled latent up to the full image height (the tf.pad call
    # was truncated in the source -- TODO confirm the padding split).
    latent = tf.pad(latent, [[0, 0], [pad // 2, pad - pad // 2], [0, 0], [0, 0]])
    return tf.concat([image, latent], axis=-1)
Tile latent and concatenate to image across depth. Args: image: 4-D Tensor, (batch_size X height X width X channels) latent: 2-D Tensor, (batch_size X latent_dims) concat_latent: If set to False, the image is returned as is. Returns: concat_latent: 4-D Tensor, (batch_size X height X width X channels+1) latent tiled and concatenated to the image across the channels.
juraj-google-style
def get_body(name):
    """Retrieve the Body structure of a JPL .bsp file object.

    Args:
        name (str)

    Return:
        :py:class:`~beyond.constants.Body`
    """
    body = Pck()[name]
    # Attach a propagate callable so the body can produce its orbit at
    # any requested date on demand.
    body.propagate = (lambda date: get_orbit(name, date))
    return body
Retrieve the Body structure of a JPL .bsp file object Args: name (str) Return: :py:class:`~beyond.constants.Body`
codesearchnet
def __init__(self, string_or_filelike, parser_delegate):
    """Construct the parser.

    Args:
        string_or_filelike: Either the string to parse, or a file-like
            object supporting the readline method.
        parser_delegate: An instance of the ParserDelegate class, that
            will be responsible for constructing appropriate objects for
            configurable references and macros.
    """
    if hasattr(string_or_filelike, 'readline'):
        line_reader = string_or_filelike.readline
    else:
        # Assume a string; wrap it in a StringIO so lines can be read.
        if six.PY2:
            string_or_filelike = unicode(string_or_filelike)
        string_io = io.StringIO(string_or_filelike)
        line_reader = string_io.readline

    def _text_line_reader():
        # tokenize expects text; decode byte lines as UTF-8.
        line = line_reader()
        if isinstance(line, bytes):
            line = line.decode('utf8')
        return line

    self._token_generator = tokenize.generate_tokens(_text_line_reader)
    self._filename = getattr(string_or_filelike, 'name', None)
    self._current_token = None
    self._delegate = parser_delegate
    # Prime the parser with the first token.
    self._advance_one_token()
Construct the parser. Args: string_or_filelike: Either the string to parse, or a file-like object supporting the readline method. parser_delegate: An instance of the ParserDelegate class, that will be responsible for constructing appropriate objects for configurable references and macros.
juraj-google-style
def _slice_shape(self, start, stop):
    """Returns a shape self[start:stop].

    If start == 0, then this truncates dimensions after stop.
    If start != 0, then this will return a shape with
    num_row_partitions == 0.  See __getitem__.

    Args:
        start: the first dimension. 0 <= start <= rank
        stop: the last dimension (exclusive). 0 <= stop <= rank
    """
    if stop <= start:
        # Empty slice: a scalar shape.
        return DynamicRaggedShape._from_inner_shape([])
    elif start == 0:
        if stop <= self.num_row_partitions:
            # The slice ends inside the ragged (row-partitioned) part.
            if stop == 1:
                return DynamicRaggedShape._from_inner_shape([self.row_partitions[0].nrows()])
            new_row_partitions = self.row_partitions[:stop - 1]
            new_inner_shape = [new_row_partitions[-1].nvals()]
            return DynamicRaggedShape(new_row_partitions, new_inner_shape)
        else:
            # The slice ends inside the dense inner shape.
            if self.rank is None:
                new_inner_rank = stop - self.num_row_partitions
                new_inner_shape = self.inner_shape[:new_inner_rank]
                return DynamicRaggedShape(row_partitions=self.row_partitions, inner_shape=new_inner_shape, static_inner_shape=None, validate=False)
            elif self.rank <= stop:
                # Slicing past the end returns the shape unchanged.
                return self
            new_inner_rank = stop - self.num_row_partitions
            new_inner_shape = self.inner_shape[:new_inner_rank]
            return DynamicRaggedShape(row_partitions=self.row_partitions, inner_shape=new_inner_shape, static_inner_shape=tensor_shape.TensorShape([None] * new_inner_rank), validate=False)
    else:
        # start > 0: first truncate to [0, stop), then require all the
        # retained partitions to be uniform so they can be flattened.
        if self.rank is None or stop < self.rank:
            partial = self._slice_shape(0, stop)
        else:
            partial = self
        for x in partial.row_partitions:
            if not x.is_uniform():
                raise ValueError('All relevant dimensions must be uniform')
        if partial.rank is None:
            raise NotImplementedError('__getitem__[start:stop] where start > 0 not implemented')
        return DynamicRaggedShape._from_inner_shape(partial._with_num_row_partitions(0).inner_shape[start:])
Returns a shape self[start:stop]. If start == 0, then this truncates dimensions after stop. If start != 0, then this will return a shape with num_row_partitions == 0. See __getitem__. Args: start: the first dimension. 0 <= start <= rank stop: the last dimension (exclusive). 0 <= stop <= rank
github-repos
def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):
    """scan Joiner

    Args:
        xEUI: Joiner's EUI-64
        strPSKd: Joiner's PSKd for commissioning

    Returns:
        True: successful to add Joiner's steering data
        False: fail to add Joiner's steering data
    """
    # print() calls so the module also runs on Python 3 (the original
    # used Python 2 print statements).
    print('%s call scanJoiner' % self.port)
    if not isinstance(xEUI, str):
        eui64 = self.__convertLongToString(xEUI)
        # Zero-pad the EUI-64 to its full 16 hex digits.
        if len(eui64) < 16:
            eui64 = eui64.zfill(16)
        print(eui64)
    else:
        eui64 = xEUI
    timeout = 500
    cmd = WPANCTL_CMD + 'commissioner joiner-add %s %s %s' % (eui64, str(timeout), strPSKd)
    print(cmd)
    if not self.isActiveCommissioner:
        self.startCollapsedCommissioner()
    # The command reports failure via the first element of its output.
    return self.__sendCommand(cmd)[0] != 'Fail'
scan Joiner Args: xEUI: Joiner's EUI-64 strPSKd: Joiner's PSKd for commissioning Returns: True: successful to add Joiner's steering data False: fail to add Joiner's steering data
juraj-google-style
def _export_files(self, bq: bigquery_tools.BigQueryWrapper, element: 'ReadFromBigQueryRequest', table_reference: TableReference):
    """Runs a BigQuery export job.

    Args:
        bq: BigQuery wrapper used to issue the export job.
        element: the read request being served.
        table_reference: the table to export.

    Returns:
        bigquery.TableSchema instance, a list of FileMetadata instances
    """
    job_labels = self._get_bq_metadata().add_additional_bq_job_labels(self.bigquery_job_labels)
    export_job_name = bigquery_tools.generate_bq_job_name(self._job_name, self._source_uuid, bigquery_tools.BigQueryJobTypes.EXPORT, element.obj_id)
    temp_location = self.options.view_as(GoogleCloudOptions).temp_location
    gcs_location = bigquery_export_destination_uri(self.gcs_location, temp_location, '%s%s' % (self._source_uuid, element.obj_id))
    try:
        if self.use_json_exports:
            job_ref = bq.perform_extract_job([gcs_location], export_job_name, table_reference, bigquery_tools.FileFormat.JSON, project=self._get_project(), job_labels=job_labels, include_header=False)
        else:
            # AVRO exports keep logical types (timestamps etc.) intact.
            job_ref = bq.perform_extract_job([gcs_location], export_job_name, table_reference, bigquery_tools.FileFormat.AVRO, project=self._get_project(), include_header=False, job_labels=job_labels, use_avro_logical_types=True)
        bq.wait_for_bq_job(job_ref)
    except Exception as exn:
        # Message reconstructed: the source line was truncated after
        # "https:" -- TODO confirm the exact URL against upstream Beam.
        logging.warning(
            'Error exporting table: %s. Note that external tables cannot '
            'be exported: '
            'https://cloud.google.com/bigquery/docs/external-tables',
            exn)
        raise
    metadata_list = FileSystems.match([gcs_location])[0].metadata_list
    if isinstance(table_reference, ValueProvider):
        # Resolve value providers lazily at execution time.
        table_ref = bigquery_tools.parse_table_reference(element.table, project=self._get_project())
    else:
        table_ref = table_reference
    table = bq.get_table(table_ref.projectId, table_ref.datasetId, table_ref.tableId)
    return (table.schema, metadata_list)
Runs a BigQuery export job. Returns: bigquery.TableSchema instance, a list of FileMetadata instances
github-repos
def _make_env(resultdir=None):
    """Loads the env from `resultdir` if not `None` or makes a new one.

    An Enos environment handles all specific variables of an experiment.
    If `resultdir` is `None` a fresh environment is returned; otherwise
    the environment stored in that directory is loaded and its
    configuration file re-read, letting the user update the
    configuration between phases.

    Args:
        resultdir (str): directory path to load the env from.
    """
    env = {'config': {}, 'resultdir': '', 'config_file': '', 'nodes': {}, 'phase': '', 'user': '', 'cwd': os.getcwd()}
    if resultdir:
        env_path = os.path.join(resultdir, 'env')
        if os.path.isfile(env_path):
            with open(env_path, 'r') as f:
                # safe_load: these are plain-data config files, and
                # yaml.load without an explicit Loader is unsafe (and
                # rejected by PyYAML >= 6).
                env.update(yaml.safe_load(f))
            logger.debug('Loaded environment %s', env_path)
        # Re-read the configuration file so user edits between phases
        # are picked up.
        if ('config_file' in env) and (env['config_file'] is not None):
            if os.path.isfile(env['config_file']):
                with open(env['config_file'], 'r') as f:
                    env['config'].update(yaml.safe_load(f))
                logger.debug('Reloaded config %s', env['config'])
    return env
Loads the env from `resultdir` if not `None` or makes a new one. An Enos environment handles all specific variables of an experiment. This function either generates a new environment or loads a previous one. If the value of `resultdir` is `None`, then this function makes a new environment and return it. If the value is a directory path that contains an Enos environment, then this function loads and returns it. In case of a directory path, this function also rereads the configuration file (the reservation.yaml) and reloads it. This lets the user update his configuration between each phase. Args: resultdir (str): directory path to load the env from.
codesearchnet
def strides(self) -> List[int]:
    """Returns the strides tensor array for this mesh.

    If the mesh shape is `[a, b, c, d]`, then the strides array is
    `[b*c*d, c*d, d, 1]`.  This is useful for computing local device
    offsets given a device ID:
    `(device_id // mesh.strides) % mesh.shape`.

    Returns:
        The mesh strides as an integer tensor.
    """
    return _compute_mesh_strides(self.shape())
Returns the strides tensor array for this mesh. If the mesh shape is `[a, b, c, d]`, then the strides array can be computed as `[b*c*d, c*d, d, 1]`. This array can be useful in computing local device offsets given a device ID. Using the same example, the device coordinates of the mesh can be computed as: ``` [(device_id / (b*c*d)) % a, (device_id / (c*d)) % b, (device_id / (d)) % c, (device_id) % d] ``` This is the same as `(device_id // mesh.strides) % mesh.shape`. Returns: The mesh strides as an integer tensor.
github-repos
def content(self, request, id):
    """Returns the content of the gist.

    Arguments:
        request: an initial request object
        id: the gist identifier

    Returns:
        A dict containing the contents of each file in the gist
    """
    gist = self.send(request, id).json()

    def decode(blob):
        # Gist file bodies arrive base64-encoded.
        return base64.b64decode(blob).decode('utf-8')

    return {name: decode(info['content'])
            for name, info in gist['files'].items()}
Returns the content of the gist Arguments: request: an initial request object id: the gist identifier Returns: A dict containing the contents of each file in the gist
codesearchnet
def _check_triple_quotes(self, quote_record):
    """Check if the triple quote from tokenization is valid.

    Args:
        quote_record: a tuple containing the info about the string from
            tokenization, giving the (token, quote, row number, column).
    """
    (_, triple, row, col) = quote_record
    # Compare against the quote style configured for this checker; any
    # mismatch is reported as an invalid triple quote.
    if (triple != TRIPLE_QUOTE_OPTS.get(self.config.triple_quote)):
        self._invalid_triple_quote(triple, row, col)
Check if the triple quote from tokenization is valid. Args: quote_record: a tuple containing the info about the string from tokenization, giving the (token, quote, row number, column).
codesearchnet
def Get(self, path, follow_symlink=True):
    """Stats given file or returns a cached result if available.

    Args:
        path: A path to the file to perform `stat` on.
        follow_symlink: True if `stat` of a symlink should be returned
            instead of a file that it points to. For non-symlinks this
            setting has no effect.

    Returns:
        `Stat` object corresponding to the given path.
    """
    key = self._Key(path=path, follow_symlink=follow_symlink)
    try:
        return self._cache[key]
    except KeyError:
        value = Stat.FromPath(path, follow_symlink=follow_symlink)
        self._cache[key] = value

        # For a non-symlink the result is identical whether or not
        # symlinks are followed, so prime the other cache entry too.
        if ((not follow_symlink) and (not value.IsSymlink())):
            self._cache[self._Key(path=path, follow_symlink=True)] = value

        return value
Stats given file or returns a cached result if available. Args: path: A path to the file to perform `stat` on. follow_symlink: True if `stat` of a symlink should be returned instead of a file that it points to. For non-symlinks this setting has no effect. Returns: `Stat` object corresponding to the given path.
codesearchnet
def __setitem__(self, key, value):
    """Set an existing or new key, value association.

    Args:
        key (str): The dictionary key.
        value (str): The dictionary value
    """
    self.rdb.hset(self.session_hash, key, value)
    # Every write refreshes the session's time-to-live.
    self.rdb.expire(self.session_hash, self.ttl)
Set an existing or new key, value association. Args: key (str): The dictionary key. value (str): The dictionary value
juraj-google-style
def get_day_end(config):
    """Get the day end time given the day start, assuming a full 24h day.

    Args:
        config (dict): Config dict; only ``day_start`` is used.

    Returns:
        datetime.time: The time one second before ``day_start``.
    """
    start = datetime.datetime.combine(datetime.date.today(), config['day_start'])
    one_second = datetime.timedelta(seconds=1)
    return (start - one_second).time()
Get the day end time given the day start. This assumes full 24h day. Args: config (dict): Config dict. Needed to extract ``day_start``. Note: This is merely a convenience function so we do not have to deduce this from ``day_start`` by hand all the time.
juraj-google-style
def push(self, value):
    """Store a new value for the given stream.

    Args:
        value (IOTileReading): The value to store.  The stream parameter
            must have the correct value.

    Raises:
        StorageFullError: If the target buffer is already full.
    """
    stream = DataStream.FromEncoded(value.stream)

    if (stream.stream_type == DataStream.OutputType):
        # Output streams go to the bounded streaming buffer.
        if (len(self.streaming_data) == self.streaming_length):
            raise StorageFullError('Streaming buffer full')
        self.streaming_data.append(value)
    else:
        # All other streams go to the bounded storage buffer.
        if (len(self.storage_data) == self.storage_length):
            raise StorageFullError('Storage buffer full')
        self.storage_data.append(value)
Store a new value for the given stream. Args: value (IOTileReading): The value to store. The stream parameter must have the correct value
codesearchnet
def cancel(self, queue):
    """Cancel the consumer for a queue.

    Args:
        queue (str): The name of the queue the consumer is subscribed to.

    Returns:
        defer.Deferred: A Deferred that fires when the consumer is
            canceled, or None if the consumer was already canceled.  Wrap
            the call in :func:`.defer.maybeDeferred` to always receive a
            Deferred.
    """
    # NOTE(review): this is a Twisted generator-style coroutine; the
    # ``@defer.inlineCallbacks`` decorator appears to have been lost in
    # extraction -- confirm against the original source.
    try:
        consumer = self._consumers[queue]
        yield consumer.channel.basic_cancel(consumer_tag=consumer.tag)
    except pika.exceptions.AMQPChannelError:
        # The channel already went away; nothing left to cancel.
        pass
    except KeyError:
        # No consumer registered for this queue.
        defer.returnValue(None)
    try:
        yield consumer.channel.close()
    except pika.exceptions.AMQPChannelError:
        pass
    del self._consumers[queue]
Cancel the consumer for a queue. Args: queue (str): The name of the queue the consumer is subscribed to. Returns: defer.Deferred: A Deferred that fires when the consumer is canceled, or None if the consumer was already canceled. Wrap the call in :func:`.defer.maybeDeferred` to always receive a Deferred.
juraj-google-style
def __tf_tracing_type__(self, context: TracingContext) -> TraceType:
    """Returns the tracing type of this object.

    The tracing type is used to build the signature of a tf.function
    when traced, and to match arguments with existing signatures.  When
    a Function object is called, tf.function looks at the tracing type
    of the call arguments.  If an existing signature of matching type
    exists, it will be used.  Otherwise, a new function is traced, and
    its signature will use the tracing type of the call arguments.

    Args:
        context: a context reserved for internal/future usage.

    Returns:
        The tracing type of this object.
    """
Returns the tracing type of this object. The tracing type is used to build the signature of a tf.function when traced, and to match arguments with existing signatures. When a Function object is called, tf.function looks at the tracing type of the call arguments. If an existing signature of matching type exists, it will be used. Otherwise, a new function is traced, and its signature will use the tracing type of the call arguments. Args: context: a context reserved for internal/future usage. Returns: The tracing type of this object.
github-repos
def add_answer(self, vote, rationale):
    """Add an answer.

    Args:
        vote (int): the option that the student voted for
        rationale (str): the reason why the student voted for the option
    """
    answer = {VOTE_KEY: vote, RATIONALE_KEY: rationale}
    self.raw_answers.append(answer)
Add an answer Args: vote (int): the option that the student voted for rationale (str): the reason why the student voted for the option
codesearchnet
def scale(self, scalar, ignored_variables=None, ignored_interactions=None, ignore_offset=False):
    """Multiply by the specified scalar all the biases and offset of a
    binary quadratic model.

    Args:
        scalar (number): Value by which to scale the energy range of the
            binary quadratic model.
        ignored_variables (iterable, optional): Biases associated with
            these variables are not scaled.
        ignored_interactions (iterable[tuple], optional): As an iterable
            of 2-tuples. Biases associated with these interactions are
            not scaled.
        ignore_offset (bool, default=False): If True, the offset is not
            scaled.
    """
    # Normalize the "ignored" arguments to containers so the membership
    # tests below are cheap.
    if (ignored_variables is None):
        ignored_variables = set()
    elif (not isinstance(ignored_variables, abc.Container)):
        ignored_variables = set(ignored_variables)
    if (ignored_interactions is None):
        ignored_interactions = set()
    elif (not isinstance(ignored_interactions, abc.Container)):
        ignored_interactions = set(ignored_interactions)

    linear = self.linear
    for v in linear:
        if (v in ignored_variables):
            continue
        linear[v] *= scalar

    quadratic = self.quadratic
    for (u, v) in quadratic:
        # Interactions may be listed in either orientation.
        if (((u, v) in ignored_interactions) or ((v, u) in ignored_interactions)):
            continue
        quadratic[(u, v)] *= scalar

    if (not ignore_offset):
        self.offset *= scalar

    # Keep the SPIN/BINARY counterpart (if any) in sync.
    try:
        self._counterpart.scale(scalar, ignored_variables=ignored_variables, ignored_interactions=ignored_interactions)
    except AttributeError:
        # No counterpart has been created yet.
        pass
Multiply by the specified scalar all the biases and offset of a binary quadratic model. Args: scalar (number): Value by which to scale the energy range of the binary quadratic model. ignored_variables (iterable, optional): Biases associated with these variables are not scaled. ignored_interactions (iterable[tuple], optional): As an iterable of 2-tuples. Biases associated with these interactions are not scaled. ignore_offset (bool, default=False): If True, the offset is not scaled. Examples: This example creates a binary quadratic model and then scales it to half the original energy range. >>> import dimod ... >>> bqm = dimod.BinaryQuadraticModel({'a': -2.0, 'b': 2.0}, {('a', 'b'): -1.0}, 1.0, dimod.SPIN) >>> bqm.scale(0.5) >>> bqm.linear['a'] -1.0 >>> bqm.quadratic[('a', 'b')] -0.5 >>> bqm.offset 0.5
codesearchnet
def _get_variation_id(value, capital=False):
    """Convert an integer value to a character: a-z, then double aa-zz etc.

    Args:
        value (int): integer index we're looking up
        capital (bool): whether we convert to capitals or not

    Returns (str): alphanumeric representation of the index
    """
    value = int(value)

    # Find how many letters the id needs (base_power) and where that
    # letter-count's range of indices starts.
    base_power = base_start = base_end = 0
    while value >= base_end:
        base_power += 1
        base_start = base_end
        base_end += pow(26, base_power)
    base_index = value - base_start

    # Emit the letters, least-significant position last.
    alphas = ['a'] * base_power
    for index in range(base_power - 1, -1, -1):
        alphas[index] = chr(97 + (base_index % 26))
        # BUG FIX: floor division.  The original ``base_index /= 26``
        # produced a float, silently losing precision (wrong ids) once
        # identifiers exceed ~11 letters.
        base_index //= 26

    characters = ''.join(alphas)
    return characters.upper() if capital else characters
Convert an integer value to a character. a-z then double aa-zz etc Args: value (int): integer index we're looking up capital (bool): whether we convert to capitals or not Returns (str): alphanumeric representation of the index
juraj-google-style
def _infer(self, request):
    """Returns JSON inference results for the updated examples.

    Args:
        request: A request that should contain 'inference_address',
            'model_name', 'model_type', 'model_version',
            'model_signature' and 'label_vocab_path'.

    Returns:
        An HTTP response whose JSON body holds the inference results and
        label vocabulary, or an error response for invalid requests.
    """
    label_vocab = inference_utils.get_label_vocab(
        request.args.get('label_vocab_path'))
    try:
        if request.method != 'GET':
            logger.error('%s requests are forbidden.', request.method)
            return http_util.Respond(request, {'error': 'invalid non-GET request'}, 'application/json', code=405)
        (inference_addresses, model_names, model_versions, model_signatures) = self._parse_request_arguments(request)
        # Only run inference for examples changed since the last call.
        indices_to_infer = sorted(self.updated_example_indices)
        examples_to_infer = [self.examples[index] for index in indices_to_infer]
        infer_objs = []
        # One ServingBundle (and one inference run) per configured model.
        for model_num in xrange(len(inference_addresses)):
            serving_bundle = inference_utils.ServingBundle(
                inference_addresses[model_num], model_names[model_num],
                request.args.get('model_type'), model_versions[model_num],
                model_signatures[model_num],
                request.args.get('use_predict') == 'true',
                request.args.get('predict_input_tensor'),
                request.args.get('predict_output_tensor'))
            infer_objs.append(inference_utils.run_inference_for_inference_results(
                examples_to_infer, serving_bundle))
        resp = {'indices': indices_to_infer, 'results': infer_objs}
        # All pending examples have now been inferred.
        self.updated_example_indices = set()
        return http_util.Respond(request, {'inferences': json.dumps(resp), 'vocab': json.dumps(label_vocab)}, 'application/json')
    except common_utils.InvalidUserInputError as e:
        return http_util.Respond(request, {'error': e.message}, 'application/json', code=400)
    except AbortionError as e:
        return http_util.Respond(request, {'error': e.details}, 'application/json', code=400)
Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'inference_address', 'model_name', 'model_type, 'model_version', 'model_signature' and 'label_vocab_path'. Returns: A list of JSON objects, one for each chart.
juraj-google-style
def remove_species(self, species):
    """Remove all occurrences of several species from a structure.

    Args:
        species: Sequence of species to remove, e.g., ["Li", "Na"].
    """
    new_sites = []
    # Normalize the inputs to Element/Species objects for comparison.
    species = [get_el_sp(s) for s in species]

    for site in self._sites:
        # Keep only the occupancies that are not being removed.
        new_sp_occu = {sp: amt for (sp, amt) in site.species.items() if (sp not in species)}
        if (len(new_sp_occu) > 0):
            new_sites.append(PeriodicSite(new_sp_occu, site.frac_coords, self._lattice, properties=site.properties))
    # Sites whose occupancy became empty are dropped entirely.
    self._sites = new_sites
Remove all occurrences of several species from a structure. Args: species: Sequence of species to remove, e.g., ["Li", "Na"].
codesearchnet
def __init__(self, script_hash=None, key=None):
    """Create an instance.

    Args:
        script_hash (UInt160):
        key (bytes):
    """
    self.ScriptHash = script_hash
    self.Key = key
Create an instance. Args: script_hash (UInt160): key (bytes):
juraj-google-style
def __pad_value(value, pad_len_multiple, pad_char):
    """Add padding characters to the value if needed.

    Args:
        value: The string value to be padded.
        pad_len_multiple: Pad the result so its length is a multiple of
            pad_len_multiple.
        pad_char: The character to use for padding.

    Returns:
        The string value with padding characters added.
    """
    assert pad_len_multiple > 0
    assert len(pad_char) == 1
    remainder = len(value) % pad_len_multiple
    if remainder == 0:
        # Already aligned; nothing to add.
        return value
    return value + pad_char * (pad_len_multiple - remainder)
Add padding characters to the value if needed. Args: value: The string value to be padded. pad_len_multiple: Pad the result so its length is a multiple of pad_len_multiple. pad_char: The character to use for padding. Returns: The string value with padding characters added.
juraj-google-style
def stop(self, consumer):
    """Called when the worker received a request to terminate.

    Upon the termination of the worker, the workflows for all running
    jobs are stopped gracefully.

    Args:
        consumer (Consumer): Reference to the consumer object that
            handles messages from the broker.
    """
    stopped_workflows = []
    for request in [r for r in consumer.controller.state.active_requests]:
        job = AsyncResult(request.id)
        workflow_id = job.result['workflow_id']
        # Send at most one stop signal per workflow, even when several
        # active jobs belong to the same workflow.
        if (workflow_id not in stopped_workflows):
            client = Client(SignalConnection(**consumer.app.user_options['config'].signal, auto_connect=True), request_key=workflow_id)
            client.send(Request(action='stop_workflow'))
            stopped_workflows.append(workflow_id)
This function is called when the worker received a request to terminate. Upon the termination of the worker, the workflows for all running jobs are stopped gracefully. Args: consumer (Consumer): Reference to the consumer object that handles messages from the broker.
codesearchnet
def get_soundcloud_data(url):
    """Scrapes a SoundCloud page for a track's important information.

    Args:
        url: URL of the SoundCloud track page.

    Returns:
        dict: of audio data ('title' and 'artist').
    """
    data = {}
    request = requests.get(url)
    # The page <title> has the form "<track> by <artist> | ...".
    title_tag = request.text.split('<title>')[1].split('</title')[0]
    data['title'] = title_tag.split(' by ')[0].strip()
    data['artist'] = title_tag.split(' by ')[1].split('|')[0].strip()
    return data
Scrapes a SoundCloud page for a track's important information. Returns: dict: of audio data
codesearchnet
def translate_ostat(ostat):
    """Translate the OSTAT field to an integer.

    As of 2018-02-26, works on all E. coli models. Untested on other
    pre-made organism models.

    Args:
        ostat (str): Predicted oligomeric state of the PDB file

    Returns:
        int: Translated string to integer

    Raises:
        ValueError: If an unrecognized state's "-" suffix is not an
            integer.
    """
    # Known oligomeric-state names, replacing the long elif chain.
    named_states = {
        'monomer': 1,
        'homo-dimer': 2,
        'homo-trimer': 3,
        'homo-tetramer': 4,
        'homo-pentamer': 5,
        'homo-hexamer': 6,
        'homo-heptamer': 7,
        'homo-octamer': 8,
    }
    ostat_lower = ostat.strip().lower()
    if ostat_lower in named_states:
        return named_states[ostat_lower]
    # Fall back to states of the form "<prefix>-<n>".
    return int(ostat_lower.split('-')[1])
Translate the OSTAT field to an integer. As of 2018-02-26, works on all E. coli models. Untested on other pre-made organism models. Args: ostat (str): Predicted oligomeric state of the PDB file Returns: int: Translated string to integer
codesearchnet
def find_or_create_all(cls, list_of_kwargs, keys=()):
    """Batch method for querying for a list of instances and creating
    them if required.

    Args:
        list_of_kwargs (list of dicts): A list of dicts where each dict
            denotes the keyword args that you would pass to the create
            method separately
        keys (iterable, optional): Attributes to use for the initial
            finding step. Matching is done only on these attributes.

    Examples:
        >>> Customer.find_or_create_all([
        ...     {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34},
        ...     {'name': 'Ron', 'age': 40, 'email': 'ron@x.com',
        ...      'gender': 'Male'}], keys=['name', 'email'])
    """
    # The default used to be a shared mutable list (``keys=[]``); an
    # immutable tuple default avoids accidental cross-call state.
    keys = list(keys)
    (list_of_kwargs_wo_dupes, markers) = remove_and_mark_duplicate_dicts(list_of_kwargs, keys)
    # Find an existing instance by the key attributes, or create one.
    added_objs = cls.add_all([
        (cls.first(**subdict(kwargs, keys)) or cls.new(**kwargs))
        for kwargs in list_of_kwargs_wo_dupes])
    result_objs = []
    iterator_of_added_objs = iter(added_objs)
    for idx in range(len(list_of_kwargs)):
        if idx in markers:
            # Duplicate entry: reuse the object created for its twin.
            result_objs.append(added_objs[markers[idx]])
        else:
            result_objs.append(next(iterator_of_added_objs))
    return result_objs
Batch method for querying for a list of instances and creating them if required Args: list_of_kwargs(list of dicts): A list of dicts where each dict denotes the keyword args that you would pass to the create method separately keys (list, optional): A list of keys to use for the initial finding step. Matching is done only on these attributes. Examples: >>> Customer.find_or_create_all([ ... {'name': 'Vicky', 'email': 'vicky@x.com', 'age': 34}, ... {'name': 'Ron', 'age': 40, 'email': 'ron@x.com', ... 'gender': 'Male'}], keys=['name', 'email'])
codesearchnet
def forward(self, hidden_states, output_router_logits):
    """Run the expert MLP with a soft bypass and a residual connection.

    Args:
        hidden_states (`torch.Tensor`):
            [num_groups, tokens_per_group, hidden_dim] inputs to send to
            experts.
        output_router_logits (`bool`):
            whether to also return the experts' router output.

    Returns:
        torch.Tensor[num_groups, tokens_per_group, hidden_dim], or a
        (output, router_tuple) pair when router logits are requested and
        available.
    """
    forwarded_states, router_tuple = self.mlp(hidden_states)
    # Soft bypass: add a tanh-squashed transform of the raw input.
    forwarded_states += torch.tanh(self.soft_bypass_mlp(hidden_states))
    # Residual connection around the normalized forwarded states.
    output = hidden_states + self.norm(forwarded_states)
    if output_router_logits and router_tuple is not None:
        return (output, router_tuple)
    else:
        return output
Args: hidden_states (`torch.Tensor`) : [num_groups, tokens_per_group, hidden_dim] inputs to send to experts. output_router_logits (`bool`) : output experts router output. Returns: torch.Tensor[num_groups, tokens_per_group, hidden_dim]
github-repos
def _GetFormatErrorLocation(
    self, yaml_definition, last_definition_object):
    """Retrieves a format error location.

    Args:
        yaml_definition (dict[str, object]): current YAML definition.
        last_definition_object (DataTypeDefinition): previous data type
            definition.

    Returns:
        str: format error location.
    """
    name = yaml_definition.get('name', None)
    if name:
        # NOTE(review): the ``or '<NAMELESS>'`` fallback is unreachable
        # here since this branch already requires ``name`` to be truthy.
        error_location = 'in: {0:s}'.format(name or '<NAMELESS>')
    elif last_definition_object:
        error_location = 'after: {0:s}'.format(last_definition_object.name)
    else:
        error_location = 'at start'
    return error_location
Retrieves a format error location. Args: yaml_definition (dict[str, object]): current YAML definition. last_definition_object (DataTypeDefinition): previous data type definition. Returns: str: format error location.
juraj-google-style
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a status (tweet) row from the database.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)

    event_data = TwitterIOSStatusEventData()
    event_data.favorite_count = self._GetRowValue(
        query_hash, row, 'favoriteCount')
    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
    event_data.name = self._GetRowValue(query_hash, row, 'name')
    event_data.query = query
    event_data.retweet_count = self._GetRowValue(
        query_hash, row, 'retweetCount')
    event_data.text = self._GetRowValue(query_hash, row, 'text')
    event_data.user_id = self._GetRowValue(query_hash, row, 'user_id')

    # Creation time of the status.
    timestamp = self._GetRowValue(query_hash, row, 'date')
    if timestamp:
        timestamp = int(timestamp)
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    # Last update time of the status.
    timestamp = self._GetRowValue(query_hash, row, 'updatedAt')
    if timestamp:
        timestamp = int(timestamp)
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_UPDATE)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a contact row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def __init__(self, data_type_definition):
    """Initializes a stream data type map.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Raises:
        FormatError: if the data type map cannot be determined from the
            data type definition.
    """
    super(StreamMap, self).__init__(data_type_definition)
    # Fold/map byte-stream callbacks are resolved lazily.
    self._fold_byte_stream = None
    self._map_byte_stream = None

    # Streams of composite elements are not supported.
    if self._element_data_type_definition.IsComposite():
        raise errors.FormatError('Unsupported composite element data type')
Initializes a stream data type map. Args: data_type_definition (DataTypeDefinition): data type definition. Raises: FormatError: if the data type map cannot be determined from the data type definition.
juraj-google-style
def reset(self, name=None):
    """Restore a reader to its initial clean state.

    Args:
        name: A name for the operation (optional).

    Returns:
        The created Operation.
    """
    # Resource-based reader handles use the V2 kernel.
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops.reader_reset_v2(self._reader_ref, name=name)
    else:
        return gen_io_ops.reader_reset(self._reader_ref, name=name)
Restore a reader to its initial clean state. Args: name: A name for the operation (optional). Returns: The created Operation.
github-repos
def ParseOptions(cls, options, configuration_object):
    """Parses and validates options.

    Args:
        options (argparse.Namespace): parser options.
        configuration_object (CLITool): object to be configured by the
            argument helper.

    Raises:
        BadConfigObject: when the configuration object is of the wrong
            type.
    """
    if not isinstance(configuration_object, tools.CLITool):
        raise errors.BadConfigObject(
            'Configuration object is not an instance of CLITool')

    preferred_year = cls._ParseNumericOption(options, 'preferred_year')
    process_archives = getattr(options, 'process_archives', False)
    process_compressed_streams = getattr(
        options, 'process_compressed_streams', True)

    # Store the parsed values as private attributes on the CLI tool.
    setattr(configuration_object, '_preferred_year', preferred_year)
    setattr(configuration_object, '_process_archives', process_archives)
    setattr(
        configuration_object, '_process_compressed_streams',
        process_compressed_streams)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
juraj-google-style
def _create_dom(data):
    """Creates doublelinked DOM from `data`.

    Args:
        data (str/HTMLElement): Either string or HTML element.

    Returns:
        obj: HTMLElement containing double linked DOM.
    """
    # Parse strings; already-parsed elements are passed through.
    if (not isinstance(data, dhtmlparser.HTMLElement)):
        data = dhtmlparser.parseString(utils.handle_encodnig(data))
    dhtmlparser.makeDoubleLinked(data)
    return data
Creates doublelinked DOM from `data`. Args: data (str/HTMLElement): Either string or HTML element. Returns: obj: HTMLElement containing double linked DOM.
codesearchnet
async def _get_async(self, url, session):
    """Asynchronous internal method used for GET requests.

    Args:
        url (str): URL to fetch
        session (obj): aiohttp client session for async loop

    Returns:
        data (obj): Decoded JSON body of the response, or None for
            non-200 responses.
    """
    data = None
    async with session.get(url) as resp:
        if (resp.status == 200):
            data = (await resp.json())
    return data
Asynchronous internal method used for GET requests Args: url (str): URL to fetch session (obj): aiohttp client session for async loop Returns: data (obj): Individual URL request's response coroutine
codesearchnet
def saml_metadata(self, client_id):
    """Get SAML2.0 Metadata.

    Args:
        client_id (str): Client Id of the application to get the SAML
            metadata for.
    """
    # URL reconstructed: the source line was truncated after "https:".
    # TODO confirm the exact endpoint against the upstream Auth0 SDK.
    return self.get(url='https://{}/samlp/metadata/{}'.format(self.domain, client_id))
Get SAML2.0 Metadata. Args: client_id (str): Client Id of the application to get the SAML metadata for.
codesearchnet
def require_params(self, req):
    """Require all defined parameters from request query string.

    Raises ``falcon.errors.HTTPMissingParam`` exception if any of
    required parameters is missing and ``falcon.errors.HTTPInvalidParam``
    if any of parameters could not be understood (wrong format).

    Args:
        req (falcon.Request): request object
    """
    params = {}
    for name, param in self.params.items():
        if name not in req.params and param.required:
            # Report every missing required parameter at once.
            missing = set(
                p for p in self.params if self.params[p].required
            ) - set(req.params.keys())
            raise errors.HTTPMissingParam(", ".join(missing))
        elif name in req.params or param.default:
            # NOTE(review): a falsy default (0, '', False) is skipped by
            # this truthiness test -- confirm this is intended.
            try:
                if param.many:
                    values = req.get_param_as_list(
                        name, param.validated_value
                    ) or [
                        param.default and param.validated_value(param.default)
                    ]
                    params[name] = param.container(values)
                else:
                    params[name] = param.validated_value(
                        req.get_param(name, default=param.default)
                    )
            except ValidationError as err:
                raise err.as_invalid_param(name)
            except ValueError as err:
                raise errors.HTTPInvalidParam(str(err), name)
    return params
Require all defined parameters from request query string. Raises ``falcon.errors.HTTPMissingParam`` exception if any of required parameters is missing and ``falcon.errors.HTTPInvalidParam`` if any of parameters could not be understood (wrong format). Args: req (falcon.Request): request object
juraj-google-style
def fit(self, train_x, train_y):
    """Fit the regressor with more data.

    Args:
        train_x: A list of NetworkDescriptor.
        train_y: A list of metric values.
    """
    # Dispatch to the initial fit until one has happened, then switch
    # to incremental updates.
    if not self.first_fitted:
        self.first_fit(train_x, train_y)
    else:
        self.incremental_fit(train_x, train_y)
Fit the regressor with more data. Args: train_x: A list of NetworkDescriptor. train_y: A list of metric values.
juraj-google-style
def log_likelihood(self, y, _const=math.log((2.0 * math.pi)), quiet=False):
    """Compute the marginalized likelihood of the GP model.

    The factorized matrix from the previous call to ``compute`` is used,
    so ``compute`` must be called first.

    Args:
        y (array[n]): The observations at the coordinates from ``compute``.
        _const (float): Private cached value of ``log(2*pi)``; evaluated
            once at function-definition time as a speed idiom -- do not
            pass explicitly.
        quiet (bool): If true, return ``-numpy.inf`` for non-positive
            definite matrices instead of raising.

    Returns:
        float: The marginalized log-likelihood of the GP model
        (``-inf`` for non-finite intermediate results).

    Raises:
        ValueError: For mismatched dimensions.
        solver.LinAlgError: For non-positive definite matrices
            (when ``quiet`` is false).
    """
    y = self._process_input(y)
    resid = (y - self.mean.get_value(self._t))
    try:
        self._recompute()
    except solver.LinAlgError:
        if quiet:
            return (- np.inf)
        raise
    if (len(y.shape) > 1):
        raise ValueError('dimension mismatch')
    logdet = self.solver.log_determinant()
    if (not np.isfinite(logdet)):
        return (- np.inf)
    # Standard Gaussian log-density: -0.5 * (r^T K^-1 r + log|K| + n*log(2*pi)).
    loglike = ((- 0.5) * ((self.solver.dot_solve(resid) + logdet) + (len(y) * _const)))
    if (not np.isfinite(loglike)):
        return (- np.inf)
    return loglike
Compute the marginalized likelihood of the GP model The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` for non-positive definite matrices instead of throwing an error. Returns: float: The marginalized likelihood of the GP model. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
codesearchnet
def from_corpus(cls, corpus):
    """Create a new modifiable corpus from any other CorpusView.

    This can, for example, be used to create an independent modifiable
    corpus from a subview.

    Args:
        corpus (CorpusView): The corpus to create a copy from.

    Returns:
        Corpus: A new corpus with the same tracks, issuers, utterances,
        subviews and feature containers as the given one.
    """
    # Fix: use cls() instead of hard-coding Corpus() so subclasses calling
    # this alternate constructor get an instance of their own type.
    ds = cls()

    # Tracks and issuers are deep-copied first; the returned mappings
    # translate original ids to the imported objects.
    tracks = copy.deepcopy(list(corpus.tracks.values()))
    track_mapping = ds.import_tracks(tracks)

    issuers = copy.deepcopy(list(corpus.issuers.values()))
    issuer_mapping = ds.import_issuers(issuers)

    # Re-point every copied utterance at the imported track/issuer objects.
    utterances = copy.deepcopy(list(corpus.utterances.values()))
    for utterance in utterances:
        utterance.track = track_mapping[utterance.track.idx]

        if utterance.issuer is not None:
            utterance.issuer = issuer_mapping[utterance.issuer.idx]

    ds.import_utterances(utterances)

    subviews = copy.deepcopy(corpus.subviews)
    for subview_idx, subview in subviews.items():
        ds.import_subview(subview_idx, subview)

    # Feature containers are referenced by path, not copied on disk.
    for feat_container_idx, feature_container in corpus.feature_containers.items():
        ds.new_feature_container(feat_container_idx, feature_container.path)

    return ds
Create a new modifiable corpus from any other CorpusView. This can, for example, be used to create an independent modifiable corpus from a subview. Args: corpus (CorpusView): The corpus to create a copy from. Returns: Corpus: A new corpus with the same data as the given one.
juraj-google-style
def __init__(self, faulty_file, msg):
    """Initialize the parsing-error exception.

    Args:
        faulty_file (pathlike): path of the file where a parsing problem
            was encountered.
        msg (str): error message.

    Attributes:
        file (pathlike): path of the file where a parsing problem was
            encountered.
        msg (str): error message.
    """
    super().__init__(faulty_file, msg)
    self.file = faulty_file
    self.msg = msg
Initialization of instances: Args: faulty_file (pathlike): path of the file where a parsing problem was encountered. msg (str): error message. Attributes: file (pathlike): path of the file where a parsing problem was encountered. msg (str): error message.
juraj-google-style
def pi_to_number(self, page=1, item=1):
    """Convert a (page, item) pair into an absolute item count.

    Every page before ``page`` contributes ``page_items`` items; page 1
    (or lower) contributes only the item offset itself.

    Args:
        page (int): 1-based page number.
        item (int): 1-based item offset within the page.

    Returns:
        int: the number of items up to and including this position.
    """
    if page <= 1:
        return item
    return (page - 1) * self.page_items + item
Convert subpage & subitem to an integer. * if page <= 1, return the item offset itself * if page > 1, return (page - 1) * items_per_page + item, counting all items on the preceding full pages. Args: * page (int) - 1-based page number * item (int) - 1-based item offset within the page Returns: * Integer - Which represents the number of items up to and including this position.
juraj-google-style
def __getitem__(self, column):
    """Support for [] notation.

    Args:
        column: Tuple/list of column names, a single (str) column name,
            or a positional column number, 0-indexed.

    Returns:
        A list or string with column value(s).

    Raises:
        IndexError: The given column(s) were not found.
    """
    if isinstance(column, (list, tuple)):
        # Resolve each requested column recursively.
        return [self[col] for col in column]
    try:
        # First treat `column` as a name mapped through the index.
        return self._values[self._index[column]]
    except (KeyError, TypeError, ValueError):
        pass
    try:
        # Fall back to positional access.
        return self._values[column]
    except (IndexError, TypeError):
        pass
    raise IndexError('No such column "%s" in row.' % column)
Support for [] notation. Args: column: Tuple of column names, or a (str) column name, or positional column number, 0-indexed. Returns: A list or string with column value(s). Raises: IndexError: The given column(s) were not found.
juraj-google-style
def classify_coincident(st_vals, coincident):
    r"""Determine if coincident parameters are "unused".

    When ``coincident`` is true there are two parameter pairs
    :math:`(s_1, t_1)` and :math:`(s_2, t_2)`; unless both
    :math:`s_1 < s_2` and :math:`t_1 < t_2` hold, the coincident segments
    run in opposite directions and do not bound an intersection interior.

    Args:
        st_vals (numpy.ndarray): ``2 x 2`` array of intersection
            parameters (assumed, not checked, in the coincident case).
        coincident (bool): Flag indicating if the intersections are the
            endpoints of coincident segments of two curves.

    Returns:
        Optional[.IntersectionClassification]: The classification of the
        intersections, or ``None`` when not coincident.
    """
    if not coincident:
        return None
    s_increasing = st_vals[0, 0] < st_vals[0, 1]
    t_increasing = st_vals[1, 0] < st_vals[1, 1]
    if s_increasing and t_increasing:
        return CLASSIFICATION_T.COINCIDENT
    return UNUSED_T
r"""Determine if coincident parameters are "unused". .. note:: This is a helper for :func:`surface_intersections`. In the case that ``coincident`` is :data:`True`, then we'll have two sets of parameters :math:`(s_1, t_1)` and :math:`(s_2, t_2)`. If one of :math:`s1 < s2` or :math:`t1 < t2` is not satisfied, the coincident segments will be moving in opposite directions, hence don't define an interior of an intersection. .. warning:: In the "coincident" case, this assumes, but doesn't check, that ``st_vals`` is ``2 x 2``. Args: st_vals (numpy.ndarray): ``2 X N`` array of intersection parameters. coincident (bool): Flag indicating if the intersections are the endpoints of coincident segments of two curves. Returns: Optional[.IntersectionClassification]: The classification of the intersections.
codesearchnet
def _InvokeGitkitApi(self, method, params=None, need_service_account=True):
    """Invokes a Gitkit API method, optionally with a service-account token.

    Args:
        method: string, the api method name.
        params: dict of optional parameters for the API.
        need_service_account: False if a service account is not needed.

    Returns:
        The API response, as processed by ``_CheckGitkitError``
        (presumably a dict -- confirm against that helper).

    Raises:
        errors.GitkitClientError: if service-account credentials are
            missing or the request is bad.
        errors.GitkitServerError: if Gitkit cannot handle the request.
    """
    body = (simplejson.dumps(params) if params else None)
    req = urllib_request.Request((self.google_api_url + method))
    req.add_header('Content-type', 'application/json')
    if need_service_account:
        # Prefer full oauth2 credentials; fall back to a locally-derived
        # token from the service-account email/key pair.
        if self.credentials:
            access_token = self.credentials.get_access_token().access_token
        elif (self.service_account_email and self.service_account_key):
            access_token = self._GetAccessToken()
        else:
            raise errors.GitkitClientError('Missing service account credentials')
        req.add_header('Authorization', ('Bearer ' + access_token))
    try:
        binary_body = (body.encode('utf-8') if body else None)
        raw_response = urllib_request.urlopen(req, binary_body).read()
    except urllib_request.HTTPError as err:
        # HTTP 400 carries a Gitkit error payload worth parsing; any other
        # status is propagated unchanged.
        if (err.code == 400):
            raw_response = err.read()
        else:
            raise
    return self._CheckGitkitError(raw_response)
Invokes Gitkit API, with optional access token for service account. Args: method: string, the api method name. params: dict of optional parameters for the API. need_service_account: false if service account is not needed. Raises: GitkitClientError: if the request is bad. GitkitServerError: if Gitkit can not handle the request. Returns: API response as dict.
codesearchnet
def num_gpus():
    """Get the number of available GPU devices.

    Returns:
        The number of available GPU devices, as reported by ``context()``
        (presumably the eager execution context -- confirm).
    """
    return context().num_gpus()
Get the number of available GPU devices. Returns: The number of available GPU devices.
github-repos
def putenv(key, value):
    """Like `os.putenv` but takes unicode under Windows + Python 2.

    Args:
        key (pathlike): The env var to set.
        value (pathlike): The value to set.

    Raises:
        ValueError: if the underlying platform call rejects the pair.
    """
    key = path2fsn(key)
    value = path2fsn(value)

    if (is_win and PY2):
        try:
            # Windows + Python 2 needs the wide-char API for unicode values.
            set_windows_env_var(key, value)
        except WindowsError:
            raise ValueError
    else:
        try:
            os.putenv(key, value)
        except OSError:
            raise ValueError
Like `os.putenv` but takes unicode under Windows + Python 2 Args: key (pathlike): The env var to get value (pathlike): The value to set Raises: ValueError
codesearchnet
def _example_from_array_spec(self, prop_spec):
    """Get an example from a property specification of an array.

    Args:
        prop_spec: property specification you want an example of.

    Returns:
        An example array (implicitly ``None`` when no branch matches).
    """
    # Case 1: `items` is itself a list of specs -> one example per spec.
    if isinstance(prop_spec['items'], list):
        return [self.get_example_from_prop_spec(item_prop_spec) for item_prop_spec in prop_spec['items']]
    # Case 2: items described by a basic type (with special handling for
    # date-time formatted strings).
    elif ('type' in prop_spec['items'].keys()):
        if (('format' in prop_spec['items'].keys()) and (prop_spec['items']['format'] == 'date-time')):
            return self._get_example_from_basic_type('datetime')
        else:
            return self._get_example_from_basic_type(prop_spec['items']['type'])
    # Case 3: items reference a named definition, either directly or via a
    # nested `schema` object.
    elif (('$ref' in prop_spec['items'].keys()) or (('schema' in prop_spec) and ('$ref' in prop_spec['schema']['items'].keys()))):
        definition_name = (self.get_definition_name_from_ref(prop_spec['items']['$ref']) or self.get_definition_name_from_ref(prop_spec['schema']['items']['$ref']))
        if self.build_one_definition_example(definition_name):
            example_dict = self.definitions_example[definition_name]
            if (not isinstance(example_dict, dict)):
                return [example_dict]
            if (len(example_dict) == 1):
                # NOTE(review): `example_dict.keys()[0]` is Python-2 only;
                # the TypeError fallback below handles Python 3 key views.
                try:
                    res = example_dict[example_dict.keys()[0]]
                except TypeError:
                    res = example_dict[list(example_dict)[0]]
                return res
            else:
                return_value = {}
                for (example_name, example_value) in example_dict.items():
                    return_value[example_name] = example_value
                return [return_value]
    # Case 4: inline object schema -- build an example per property.
    elif ('properties' in prop_spec['items']):
        prop_example = {}
        # NOTE(review): the loop variable shadows the outer `prop_spec`.
        for (prop_name, prop_spec) in prop_spec['items']['properties'].items():
            example = self.get_example_from_prop_spec(prop_spec)
            if (example is not None):
                prop_example[prop_name] = example
        return [prop_example]
Get an example from a property specification of an array. Args: prop_spec: property specification you want an example of. Returns: An example array.
codesearchnet
def apply_transformation(self, structure):
    """Discretizes the site occupancies in the structure.

    Args:
        structure: disordered Structure to discretize occupancies.

    Returns:
        A new disordered Structure with occupancies discretized (the
        input is returned unchanged when already ordered).

    Raises:
        RuntimeError: if an occupancy cannot be approximated within
            ``self.tol``.
    """
    if structure.is_ordered:
        return structure

    species = [dict(sp) for sp in structure.species_and_occu]
    for sp in species:
        for k, v in sp.items():
            old_occ = sp[k]
            # Best rational approximation with a bounded denominator.
            new_occ = float(
                Fraction(old_occ).limit_denominator(self.max_denominator))
            if self.fix_denominator:
                # Force every occupancy onto the same fixed denominator
                # instead of the best rational approximation.
                new_occ = around(old_occ*self.max_denominator)\
                    / self.max_denominator
            if round(abs(old_occ - new_occ), 6) > self.tol:
                raise RuntimeError(
                    "Cannot discretize structure within tolerance!")
            sp[k] = new_occ

    return Structure(structure.lattice, species, structure.frac_coords)
Discretizes the site occupancies in the structure. Args: structure: disordered Structure to discretize occupancies Returns: A new disordered Structure with occupancies discretized
juraj-google-style
def _make_dense_default(self, key, shape, dtype):
    """Construct the default value tensor for a specified dense feature.

    Args:
        key: The key string identifying the dense feature.
        shape: The dense feature's shape.
        dtype: The dense feature's dtype.

    Returns:
        A Tensor holding the default value: a scalar padding element when
        the leading dimension is unknown, otherwise a tensor reshaped to
        `shape`.
    """
    default_value = self.dense_defaults.get(key)
    if shape.ndims is not None and shape.ndims > 0 and (shape.dims[0].value is None):
        # Variable-length leading dimension: the default acts as a scalar
        # padding element rather than a full default tensor.
        if default_value is None:
            default_value = ops.convert_to_tensor('' if dtype == dtypes.string else 0, dtype=dtype)
        else:
            # Sanitize the key so it is a valid TensorFlow op name.
            key_name = 'padding_' + re.sub('[^A-Za-z0-9_.\\-/]', '_', key)
            default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)
            default_value = array_ops.reshape(default_value, [])
    elif default_value is None:
        default_value = constant_op.constant([], dtype=dtype)
    elif not isinstance(default_value, tensor.Tensor):
        key_name = 'key_' + re.sub('[^A-Za-z0-9_.\\-/]', '_', key)
        default_value = ops.convert_to_tensor(default_value, dtype=dtype, name=key_name)
        default_value = array_ops.reshape(default_value, shape)
    return default_value
Construct the default value tensor for a specified dense feature. Args: key: The key string identifying the dense feature. shape: The dense feature's shape. dtype: The dense feature's dtype. Returns: A Tensor.
github-repos
def _enrichment_test_preprocessor(test_spec: dict, expected: List[str], env: TestEnvironment):
    """Replace Enrichment transforms with the mock ``TestEnrichment``.

    This lets YAML-pipeline tests verify correctness without external
    services such as BigTable or BigQuery.

    Args:
        test_spec: The dictionary representation of the YAML pipeline
            specification.
        expected: A list of strings representing the expected output of
            the pipeline (unused here).
        env: The TestEnvironment object providing utilities for creating
            temporary files (unused here).

    Returns:
        The modified test_spec dictionary.
    """
    pipeline = test_spec.get('pipeline', None)
    if pipeline:
        for transform in pipeline.get('transforms', []):
            if transform.get('type', '').startswith('Enrichment'):
                transform['type'] = 'TestEnrichment'
    return test_spec
Preprocessor for tests that involve the Enrichment transform. This preprocessor replaces the actual Enrichment transform with a mock `TestEnrichment` transform. This allows the test to verify the pipeline's correctness without requiring external services like BigTable or BigQuery. Args: test_spec: The dictionary representation of the YAML pipeline specification. expected: A list of strings representing the expected output of the pipeline. env: The TestEnvironment object providing utilities for creating temporary files. Returns: The modified test_spec dictionary with Enrichment transforms replaced.
github-repos
def structure_path(self, path):
    """Record the directory and filename of the structure file.

    Args:
        path: Path to the structure file; a falsy value clears both the
            stored directory and filename.

    Raises:
        OSError: If ``path`` is given but does not exist on disk.
    """
    if not path:
        self.structure_dir = None
        self.structure_file = None
        return
    if not op.exists(path):
        raise OSError('{}: file does not exist!'.format(path))
    # A bare filename has no dirname; treat it as the current directory.
    self.structure_dir = op.dirname(path) or '.'
    self.structure_file = op.basename(path)
Provide pointers to the paths of the structure file Args: path: Path to structure file
codesearchnet
def update(self, *args, **kwargs):
    """Update this mapping, mirroring ``dict.update``.

    Key/value pairs from each positional mapping are applied in order,
    then the keyword arguments, with later values overwriting existing
    keys.

    Args:
        *args: Mappings whose items are copied in.

    Keyword Args:
        All keyword arguments are stored directly.

    Returns:
        None
    """
    sources = list(args) + [kwargs]
    for source in sources:
        for key, value in source.items():
            self[key] = value
Equivalent to the python dict update method. Update the dictionary with the key/value pairs from other, overwriting existing keys. Args: other (dict): The source of key value pairs to add to headers Keyword Args: All keyword arguments are stored in header directly Returns: None
codesearchnet
def add_node(self, node_id, name, labels):
    """Add the node with name and labels to the graph database.

    Args:
        node_id: Id for the node.
        name: Name for the node.
        labels: Iterable of labels for the node.
    """
    node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
    try:
        node.add_labels(*labels)
    except NotImplementedError:
        # Some graph backends do not support labels; this is best-effort.
        pass
Add the node with name and labels. Args: node_id: Id for the node. name: Name for the node. labels: Label for the node. Raises: NotImplementedError: When adding labels is not supported.
codesearchnet
def apply_grad_processors(opt, gradprocs):
    """Wrapper around optimizers to apply gradient processors.

    Args:
        opt (tf.train.Optimizer): the optimizer to wrap.
        gradprocs (list[GradientProcessor]): gradient processors to run on
            the gradients before the optimizer applies them.

    Returns:
        a :class:`tf.train.Optimizer` instance which runs the gradient
        processors before updating the variables.
    """
    assert isinstance(gradprocs, (list, tuple)), gradprocs
    for gp in gradprocs:
        assert isinstance(gp, GradientProcessor), gp

    class _ApplyGradientProcessor(ProxyOptimizer):
        def __init__(self, opt, gradprocs):
            # Copy the list so later mutation by the caller has no effect.
            self._gradprocs = gradprocs[:]
            super(_ApplyGradientProcessor, self).__init__(opt)

        def apply_gradients(self, grads_and_vars, global_step=None, name=None):
            g = self._apply(grads_and_vars)
            return self._opt.apply_gradients(g, global_step, name)

        def _apply(self, g):
            # Chain the processors in the order they were given.
            for proc in self._gradprocs:
                g = proc.process(g)
            return g

    return _ApplyGradientProcessor(opt, gradprocs)
Wrapper around optimizers to apply gradient processors. Args: opt (tf.train.Optimizer): gradprocs (list[GradientProcessor]): gradient processors to add to the optimizer. Returns: a :class:`tf.train.Optimizer` instance which runs the gradient processors before updating the variables.
codesearchnet
def __init__(self, model: PreTrainedModel):
    """Initializes the wrapper module with the pretrained model.

    Args:
        model (`PreTrainedModel`): The pretrained model to wrap. The
            model must have caching enabled and use a 'static' caching
            implementation.

    Raises:
        AssertionError: If the pretrained model does not have a
            generation config, does not have caching enabled, or does not
            use a 'static' caching implementation in
            `model.generation_config`.
    """
    super().__init__()

    if model.generation_config is None:
        raise AssertionError('The model must have a generation config to be exported with static caching. Please set `generation_config`.')
    if not model.generation_config.use_cache:
        raise AssertionError('The model must have caching enabled to be exported with static caching. Please set `generation_config.use_cache=True`.')
    if model.generation_config.cache_implementation != 'static':
        raise AssertionError("The model must use a 'static' caching implementation to be exported with static caching. Please set `generation_config.cache_implementation='static'`.")

    self.model = model
    self.static_cache = StaticCache(config=self.model.config, max_batch_size=self.model.generation_config.cache_config.batch_size, max_cache_len=self.model.generation_config.cache_config.max_cache_len, device=self.model.generation_config.cache_config.device, dtype=self.model.dtype)
    # Register the cache tensors as non-persistent buffers so they move
    # with the module across devices and are visible to export tracing.
    for i in range(len(self.static_cache.key_cache)):
        self.register_buffer(f'key_cache_{i}', self.static_cache.key_cache[i], persistent=False)
        self.register_buffer(f'value_cache_{i}', self.static_cache.value_cache[i], persistent=False)
Initializes the wrapper module with the pretrained model. Args: model (`PreTrainedModel`): The pretrained model to wrap. The model must have caching enabled and use a 'static' caching implementation. Raises: AssertionError: If the pretrained model does not have caching enabled or if it does not use a 'static' caching implementation in `model.generation_config`.
github-repos
def RegisterImplementation(cache_name, map_name, cache):
    """Register a Cache implementation with the CacheFactory.

    Child modules are expected to call this at file-level scope so the
    CacheFactory is aware of them.

    Args:
        cache_name: (string) The name of the NSS backend.
        map_name: (string) The name of the map handled by this Cache.
        cache: A class type that is a subclass of Cache.

    Returns:
        Nothing
    """
    global _cache_implementations
    # Log only the first registration for a given backend name.
    if cache_name not in _cache_implementations:
        logging.info('Registering [%s] cache for [%s].', cache_name, map_name)
    _cache_implementations.setdefault(cache_name, {})[map_name] = cache
Register a Cache implementation with the CacheFactory. Child modules are expected to call this method in the file-level scope so that the CacheFactory is aware of them. Args: cache_name: (string) The name of the NSS backend. map_name: (string) The name of the map handled by this Cache. cache: A class type that is a subclass of Cache. Returns: Nothing
github-repos
def get_selector(self, name):
    """Find a selector mapped to a style in this or a base style sheet.

    Args:
        name (str): a style name.

    Returns:
        :class:`.Selector`: the selector mapped to the style ``name``.

    Raises:
        KeyError: if the style ``name`` was not found in this or any base
            style sheet.
    """
    try:
        return self.matcher.by_name[name]
    except (AttributeError, KeyError):
        # Not found locally; fall through to the base sheet (if any).
        pass
    if self.base is None:
        raise KeyError("No selector found for style '{}'".format(name))
    return self.base.get_selector(name)
Find a selector mapped to a style in this or a base style sheet. Args: name (str): a style name Returns: :class:`.Selector`: the selector mapped to the style `name` Raises: KeyError: if the style `name` was not found in this or a base style sheet
juraj-google-style
def op(name, data, bucket_count=None, display_name=None, description=None, collections=None):
    """Create a legacy histogram summary op.

    Args:
        name: A unique name for the generated summary node.
        data: A `Tensor` of any shape. Must be castable to `float64`.
        bucket_count: Optional positive `int` number of output buckets.
        display_name: Optional name for this summary in TensorBoard;
            defaults to `name`.
        description: Optional long-form Markdown description; defaults to
            empty.
        collections: Optional list of graph collections keys the new
            summary op is added to; defaults to `[Graph Keys.SUMMARIES]`.

    Returns:
        A TensorFlow summary op.
    """
    # Local import (original placement) -- presumably to defer the heavy
    # TensorFlow dependency until the op is actually built.
    import tensorflow.compat.v1 as tf
    if (display_name is None):
        display_name = name
    summary_metadata = metadata.create_summary_metadata(display_name=display_name, description=description)
    with tf.name_scope(name):
        tensor = _buckets(data, bucket_count=bucket_count)
        return tf.summary.tensor_summary(name='histogram_summary', tensor=tensor, collections=collections, summary_metadata=summary_metadata)
Create a legacy histogram summary op. Arguments: name: A unique name for the generated summary node. data: A `Tensor` of any shape. Must be castable to `float64`. bucket_count: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A TensorFlow summary op.
codesearchnet
def setDocuments(self, documenting_pid, documented_pid):
    """Add a CiTO (Citation Typing Ontology) "documents" assertion.

    Adds the triple: ``documenting_pid cito:documents documented_pid``.

    Args:
        documenting_pid: str
            PID of a Science Object that documents ``documented_pid``.
        documented_pid: str
            PID of a Science Object that is documented by
            ``documenting_pid``.
    """
    self._check_initialized()
    documenting_id = self.getObjectByPid(documenting_pid)
    documented_id = self.getObjectByPid(documented_pid)
    self.add((documenting_id, CITO.documents, documented_id))
Add a CiTO, the Citation Typing Ontology, triple asserting that ``documenting_pid`` documents ``documented_pid``. Adds assertion: ``documenting_pid cito:documents documented_pid`` Args: documenting_pid: str PID of a Science Object that documents ``documented_pid``. documented_pid: str PID of a Science Object that is documented by ``documenting_pid``.
juraj-google-style
def print_stack_info(self):
    """List and print the resources created by the CloudFormation stack.

    Returns:
        The `describe_stack_resources` response dict, or None if the
        lookup failed.
    """
    try:
        rest_api_id = None
        deployment_found = False
        response = self._cf_client.describe_stack_resources(
            StackName=self._stack_name
        )
        print('\nThe following resources were created:')
        rows = []
        for resource in response['StackResources']:
            # Remember API Gateway details so the service URL can be
            # printed after the table.
            if resource['ResourceType'] == 'AWS::ApiGateway::RestApi':
                rest_api_id = resource['PhysicalResourceId']
            elif resource['ResourceType'] == 'AWS::ApiGateway::Deployment':
                deployment_found = True
            row = []
            row.append(resource['ResourceType'])
            row.append(resource['LogicalResourceId'])
            row.append(resource['PhysicalResourceId'])
            rows.append(row)
        print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))
        if rest_api_id and deployment_found:
            # NOTE(review): the URL format string below is truncated
            # (unterminated literal) -- everything from "//" onward was
            # evidently stripped by a comment-aware extractor. Restore the
            # full API Gateway URL template from the original source.
            url = 'https:
                rest_api_id,
                self._region,
                '<stage>'
            )
            print('\nThe deployed service can be found at this URL:')
            print('\t{}\n'.format(url))
        return response
    except Exception as wtf:
        # NOTE(review): broad catch that only prints -- callers can only
        # detect failure via the None return.
        print(wtf)
        return None
List resources from the given stack Args: None Returns: A dictionary filled with resources, or None if things went sideways
juraj-google-style
def WriteOutput(self, output_file, feed_merger, old_feed_path, new_feed_path, merged_feed_path):
    """Write the HTML merge report to a file.

    Args:
        output_file: The file object that the HTML output will be written
            to.
        feed_merger: The FeedMerger instance.
        old_feed_path: The path to the old feed file as a string.
        new_feed_path: The path to the new feed file as a string.
        merged_feed_path: The path to the merged feed file as a string.
            This may be None if no merged feed was written.
    """
    if merged_feed_path is None:
        html_merged_feed_path = ''
    else:
        html_merged_feed_path = '<p>Merged feed created: <code>%s</code></p>' % (
            merged_feed_path)

    # NOTE(review): the template literals for html_header and html_footer
    # are missing (bare `= % locals()` / `= % (...)` below) -- the
    # triple-quoted template strings were evidently stripped during
    # extraction and must be restored from the original source.
    html_header = % locals()

    html_stats = self._GenerateStatsTable(feed_merger)
    html_summary = self._GenerateSummary()
    html_notices = self._GenerateNotices()
    html_errors = self._GenerateSection(transitfeed.TYPE_ERROR)
    html_warnings = self._GenerateSection(transitfeed.TYPE_WARNING)
    html_footer = % (transitfeed.__version__,
                     time.strftime('%B %d, %Y at %I:%M %p %Z'))

    output_file.write(transitfeed.EncodeUnicode(html_header))
    output_file.write(transitfeed.EncodeUnicode(html_stats))
    output_file.write(transitfeed.EncodeUnicode(html_summary))
    output_file.write(transitfeed.EncodeUnicode(html_notices))
    output_file.write(transitfeed.EncodeUnicode(html_errors))
    output_file.write(transitfeed.EncodeUnicode(html_warnings))
    output_file.write(transitfeed.EncodeUnicode(html_footer))
Write the HTML output to a file. Args: output_file: The file object that the HTML output will be written to. feed_merger: The FeedMerger instance. old_feed_path: The path to the old feed file as a string. new_feed_path: The path to the new feed file as a string merged_feed_path: The path to the merged feed file as a string. This may be None if no merged feed was written.
juraj-google-style
def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):
    """1D convolution.

    Args:
        x: Tensor or variable.
        kernel: kernel tensor.
        strides: stride integer.
        padding: string, `"same"`, `"causal"` or `"valid"`.
        data_format: string, one of `"channels_last"`, `"channels_first"`.
        dilation_rate: integer dilation rate.

    Returns:
        A tensor, result of 1D convolution.

    Raises:
        ValueError: if `data_format` is neither `"channels_last"` nor
            `"channels_first"`.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    kernel_shape = kernel.shape.as_list()
    if padding == 'causal':
        # Causal (dilated) convolution: left-pad so output step t only
        # depends on inputs at steps <= t, then run a 'valid' convolution.
        left_pad = dilation_rate * (kernel_shape[0] - 1)
        x = temporal_padding(x, (left_pad, 0))
        padding = 'valid'
    padding = _preprocess_padding(padding)
    x, tf_data_format = _preprocess_conv1d_input(x, data_format)
    x = nn.convolution(input=x, filter=kernel, dilation_rate=dilation_rate, strides=strides, padding=padding, data_format=tf_data_format)
    if data_format == 'channels_first' and tf_data_format == 'NWC':
        # The backend fell back to NWC; transpose back to channels_first.
        x = array_ops.transpose(x, (0, 2, 1))
    return x
1D convolution. Args: x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `"same"`, `"causal"` or `"valid"`. data_format: string, one of "channels_last", "channels_first". dilation_rate: integer dilate rate. Returns: A tensor, result of 1D convolution. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`.
github-repos
def _section_from_possible_title(possible_title):
    """Return the section matched by the possible title, if any.

    Args:
        possible_title: A string that may be the title of a new section.

    Returns:
        A Section type if one matches, or None if no section type
        matches.
    """
    return next(
        (section for section in SECTION_TITLES
         if _matches_section(possible_title, section)),
        None,
    )
Returns a section matched by the possible title, or None if none match. Args: possible_title: A string that may be the title of a new section. Returns: A Section type if one matches, or None if no section type matches.
github-repos
def randint(self, low: int, high: int) -> int:
    """Return a random integer within the linear range: low <= n <= high.

    Args:
        low (int): The lower bound of the random range (inclusive).
        high (int): The upper bound of the random range (inclusive).

    Returns:
        int: A random integer drawn from this generator's C random
        stream.
    """
    return int(lib.TCOD_random_get_i(self.random_c, low, high))
Return a random integer within the linear range: low <= n <= high. Args: low (int): The lower bound of the random range. high (int): The upper bound of the random range. Returns: int: A random integer.
juraj-google-style
def get_google_drive_folder_location():
    """Try to locate the Google Drive sync folder (macOS paths).

    Reads the local sync root path from Google Drive's sync_config.db,
    checking the Yosemite-era per-user location first.

    Returns:
        (str) Full path to the current Google Drive folder, or None when
        the config database or its entry cannot be found.
    """
    gdrive_db_path = 'Library/Application Support/Google/Drive/sync_config.db'
    yosemite_gdrive_db_path = 'Library/Application Support/Google/Drive/user_default/sync_config.db'
    yosemite_gdrive_db = os.path.join(os.environ['HOME'], yosemite_gdrive_db_path)
    # Prefer the newer (Yosemite) per-user database when it exists.
    if os.path.isfile(yosemite_gdrive_db):
        gdrive_db_path = yosemite_gdrive_db
    googledrive_home = None
    gdrive_db = os.path.join(os.environ['HOME'], gdrive_db_path)
    if os.path.isfile(gdrive_db):
        con = sqlite3.connect(gdrive_db)
        if con:
            cur = con.cursor()
            query = "SELECT data_value FROM data WHERE entry_key = 'local_sync_root_path';"
            cur.execute(query)
            data = cur.fetchone()
            # NOTE(review): fetchone() returns None when the row is
            # missing, which would make data[0] raise TypeError here.
            googledrive_home = str(data[0])
            con.close()
    if (not googledrive_home):
        # `error` is presumably the app's logging/exit helper -- confirm.
        error('Unable to find your Google Drive install =(')
    return googledrive_home
Try to locate the Google Drive folder. Returns: (str) Full path to the current Google Drive folder
codesearchnet
def set_cellpy_datadir(self, directory=None):
    """Set the directory containing .hdf5-files.

    The directory is only stored when a name is given and it exists on
    disk; otherwise the call is logged and ignored.

    Args:
        directory (str): path to hdf5-directory.

    Example:
        >>> d = CellpyData()
        >>> directory = "MyData/HDF5"
        >>> d.set_cellpy_datadir(directory)
    """
    if directory is None:
        self.logger.info("no directory name given")
    elif not os.path.isdir(directory):
        self.logger.info("directory does not exist")
    else:
        self.cellpy_datadir = directory
Set the directory containing .hdf5-files. Used for setting directory for looking for hdf5-files. A valid directory name is required. Args: directory (str): path to hdf5-directory Example: >>> d = CellpyData() >>> directory = "MyData/HDF5" >>> d.set_cellpy_datadir(directory)
juraj-google-style
def DeregisterPlugin(cls, plugin_class):
    """Deregisters a preprocess plugin class.

    Args:
        plugin_class (type): preprocess plugin class.

    Raises:
        KeyError: if no plugin class is set for the corresponding name.
    """
    name = getattr(plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)
    name = name.lower()

    if name not in cls._plugins:
        raise KeyError('Artifact plugin class not set for name: {0:s}.'.format(name))
    del cls._plugins[name]

    # Remove the plugin from every per-source-type registry it appears in.
    for registry in (cls._file_system_plugins,
                     cls._knowledge_base_plugins,
                     cls._windows_registry_plugins):
        registry.pop(name, None)
Deregisters an preprocess plugin class. Args: plugin_class (type): preprocess plugin class. Raises: KeyError: if plugin class is not set for the corresponding name. TypeError: if the source type of the plugin class is not supported.
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # One unary-unary callable per Datastore RPC method, each wired to
    # the generated protobuf (de)serializers.
    self.Lookup = channel.unary_unary(
        "/google.datastore.v1.Datastore/Lookup",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.LookupResponse.FromString,
    )
    self.RunQuery = channel.unary_unary(
        "/google.datastore.v1.Datastore/RunQuery",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RunQueryResponse.FromString,
    )
    self.BeginTransaction = channel.unary_unary(
        "/google.datastore.v1.Datastore/BeginTransaction",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.BeginTransactionResponse.FromString,
    )
    self.Commit = channel.unary_unary(
        "/google.datastore.v1.Datastore/Commit",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.CommitResponse.FromString,
    )
    self.Rollback = channel.unary_unary(
        "/google.datastore.v1.Datastore/Rollback",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.RollbackResponse.FromString,
    )
    self.AllocateIds = channel.unary_unary(
        "/google.datastore.v1.Datastore/AllocateIds",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.AllocateIdsResponse.FromString,
    )
    self.ReserveIds = channel.unary_unary(
        "/google.datastore.v1.Datastore/ReserveIds",
        request_serializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_datastore__v1_dot_proto_dot_datastore__pb2.ReserveIdsResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def UpdateFrom(self, src):
    """Merge another path info record into this one.

    Args:
        src: An rdfvalues.objects.PathInfo record that will be merged
            into self.

    Raises:
        TypeError: If ``src`` is not a PathInfo.
        ValueError: If ``src`` does not represent the same path (type).
    """
    if not isinstance(src, PathInfo):
        raise TypeError('expected `%s` but got `%s`' % (PathInfo, type(src)))
    if self.path_type != src.path_type:
        raise ValueError('src [%s] does not represent the same path type as self [%s]' % (src.path_type, self.path_type))
    if self.components != src.components:
        raise ValueError('src [%s] does not represent the same path as self [%s]' % (src.components, self.components))

    # Take src's stat entry when present, keeping the newest timestamp.
    if src.HasField('stat_entry'):
        self.stat_entry = src.stat_entry
    self.last_stat_entry_timestamp = max(self.last_stat_entry_timestamp, src.last_stat_entry_timestamp)
    # Once either record marks the path as a directory, it stays one.
    self.directory = self.directory or src.directory
Merge path info records. Merges src into self. Args: src: An rdfvalues.objects.PathInfo record, will be merged into self. Raises: ValueError: If src does not represent the same path.
codesearchnet
def __init__(self, callback):
    """Initializes the RPC server object.

    Args:
        callback (function): callback to invoke on get status RPC
            request.
    """
    super(RPCServer, self).__init__()
    self._callback = callback
Initializes the RPC server object. Args: callback (function): callback to invoke on get status RPC request.
juraj-google-style
def _process_book(link):
    """Download and parse available information about a book.

    Args:
        link (str): URL of the book at the publisher's webpages.

    Returns:
        obj: :class:`.Publication` instance with book details.
    """
    data = DOWNER.download(link)
    dom = dhtmlparser.parseString(
        utils.handle_encodnig(data)
    )
    dhtmlparser.makeDoubleLinked(dom)

    # Price: prefer the zapi helper; fall back to scraping the `vaseCena`
    # paragraph. "-1" signals an unknown price.
    price = None
    try:
        price = _strip_content(zapi.get_price(dom))
    except UserWarning:
        price = dom.find("p", {"class": "vaseCena"})

        if price:
            price = price[0].getContent().replace("&nbsp;", " ")
            # NOTE(review): Python-2 idiom -- relies on `filter` returning
            # a list; on Python 3 this returns an iterator and `price[0]`
            # below would fail.
            price = filter(lambda x: x.isdigit(), price.strip())
            if price:
                price = price[0] + "kč"
            else:
                price = "-1"
        else:
            price = "-1"

    pub = Publication(
        title=_strip_content(zapi.get_title(dom)),
        authors=_parse_authors(zapi.get_author(dom)),
        price=price,
        publisher=_strip_content(zapi.get_publisher(dom))
    )

    pub.optionals.URL = link
    pub.optionals.pages = _strip_content(zapi.get_pages(dom))
    pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))
    pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))
    pub.optionals.binding = _strip_content(zapi.get_binding(dom))

    # E-books are marked by a title prefix; strip it and set the flag.
    if pub.title.startswith("E-kniha:"):
        pub.title = pub.title.replace("E-kniha:", "", 1).strip()
        pub.optionals.is_ebook = True

    # Keep only the bare ISBN (drop alternates and parenthesized notes).
    if pub.optionals.ISBN:
        if " " in pub.optionals.ISBN:
            pub.optionals.ISBN = pub.optionals.ISBN.split(" ")[0]

        if "(" in pub.optionals.ISBN:
            pub.optionals.ISBN = pub.optionals.ISBN.split("(")[0]

    return pub
Download and parse available informations about book from the publishers webpages. Args: link (str): URL of the book at the publishers webpages. Returns: obj: :class:`.Publication` instance with book details.
juraj-google-style
def __init__(self, file_path_regex=None, log_format_regex=None, top_dir=None):
    """Init method.

    Any argument left as None keeps the class-level default.

    Args:
        file_path_regex (regex): the regex to find the log files.
        log_format_regex (regex): the regex to parse the log files.
        top_dir (str): the path to the root directory containing the
            logs.
    """
    overrides = {
        'file_path_regex': file_path_regex,
        'log_format_regex': log_format_regex,
        'top_dir': top_dir,
    }
    for attribute, value in overrides.items():
        if value is not None:
            setattr(self, attribute, value)
    self._content = None
Init method. Args: file_path_regex (regex): the regex to find the log files. log_format_regex (regex): the regex to parse the log files. top_dir (str): the path to the root directory containing the logs.
juraj-google-style
def fit(self, X):
    """Fit the model to a univariate sample.

    Args:
        X: `np.ndarray` of shape (n, 1); ``pd.Series``/``pd.DataFrame``
            inputs additionally set ``self.name`` from their ``name``
            attribute.

    Returns:
        None
    """
    if isinstance(X, (pd.Series, pd.DataFrame)):
        self.name = X.name

    self.constant_value = self._get_constant_value(X)
    if self.constant_value is not None:
        # Degenerate (constant) sample: swap in the constant-model methods.
        self._replace_constant_methods()
    else:
        self.mean = np.mean(X)
        self.std = np.std(X)

    self.fitted = True
Fit the model. Arguments: X: `np.ndarray` of shape (n, 1). Returns: None
juraj-google-style
def RegisterMessage(self, message):
    """Registers the given message type in the local database.

    Args:
        message: a message.Message, to be registered.

    Returns:
        The provided message.
    """
    desc = message.DESCRIPTOR
    # Index by fully-qualified name, globally and per proto file.
    self._symbols[desc.full_name] = message
    file_symbols = self._symbols_by_file.setdefault(desc.file.name, {})
    file_symbols[desc.full_name] = message
    self.pool.AddDescriptor(desc)
    return message
Registers the given message type in the local database. Args: message: a message.Message, to be registered. Returns: The provided message.
codesearchnet
def run_program(self, name, arguments=[], timeout=30, exclusive=False):
    """Runs a program in the working directory to completion.

    Args:
        name (str): The name of the program to be executed.
        arguments (tuple): Command-line arguments for the program.
        timeout (int): The timeout for execution.
        exclusive (bool): Prevent parallel validation runs on the test
            machines, e.g. when doing performance measurements for
            submitted code.

    Returns:
        tuple: exit code, as reported by the operating system, and the
        output produced during the execution.
    """
    logger.debug('Running program ...')
    if exclusive:
        kill_longrunning(self.config)
    # NOTE(review): mutable default `arguments=[]` is shared between
    # calls; safe only while callees never mutate it.
    prog = RunningProgram(self, name, arguments, timeout)
    return prog.expect_end()
Runs a program in the working directory to completion. Args: name (str): The name of the program to be executed. arguments (tuple): Command-line arguments for the program. timeout (int): The timeout for execution. exclusive (bool): Prevent parallel validation runs on the test machines, e.g. when doing performance measurements for submitted code. Returns: tuple: A tuple of the exit code, as reported by the operating system, and the output produced during the execution.
codesearchnet
def util_granulate_time_series(time_series, scale):
    """Extract a coarse-grained time series.

    Consecutive non-overlapping windows of length ``scale`` are averaged;
    a trailing partial window is dropped.

    Args:
        time_series: 1-d sequence of samples.
        scale: Scale factor (window length).

    Returns:
        numpy.ndarray: coarse-grained series of length
        ``len(time_series) // scale``.
    """
    num_windows = int(np.fix(len(time_series) / scale))
    windows = np.reshape(time_series[:num_windows * scale], (num_windows, scale))
    return windows.mean(axis=1)
Extract coarse-grained time series Args: time_series: Time series scale: Scale factor Returns: Vector of coarse-grained time series with given scale factor
juraj-google-style
def __init__(self, optimizer, num_steps=10, unroll_loop=False, scope='multi-step', summary_labels=()):
    """Creates a new multi-step meta optimizer instance.

    Args:
        optimizer: The optimizer which is modified by this meta
            optimizer.
        num_steps (int): Number of optimization steps to perform
            (must be positive).
        unroll_loop (bool): Whether to unroll the optimization loop.
        scope (str): Scope name.
        summary_labels: Labels of summaries to record.
    """
    # NOTE(review): `assert` is stripped under python -O; consider raising
    # ValueError for argument validation instead.
    assert isinstance(num_steps, int) and num_steps > 0
    self.num_steps = num_steps

    assert isinstance(unroll_loop, bool)
    self.unroll_loop = unroll_loop

    super(MultiStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
Creates a new multi-step meta optimizer instance. Args: optimizer: The optimizer which is modified by this meta optimizer. num_steps: Number of optimization steps to perform. unroll_loop: Whether to unroll the optimization loop. scope: Scope name. summary_labels: Labels of summaries to record.
juraj-google-style
def _apply_filters_to_first_location_occurrence(match_traversal, location_to_filters, already_filtered_locations):
    """Apply all filters for a specific location into its first occurrence in a given traversal.

    For each location in the given match traversal, construct a conjunction of all filters
    applied to that location, and apply the resulting Filter to the first instance
    of the location.

    Args:
        match_traversal: list of MatchStep objects to be lowered
        location_to_filters: dict mapping each location in the MatchQuery which contains
                             the given match traversal to a list of filters applied at
                             that location
        already_filtered_locations: set of locations that have already had their filters
                                    applied

    Returns:
        new list of MatchStep objects with all filters for any given location composed into
        a single filter which is applied to the first instance of that location
    """
    rewritten_steps = []
    newly_filtered_locations = set()
    for step in match_traversal:
        location = step.as_block.location
        # A location may only appear once per traversal; seeing it again is a bug.
        if location in newly_filtered_locations:
            raise AssertionError(u'The same location {} was encountered twice in a single match traversal: {}. This should never happen.'.format(location, match_traversal))

        needs_filter = (location in location_to_filters
                        and location not in already_filtered_locations)
        if needs_filter:
            conjunction = _filter_list_to_conjunction_expression(location_to_filters[location])
            where_block = Filter(conjunction)
            newly_filtered_locations.add(location)
        else:
            where_block = None

        rewritten_steps.append(MatchStep(root_block=step.root_block,
                                         coerce_type_block=step.coerce_type_block,
                                         where_block=where_block,
                                         as_block=step.as_block))
    return (rewritten_steps, newly_filtered_locations)
Apply all filters for a specific location into its first occurrence in a given traversal. For each location in the given match traversal, construct a conjunction of all filters applied to that location, and apply the resulting Filter to the first instance of the location. Args: match_traversal: list of MatchStep objects to be lowered location_to_filters: dict mapping each location in the MatchQuery which contains the given match traversal to a list of filters applied at that location already_filtered_locations: set of locations that have already had their filters applied Returns: new list of MatchStep objects with all filters for any given location composed into a single filter which is applied to the first instance of that location
codesearchnet
def movies_upcoming(self, **kwargs):
    """Gets the upcoming movies from the API.

    Args:
        page_limit (optional): number of movies to show per page, default=16
        page (optional): results page number, default=1
        country (optional): localized data for selected country, default="us"

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('movies_upcoming'), kwargs)
    # Mirror the response fields onto this instance before returning.
    self._set_attrs_to_values(response)
    return response
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def format_usage(doc, width=None):
    """Format the docstring for display to the user.

    Args:
        doc: The docstring to reformat for display.
        width: Column width to wrap to; falls back to the terminal width,
            then to 80 columns.

    Returns:
        The docstring formatted to parse and display to the user.
    """
    if not width:
        width = get_terminal_size().columns or 80
    # Sections are separated by blank lines; wrap each one independently.
    sections = doc.replace('\r', '').split('\n\n')
    return '\n\n'.join(_wrap_section(section.strip(), width) for section in sections)
Format the docstring for display to the user. Args: doc: The docstring to reformat for display. width: The column width to wrap lines to; when falsy, the terminal width (or 80 columns as a fallback) is used. Returns: The docstring formatted to parse and display to the user. This includes dedenting, rewrapping, and translating the docstring if necessary.
codesearchnet
def stage_out(self, file, executor):
    """Transport the file from the local filesystem to the remote Globus endpoint.

    Only the 'globus' scheme is supported; any other scheme raises.

    Args:
        - self
        - file (File) - file to stage out
        - executor (str) - Which executor the file is going to be staged out from.

    Returns:
        A DataFuture for the Globus transfer.
    """
    scheme = file.scheme
    if scheme in ('http', 'https'):
        raise Exception('HTTP/HTTPS file staging out is not supported')
    if scheme == 'ftp':
        raise Exception('FTP file staging out is not supported')
    if scheme == 'globus':
        endpoint = self._get_globus_endpoint(executor)
        transfer_app = self._globus_stage_out_app()
        return transfer_app(endpoint, inputs=[file])
    raise Exception('Staging out with unknown file scheme {} is not supported'.format(scheme))
Transport the file from the local filesystem to the remote Globus endpoint. This function returns a DataFuture. Args: - self - file (File) - file to stage out - executor (str) - Which executor the file is going to be staged out from. If the executor argument is not specified for a file with the 'globus' scheme, the file will be staged out from the first executor with the "globus" key in a config.
codesearchnet
def normalize_url(base_url, rel_url):
    """Normalize the `rel_url` - from relative, create absolute URL.

    Args:
        base_url (str): Domain with ``protocol://`` string
        rel_url (str): Relative or absolute url.

    Returns:
        str/None: Normalized URL or None if `rel_url` is blank.
    """
    if not rel_url:
        return None
    if is_absolute_url(rel_url):
        return rel_url
    # Collapse parent-directory markers. The second pass matters: replacing
    # "../" can itself produce a new "../" (e.g. "..../x" -> "../x" -> "/x").
    collapsed = rel_url.replace("../", "/")
    suffix = collapsed.replace("../", "/")
    if not base_url.endswith("/") and not collapsed.startswith("/"):
        return base_url + "/" + suffix
    return base_url + suffix
Normalize the `rel_url` - from relative, create absolute URL. Args: base_url (str): Domain with ``protocol://`` string rel_url (str): Relative or absolute url. Returns: str/None: Normalized URL or None if `rel_url` is blank.
juraj-google-style
def mobility(sdat, tstart=None, tend=None):
    """Plates mobility.

    Compute the ratio vsurf / vrms.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): time at which the computation should start.
            Use the beginning of the time series data if set to None.
        tend (float): time at which the computation should end.
            Use the end of the time series data if set to None.

    Returns:
        tuple of :class:`numpy.array`: mobility and time arrays.
    """
    tseries = sdat.tseries_between(tstart, tend)
    first_step, last_step = tseries.index[0], tseries.index[-1]
    times = []
    ratios = []
    # Only steps carrying radial-profile data have a surface vrms value.
    for step in sdat.steps[first_step:last_step].filter(rprof=True):
        times.append(step.timeinfo['t'])
        # Last rprof row is the surface; normalize by the bulk rms velocity.
        ratios.append(step.rprof.iloc[-1].loc['vrms'] / step.timeinfo['vrms'])
    return np.array(ratios), np.array(times)
Plates mobility. Compute the ratio vsurf / vrms. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. tstart (float): time at which the computation should start. Use the beginning of the time series data if set to None. tend (float): time at which the computation should end. Use the end of the time series data if set to None. Returns: tuple of :class:`numpy.array`: mobility and time arrays.
juraj-google-style
def request(self, request):
    """Perform an HTTP request through the context.

    Args:
        request: A v20.request.Request object

    Returns:
        A v20.response.Response object

    Raises:
        V20ConnectionError: the underlying connection could not be made.
        V20Timeout: the connect or read phase timed out.
    """
    url = '{}{}'.format(self._base_url, request.path)
    # Streaming requests stay open, so they use a dedicated (typically
    # longer) timeout than ordinary polling requests.
    timeout = self.poll_timeout
    if (request.stream is True):
        timeout = self.stream_timeout
    try:
        http_response = self._session.request(request.method, url, headers=self._headers, params=request.params, data=request.body, stream=request.stream, timeout=timeout)
    # Map requests-level transport errors onto the v20 exception types.
    except requests.exceptions.ConnectionError:
        raise V20ConnectionError(url)
    except requests.exceptions.ConnectTimeout:
        raise V20Timeout(url, 'connect')
    except requests.exceptions.ReadTimeout:
        raise V20Timeout(url, 'read')
    # Record the headers actually sent (requests may add/modify some).
    request.headers = http_response.request.headers
    response = Response(request, request.method, http_response.url, http_response.status_code, http_response.reason, http_response.headers)
    if request.stream:
        # Streaming: expose the body lazily, line by line.
        response.set_line_parser(request.line_parser)
        response.set_lines(http_response.iter_lines(self.stream_chunk_size))
    else:
        response.set_raw_body(http_response.text)
    return response
Perform an HTTP request through the context Args: request: A v20.request.Request object Returns: A v20.response.Response object
codesearchnet
def _replace_args_with_defaults(self, _args=None, **kwargs): if _args is None: _args = six.iterkeys(kwargs) my_defaults = self.defaults for k in _args: if k not in kwargs: if k in my_defaults: kwargs[k] = my_defaults[k] elif k in _defaults: kwargs[k] = _defaults[k] return kwargs
Internal method to fill absent values in the kwargs with the defaults. Args: _args: A list of arguments to replace if a subset is required. Name chosen to prevent conflicts with kwargs. **kwargs: The arguments to replace with defaults. Returns: A map with the same fields as kwargs, but absent values are filled with defaults.
juraj-google-style
def parse_environment_file(filename, world_size=(60, 60)):
    """Extract information about spatial resources from an environment file.

    Arguments:
        filename - a string representing the path to the environment file.
        world_size - a tuple representing the x and y dimensions of the
            world. (default: 60x60)

    Returns:
        An EnvironmentFile built from the parsed resources (a niche grid of
        per-cell resource sets), the resource order, the world size, the
        file name, and the list of tasks found in REACTION lines.
    """
    infile = open(filename)
    lines = infile.readlines()
    infile.close()
    tasks = []       # tasks, in order of first appearance in REACTION lines
    res_order = []   # resource names, in order of first appearance
    res_dict = {}    # resource name -> cells in which it is available
    for line in lines:
        if line.startswith("GRADIENT_RESOURCE"):
            name, cells = parse_gradient(line, world_size)
        elif line.startswith("CELL"):
            name, cells = parse_cell(line, world_size)
        elif line.startswith("REACTION"):
            task = parse_reaction(line)
            if task not in tasks:
                tasks.append(task)
            # NOTE(review): this branch falls through to dict_increment below,
            # re-counting the name/cells of the most recent resource line (or
            # raising NameError if a REACTION precedes all resource lines).
            # Looks like a missing `continue` — confirm intent before changing.
        else:
            continue
        dict_increment(res_dict, name, cells)
        if name not in res_order:
            res_order.append(name)
    grid = make_niche_grid(res_dict, world_size)
    return EnvironmentFile(grid, res_order, world_size, filename, tasks)
Extract information about spatial resources from an environment file. Arguments: filename - a string representing the path to the environment file. world_size - a tuple representing the x and y dimensions of the world. (default: 60x60) Returns an EnvironmentFile object wrapping a list of lists of sets indicating the set of resources available at each x,y location in the Avida grid, together with the resource order, tasks, world size, and file name.
juraj-google-style
def add(self, layers, above=None, below=None):
    """Add one or more layers to the stack of masking layers.

    Args:
        layers: A string, NiBabel image, list, or dict. If anything other
            than a dict is passed, assigns sequential layer names based on
            the current position in stack; if a dict, uses key as the name
            and value as the mask image.
        above: if not None, voxels with values strictly below this
            threshold are zeroed out in the added layer(s).
        below: if not None, voxels with values strictly above this
            threshold are zeroed out in the added layer(s).
    """
    def add_named_layer(name, image):
        # Normalize the input to vector form before thresholding.
        image = self.get_image(image, output='vector')
        if above is not None:
            image[image < above] = 0.
        if below is not None:
            image[image > below] = 0.
        self.layers[name] = image
        self.stack.append(name)

    if isinstance(layers, dict):
        for (name, image) in layers.items():
            add_named_layer(name, image)
    else:
        if not isinstance(layers, list):
            layers = [layers]
        for image in layers:
            # Sequential name based on current stack depth.
            name = 'layer_%d' % len(self.stack)
            add_named_layer(name, image)

    # Recompute the combined mask now that the stack has changed.
    self.set_mask()
Add one or more layers to the stack of masking layers. Args: layers: A string, NiBabel image, list, or dict. If anything other than a dict is passed, assigns sequential layer names based on the current position in stack; if a dict, uses key as the name and value as the mask image.
juraj-google-style
def options(self):
    """Returns the options for this dataset and its inputs.

    Returns:
      A `tf.data.Options` object representing the dataset options.
    """
    if context.executing_eagerly():
        # Eager mode: deserialize the options stored in the graph and mark
        # the result immutable so callers cannot mutate applied options.
        options = self._options_tensor_to_options(self._options())
        options._set_mutable(False)
        return options
    # Graph mode: the true option values may not be known at construction
    # time, so warn and fall back to the legacy attribute.
    warnings.warn('To make it possible to preserve tf.data options across serialization boundaries, their implementation has moved to be part of the TensorFlow graph. As a consequence, the options value is in general no longer known at graph construction time. Invoking this method in graph mode retains the legacy behavior of the original implementation, but note that the returned value might not reflect the actual value of the options.')
    return self._options_attr
Returns the options for this dataset and its inputs. Returns: A `tf.data.Options` object representing the dataset options.
github-repos
def create_html_from_fragment(tag):
    """Creates full html tree from a fragment.

    Assumes that the tag should be wrapped in a body and is currently not.

    Args:
        tag: a bs4.element.Tag

    Returns:
        bs4.element.Tag: A bs4 tag representing a full html document

    Raises:
        TypeError: if `tag` is not a bs4.element.Tag.
        ValueError: if `tag` already contains a <body> element.
    """
    # Fix: the original validated with `assert` and re-raised from
    # AssertionError; asserts are stripped under `python -O`, silently
    # disabling validation. Raise the same exception types directly.
    if not isinstance(tag, bs4.element.Tag):
        raise TypeError
    if tag.find_all('body'):
        raise ValueError
    soup = BeautifulSoup('<html><head></head><body></body></html>', 'html.parser')
    soup.body.append(tag)
    return soup
Creates full html tree from a fragment. Assumes that the tag should be wrapped in a body and is currently not wrapped in one. Args: tag: a bs4.element.Tag Returns: bs4.element.Tag: A bs4 tag representing a full html document
juraj-google-style
def claim(self, file_readers):
    """Recognizes and claims VarScan VCFs from the set of all input VCFs.

    Each defined caller has a chance to evaluate and claim all the incoming
    files as something that it can process. Since VarScan can claim
    high-confidence files as well, this process is significantly more
    complex than for other callers.

    Args:
        file_readers: the collection of currently unclaimed files

    Returns:
        A tuple of unclaimed readers and VarScanVcfReaders.
    """
    reader_by_prefix, filter_files, unclaimed = self._find_varscan_files(file_readers)
    # Ensure each patient's VCFs form a consistent set before pairing.
    per_patient_prefixes = self._split_prefix_by_patient(reader_by_prefix)
    self._validate_vcf_readers(per_patient_prefixes)
    # Pair each VCF with its high-confidence counterpart, then validate.
    vcf_hc_pairs = self._pair_files(reader_by_prefix, filter_files)
    self._validate_vcf_hc_pairs(vcf_hc_pairs)
    claimed_readers = self._create_vcf_readers(vcf_hc_pairs)
    return (list(unclaimed), claimed_readers)
Recognizes and claims VarScan VCFs from the set of all input VCFs. Each defined caller has a chance to evaluate and claim all the incoming files as something that it can process. Since VarScan can claim high-confidence files as well, this process is significantly more complex than for other callers. Args: file_readers: the collection of currently unclaimed files Returns: A tuple of unclaimed readers and VarScanVcfReaders.
codesearchnet