code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def from_fn(cls, dna_spec: DNASpec, generator_fn: Callable[['DecisionPoint'], Union[List[int], float, str, 'DNA']]) -> 'DNA':
    """Generate a DNA by consulting `generator_fn` at each decision point.

    Args:
        dna_spec: The DNASpec that describes the search space.
        generator_fn: Callable mapping a decision point to a decision:
            a list of ints for a categorical decision point, a float/str
            for other decision points, or a full `DNA` which is then used
            verbatim for the whole sub-tree (no further calls below it).

    Returns:
        A DNA validated against `dna_spec`.

    Raises:
        TypeError: If `dna_spec` is not a DNASpec.
        ValueError: If a returned decision has the wrong length, a
            non-int choice, or a choice out of candidate range.
    """
    if not isinstance(dna_spec, DNASpec):
        raise TypeError(f"Argument 'dna_spec' should be DNASpec type. Encountered {dna_spec}.")
    if dna_spec.is_space:
        # Space node: recursively generate a child DNA per element.
        children = []
        for child_spec in dna_spec.elements:
            children.append(DNA.from_fn(child_spec, generator_fn))
        if len(children) == 1:
            # Single-element spaces collapse to their only child.
            return children[0]
        dna = DNA(None, children)
    elif dna_spec.is_categorical:
        assert isinstance(dna_spec, DecisionPoint), dna_spec
        decision = generator_fn(dna_spec)
        if isinstance(decision, DNA):
            # Sub-tree supplied wholesale by the generator.
            dna = decision
        else:
            if len(decision) != dna_spec.num_choices:
                raise ValueError(f'Number of DNA child values does not match the number of choices. Child values: {decision!r}, Choices: {dna_spec.num_choices}, Location: {dna_spec.location.path}.')
            children = []
            for i, choice in enumerate(decision):
                choice_location = utils.KeyPath(i, dna_spec.location)
                if not isinstance(choice, int):
                    raise ValueError(f'Choice value should be int. Encountered: {choice}, Location: {choice_location.path}.')
                if choice >= len(dna_spec.candidates):
                    raise ValueError(f'Choice out of range. Value: {choice}, Candidates: {len(dna_spec.candidates)}, Location: {choice_location.path}.')
                # Recurse into the chosen candidate's sub-space.
                child_dna = DNA.from_fn(dna_spec.candidates[choice], generator_fn)
                children.append(DNA(choice, [child_dna]))
            dna = DNA(None, children)
    else:
        # Float / custom decision points: a scalar decision or a full DNA.
        assert isinstance(dna_spec, DecisionPoint), dna_spec
        decision = generator_fn(dna_spec)
        if isinstance(decision, DNA):
            dna = decision
        else:
            dna = DNA(decision)
    dna_spec.validate(dna)
    return dna
Generate a DNA with user generator function. Args: dna_spec: The DNASpec for the DNA. generator_fn: A callable object with signature: `(decision_point) -> decision` The decision_point is a `Choices` object or a `Float` object. The returned decision should be: * a list of integer or a DNA object for a `Choices` decision point. When a DNA is returned, it will be used as the DNA for the entire sub-tree, hence `generate_fn` will not be called on sub-decision points. * a float or a DNA object for a Float decision point. * a string or a DNA object for a CustomDecisionPoint. Returns: A DNA generated from the user function.
github-repos
def generate_hyperband_schedule(self, R, eta):
    """Build the Hyperband bracket schedule from the paper.

    Args:
        R: Maximum resources that may be allocated to one configuration.
        eta: Downsampling rate between successive-halving rounds.

    Returns:
        List of brackets (most aggressive bracket first); each bracket is
        a list of (num_configurations, resources_per_configuration) pairs.
    """
    s_max = int(math.floor(math.log(R, eta)))
    schedule = []
    for s in range(s_max + 1):
        # Initial number of configurations and resources for bracket s.
        n = math.ceil(int((s_max + 1) / (s + 1)) * eta ** s)
        r = R * eta ** (-s)
        # Successive-halving rounds within this bracket.
        bracket = [
            (int(math.floor(n * eta ** (-i))), int(r * eta ** i))
            for i in range(s + 1)
        ]
        schedule.insert(0, bracket)
    return schedule
Generate hyperband schedule according to the paper. Args: R: maximum resources per config. eta: proportion of configurations to discard per iteration of successive halving. Returns: hyperband schedule, which is represented as a list of brackets, where each bracket contains a list of (num configurations, num resources to use per configuration). See the paper for more details.
codesearchnet
def generate_payload(self, command, data=None):
    """Generate the payload to send to the device.

    Args:
        command (str): The type of command; one of the entries from
            ``payload_dict``.
        data (dict, optional): Data to send, placed under the 'dps' entry.

    Returns:
        bytes: The fully framed payload buffer.
    """
    json_data = payload_dict[self.dev_type][command]['command']
    # Fill in whichever device-identifying fields the template declares.
    if ('gwId' in json_data):
        json_data['gwId'] = self.id
    if ('devId' in json_data):
        json_data['devId'] = self.id
    if ('uid' in json_data):
        json_data['uid'] = self.id
    if ('t' in json_data):
        # Timestamp in whole seconds, serialized as a string.
        json_data['t'] = str(int(time.time()))
    if (data is not None):
        json_data['dps'] = data
    json_payload = json.dumps(json_data)
    # The device expects compact JSON without spaces.
    json_payload = json_payload.replace(' ', '')
    json_payload = json_payload.encode('utf-8')
    log.debug('json_payload=%r', json_payload)
    if (command == SET):
        # SET commands are AES-encrypted and prefixed with the protocol
        # version plus a truncated MD5 checksum of the payload.
        self.cipher = AESCipher(self.local_key)
        json_payload = self.cipher.encrypt(json_payload)
        preMd5String = (((((b'data=' + json_payload) + b'||lpv=') + PROTOCOL_VERSION_BYTES) + b'||') + self.local_key)
        m = md5()
        m.update(preMd5String)
        hexdigest = m.hexdigest()
        # Only hex digits 8..24 of the digest are used by the protocol.
        json_payload = ((PROTOCOL_VERSION_BYTES + hexdigest[8:][:16].encode('latin1')) + json_payload)
        self.cipher = None
    postfix_payload = hex2bin((bin2hex(json_payload) + payload_dict[self.dev_type]['suffix']))
    # The length is encoded as a single hex byte, so it must fit in 255.
    assert (len(postfix_payload) <= 255)
    postfix_payload_hex_len = ('%x' % len(postfix_payload))
    buffer = (hex2bin((((payload_dict[self.dev_type]['prefix'] + payload_dict[self.dev_type][command]['hexByte']) + '000000') + postfix_payload_hex_len)) + postfix_payload)
    return buffer
Generate the payload to send. Args: command(str): The type of command. This is one of the entries from payload_dict data(dict, optional): The data to be sent. This is what will be passed via the 'dps' entry
codesearchnet
def __call__(self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]]=None, boxes: Optional[Union[List[List[int]], List[List[List[int]]]]]=None, word_labels: Optional[Union[List[int], List[List[int]]]]=None, add_special_tokens: bool=True, padding: Union[bool, str, PaddingStrategy]=False, truncation: Union[bool, str, TruncationStrategy]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, padding_side: Optional[str]=None, return_tensors: Optional[Union[str, TensorType]]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, **kwargs) -> BatchEncoding:
    """Tokenize and prepare for the model one or several sequence(s) or
    pair(s) of sequences, with word-level bounding boxes and optional
    word labels.

    Args:
        text: A string, a list of words, or a batch of word lists. When
            `text_pair` is given, `text` holds the question(s) and
            `text_pair` the pretokenized words.
        text_pair: Optional pretokenized second sequence(s).
        boxes: Word-level bounding boxes matching the word sequence
            (required).
        word_labels: Optional word-level integer labels.
        (all remaining arguments are forwarded unchanged to
        `batch_encode_plus` / `encode_plus`)

    Returns:
        BatchEncoding: The encoded inputs.

    Raises:
        ValueError: On malformed inputs or missing/mismatched boxes.
    """
    def _is_valid_text_input(t):
        # Accepts: str, empty list, list[str], or list[list[str]]
        # (possibly with an empty inner list).
        if isinstance(t, str):
            return True
        elif isinstance(t, (list, tuple)):
            if len(t) == 0:
                return True
            elif isinstance(t[0], str):
                return True
            elif isinstance(t[0], (list, tuple)):
                return len(t[0]) == 0 or isinstance(t[0][0], str)
            else:
                return False
        else:
            return False
    if text_pair is not None:
        if not _is_valid_text_input(text):
            raise ValueError('text input must of type `str` (single example) or `List[str]` (batch of examples). ')
        if not isinstance(text_pair, (list, tuple)):
            raise ValueError('words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
    elif not isinstance(text, (list, tuple)):
        raise ValueError('Words must of type `List[str]` (single pretokenized example), or `List[List[str]]` (batch of pretokenized examples).')
    if text_pair is not None:
        is_batched = isinstance(text, (list, tuple))
    else:
        is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
    # The bounding boxes always belong to the pretokenized word sequence.
    words = text if text_pair is None else text_pair
    if boxes is None:
        raise ValueError('You must provide corresponding bounding boxes')
    if is_batched:
        if len(words) != len(boxes):
            raise ValueError('You must provide words and boxes for an equal amount of examples')
        for words_example, boxes_example in zip(words, boxes):
            if len(words_example) != len(boxes_example):
                raise ValueError('You must provide as many words as there are bounding boxes')
    elif len(words) != len(boxes):
        raise ValueError('You must provide as many words as there are bounding boxes')
    if is_batched:
        if text_pair is not None and len(text) != len(text_pair):
            raise ValueError(f'batch length of `text`: {len(text)} does not match batch length of `text_pair`: {len(text_pair)}.')
        batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
        is_pair = bool(text_pair is not None)
        return self.batch_encode_plus(batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
    else:
        return self.encode_plus(text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, padding_side=padding_side, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs)
Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD).
github-repos
def _get_bounds(self, layers):
    """Return the bounds covering all data layers of a cartoframes map.

    Args:
        layers (list): cartoframes layers; basemap layers are skipped.

    Returns:
        dict: `north`, `south`, `east`, and `west` bounds of the union of
        the layers' geometries. Units are in WGS84.
    """
    extent_query = ('SELECT ST_EXTENT(the_geom) AS the_geom '
                    'FROM ({query}) AS t{idx}\n')
    # Union the per-layer extents of every non-basemap layer.
    union_query = 'UNION ALL\n'.join(
        [extent_query.format(query=layer.orig_query, idx=idx)
         for idx, layer in enumerate(layers)
         if not layer.is_basemap])
    # Outer query reduces the unioned extents to a single bounding box.
    extent = self.sql_client.send(
        utils.minify_sql((
            'SELECT',
            ' ST_XMIN(ext) AS west,',
            ' ST_YMIN(ext) AS south,',
            ' ST_XMAX(ext) AS east,',
            ' ST_YMAX(ext) AS north',
            'FROM (',
            ' SELECT ST_Extent(the_geom) AS ext',
            ' FROM ({union_query}) AS _wrap1',
            ') AS _wrap2',
        )).format(union_query=union_query),
        do_post=False)
    return extent['rows'][0]
Return the bounds of all data layers involved in a cartoframes map. Args: layers (list): List of cartoframes layers. See `cartoframes.layers` for all types. Returns: dict: Dictionary of northern, southern, eastern, and western bounds of the superset of data layers. Keys are `north`, `south`, `east`, and `west`. Units are in WGS84.
juraj-google-style
def _check_registry_type(folder=None):
    """Apply a user-provided registry_type.txt override, if present.

    When a registry_type.txt file exists in the registry folder, its
    (stripped) contents select the backing store for ComponentRegistry.
    A missing or unreadable file is silently ignored.

    Args:
        folder (string): The folder to check for a default registry type.
    """
    folder = _registry_folder(folder)
    type_file = os.path.join(folder, 'registry_type.txt')
    try:
        with open(type_file, 'r') as infile:
            backing = infile.read().strip()
        ComponentRegistry.SetBackingStore(backing)
    except IOError:
        # No override file; keep the default backing store.
        pass
Check if the user has placed a registry_type.txt file to choose the registry type If a default registry type file is found, the DefaultBackingType and DefaultBackingFile class parameters in ComponentRegistry are updated accordingly. Args: folder (string): The folder that we should check for a default registry type
codesearchnet
def output_hist(self, output_hist: 'Hist', input_observable: Any, **kwargs: Any) -> Union['Hist', Any]:
    """Return the output object to store for ``output_hist``.

    This default implementation is the identity: it returns the given
    histogram unchanged. Subclasses are expected to override it when the
    stored object should be something other than the raw histogram.

    Args:
        output_hist: The output histogram.
        input_observable: The corresponding input object; may be a
            histogram or something more complex.
        **kwargs: Projection information combined with any additional
            arguments passed to the projection function.

    Returns:
        The object to store in the output dict; here, ``output_hist``.
    """
    return output_hist
Return an output object. It should store the ``output_hist``. Note: The output object could just be the raw histogram. Note: This function is just a basic placeholder which returns the given output object (a histogram) and likely should be overridden. Args: output_hist: The output histogram input_observable (object): The corresponding input object. It could be a histogram or something more complex. kwargs: Projection information dict combined with additional arguments passed to the projection function Return: The output object which should be stored in the output dict. By default, it returns the output hist.
codesearchnet
def HasDataStream(self, name, case_sensitive=True):
    """Determine if the file entry has a data stream with this name.

    Args:
        name (str): name of the data stream.
        case_sensitive (Optional[bool]): True if the name comparison is
            case sensitive.

    Returns:
        bool: True if the file entry has the data stream.

    Raises:
        ValueError: if the name is not a string.
    """
    if not isinstance(name, py2to3.STRING_TYPES):
        raise ValueError('Name is not a string.')
    lowered_name = name.lower()
    for stream in self._GetDataStreams():
        # Exact match always counts; fall back to case-insensitive
        # comparison when requested.
        if stream.name == name:
            return True
        if not case_sensitive and stream.name.lower() == lowered_name:
            return True
    return False
Determines if the file entry has specific data stream. Args: name (str): name of the data stream. case_sensitive (Optional[bool]): True if the name is case sensitive. Returns: bool: True if the file entry has the data stream. Raises: ValueError: if the name is not string.
juraj-google-style
def generate_custom_cert_name(env='', region='', account='', certificate=None):
    """Generate a custom TLS Cert name based on a template.

    Renders the ``tlscert_naming.json.j2`` template and looks up the
    certificate name, trying the v1 layout (``{env: {cert: name}}``)
    first and then the v2 layout (separate ``iam`` and ``acm`` sections).

    Args:
        env (str): Account environment name.
        region (str): AWS Region.
        account (str): Account number for ARN.
        certificate (str): Name of SSL certificate.

    Returns:
        str: Fully qualified ARN for the SSL certificate.
        None: Template doesn't exist or no match was found.
    """
    cert_name = None
    template_kwargs = {'account': account, 'name': certificate}
    try:
        rendered_template = get_template(template_file='infrastructure/iam/tlscert_naming.json.j2', **template_kwargs)
        tlscert_dict = json.loads(rendered_template)
    except ForemastTemplateNotFound:
        # No template at all: caller falls back to default naming.
        LOG.info('Unable to find TLS Cert Template...falling back to default logic...')
        return cert_name
    try:
        LOG.info('Attempting to find TLS Cert using TLS Cert Template v1 lookup...')
        cert_name = tlscert_dict[env][certificate]
        LOG.info('Found TLS certificate named %s under %s using TLS Cert Template v1', certificate, env)
    except KeyError:
        LOG.error('Unable to find TLS certificate named %s under %s using v1 TLS Cert Template.', certificate, env)
    tls_services = ['iam', 'acm']
    # v2 lookup only applies when both service sections are present.
    if ((cert_name is None) and all(((service in tlscert_dict) for service in tls_services))):
        LOG.info('Attempting to find TLS Cert using TLS Cert Template v2 lookup...')
        if (certificate in tlscert_dict['iam'][env]):
            cert_name = tlscert_dict['iam'][env][certificate]
            LOG.info('Found IAM TLS certificate named %s under %s using TLS Cert Template v2', certificate, env)
        elif (certificate in tlscert_dict['acm'][region][env]):
            cert_name = tlscert_dict['acm'][region][env][certificate]
            LOG.info('Found ACM TLS certificate named %s under %s in %s using TLS Cert Template v2', certificate, env, region)
        else:
            LOG.error('Unable to find TLS certificate named %s under parent keys [ACM, IAM] %s in v2 TLS Cert Template.', certificate, env)
    return cert_name
Generate a custom TLS Cert name based on a template. Args: env (str): Account environment name region (str): AWS Region. account (str): Account number for ARN. certificate (str): Name of SSL certificate. Returns: str: Fully qualified ARN for SSL certificate. None: Template doesn't exist.
codesearchnet
def _post_process(self, feed_item, item):
    """Copy the related campaign's id and name into the feed item.

    This lets the Bulkdozer feed be updated with the resolved campaign
    identifiers after the CM object has been created or updated.

    Args:
        feed_item: The Bulkdozer feed item.
        item: The CM newly created or updated object.
    """
    campaign = self._campaign_dao.get(feed_item, required=True)
    if not campaign:
        return
    feed_item[FieldMap.CAMPAIGN_NAME] = campaign['name']
    feed_item[FieldMap.CAMPAIGN_ID] = campaign['id']
Updates the feed item with ids and names of related object so those can be updated in the Bulkdozer feed. Args: feed_item: The Bulkdozer feed item. item: The CM newly created or updated object.
github-repos
def _ConvertValueForCsv(self, pql_value):
    """Sanitize a field value from a Value object to a CSV suitable format.

    Args:
        pql_value: dict a dictionary containing the data for a single
            field of an entity.

    Returns:
        str a CSV writer friendly value formatted by Value.Type, or '-'
        when the field is empty.

    Raises:
        GoogleAdsValueError: if a set value mixes member types.
    """
    if 'value' in pql_value:
        field = pql_value['value']
    elif 'values' in pql_value:
        field = pql_value['values']
    else:
        field = None
    if field:
        if isinstance(field, list):
            # Set values are only supported when every member shares the
            # same Value type; each member is converted recursively.
            if all(AdManagerClassType(single_field) == AdManagerClassType(field[0]) for single_field in field):
                return ','.join([
                    '"%s"' % str(self._ConvertValueForCsv(single_field))
                    for single_field in field])
            else:
                raise googleads.errors.GoogleAdsValueError(
                    'The set value returned contains unsupported mix value types')
        class_type = AdManagerClassType(pql_value)
        if class_type == 'TextValue':
            # Escape embedded double quotes for CSV.
            s = field.replace('"', '""')
            if sys.version_info.major < 3:
                # Python 2 needs explicit UTF-8 encoding for the writer.
                s = s.encode('UTF8')
            return s
        elif class_type == 'NumberValue':
            return float(field) if '.' in field else int(field)
        elif class_type == 'DateTimeValue':
            return self._ConvertDateTimeToOffset(field)
        elif class_type == 'DateValue':
            return datetime.date(int(field['date']['year']),
                                 int(field['date']['month']),
                                 int(field['date']['day'])).isoformat()
        else:
            return field
    else:
        return '-'
Sanitizes a field value from a Value object to a CSV suitable format. Args: pql_value: dict a dictionary containing the data for a single field of an entity. Returns: str a CSV writer friendly value formatted by Value.Type.
juraj-google-style
def create_resource_group(access_token, subscription_id, rgname, location):
    """Create a resource group in the specified location.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.
        location (str): Azure data center location, e.g. westus.

    Returns:
        HTTP response with a JSON body.
    """
    endpoint = '{root}/subscriptions/{sub}/resourcegroups/{rg}?api-version={ver}'.format(
        root=get_rm_endpoint(), sub=subscription_id, rg=rgname, ver=RESOURCE_API)
    body = json.dumps({'location': location})
    return do_put(endpoint, body, access_token)
Create a resource group in the specified location. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. location (str): Azure data center location. E.g. westus. Returns: HTTP response. JSON body.
codesearchnet
def request_parking(self, endpoint, url_args=None, **kwargs):
    """Make a request to the given endpoint of the ``parking`` server.

    Returns the plain JSON (dict) response which can then be parsed
    using one of the implemented types.

    Args:
        endpoint (str): Endpoint to send the request to; a key in the
            ``ENDPOINTS_PARKING`` dict.
        url_args (dict, optional): URL string replacements
            (``lang``, ``address``).
        **kwargs: Request arguments.

    Returns:
        Obtained response (dict) or None if the endpoint was not found.
    """
    if (endpoint not in ENDPOINTS_PARKING):
        return None
    # Avoid a shared mutable default argument; None stands in for {}.
    if url_args is None:
        url_args = {}
    url = (URL_OPENBUS + ENDPOINTS_PARKING[endpoint])
    lang = url_args.get('lang', 'ES')
    address = url_args.get('address', '')
    url = url.format(id_client=self._emt_id, passkey=self._emt_pass, address=address, lang=lang)
    return _parking_req.post(url, data=kwargs).json()
Make a request to the given endpoint of the ``parking`` server. This returns the plain JSON (dict) response which can then be parsed using one of the implemented types. Args: endpoint (str): Endpoint to send the request to. This string corresponds to the key in the ``ENDPOINTS`` dict. url_args (dict): Dictionary for URL string replacements. **kwargs: Request arguments. Returns: Obtained response (dict) or None if the endpoint was not found.
codesearchnet
def extract_tensors_from_dataset(dataset):
    """Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset.

    Args:
        dataset: Dataset instance.

    Returns:
        Tuple of tensors `x, y, weights`. `y` and `weights` entry may be
        None.
    """
    data_iterator = get_iterator(dataset)
    x, y, weights = unpack_iterator_input(data_iterator)
    return (x, y, weights)
Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
github-repos
def linear(self, x):
    """Compute presoftmax logits by projecting x with the shared weights.

    Args:
        x: A float32 tensor with shape [batch_size, length, hidden_size].

    Returns:
        float32 tensor with shape [batch_size, length, vocab_size].
    """
    with tf.name_scope('presoftmax_linear'):
        dynamic_shape = tf.shape(x)
        batch_size = dynamic_shape[0]
        length = dynamic_shape[1]
        # Collapse batch and length so a single matmul handles all positions.
        flattened = tf.reshape(x, [-1, self.hidden_size])
        logits = tf.matmul(flattened, self.shared_weights, transpose_b=True)
        return tf.reshape(logits, [batch_size, length, self.vocab_size])
Computes logits by running x through a linear layer. Args: x: A float32 tensor with shape [batch_size, length, hidden_size] Returns: float32 tensor with shape [batch_size, length, vocab_size].
codesearchnet
def read(self, size=None):
    """Reads a byte string from the gzip file at the current offset.

    The function will read a byte string up to the specified size or all
    of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    # Fixes vs. the previous version: `size=None` used to short-circuit
    # the loop and return b'' instead of all remaining data; each member
    # read was asked for the full `size` (so reads spanning members could
    # return more than requested); and an empty member read looped forever.
    data_chunks = []
    bytes_read = 0
    while ((size is None or bytes_read < size) and
           self._current_offset < self.uncompressed_data_size):
        member = self._GetMemberForOffset(self._current_offset)
        member_offset = self._current_offset - member.uncompressed_data_offset
        # Only request the bytes still needed from this member.
        read_size = None if size is None else size - bytes_read
        data_read = member.ReadAtOffset(member_offset, read_size)
        if not data_read:
            # No progress possible; stop instead of spinning.
            break
        self._current_offset += len(data_read)
        bytes_read += len(data_read)
        data_chunks.append(data_read)
    return b''.join(data_chunks)
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the read failed. OSError: if the read failed.
juraj-google-style
def double(self, count: float) -> float:
    """Return the input multiplied by 2.

    Args:
        count: Input number that you want to double.

    Returns:
        A number that is the double of count.
    """
    doubled = count * 2
    return doubled
Returns the input multiplied by 2. Args: count: Input number that you want to double. Returns: A number that is the double of count.
github-repos
def stylify(code: str) -> str:
    """Applies the ruff part of our `make style` command to some code.

    This formats the code using `ruff format`. As `ruff` does not provide
    a python api this cannot be done on the fly.

    Args:
        code (`str`): The code to format.

    Returns:
        `str`: The formatted code.
    """
    wrapper = 'class Bla:\n'
    indented = len(get_indent(code)) > 0
    if indented:
        # Wrap indented snippets in a dummy class so ruff accepts them,
        # then strip the wrapper from the formatted output.
        formatted = run_ruff(f'{wrapper}{code}')
        return formatted[len(wrapper):]
    return run_ruff(code)
Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. As `ruff` does not provide a python api this cannot be done on the fly. Args: code (`str`): The code to format. Returns: `str`: The formatted code.
github-repos
def check_integrity(models):
    """Apply validation and integrity checks to a collection of Bokeh models.

    Emits log warning and error messages for all error or warning
    conditions detected by the models' registered validator methods.

    Args:
        models (seq[Model]): a collection of Models to test.

    Returns:
        None
    """
    messages = dict(error=[], warning=[])
    for model in models:
        validators = []
        # Collect every `_check*` method that is marked as a validator.
        for name in dir(model):
            if (not name.startswith('_check')):
                continue
            obj = getattr(model, name)
            if getattr(obj, 'validator_type', None):
                validators.append(obj)
        for func in validators:
            messages[func.validator_type].extend(func())
    # Each message is a (code, name, description, object) tuple that
    # feeds the %-format string directly.
    for msg in sorted(messages['error']):
        log.error(('E-%d (%s): %s: %s' % msg))
    for msg in sorted(messages['warning']):
        (code, name, desc, obj) = msg
        # Individual warnings can be silenced by their numeric code.
        if (code not in __silencers__):
            log.warning(('W-%d (%s): %s: %s' % msg))
Apply validation and integrity checks to a collection of Bokeh models. Args: models (seq[Model]) : a collection of Models to test Returns: None This function will emit log warning and error messages for all error or warning conditions that are detected. For example, layouts without any children will trigger a warning: .. code-block:: python >>> empty_row = Row >>> check_integrity([empty_row]) W-1002 (EMPTY_LAYOUT): Layout has no children: Row(id='2404a029-c69b-4e30-9b7d-4b7b6cdaad5b', ...)
codesearchnet
def list_documents(project_id, knowledge_base_id):
    """Lists the Documents belonging to a Knowledge base.

    Prints the display name, id, MIME type, knowledge types and source of
    every document in the knowledge base.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
    """
    # Imported lazily so the module loads without the dialogflow SDK.
    import dialogflow_v2beta1 as dialogflow
    client = dialogflow.DocumentsClient()
    knowledge_base_path = client.knowledge_base_path(project_id, knowledge_base_id)
    print('Documents for Knowledge Id: {}'.format(knowledge_base_id))
    for document in client.list_documents(knowledge_base_path):
        print(' - Display Name: {}'.format(document.display_name))
        print(' - Knowledge ID: {}'.format(document.name))
        print(' - MIME Type: {}'.format(document.mime_type))
        print(' - Knowledge Types:')
        for knowledge_type in document.knowledge_types:
            print(' - {}'.format(KNOWLEDGE_TYPES[knowledge_type]))
        print(' - Source: {}\n'.format(document.content_uri))
Lists the Documents belonging to a Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
juraj-google-style
def write_temp_bird_conf(dummy_ip_prefix, config_file, variable_name, prefixes):
    """Write the list of IP-Prefixes to a temporary Bird config file.

    A failure to create and write the temporary file exits the main
    program.

    Args:
        dummy_ip_prefix (str): The dummy IP prefix which must always be
            present in the list.
        config_file (str): The file name of the Bird configuration.
        variable_name (str): The name of the variable set in the Bird
            configuration.
        prefixes (list): The list of IP-Prefixes to write.

    Returns:
        The filename of the temporary file.
    """
    log = logging.getLogger(PROGRAM_NAME)
    # NOTE(review): the header/comment strings below were corrupted in the
    # original source and are reconstructed here — confirm against upstream.
    comment = ("# {i} is a dummy IP prefix. It should NOT be "
               "removed from this list.".format(i=dummy_ip_prefix))
    # Use the current timestamp as a unique temporary file name next to
    # the real configuration file.
    tm_file = os.path.join(os.path.dirname(config_file), str(time.time()))
    log.debug("going to write to %s", tm_file)
    try:
        with open(tm_file, 'w') as tmpf:
            tmpf.write("# Generated {t} by {n} (pid={p})\n"
                       .format(t=datetime.datetime.now(),
                               n=PROGRAM_NAME,
                               p=os.getpid()))
            tmpf.write("{c}\n".format(c=comment))
            tmpf.write("define {n} =\n".format(n=variable_name))
            tmpf.write("{s}[\n".format(s=4 * ' '))
            tmpf.write(',\n'.join([' ' * 8 + n for n in prefixes]))
            tmpf.write("\n{s}];\n".format(s=4 * ' '))
    except OSError as error:
        log.critical("failed to write temporary file %s: %s. This is a FATAL "
                     "error, exiting main program", tm_file, error)
        sys.exit(1)
    else:
        return tm_file
Write the list of IP-Prefixes to a temporary file. A failure to create and write the temporary file will exit the main program. Arguments: dummy_ip_prefix (str): The dummy IP prefix, which must always be present in the list config_file (str): The file name of bird configuration variable_name (str): The name of the variable set in bird configuration prefixes (list): The list of IP-Prefixes to write Returns: The filename of the temporary file
juraj-google-style
def _WriteRow(self, output_writer, values):
    """Writes a row of values aligned to the column width.

    Long values are word-wrapped; the first wrapped line carries the
    label and continuation lines are indented to the value column.

    Args:
        output_writer (OutputWriter): output writer.
        values (list[object]): values; values[0] is the label, values[1]
            the value.
    """
    maximum_row_width = self._MAXIMUM_WIDTH - self._column_width - 3
    # "label : value" for the first line of a row.
    primary_format_string = '{{0:>{0:d}s}} : {{1:s}}\n'.format(
        self._column_width)
    # Continuation lines: blank label column, then the wrapped text.
    secondary_format_string = '{{0:<{0:d}s}}{{1:s}}\n'.format(
        self._column_width + 3)
    if isinstance(values[1], py2to3.STRING_TYPES):
        value_string = values[1]
    else:
        value_string = '{0!s}'.format(values[1])
    if len(value_string) < maximum_row_width:
        # Short value: single line, no wrapping needed.
        output_writer.Write(primary_format_string.format(
            values[0], value_string))
        return
    # Greedy word-wrap of the value over multiple lines.
    words = value_string.split()
    current = 0
    lines = []
    word_buffer = []
    for word in words:
        current += len(word) + 1
        if current >= maximum_row_width:
            current = len(word)
            lines.append(' '.join(word_buffer))
            word_buffer = [word]
        else:
            word_buffer.append(word)
    lines.append(' '.join(word_buffer))
    output_writer.Write(
        primary_format_string.format(values[0], lines[0]))
    for line in lines[1:]:
        output_writer.Write(secondary_format_string.format('', line))
Writes a row of values aligned to the column width. Args: output_writer (OutputWriter): output writer. values (list[object]): values.
juraj-google-style
def _AddProvidesEdges(self, rdf_artifact): for attribute in rdf_artifact.provides: self._AddEdge(rdf_artifact.name, attribute)
Add an edge for every attribute the given artifact provides. This method adds a directed edge from the artifact node to every attribute this artifact provides. Args: rdf_artifact: The artifact object.
juraj-google-style
def __init__(self, no_decomp: Callable[[ops.Operation], bool]=(lambda _: False)
             ) -> None:
    """Construct the optimization pass.

    Args:
        no_decomp: A predicate that determines whether an operation
            should be decomposed or not. Defaults to decomposing
            everything.
    """
    super().__init__()
    self.no_decomp = no_decomp
Construct the optimization pass. Args: no_decomp: A predicate that determines whether an operation should be decomposed or not. Defaults to decomposing everything.
juraj-google-style
def calc_digest(origin, algorithm='sha1', block_size=None):
    """Calculate the hex digest of a readable object.

    Args:
        origin: a readable object for which to calculate the digest.
        algorithm: the algorithm to use; see
            ``hashlib.algorithms_available`` for supported algorithms.
        block_size: the size of the block to read at each iteration, or
            None to read everything at once.

    Returns:
        str: the hexadecimal digest.

    Raises:
        ValueError: if the algorithm is not supported by the platform.
    """
    try:
        digest = hashlib.new(algorithm)
    except ValueError:
        raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm))
    while True:
        if block_size:
            chunk = origin.read(block_size)
        else:
            chunk = origin.read()
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
Calculate the digest of a readable object Args: origin -- a readable object for which to calculate the digest algorithm -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms. block_size -- the size of the block to read at each iteration
codesearchnet
def merge(self, ref_name: str):
    """Merges ``ref_name`` into the current branch.

    Refuses to merge (and exits the process) when the repository has
    uncommitted changes.

    Args:
        ref_name: ref to merge into the current one.
    """
    if self.is_dirty():
        # A dirty working tree would make the merge ambiguous; abort.
        LOGGER.error('repository is dirty; cannot merge: %s', ref_name)
        sys.exit(-1)
    LOGGER.info('merging ref: "%s" into branch: %s', ref_name, self.get_current_branch())
    self.repo.git.merge(ref_name)
Merges two refs Args: ref_name: ref to merge in the current one
juraj-google-style
async def dist(self, mesg):
    """Distribute an existing event tuple.

    Args:
        mesg ((str, dict)): An event tuple.

    Returns:
        Return values collected from the handlers, or () when the base
        is already finished.

    Example:
        await base.dist(('foo', {'bar': 'baz'}))
    """
    if self.isfini:
        return ()
    ret = []
    # Handlers registered for this specific event name.
    for func in self._syn_funcs.get(mesg[0], ()):
        try:
            ret.append((await s_coro.ornot(func, mesg)))
        except asyncio.CancelledError:
            # Cancellation must propagate; only handler errors are logged.
            raise
        except Exception:
            logger.exception('base %s error with mesg %s', self, mesg)
    # Link functions receive every event regardless of name.
    for func in self._syn_links:
        try:
            ret.append((await func(mesg)))
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception('base %s error with mesg %s', self, mesg)
    return ret
Distribute an existing event tuple. Args: mesg ((str,dict)): An event tuple. Example: await base.dist( ('foo',{'bar':'baz'}) )
codesearchnet
def check_initializers(initializers, keys):
    """Check a (possibly nested) dictionary of initializers.

    Verifies that `initializers` is a dictionary whose keys all appear in
    `keys`, and that its entries are callables or further dictionaries
    satisfying the same constraints.

    Args:
        initializers: Dictionary of initializers (allowing nested
            dictionaries) or None.
        keys: Iterable of valid keys for `initializers`.

    Returns:
        Copy of the checked dictionary of initializers; an empty dict
        when `initializers` is None.

    Raises:
        KeyError: If an initializer is provided for a key not in `keys`.
        TypeError: If a provided initializer is not a callable function,
            or `initializers` is not a Mapping.
    """
    if (initializers is None):
        return {}
    _assert_is_dictlike(initializers, valid_keys=keys)
    valid_keys = set(keys)
    extra_keys = set(initializers) - valid_keys
    if extra_keys:
        raise KeyError('Invalid initializer keys {}, initializers can only be provided for {}'.format(
            ', '.join("'{}'".format(key) for key in extra_keys),
            ', '.join("'{}'".format(key) for key in valid_keys)))
    _check_nested_callables(initializers, 'Initializer')
    return dict(initializers)
Checks the given initializers. This checks that `initializers` is a dictionary that only contains keys in `keys`, and furthermore the entries in `initializers` are functions or further dictionaries (the latter used, for example, in passing initializers to modules inside modules) that must satisfy the same constraints. Args: initializers: Dictionary of initializers (allowing nested dictionaries) or None. keys: Iterable of valid keys for `initializers`. Returns: Copy of checked dictionary of initializers. If `initializers=None`, an empty dictionary will be returned. Raises: KeyError: If an initializer is provided for a key not in `keys`. TypeError: If a provided initializer is not a callable function, or `initializers` is not a Mapping.
codesearchnet
def is_file_on_local_server(self, text) -> Tuple[(Optional[Path], Optional[int], Optional[int])]:
    """Test if the provided text matches a file on the local server.

    Supports absolute paths, paths relative to the current working
    directory, ``file:line``, ``file:line:column`` and
    ``file::py_func`` syntaxes.

    Args:
        text (str): candidate for file search.

    Returns:
        Tuple of (path, line, column); members are None when not found.
        Note line/column are returned as strings from the regex match —
        TODO confirm callers expect that rather than ints.
    """
    lineno = None
    colno = None
    py_func = None
    # file:line:column suffix.
    m = re.compile('(.*)\\:(\\d+)\\:(\\d+)$').match(text)
    if m:
        text = m.group(1)
        lineno = m.group(2)
        colno = m.group(3)
    else:
        # file:line suffix.
        m = re.compile('(.*)\\:(\\d+)$').match(text)
        if m:
            text = m.group(1)
            lineno = m.group(2)
        else:
            # file::py_func suffix.
            m = re.compile('^(.*)\\:\\:([a-zA-Z0-9\\_]+)$').match(text)
            if m:
                text = m.group(1)
                py_func = m.group(2).strip()

    def find_lineno(text, pt, lineno, py_func):
        # Resolve the line number, scanning the file for `def py_func`
        # when only a function name was given.
        if lineno:
            return lineno
        if (not py_func):
            return
        with pt.open() as f:
            for (i, line) in enumerate(f.readlines()):
                if line.startswith('def {}'.format(py_func)):
                    return (i + 1)
                    break  # NOTE(review): unreachable after return.
    pt = Path(text)
    log.debug('checking file existance: %r', pt)
    try:
        if pt.exists():
            lineno = find_lineno(text, pt, lineno, py_func)
            log.info('File exists: %r, line=%r', pt.absolute().as_posix(), lineno)
            return (pt, lineno, colno)
        log.debug('No file found matching: %r', text)
        # Retry relative to the server's current working directory.
        cwd = self.get_current_directory()
        pt = (Path(cwd) / pt)
        log.debug('checking file existance: %r', pt)
        if pt.exists():
            lineno = find_lineno(text, pt, lineno, py_func)
            log.info('File exists: %r, line=%r', pt.absolute().as_posix(), lineno)
            return (pt, lineno, colno)
        log.debug('file does not exist: %s', str(pt))
    except OSError:
        # Candidate was not a valid file name on this platform.
        log.debug('not a file name: %r', text)
    return (None, None, None)
Test if the provided text matches a file on local server Supports: - absolute path - relative path (using current working directory) - file:line syntax - file:line:colum syntax Args: text (str): candidate for file search Returns - Tuple(None, None, None) if the provided text does not match anything - Tuple(file path, None, None) if only a file path is found - Tuple(file path, linenumber, None) if line number is found - Tuple(file path, linenumber, columnnumber) if line and column numbers are found
codesearchnet
class PoolFormerFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """Keyword arguments for the PoolFormer fast image processor."""
    # Percentage of the image to crop; only has an effect when
    # `do_resize` is set to `True`.
    crop_pct: Optional[float]
Args: crop_pct (`float`, *optional*, defaults to `self.crop_pct`): Percentage of the image to crop. Only has an effect if `do_resize` is set to `True`.
github-repos
def run_node(self, node, stim):
    """Executes the Transformer at a specific node.

    Args:
        node (str, Node): If a string, the name of the Node in the
            current Graph. Otherwise the Node instance to execute.
        stim (str, stim, list): Any valid input to the Transformer
            stored at the target node.

    Returns:
        list: the (flattened) results from this node's subtree.
    """
    if isinstance(node, string_types):
        # Resolve node names against the current graph.
        node = self.nodes[node]
    result = node.transformer.transform(stim)
    if node.is_leaf():
        return listify(result)
    stim = result
    # A generator can only be consumed once; materialize it when it has
    # to feed more than one child.
    if ((len(node.children) > 1) and isgenerator(stim)):
        stim = list(stim)
    return list(chain(*[self.run_node(c, stim) for c in node.children]))
Executes the Transformer at a specific node. Args: node (str, Node): If a string, the name of the Node in the current Graph. Otherwise the Node instance to execute. stim (str, stim, list): Any valid input to the Transformer stored at the target node.
codesearchnet
def load(self, cellpy_file, parent_level='CellpyData'):
    """Loads a cellpy file.

    Appends the datasets read from the file to ``self.datasets`` and
    refreshes the dataset bookkeeping attributes.

    Args:
        cellpy_file (path, str): Full path to the cellpy file.
        parent_level (str, optional): Parent level in the hdf5 store.

    Returns:
        self, to allow call chaining.
    """
    try:
        self.logger.debug('loading cellpy-file (hdf5):')
        self.logger.debug(cellpy_file)
        new_datasets = self._load_hdf5(cellpy_file, parent_level)
        self.logger.debug('cellpy-file loaded')
    except AttributeError:
        # Older/newer file layouts surface as missing attributes.
        new_datasets = []
        # Fixed log message: previously read "supported bycurrent reader".
        self.logger.warning('This cellpy-file version is not supported by current reader (try to update cellpy).')
    if new_datasets:
        for dataset in new_datasets:
            self.datasets.append(dataset)
    else:
        self.logger.warning('Could not load')
        self.logger.warning(str(cellpy_file))
    self.number_of_datasets = len(self.datasets)
    self.status_datasets = self._validate_datasets()
    self._invent_a_name(cellpy_file)
    return self
Loads a cellpy file. Args: cellpy_file (path, str): Full path to the cellpy file. parent_level (str, optional): Parent level
codesearchnet
def from_str(cls, input_string, fmt, primitive=False, sort=False, merge_tol=0.0):
    """Reads a structure from a string.

    Args:
        input_string (str): String to parse.
        fmt (str): One of: cif, poscar, cssr, json, yaml, xsf, mcsqs.
        primitive (bool): Whether to find a primitive cell. Defaults to False.
        sort (bool): Whether to sort sites by the default ordering criteria.
        merge_tol (float): If positive, sites within this distance are merged.

    Returns:
        IStructure / Structure

    Raises:
        ValueError: If `fmt` is not one of the supported formats.
    """
    # Imports kept local, presumably to avoid circular imports at module load.
    from pymatgen.io.cif import CifParser
    from pymatgen.io.vasp import Poscar
    from pymatgen.io.cssr import Cssr
    from pymatgen.io.xcrysden import XSF
    from pymatgen.io.atat import Mcsqs
    fmt = fmt.lower()
    if (fmt == 'cif'):
        parser = CifParser.from_string(input_string)
        # Only the first structure in the CIF is used.
        s = parser.get_structures(primitive=primitive)[0]
    elif (fmt == 'poscar'):
        s = Poscar.from_string(input_string, False, read_velocities=False).structure
    elif (fmt == 'cssr'):
        cssr = Cssr.from_string(input_string)
        s = cssr.structure
    elif (fmt == 'json'):
        d = json.loads(input_string)
        s = Structure.from_dict(d)
    elif (fmt == 'yaml'):
        import ruamel.yaml as yaml
        d = yaml.safe_load(input_string)
        s = Structure.from_dict(d)
    elif (fmt == 'xsf'):
        s = XSF.from_string(input_string).structure
    elif (fmt == 'mcsqs'):
        s = Mcsqs.structure_from_string(input_string)
    else:
        raise ValueError(('Unrecognized format `%s`!' % fmt))
    if sort:
        s = s.get_sorted_structure()
    if merge_tol:
        s.merge_sites(merge_tol)
    return cls.from_sites(s)
Reads a structure from a string. Args: input_string (str): String to parse. fmt (str): A format specification. primitive (bool): Whether to find a primitive cell. Defaults to False. sort (bool): Whether to sort the sites in accordance to the default ordering criteria, i.e., electronegativity. merge_tol (float): If this is some positive number, sites that are within merge_tol from each other will be merged. Usually 0.01 should be enough to deal with common numerical issues. Returns: IStructure / Structure
codesearchnet
def recalculate_concepts(self, concepts, lang=None):
    """Recalculate per-user statistics for the given concepts.

    Args:
        concepts (dict): user id (int) -> set of concepts to recalculate.
        lang (Optional[str]): language used to fetch items for all concepts
            (cached). When None, only items of the used concepts are fetched.
    """
    if len(concepts) == 0:
        return
    if lang is None:
        items = Concept.objects.get_concept_item_mapping(concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
    else:
        items = Concept.objects.get_concept_item_mapping(lang=lang)
    environment = get_environment()
    mastery_threshold = get_mastery_trashold()
    # NOTE: the loop target rebinds `concepts` to the per-user concept set,
    # shadowing the outer dict from here on.
    for user, concepts in concepts.items():
        all_items = list(set(flatten([items[c] for c in concepts])))
        answer_counts = environment.number_of_answers_more_items(all_items, user)
        correct_answer_counts = environment.number_of_correct_answers_more_items(all_items, user)
        predictions = dict(list(zip(all_items, get_predictive_model().
                                    predict_more_items(environment, user, all_items, time=get_time_for_knowledge_overview()))))
        new_user_stats = []
        stats_to_delete_condition = Q()
        for concept in concepts:
            answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
                time_spent=Sum("response_time"),
                sessions=Count("session", True),
                time_first=Min("time"),
                time_last=Max("time"),
            )
            stats = {
                "answer_count": sum(answer_counts[i] for i in items[concept]),
                "correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
                "item_count": len(items[concept]),
                "practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
                "mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
                "prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
                # response_time is presumably in milliseconds — TODO confirm.
                "time_spent": answer_aggregates["time_spent"] / 1000,
                "session_count": answer_aggregates["sessions"],
                "time_first": answer_aggregates["time_first"].timestamp(),
                "time_last": answer_aggregates["time_last"].timestamp(),
            }
            stats_to_delete_condition |= Q(user=user, concept=concept)
            for stat_name, value in stats.items():
                new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
        # Replace all old rows for this user's recalculated concepts at once.
        self.filter(stats_to_delete_condition).delete()
        self.bulk_create(new_user_stats)
Recalculated given concepts for given users Args: concepts (dict): user id (int -> set of concepts to recalculate) lang(Optional[str]): language used to get items in all concepts (cached). Defaults to None, in that case are get items only in used concepts
juraj-google-style
def get_note_list(self, data=True, since=None, tags=[]):
    """Fetch the note index from the Simplenote API, following pagination.

    Args:
        data (bool): If False only keys/ids and versions are returned; an
            empty data object is inserted for compatibility.
        since (str): Simperium cursor; only changes since it are returned.
        tags (list): Return only notes having at least one of these tags.
            NOTE(review): mutable default argument — safe here because it is
            only read, but worth replacing with None at some point.

    Returns:
        tuple: (note_list, status) with status 0 on success, or
        (exception, -1) on failure.

    Raises:
        SimplenoteLoginFailed: On HTTP 401 from the API.
    """
    status = 0
    ret = []  # NOTE(review): unused local, kept verbatim.
    response_notes = {}
    notes = {'index': []}
    params = ('/index?limit=%s' % str(NOTE_FETCH_LENGTH))
    if (since is not None):
        params += ('&since=%s' % since)
    if data:
        params += '&data=true'
    # First page.
    request = Request((DATA_URL + params))
    request.add_header(self.header, self.get_token())
    try:
        response = urllib2.urlopen(request)
        response_notes = json.loads(response.read().decode('utf-8'))
        note_objects = []
        for n in response_notes['index']:
            if (not data):
                n['d'] = {}
            note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
            note_objects.append(note_object)
        notes['index'].extend(note_objects)
    except HTTPError as e:
        if (e.code == 401):
            raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
        else:
            return (e, (- 1))
    except IOError as e:
        return (e, (- 1))
    # Follow pagination: the API returns a 'mark' cursor while more pages
    # exist. (This loop duplicates the first-page logic above.)
    while ('mark' in response_notes):
        params += ('&mark=%s' % response_notes['mark'])
        request = Request((DATA_URL + params))
        request.add_header(self.header, self.get_token())
        try:
            response = urllib2.urlopen(request)
            response_notes = json.loads(response.read().decode('utf-8'))
            note_objects = []
            for n in response_notes['index']:
                if (not data):
                    n['d'] = {}
                note_object = n['d']
                note_object = self.__add_simplenote_api_fields(n['d'], n['id'], n['v'])
                note_objects.append(note_object)
            notes['index'].extend(note_objects)
        except HTTPError as e:
            if (e.code == 401):
                raise SimplenoteLoginFailed('Login to Simplenote API failed! Check Token.')
            else:
                return (e, (- 1))
        except IOError as e:
            return (e, (- 1))
    note_list = notes['index']
    self.current = response_notes['current']
    # Optional client-side tag filter.
    if (len(tags) > 0):
        note_list = [n for n in note_list if (len(set(n['tags']).intersection(tags)) > 0)]
    return (note_list, status)
Method to get the note list The method can be passed optional arguments to limit the list to notes containing a certain tag, or only updated since a certain Simperium cursor. If omitted a list of all notes is returned. By default data objects are returned. If data is set to false only keys/ids and versions are returned. An empty data object is inserted for compatibility. Arguments: - tags=[] list of tags as string: return notes that have at least one of these tags - since=cursor Simperium cursor as string: return only changes since this cursor - data=True If false only return keys/ids and versions Returns: A tuple `(notes, status)` - notes (list): A list of note objects with all properties set except `content`. - status (int): 0 on success and -1 otherwise
codesearchnet
def parse_frequencies(variant, transcripts):
    """Collect population frequencies for a variant.

    Frequencies are read from INFO keys first; if none are present they are
    taken from the transcripts. SV-specific left/right 1000G frequencies are
    always attempted.

    Args:
        variant (cyvcf2.Variant): A parsed vcf variant.
        transcripts (iterable(dict)): Parsed transcripts.

    Returns:
        dict: frequency name -> value, only for frequencies that were found.
    """
    frequencies = {}
    # Each list holds alternative INFO keys; the first that yields a value wins.
    thousand_genomes_keys = ['1000GAF']
    thousand_genomes_max_keys = ['1000G_MAX_AF']
    exac_keys = ['EXACAF']
    exac_max_keys = ['ExAC_MAX_AF', 'EXAC_MAX_AF']
    gnomad_keys = ['GNOMADAF', 'GNOMAD_AF']
    gnomad_max_keys = ['GNOMADAF_POPMAX', 'GNOMADAF_MAX']
    for test_key in thousand_genomes_keys:
        thousand_g = parse_frequency(variant, test_key)
        if thousand_g:
            frequencies['thousand_g'] = thousand_g
            break
    for test_key in thousand_genomes_max_keys:
        thousand_g_max = parse_frequency(variant, test_key)
        if thousand_g_max:
            frequencies['thousand_g_max'] = thousand_g_max
            break
    for test_key in exac_keys:
        exac = parse_frequency(variant, test_key)
        if exac:
            frequencies['exac'] = exac
            break
    for test_key in exac_max_keys:
        exac_max = parse_frequency(variant, test_key)
        if exac_max:
            frequencies['exac_max'] = exac_max
            break
    for test_key in gnomad_keys:
        gnomad = parse_frequency(variant, test_key)
        if gnomad:
            frequencies['gnomad'] = gnomad
            break
    for test_key in gnomad_max_keys:
        gnomad_max = parse_frequency(variant, test_key)
        if gnomad_max:
            frequencies['gnomad_max'] = gnomad_max
            break
    # Fallback: take frequencies from transcript annotations when INFO had none.
    if not frequencies:
        for transcript in transcripts:
            exac = transcript.get('exac_maf')
            exac_max = transcript.get('exac_max')
            thousand_g = transcript.get('thousand_g_maf')
            thousandg_max = transcript.get('thousandg_max')
            gnomad = transcript.get('gnomad_maf')
            gnomad_max = transcript.get('gnomad_max')
            if exac:
                frequencies['exac'] = exac
            if exac_max:
                frequencies['exac_max'] = exac_max
            if thousand_g:
                frequencies['thousand_g'] = thousand_g
            if thousandg_max:
                frequencies['thousand_g_max'] = thousandg_max
            if gnomad:
                frequencies['gnomad'] = gnomad
            if gnomad_max:
                frequencies['gnomad_max'] = gnomad_max
    # Presumably SV breakpoint frequencies — always checked regardless of above.
    thousand_g_left = parse_frequency(variant, 'left_1000GAF')
    if thousand_g_left:
        frequencies['thousand_g_left'] = thousand_g_left
    thousand_g_right = parse_frequency(variant, 'right_1000GAF')
    if thousand_g_right:
        frequencies['thousand_g_right'] = thousand_g_right
    return frequencies
Add the frequencies to a variant Frequencies are parsed either directly from keys in info fields or from the transcripts if they are annotated there. Args: variant(cyvcf2.Variant): A parsed vcf variant transcripts(iterable(dict)): Parsed transcripts Returns: frequencies(dict): A dictionary with the relevant frequencies
juraj-google-style
def _ParseRecord(
    self, parser_mediator, record_index, evt_record, recovered=False):
  """Parses a Windows EventLog (EVT) record into timestamped events.

  Produces a creation-time and a written-time event when available; if
  neither timestamp can be read, a single 'Not set' semantic-time event
  is produced instead.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    record_index (int): event record index.
    evt_record (pyevt.record): event record.
    recovered (Optional[bool]): True if the record was recovered.
  """
  event_data = self._GetEventData(
      parser_mediator, record_index, evt_record, recovered=recovered)
  try:
    creation_time = evt_record.get_creation_time_as_integer()
  except OverflowError as exception:
    # Out-of-range timestamps become a warning rather than a parse failure.
    parser_mediator.ProduceExtractionWarning((
        'unable to read creation time from event record: {0:d} '
        'with error: {1!s}').format(record_index, exception))
    creation_time = None
  if creation_time:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=creation_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  try:
    written_time = evt_record.get_written_time_as_integer()
  except OverflowError as exception:
    parser_mediator.ProduceExtractionWarning((
        'unable to read written time from event record: {0:d} '
        'with error: {1!s}').format(record_index, exception))
    written_time = None
  if written_time:
    date_time = dfdatetime_posix_time.PosixTime(timestamp=written_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  if not creation_time and not written_time:
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Windows EventLog (EVT) record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record_index (int): event record index. evt_record (pyevt.record): event record. recovered (Optional[bool]): True if the record was recovered.
juraj-google-style
def add_body_part(self, key, data, mime_type, size=None):
    """Adds a MIME multipart form-data part to the HTTP request body.

    Designed to create MIME 1.0 requests as specified in RFC 1341.

    Args:
        key: Form field name used in the Content-Disposition header.
        data: str or file-like object containing a part of the request body.
        mime_type (str): The MIME type describing the data.
        size (int): Required when `data` is neither a str nor a file-like
            object; recomputed automatically otherwise.

    Raises:
        UnknownSize: If the part size cannot be determined.
    """
    if isinstance(data, str):
        size = len(data)
    # A file-like object takes precedence: its size comes from the OS.
    if hasattr(data, 'fileno'):
        size = os.fstat(data.fileno())[stat.ST_SIZE]
    if (size is None):
        raise UnknownSize('Each part of the body must have a known size.')
    if ('Content-Length' in self.headers):
        content_length = int(self.headers['Content-Length'])
    else:
        content_length = 0
    boundary_string = ('\r\n--%s\r\n' % (MIME_BOUNDARY,))
    self._body_parts.append(boundary_string)
    content_length += (len(boundary_string) + size)
    cd = ('Content-Disposition: form-data; name="%s"' % key)
    mt = mime_type
    if hasattr(data, 'fileno'):
        # For files, include the filename and guess a better MIME type.
        cd += ('; filename="%s"' % data.name.split('/')[(- 1)])
        mt = (mimetypes.guess_type(data.name)[0] or 'application/octet-stream')
    cd += '\r\n'
    type_string = ('Content-Type: %s\r\n\r\n' % mt)
    self._body_parts.append(cd)
    self._body_parts.append(type_string)
    content_length += (len(type_string) + len(cd))
    self._body_parts.append(data)
    # Keep the running total up to date for the final request.
    self.headers['Content-Length'] = str(content_length)
Adds data to the HTTP request body. If more than one part is added, this is assumed to be a mime-multipart request. This method is designed to create MIME 1.0 requests as specified in RFC 1341. Args: data: str or a file-like object containing a part of the request body. mime_type: str The MIME type describing the data size: int Required if the data is a file like object. If the data is a string, the size is calculated so this parameter is ignored.
codesearchnet
def from_string(contents):
    """Creates a ZeoVoronoiXYZ object from a Zeo++ Voronoi XYZ string.

    Redefines the XYZ class's from_string for the Zeo++ variant, which
    carries a Voronoi radius as a fifth column.

    Args:
        contents: String representing a Zeo++ Voronoi XYZ file.

    Returns:
        ZeoVoronoiXYZ object
    """
    lines = contents.split("\n")
    num_sites = int(lines[0])
    coords = []
    sp = []
    prop = []
    coord_patt = re.compile(
        r"(\w+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+([0-9\-\.]+)\s+" +
        r"([0-9\-\.]+)"
    )
    # Sites start at line 2 (line 1 is presumably a comment line — TODO confirm).
    for i in range(2, 2 + num_sites):
        m = coord_patt.search(lines[i])
        if m:
            sp.append(m.group(1))
            # Columns are reordered [3, 4, 2]; presumably converts the file's
            # axis convention to the Molecule convention — TODO confirm.
            coords.append([float(j) for j in [m.group(i) for i in [3, 4, 2]]])
            prop.append(float(m.group(5)))
    return ZeoVoronoiXYZ(
        Molecule(sp, coords, site_properties={'voronoi_radius': prop})
    )
Creates Zeo++ Voronoi XYZ object from a string. from_string method of XYZ class is being redefined. Args: contents: String representing Zeo++ Voronoi XYZ file. Returns: ZeoVoronoiXYZ object
juraj-google-style
def _remove_boring_lines(text): lines = text.split('\n') filtered = [line for line in lines if re.match('[a-zA-z"\']', line)] return '\n'.join(filtered)
Remove lines that do not start with a letter or a quote. From inspecting the data, this seems to leave in most prose and remove most weird stuff. Args: text: a string Returns: a string
codesearchnet
def create_deferred(self, func, input_layer, deferred_args, deferred_kwargs, name):
    """Creates a deferred node with captured scope.

    Args:
        func: The original function to call.
        input_layer: The input layer.
        deferred_args: The arguments that will be used by the deferred function.
        deferred_kwargs: The keyword args for the deferred function.
        name: The name of this layer.

    Returns:
        A _DeferredLayer that will execute func in the correct scopes.
    """
    # Snapshot the current defaults so they are replayed at execution time.
    my_defaults = _defaults

    def _with_method_complete(*args, **kwargs):
        # Re-enter the captured graph, defaults and name scope before running.
        input_layer = args[0]
        with input_layer.g.as_default(), defaults_scope(**my_defaults), tf.name_scope(name):
            return input_layer._method_complete(func(*args, **kwargs))

    full_args = [input_layer]
    full_args.extend(deferred_args)
    partial_context = {}
    if isinstance(input_layer, _DeferredLayer):
        # Propagate the partial context of an already-deferred input.
        partial_context = input_layer._partial_context
    return _DeferredLayer(input_layer.bookkeeper,
                          scopes.Template(None, _with_method_complete),
                          full_args,
                          deferred_kwargs,
                          scope=input_layer._scope,
                          defaults=input_layer.defaults,
                          partial_context=partial_context)
Creates a deferred node with captured scope. Args: func: The original function to call. input_layer: The input_layer. deferred_args: The arguments that will be used by the deferred function. deferred_kwargs: The keyword args for the deferred function. name: The name of this layer. Returns: A _DeferredLayer that will execute func in the correct scopes.
codesearchnet
def set_datastore_policy(self, func):
    """Set the context datastore policy function.

    Args:
        func: A function taking a Key and returning a bool indicating whether
            the datastore should be used; a plain bool to apply uniformly;
            or None to fall back to the default policy.
    """
    if func is None:
        policy = self.default_datastore_policy
    elif isinstance(func, bool):
        constant = func

        def policy(unused_key, flag=constant):
            return flag
    else:
        policy = func
    self._datastore_policy = policy
Set the context datastore policy function. Args: func: A function that accepts a Key instance as argument and returns a bool indicating if it should use the datastore. May be None.
codesearchnet
def qualNorm(data, qualitative):
    """Generates starting points for clustering using binarized data.

    Genes whose qualitative row is all -1 are treated as missing and filled
    in afterwards from a one-iteration clustering of the known genes.

    Args:
        data (array): 2d array of genes x cells (dense or scipy.sparse).
        qualitative (array): 2d numerical array, genes x clusters; a row of
            all -1 marks missing qualitative data for that gene.

    Returns:
        Array of starting positions, shape genes x clusters.
    """
    genes, cells = data.shape
    clusters = qualitative.shape[1]
    output = np.zeros((genes, clusters))
    missing_indices = []
    qual_indices = []
    # Per-gene midpoint between the qualitative min and max.
    thresholds = qualitative.min(1) + (qualitative.max(1) - qualitative.min(1))/2.0
    for i in range(genes):
        if qualitative[i,:].max() == -1 and qualitative[i,:].min() == -1:
            missing_indices.append(i)
            continue
        qual_indices.append(i)
        threshold = thresholds[i]
        data_i = data[i,:]
        if sparse.issparse(data):
            data_i = data_i.toarray().flatten()
        # Split this gene's expression into a low and a high component.
        assignments, means = poisson_cluster(data_i.reshape((1, cells)), 2)
        means = means.flatten()
        high_i = 1
        low_i = 0
        if means[0]>means[1]:
            high_i = 0
            low_i = 1
        high_mean = np.median(data_i[assignments==high_i])
        low_mean = np.median(data_i[assignments==low_i])
        for k in range(clusters):
            if qualitative[i,k]>threshold:
                output[i,k] = high_mean
            else:
                output[i,k] = low_mean
    if missing_indices:
        # One clustering pass over the known genes assigns cells to clusters.
        assignments, means = poisson_cluster(data[qual_indices, :], clusters, output[qual_indices, :], max_iters=1)
        for ind in missing_indices:
            for k in range(clusters):
                # NOTE(review): len(assignments==k) is the length of a boolean
                # array, not a count of matches — looks like it was meant to be
                # a sum/any check; verify against the intended behavior.
                if len(assignments==k)==0:
                    output[ind, k] = data[ind,:].mean()
                else:
                    output[ind, k] = data[ind, assignments==k].mean()
    return output
Generates starting points using binarized data. If qualitative data is missing for a given gene, all of its entries should be -1 in the qualitative matrix. Args: data (array): 2d array of genes x cells qualitative (array): 2d array of numerical data - genes x clusters Returns: Array of starting positions for state estimation or clustering, with shape genes x clusters
juraj-google-style
def external_ids(self, **kwargs):
    """Get the stored external ids for a TV season by season number.

    Args:
        language: (optional) ISO 639 code.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_series_id_season_number_path('external_ids')
    payload = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(payload)
    return payload
Get the external ids that we have stored for a TV season by season number. Args: language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API.
juraj-google-style
def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None:
    """Bind the buffer to a shader storage buffer binding point.

    Args:
        binding (int): The shader storage binding.

    Keyword Args:
        offset (int): Byte offset into the buffer.
        size (int): Number of bytes to bind; ``-1`` means the whole buffer.
    """
    # Delegate to the low-level (mglo) implementation object.
    self.mglo.bind_to_storage_buffer(binding, offset, size)
Bind the buffer to a shader storage buffer. Args: binding (int): The shader storage binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
juraj-google-style
def _replace_variable_with_pattern(match):
    """Replace a variable match with a pattern that can validate it.

    Args:
        match (re.Match): A regular expression match with `positional`,
            `name` and `template` groups.

    Returns:
        str: A regular expression pattern that can be used to validate the
        variable in an expanded path.

    Raises:
        ValueError: If an unexpected template expression is encountered.
    """
    positional = match.group("positional")
    name = match.group("name")
    template = match.group("template")
    # Named variables: {name}, {name=**}, or {name=<template>}.
    if name is not None:
        if not template:
            return _SINGLE_SEGMENT_PATTERN.format(name)
        if template == "**":
            return _MULTI_SEGMENT_PATTERN.format(name)
        return _generate_pattern_for_template(template)
    # Positional wildcards: * matches one segment, ** matches many.
    if positional == "*":
        return _SINGLE_SEGMENT_PATTERN
    if positional == "**":
        return _MULTI_SEGMENT_PATTERN
    raise ValueError("Unknown template expression {}".format(match.group(0)))
Replace a variable match with a pattern that can be used to validate it. Args: match (re.Match): A regular expression match Returns: str: A regular expression pattern that can be used to validate the variable in an expanded path. Raises: ValueError: If an unexpected template expression is encountered.
juraj-google-style
def count_params(x):
    """Returns the static number of scalar elements in a variable or tensor.

    Args:
        x: Variable or tensor with a fully-defined static shape.

    Returns:
        Integer, the number of scalars in `x`.
    """
    static_shape = x.shape.as_list()
    return np.prod(static_shape)
Returns the static number of elements in a variable or tensor. Args: x: Variable or tensor. Returns: Integer, the number of scalars in `x`. Example: >>> kvar = tf.keras.backend.zeros((2,3)) >>> tf.keras.backend.count_params(kvar) 6 >>> tf.keras.backend.eval(kvar) array([[0., 0., 0.], [0., 0., 0.]], dtype=float32)
github-repos
def from_bigquery(sql):
    """Create a Metrics instance from a BigQuery query or table.

    Args:
        sql: A BigQuery table name, a query string, or a bq.Query.

    Returns:
        a Metrics instance.
    """
    if isinstance(sql, bq.Query):
        sql = sql._expanded_sql()
    parts = sql.split('.')
    # Heuristic: anything that is not a 2- or 3-part dotted identifier without
    # spaces is treated as a query (wrapped in parens); otherwise it is treated
    # as a table name (wrapped in backticks).
    if ((len(parts) == 1) or (len(parts) > 3) or any(((' ' in x) for x in parts))):
        sql = (('(' + sql) + ')')
    else:
        sql = (('`' + sql) + '`')
    metrics = Metrics(bigquery=sql)
    return metrics
Create a Metrics instance from a bigquery query or table. Returns: a Metrics instance. Args: sql: A BigQuery table name or a query.
codesearchnet
def disassemble(self, start=None, end=None, arch_mode=None):
    """Disassemble native instructions, yielding one tuple per instruction.

    Args:
        start (int): Start address; defaults to the binary's entry area start.
        end (int): End address; defaults to the binary's entry area end.
        arch_mode (int): Architecture mode; defaults to the binary's mode.

    Yields:
        (int, Instruction, int): (address, assembler instruction, size).
    """
    if (arch_mode is None):
        arch_mode = self.binary.architecture_mode
    # NOTE(review): falsy check means an explicit start/end of 0 falls back
    # to the binary's bounds — confirm that is intended.
    curr_addr = (start if start else self.binary.ea_start)
    end_addr = (end if end else self.binary.ea_end)
    while (curr_addr < end_addr):
        encoding = self.__fetch_instr(curr_addr)
        asm_instr = self.disassembler.disassemble(encoding, curr_addr, architecture_mode=arch_mode)
        if (not asm_instr):
            # Stop the generator on the first undecodable instruction.
            return
        (yield (curr_addr, asm_instr, asm_instr.size))
        curr_addr += asm_instr.size
Disassemble native instructions. Args: start (int): Start address. end (int): End address. arch_mode (int): Architecture mode. Returns: (int, Instruction, int): A tuple of the form (address, assembler instruction, instruction size).
codesearchnet
def rec_new(self, val):
    """Recursively register a value and all of its children.

    Children are added before the value itself; values already present in
    ``self.things`` are skipped entirely (children included).

    Args:
        val (LispVal): The value to be added.

    Returns:
        LispVal: The same value, for chaining.
    """
    already_known = val in self.things
    if not already_known:
        for child in val.children():
            self.rec_new(child)
        self.new(val)
    return val
Recursively add a new value and its children to me. Args: val (LispVal): The value to be added. Returns: LispVal: The added value.
codesearchnet
def set_name(self, name, anyway=False):
    """Set the function's name in the IDB.

    By default, setting a name that already exists in the IDB raises; pass
    `anyway=True` to let IDA append a counter to the name (as the GUI does).

    Args:
        name: Desired name.
        anyway: `True` to set anyway.
    """
    # Delegates to the module-level set_name helper (not recursion).
    set_name(self.startEA, name, anyway=anyway)
Set Function Name. Default behavior throws an exception when setting to a name that already exists in the IDB. to make IDA automatically add a counter to the name (like in the GUI,) use `anyway=True`. Args: name: Desired name. anyway: `True` to set anyway.
juraj-google-style
def get_timing_signal(length, min_timescale=1, max_timescale=1e4, num_timescales=16):
    """Create a Tensor of sinusoids of different frequencies.

    Args:
        length: Length of the Tensor to create, i.e. number of steps.
        min_timescale: a float
        max_timescale: a float
        num_timescales: an int

    Returns:
        Tensor of shape (length, 2*num_timescales): sin features followed by
        cos features along axis 1.
    """
    positions = to_float(tf.range(length))
    # Timescales are spaced geometrically between min and max.
    log_timescale_increment = (
        math.log(max_timescale / min_timescale) / (num_timescales - 1))
    inv_timescales = min_timescale * tf.exp(
        to_float(tf.range(num_timescales)) * -log_timescale_increment)
    # Outer product: (length, 1) * (1, num_timescales) -> (length, num_timescales).
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
    return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
Create Tensor of sinusoids of different frequencies. Args: length: Length of the Tensor to create, i.e. Number of steps. min_timescale: a float max_timescale: a float num_timescales: an int Returns: Tensor of shape (length, 2*num_timescales)
juraj-google-style
def dump(node, ast, annotate_fields=True, include_attributes=True, indent=' '):
    """Return a formatted dump of the tree in *node*, mainly for debugging.

    Args:
        node: Top AST node.
        ast: A module providing an AST class hierarchy (used for isinstance
            checks and `iter_fields`).
        annotate_fields: Show `name=value` field annotations; with False the
            output shows bare values only (and is closer to evaluatable).
        include_attributes: Also dump attributes such as line numbers and
            column offsets.
        indent: Indentation string used for nested lists.

    Returns:
        A formatted tree string.

    Raises:
        TypeError: If `node` is not an instance of `ast.AST`.
    """
    def _format(node, level=0):
        if isinstance(node, ast.AST):
            fields = [(a, _format(b, level)) for a, b in ast.iter_fields(node)]
            if include_attributes and node._attributes:
                fields.extend([(a, _format(getattr(node, a), level)) for a in node._attributes])
            return ''.join([node.__class__.__name__, '(', ', '.join(('%s=%s' % field for field in fields) if annotate_fields else (b for a, b in fields)), ')'])
        elif isinstance(node, list):
            # Lists are rendered one element per line, indented two levels
            # deeper than the enclosing node.
            lines = ['[']
            lines.extend((indent * (level + 2) + _format(x, level + 2) + ',' for x in node))
            if len(lines) > 1:
                lines.append(indent * (level + 1) + ']')
            else:
                # Empty list: close the bracket on the same line.
                lines[-1] += ']'
            return '\n'.join(lines)
        return repr(node)
    if not isinstance(node, ast.AST):
        raise TypeError(f'expected AST, got {node.__class__!r}')
    return _format(node)
Return a formatted dump of the tree in *node*. This is mainly useful for debugging purposes. The returned string will show the names and the values for fields. This makes the code impossible to evaluate, so if evaluation is wanted *annotate_fields* must be set to False. Attributes such as line numbers and column offsets are dumped by default. If this is not wanted, *include_attributes* can be set to False. Arguments: node: Top AST node. ast: An module providing an AST class hierarchy. annotate_fields: Show field annotations. include_attributes: Show all attributes. indent: Indentation string. Returns: A formatted tree.
github-repos
def get(self, url, params=None, **kwargs):
    """Call the API with a GET request.

    Args:
        url (str): Resource location relative to the base URL.
        params (dict or None): Query-string parameters.

    Returns:
        ResultParser or ErrorParser.
    """
    # Delegate to the generic dispatcher with the verb pinned to GET.
    http_method = 'GET'
    return self.call_api(http_method, url, params=params, **kwargs)
Call the API with a GET request. Args: url (str): Resource location relative to the base URL. params (dict or None): Query-string parameters. Returns: ResultParser or ErrorParser.
codesearchnet
def from_options(cls, options):
    """Creates an Environment object from PortableOptions.

    Dispatches on `environment_type` to the concrete Environment subclass
    registered for the corresponding URN.

    Args:
        options: The PortableOptions object.

    Raises:
        NotImplementedError: If called on a subclass (subclasses must
            provide their own from_options).
        ValueError: If the environment type cannot be mapped to a URN.
    """
    if cls != Environment:
        raise NotImplementedError
    portable_options = options.view_as(PortableOptions)
    environment_type = portable_options.environment_type
    if not environment_type:
        # Default to the Docker environment when nothing was specified.
        environment_urn = common_urns.environments.DOCKER.urn
    elif environment_type.startswith('beam:env:'):
        # Already a fully-qualified URN.
        environment_urn = environment_type
    elif environment_type == 'LOOPBACK':
        environment_urn = python_urns.EMBEDDED_PYTHON_LOOPBACK
    else:
        try:
            environment_urn = getattr(common_urns.environments, environment_type).urn
        except AttributeError:
            raise ValueError('Unknown environment type: %s' % environment_type)
    env_class = Environment.get_env_cls_from_urn(environment_urn)
    return env_class.from_options(portable_options)
Creates an Environment object from PortableOptions. Args: options: The PortableOptions object.
github-repos
def implement(self, implementation, for_type=None, for_types=None):
    """Registers an implementing function for for_type.

    Args:
        implementation: Callable implementation for this type.
        for_type: The type this implementation applies to.
        for_types: Same as for_type, but takes a tuple of types. for_type
            and for_types cannot both be passed.

    Raises:
        ValueError
    """
    unbound_implementation = self.__get_unbound_function(implementation)
    # __get_types validates the for_type/for_types exclusivity.
    for_types = self.__get_types(for_type, for_types)
    for t in for_types:
        # Guard the shared implementations list against concurrent writers.
        self._write_lock.acquire()
        try:
            self.implementations.append((t, unbound_implementation))
        finally:
            self._write_lock.release()
Registers an implementing function for for_type. Arguments: implementation: Callable implementation for this type. for_type: The type this implementation applies to. for_types: Same as for_type, but takes a tuple of types. for_type and for_types cannot both be passed (for obvious reasons.) Raises: ValueError
codesearchnet
def _ParseApplicationPasswordRecord(self, parser_mediator, record):
  """Extracts the information from an application password record.

  Args:
    parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
    record (dict[str, object]): database record.

  Raises:
    ParseError: if the application password record cannot be parsed.
  """
  key = record.get('_key_', None)
  # Application password records are identified by an 'ssgp' key prefix.
  if not key or not key.startswith(b'ssgp'):
    raise errors.ParseError((
        'Unsupported application password record key value does not start '
        'with: "ssgp".'))
  event_data = KeychainApplicationRecordEventData()
  event_data.account_name = self._ParseBinaryDataAsString(
      parser_mediator, record['acct'])
  event_data.comments = self._ParseBinaryDataAsString(
      parser_mediator, record['crtr'])
  event_data.entry_name = self._ParseBinaryDataAsString(
      parser_mediator, record['PrintName'])
  # The hash is the hex representation of the key after the 'ssgp' prefix.
  ssgp_hash = codecs.encode(key[4:], 'hex')
  event_data.ssgp_hash = codecs.decode(ssgp_hash, 'utf-8')
  event_data.text_description = self._ParseBinaryDataAsString(
      parser_mediator, record['desc'])
  date_time = self._ParseDateTimeValue(parser_mediator, record['cdat'])
  if date_time:
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_CREATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
  date_time = self._ParseDateTimeValue(parser_mediator, record['mdat'])
  if date_time:
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts the information from an application password record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. record (dict[str, object]): database record. Raises: ParseError: if Internet password record cannot be parsed.
juraj-google-style
def quaternion_from_axis_rotation(angle, axis):
    """Return the quaternion for a rotation of `angle` about `axis`.

    Args:
        angle (float): Angle in radians.
        axis (str): Axis for rotation: 'x', 'y' or 'z'.

    Returns:
        Quaternion: Quaternion for the axis rotation.

    Raises:
        ValueError: Invalid input axis.
    """
    vector_index = {'x': 1, 'y': 2, 'z': 3}
    if axis not in vector_index:
        raise ValueError('Invalid axis input.')
    components = np.zeros(4, dtype=float)
    # Unit quaternion: w = cos(angle/2), axis component = sin(angle/2).
    components[vector_index[axis]] = math.sin(angle / 2.0)
    components[0] = math.cos(angle / 2.0)
    return Quaternion(components)
Return quaternion for rotation about given axis. Args: angle (float): Angle in radians. axis (str): Axis for rotation Returns: Quaternion: Quaternion for axis rotation. Raises: ValueError: Invalid input axis.
juraj-google-style
def cancel(self, workflow_id):
    """Cancels a running workflow.

    Args:
        workflow_id (str): Workflow id.

    Raises:
        requests.HTTPError: If the cancel request fails.
    """
    self.logger.debug(('Canceling workflow: ' + workflow_id))
    url = ('%(wf_url)s/%(wf_id)s/cancel' % {'wf_url': self.workflows_url, 'wf_id': workflow_id})
    r = self.gbdx_connection.post(url, data='')
    # Surface any non-2xx response to the caller.
    r.raise_for_status()
Cancels a running workflow. Args: workflow_id (str): Workflow id. Returns: Nothing
codesearchnet
def parse_rdf_payload(self, data, headers):
    """Parse an RDF payload returned by a repository endpoint.

    Args:
        data (bytes): body from a requests response.
        headers (dict): headers from a requests response; 'Content-Type'
            selects the rdflib parse format.

    Returns:
        rdflib.Graph: parsed graph.
    """
    if headers['Content-Type'].startswith('text/plain'):
        # text/plain responses are treated as n-triples for parsing.
        logger.debug('text/plain Content-Type detected, using application/n-triples for parser')
        parse_format = 'application/n-triples'
    else:
        parse_format = headers['Content-Type']
    # Strip a charset parameter; rdflib expects the bare media type.
    if (';charset' in parse_format):
        parse_format = parse_format.split(';')[0]
    graph = rdflib.Graph().parse(data=data.decode('utf-8'), format=parse_format)
    return graph
small function to parse RDF payloads from various repository endpoints Args: data (response.data): data from requests response headers (response.headers): headers from requests response Returns: (rdflib.Graph): parsed graph
codesearchnet
def build_plans(self):
    """Gets the Build Plans API client, creating it lazily on first use.

    Returns:
        BuildPlans: the cached client instance.
    """
    cached = self.__build_plans
    if not cached:
        cached = BuildPlans(self.__connection)
        self.__build_plans = cached
    return cached
Gets the Build Plans API client. Returns: BuildPlans:
codesearchnet
def testWithSkip(self, verify_fn, symbolic_checkpoint, num_skips):
    """Test `.flat_map().skip()` checkpointing behavior.

    `SkipInternal` and `GetNextInternal` are separate functions with slightly
    different implementations, so the op's behavior with `.skip()` needs its
    own coverage.

    Args:
        verify_fn: Verify the correctness of this dataset's checkpointing.
        symbolic_checkpoint: Whether symbolic checkpointing is turned on.
        num_skips: `.skip(num_skips)`.
    """
    def build_dataset():
        # Each of the 3 indices flat-maps to 4 elements -> 12 total.
        def my_map(x):
            if x == 0:
                return dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3])
            elif x == 1:
                return dataset_ops.Dataset.from_tensor_slices([4, 5, 6, 7])
            else:
                return dataset_ops.Dataset.from_tensor_slices([8, 9, 10, 11])
        indices = dataset_ops.Dataset.from_tensor_slices([0, 1, 2])
        dataset = indices.flat_map(my_map)
        dataset = dataset.skip(num_skips)
        options = options_lib.Options()
        options.experimental_symbolic_checkpoint = symbolic_checkpoint
        return dataset.with_options(options)
    verify_fn(self, build_dataset, num_outputs=3 * 4 - num_skips)
Test `.flat_map().skip()` checkpointing behavior. `SkipInternal` and `GetNextInternal` are separate functions but with slightly different implementations. Therefore, we should test this op's behavior when used with `.skip()`. Args: verify_fn: Verify the correctness of this dataset's checkpointing. symbolic_checkpoint: Whether symbolic checkpointing is turned on. num_skips: `.skip(num_skips)`
github-repos
def get_help_datapacks(filepath, prefix="!"):
    """Load help text from a file and return it as datapacks.

    Sections whose heading contains "commands" are rendered as a markdown
    bullet list of `prefix+name [params]: description` entries; all other
    sections pass their content through unchanged.

    Args:
        filepath (str): The file to load help text from.
        prefix (str): The prefix to use for commands.

    Returns:
        list: (heading, content, False) tuples built from the file.
    """
    help_contents = get_help_data(filepath)
    datapacks = []
    for heading in help_contents:
        section = help_contents[heading]
        if "commands" in heading.lower():
            entries = []
            for command_info in section:
                if "name" not in command_info:
                    continue
                entry = "- `" + prefix + command_info["name"]
                if "params" in command_info:
                    for param in command_info["params"]:
                        entry += " [{}]".format(param)
                entry += "`: "
                if "description" in command_info:
                    entry += command_info["description"]
                entry += "\n"
                entries.append(entry)
            content = "".join(entries)
        else:
            content = section
        datapacks.append((heading, content, False))
    return datapacks
Load help text from a file and give it as datapacks Args: filepath (str): The file to load help text from prefix (str): The prefix to use for commands Returns: datapacks (list): The datapacks from the file
juraj-google-style
def filter_bboxes(bboxes, rows, cols, min_area=0.0, min_visibility=0.0):
    """Crop boxes to the image frame and drop degenerate ones.

    Boxes are clipped in place to the normalized [0, 1] range; a box is
    dropped when its pre-clip area is zero, when the visible fraction after
    clipping is at most `min_visibility`, or when its clipped pixel area is
    at most `min_area`.

    Args:
        bboxes (list): Bounding boxes in the albumentations format.
        rows (int): Image rows.
        cols (int): Image cols.
        min_area (float): Minimum pixel area for a box to be kept.
        min_visibility (float): Minimum visible area fraction to be kept.

    Returns:
        list: The surviving (clipped-in-place) boxes.
    """
    kept = []
    for bbox in bboxes:
        pre_clip_area = calculate_bbox_area(bbox, rows, cols)
        # Clip coordinates in place to the normalized image frame.
        bbox[:4] = np.clip(bbox[:4], 0, 1.0)
        clipped_area = calculate_bbox_area(bbox, rows, cols)
        if not pre_clip_area:
            continue
        if clipped_area / pre_clip_area <= min_visibility:
            continue
        if clipped_area <= min_area:
            continue
        kept.append(bbox)
    return kept
Remove bounding boxes that either lie outside of the visible area by more than min_visibility or whose area in pixels is under the threshold set by `min_area`. Also it crops boxes to final image size. Args: bboxes (list): List of bounding box with coordinates in the format used by albumentations rows (int): Image rows. cols (int): Image cols. min_area (float): minimum area of a bounding box. All bounding boxes whose visible area in pixels is less than this value will be removed. Default: 0.0. min_visibility (float): minimum visible fraction of a bounding box's area for the box to remain in the list. Default: 0.0.
codesearchnet
def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):
    """Draws text over the image. Requires PIL.

    Args:
        img: The image array to draw on.
        text: The text string to overlay.
        position: The text (x, y) position. (Default value = (10, 10))
        font: The ttf or open type font to use. (Default value = 'FreeSans.ttf')
        font_size: The text font size. (Default value = 14)
        color: The (r, g, b) values for text color. (Default value = (0, 0, 0))

    Returns:
        Image array overlayed with text.
    """
    _check_pil()
    font_files = _find_font_file(font)
    if len(font_files) == 0:
        # Fall back to PIL's built-in font when the requested one is missing;
        # note the default font ignores font_size.
        logger.warn("Failed to lookup font '{}', falling back to default".format(font))
        font = ImageFont.load_default()
    else:
        font = ImageFont.truetype(font_files[0], font_size)
    img = Image.fromarray(img)
    draw = ImageDraw.Draw(img)
    draw.text(position, text, fill=color, font=font)
    return np.asarray(img)
Draws text over the image. Requires PIL. Args: img: The image to use. text: The text string to overlay. position: The text (x, y) position. (Default value = (10, 10)) font: The ttf or open type font to use. (Default value = 'FreeSans.ttf') font_size: The text font size. (Default value = 14) color: The (r, g, b) values for text color. (Default value = (0, 0, 0)) Returns: Image overlayed with text.
juraj-google-style
def clone_source_dir(source_dir, dest_dir):
    """Copy the source Protobuf files into a clean build directory.

    Any existing destination tree is removed first so the copy is exact.

    Args:
        source_dir (str): source directory of the Protobuf files.
        dest_dir (str): destination directory for the copy.
    """
    dest_exists = os.path.isdir(dest_dir)
    if dest_exists:
        print('removing', dest_dir)
        shutil.rmtree(dest_dir)
    shutil.copytree(source_dir, dest_dir)
Copies the source Protobuf files into a build directory. Args: source_dir (str): source directory of the Protobuf files dest_dir (str): destination directory of the Protobuf files
juraj-google-style
def calculate_mean_and_variance_from_sample_paths(samples, num_samples, dtype):
    """Returns the mean and variance of log(`samples`) with standard errors.

    Args:
        samples: A real `Tensor` of shape
            [batch_shape, `num_samples`, num_times, 1] containing sample
            paths drawn from an Ito process.
        num_samples: A scalar integer. The number of sample paths in `samples`.
        dtype: The default dtype to use when converting values to `Tensor`s.

    Returns:
        Tuple (mean, variance, standard error of the mean, standard error of
        the variance) of the log of the samples, each of shape
        [batch_shape, num_times].
    """
    log_s = tf.math.log(samples)
    # Reduce over the samples axis (-3), keeping dims for broadcasting below.
    mean = tf.reduce_mean(log_s, axis=-3, keepdims=True)
    var = tf.reduce_mean((log_s - mean) ** 2, axis=-3, keepdims=True)
    # Drop the kept samples axis and the trailing singleton dimension.
    mean = tf.squeeze(mean, axis=[-1, -3])
    var = tf.squeeze(var, axis=[-1, -3])
    std_err_mean = tf.math.sqrt(var / num_samples)
    # Standard error of the variance: var * sqrt(2 / (n - 1)).
    std_err_var = var * tf.math.sqrt(tf.constant(2.0, dtype=dtype) / (tf.constant(num_samples, dtype=dtype) - tf.constant(1.0, dtype=dtype)))
    return (mean, var, std_err_mean, std_err_var)
Returns the mean and variance of log(`samples`). Args: samples: A real `Tensor` of shape [batch_shape, `num_samples`, num_times, 1] containing the samples of random paths drawn from an Ito process. num_samples: A scalar integer. The number of sample paths in `samples`. dtype: The default dtype to use when converting values to `Tensor`s. Returns: A tuple of (mean, variance, standard_error of the mean, standard_error of the variance) of the log of the samples. Where the components of the tuple have shape [batch_shape, num_times].
github-repos
def add_multiple_servers(self, information, timeout=-1):
    """Adds multiple rack-mount servers for management by the appliance.

    Initiates the asynchronous addition of supported server models via the
    discovery endpoint. Only supported for API version 600.

    Args:
        information (dict): Objects to create.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout only stops waiting, it does not abort the task.

    Returns:
        dict: Created rack-mount servers.
    """
    discovery_uri = '{}/discovery'.format(self.URI)
    return self.create(information, uri=discovery_uri, timeout=timeout)
Adds multiple rack-mount servers for management by the appliance. This API initiates the asynchronous addition of supported server models. Note: Servers in an enclosure are added by adding the enclosure resource. This is only supported on appliances that support rack-mounted servers. This is only supported for api version 600 Args: information (dict): Objects to create timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: dict: Created rack-mount servers.
codesearchnet
def as_qubit_order(val: 'qubit_order_or_list.QubitOrderOrList') -> 'QubitOrder':
    """Converts a value into a QubitOrder.

    Args:
        val: An iterable of qubits, or a QubitOrder instance.

    Returns:
        The QubitOrder implied by the value.

    Raises:
        ValueError: If the value is neither iterable nor a QubitOrder.
    """
    # `collections.Iterable` was a deprecated alias removed in Python
    # 3.10; the ABC lives in collections.abc.
    if isinstance(val, collections.abc.Iterable):
        return QubitOrder.explicit(val)
    if isinstance(val, QubitOrder):
        return val
    raise ValueError("Don't know how to interpret <{}> as a Basis.".format(val))
Converts a value into a basis. Args: val: An iterable or a basis. Returns: The basis implied by the value.
codesearchnet
def map_(input_layer, fn):
    """Maps the given function across this sequence.

    To map an entire template across the sequence, use the `as_fn` method
    on the template.

    Args:
        input_layer: The input holding the sequence.
        fn: A one-argument function applied to each item in the sequence.

    Returns:
        A new list with fn applied to every item.

    Raises:
        ValueError: If input_layer does not hold a sequence.
    """
    if not input_layer.is_sequence():
        raise ValueError('Can only map a sequence.')
    # map() iterates the layer exactly once, mirroring a comprehension.
    return list(map(fn, input_layer))
Maps the given function across this sequence. To map an entire template across the sequence, use the `as_fn` method on the template. Args: input_layer: The input tensor. fn: A function of 1 argument that is applied to each item in the sequence. Returns: A new sequence Pretty Tensor. Raises: ValueError: If the input_layer does not hold a sequence.
juraj-google-style
def is_closed(self):
    """Check whether the observation table is closed.

    A table is closed when every row of the extended part (smi_vector)
    has an observation-equivalent row in the upper part (sm_vector).

    Returns:
        tuple (bool, str): (True, None) if the observation table is
        closed; otherwise (False, t) where t is the escaping string.
    """
    old_training_data = self.training_data
    self.training_data = {x: [] for x in self.sm_vector}
    for t in self.smi_vector:
        src_state = t[:-1]
        symbol = t[-1:]
        found = False
        for dst_state in self.sm_vector:
            # Rows with identical observations are treated as the same
            # state; record the transition as training data.
            if self.observation_table[dst_state] == self.observation_table[t]:
                self._add_training_data(src_state, dst_state, symbol)
                found = True
                break
        if not found:
            # t escapes the table: the caller must promote it.
            return False, t
    # Guard against the learning loop making no progress between rounds.
    assert self.training_data != old_training_data, \
        "No update happened from previous round. The algo will loop infinetely"
    return True, None
Check whether the observation table is closed. Args: None Returns: tuple (bool, str): (True, None) if the observation table is closed; otherwise (False, escaping_string), where escaping_string is the row that escapes the table.
juraj-google-style
def show(self, frame):
    """Show an array of pixels on the window.

    Args:
        frame (numpy.ndarray): the RGB frame to show; must have exactly
            3 dimensions (height, width, channels).

    Raises:
        ValueError: if the frame does not have 3 dimensions.

    Returns:
        None
    """
    if (len(frame.shape) != 3):
        raise ValueError('frame should have shape with only 3 dimensions')
    # Lazily (re)open the window if it has been closed.
    if (not self.is_open):
        self.open()
    # Prepare the window: clear, make current, and pump pending events.
    self._window.clear()
    self._window.switch_to()
    self._window.dispatch_events()
    # Negative pitch — presumably flips the image vertically, since numpy
    # rows run top-to-bottom while the windowing image data runs
    # bottom-to-top (NOTE(review): confirm against the ImageData docs).
    image = ImageData(frame.shape[1], frame.shape[0], 'RGB', frame.tobytes(), pitch=(frame.shape[1] * (- 3)))
    # Stretch the frame to the full window, then swap buffers.
    image.blit(0, 0, width=self._window.width, height=self._window.height)
    self._window.flip()
Show an array of pixels on the window. Args: frame (numpy.ndarray): the frame to show on the window Returns: None
codesearchnet
def refresh(self, only_closed=False):
    """Refresh port status.

    Re-checks ports and moves them between the open (`__ports`) and
    closed (`__closed`) sets according to `__check_port`.

    Bug fix: `filter()` returns a lazy iterator. The original code
    consumed it inside `difference()` and then unioned the already
    exhausted iterator, so ports that had just come open were removed
    from `__closed` but never added to `__ports`. Materializing the
    filter result once fixes this.

    Args:
        only_closed: check status only for currently closed ports.
    """
    if only_closed:
        # Materialize before using twice (see bug-fix note above).
        opened = set(filter(self.__check_port, self.__closed))
        self.__closed = self.__closed.difference(opened)
        self.__ports = self.__ports.union(opened)
    else:
        # Re-check every known port, open or closed.
        candidates = self.__closed.union(self.__ports)
        self.__ports = set(filter(self.__check_port, candidates))
        self.__closed = candidates.difference(self.__ports)
refresh ports status Args: only_closed - check status only for closed ports
juraj-google-style
def reset_index(self, **kwargs):
    """Removes all levels from index and sets a default level_0 index.

    Args:
        **kwargs: supports 'drop' (bool) — when True the old index is
            discarded instead of being inserted as data column(s).

    Returns:
        A new QueryCompiler with updated data and reset index.
    """
    drop = kwargs.get('drop', False)
    new_index = pandas.RangeIndex(len(self.index))
    if (not drop):
        if isinstance(self.index, pandas.MultiIndex):
            # Materialize every index level as leading data columns.
            new_column_names = pandas.Index(self.index.names)
            new_columns = new_column_names.append(self.columns)
            # zip(*index) transposes the index tuples into per-level
            # rows; .T turns them back into one column per level.
            index_data = pandas.DataFrame(list(zip(*self.index))).T
            result = self.data.from_pandas(index_data).concat(1, self.data)
            return self.__constructor__(result, new_index, new_columns)
        else:
            # Mirror pandas naming: use the index name when set,
            # otherwise 'index', falling back to 'level_0' when a column
            # named 'index' already exists.
            new_column_name = (self.index.name if (self.index.name is not None) else ('index' if ('index' not in self.columns) else 'level_0'))
            new_columns = self.columns.insert(0, new_column_name)
            result = self.insert(0, new_column_name, self.index)
            return self.__constructor__(result.data, new_index, new_columns)
    else:
        # Dropping the index: keep data and columns, reuse cached dtypes.
        return self.__constructor__(self.data.copy(), new_index, self.columns.copy(), self._dtype_cache)
Removes all levels from index and sets a default level_0 index. Returns: A new QueryCompiler with updated data and reset index.
codesearchnet
def calculate_parity(n):
    """Calculates and returns the parity of a number.

    The parity is 1 if the number has an odd number of ones in its
    binary representation, otherwise 0.

    Args:
        n (int): the number whose parity to calculate.

    Returns:
        1 if the number has an odd number of ones, otherwise 0.

    Raises:
        ValueError: if n fails the natural-number check.
    """
    if not is_natural(n):
        raise ValueError('Expected n to be a positive integer.')
    # Count set bits via the binary string form; parity is the low bit
    # of that count.
    return bin(abs(n)).count('1') & 1
Calculates and returns the parity of a number. The parity of a number is ``1`` if the number has an odd number of ones in its binary representation, otherwise ``0``. Args: n (int): the number whose parity to calculate Returns: ``1`` if the number has an odd number of ones, otherwise ``0``. Raises: ValueError: if ``n`` is less than ``0``.
juraj-google-style
def add_tile(self, tile_source, **kw):
    """Adds a new ``TileRenderer`` into ``Plot.renderers``.

    Args:
        tile_source (TileSource): a tile source instance containing the
            tileset configuration.

    Keyword Arguments:
        Additional keyword arguments are passed on as-is to the
        tile renderer.

    Returns:
        TileRenderer: the renderer that was appended.
    """
    renderer = TileRenderer(tile_source=tile_source, **kw)
    self.renderers.append(renderer)
    return renderer
Adds new ``TileRenderer`` into ``Plot.renderers`` Args: tile_source (TileSource) : a tile source instance which contain tileset configuration Keyword Arguments: Additional keyword arguments are passed on as-is to the tile renderer Returns: TileRenderer : TileRenderer
juraj-google-style
def _dataset_merge_hdx_update(self, update_resources, update_resources_by_name, remove_additional_resources, create_default_views, hxl_update):
    """Helper to merge new dataset data into the old and push to HDX.

    Args:
        update_resources (bool): Whether to update resources.
        update_resources_by_name (bool): Compare resource names rather
            than position in list.
        remove_additional_resources (bool): Remove additional resources
            found in dataset (if updating).
        create_default_views (bool): Whether to call
            package_create_default_resource_views.
        hxl_update (bool): Whether to call package_hxl_update.

    Returns:
        None
    """
    # Overlay the freshly supplied data on top of the existing dataset.
    merge_two_dictionaries(self.data, self.old_data)
    # Resources are handled separately below, not as part of the dict.
    if ('resources' in self.data):
        del self.data['resources']
    updated_resources = self.old_data.get('resources', None)
    # Resources whose files must be re-uploaded to the filestore.
    filestore_resources = list()
    if (update_resources and updated_resources):
        ignore_fields = ['package_id']
        if update_resources_by_name:
            # Match existing and updated resources by their names.
            resource_names = set()
            for resource in self.resources:
                resource_name = resource['name']
                resource_names.add(resource_name)
                for updated_resource in updated_resources:
                    if (resource_name == updated_resource['name']):
                        logger.warning(('Resource exists. Updating %s' % resource_name))
                        self._dataset_merge_filestore_resource(resource, updated_resource, filestore_resources, ignore_fields)
                        break
            # Any updated resource without a name match is brand new.
            updated_resource_names = set()
            for updated_resource in updated_resources:
                updated_resource_name = updated_resource['name']
                updated_resource_names.add(updated_resource_name)
                if (not (updated_resource_name in resource_names)):
                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)
            if remove_additional_resources:
                # Delete existing resources absent from the update,
                # iterating indices in reverse so deletions don't shift.
                resources_to_delete = list()
                for (i, resource) in enumerate(self.resources):
                    resource_name = resource['name']
                    if (resource_name not in updated_resource_names):
                        logger.warning(('Removing additional resource %s!' % resource_name))
                        resources_to_delete.append(i)
                for i in sorted(resources_to_delete, reverse=True):
                    del self.resources[i]
        else:
            # Match resources positionally instead of by name.
            for (i, updated_resource) in enumerate(updated_resources):
                if (len(self.resources) > i):
                    updated_resource_name = updated_resource['name']
                    resource = self.resources[i]
                    resource_name = resource['name']
                    logger.warning(('Resource exists. Updating %s' % resource_name))
                    if (resource_name != updated_resource_name):
                        logger.warning(('Changing resource name to: %s' % updated_resource_name))
                    self._dataset_merge_filestore_resource(resource, updated_resource, filestore_resources, ignore_fields)
                else:
                    # More updated resources than existing: append new.
                    self._dataset_merge_filestore_newresource(updated_resource, ignore_fields, filestore_resources)
            if remove_additional_resources:
                # Trim trailing resources beyond the updated list length.
                resources_to_delete = list()
                for (i, resource) in enumerate(self.resources):
                    if (len(updated_resources) <= i):
                        logger.warning(('Removing additional resource %s!' % resource['name']))
                        resources_to_delete.append(i)
                for i in sorted(resources_to_delete, reverse=True):
                    del self.resources[i]
    if self.resources:
        self.data['resources'] = self._convert_hdxobjects(self.resources)
    # Validate required fields, then push the merged dataset to HDX and
    # upload any filestore-backed resources.
    ignore_field = self.configuration['dataset'].get('ignore_on_update')
    self.check_required_fields(ignore_fields=[ignore_field])
    self._save_to_hdx('update', 'id')
    self._add_filestore_resources(filestore_resources, create_default_views, hxl_update)
Helper method to check if dataset or its resources exist and update them Args: update_resources (bool): Whether to update resources update_resources_by_name (bool): Compare resource names rather than position in list remove_additional_resources (bool): Remove additional resources found in dataset (if updating) create_default_views (bool): Whether to call package_create_default_resource_views. hxl_update (bool): Whether to call package_hxl_update. Returns: None
codesearchnet
def _add_query_parameter(url, name, value):
    """Adds a query parameter to a url.

    Replaces the current value if it already exists in the URL. The url
    is returned unchanged when value is None.

    Args:
        url: string, url to add the query parameter to.
        name: string, query parameter name.
        value: string, query parameter value.

    Returns:
        string: the updated url.
    """
    # Guard clause: nothing to add.
    if value is None:
        return url
    return update_query_params(url, {name: value})
Adds a query parameter to a url. Replaces the current value if it already exists in the URL. Args: url: string, url to add the query parameter to. name: string, query parameter name. value: string, query parameter value. Returns: Updated query parameter. Does not update the url if value is None.
codesearchnet
def index_resources(self):
    """Gets the Index Resources API client, building it on first use.

    Returns:
        IndexResources: client bound to this object's connection.
    """
    cached = self.__index_resources
    if not cached:
        # First access: construct and memoize the client.
        cached = IndexResources(self.__connection)
        self.__index_resources = cached
    return cached
Gets the Index Resources API client. Returns: IndexResources:
codesearchnet
def FetchURNsForAllSignedBinaries(token):
    """Returns URNs for all signed binaries in the datastore.

    Args:
        token: ACL token to use with the legacy (non-relational)
            datastore.

    Returns:
        A list of URNs, one per signed binary.
    """
    if _ShouldUseLegacyDatastore():
        urns = []
        # Signed binaries live under two AFF4 roots: python hacks and
        # executables. Collect all descendants of both.
        aff4_roots = [GetAFF4PythonHackRoot(), GetAFF4ExecutablesRoot()]
        for _, descendant_urns in aff4.FACTORY.RecursiveMultiListChildren(
            aff4_roots):
            urns.extend(descendant_urns)
        # Opening with aff4_type filters out non-GRRSignedBlob children.
        aff4_streams = aff4.FACTORY.MultiOpen(
            urns, aff4_type=collects.GRRSignedBlob, mode="r", token=token)
        return [stream.urn for stream in aff4_streams]
    else:
        # Relational datastore path: map stored IDs back to URNs.
        return [
            _SignedBinaryURNFromID(i)
            for i in data_store.REL_DB.ReadIDsForAllSignedBinaries()
        ]
Returns URNs for all signed binaries in the datastore. Args: token: ACL token to use with the legacy (non-relational) datastore.
juraj-google-style
def Upgrade(self, aff4_class):
    """Upgrades this object to the type specified.

    AFF4 objects can be upgraded on the fly to another type as long as
    the new type is derived from the current type.

    Args:
        aff4_class: The AFF4Object subclass to upgrade to.

    Returns:
        An instance of the new class carrying this object's attributes.

    Raises:
        InstantiationError: If aff4_class is not a type or not a
            subclass of AFF4Object.
    """
    _ValidateAFF4Type(aff4_class)
    # Fast path: already exactly the requested class.
    if (self.__class__ == aff4_class):
        return self
    if (not isinstance(aff4_class, type)):
        raise InstantiationError(('aff4_class=%s must be a type' % aff4_class))
    if (not issubclass(aff4_class, AFF4Object)):
        raise InstantiationError(('aff4_class=%s must be a subclass of AFF4Object.' % aff4_class))
    # Already an instance of (a subclass of) the target: nothing to do.
    if isinstance(self, aff4_class):
        return self
    # Clone all bookkeeping state into a fresh instance of the new type;
    # `clone=self` carries over attributes.
    result = aff4_class(self.urn, mode=self.mode, clone=self, parent=self.parent,
                        token=self.token, age=self.age_policy,
                        object_exists=self.object_exists,
                        follow_symlinks=self.follow_symlinks,
                        aff4_type=self.aff4_type,
                        mutation_pool=self.mutation_pool,
                        transaction=self.transaction)
    result.symlink_urn = self.urn
    result.Initialize()
    return result
Upgrades this object to the type specified. AFF4 Objects can be upgraded on the fly to other type - As long as the new type is derived from the current type. This feature allows creation of placeholder objects which can later be upgraded to the fully featured object. Note: It is not allowed to downgrade an object if that would result in a loss of information (since the new object has a smaller schema). This method tries to store the new object with its new attributes and will fail if any attributes can not be mapped. Args: aff4_class: A string representing the new class. Returns: an instance of the new class with all the same attributes as this current object. Raises: ValueError: When the object to upgrade is locked. AttributeError: When the new object can not accept some of the old attributes. InstantiationError: When we cannot instantiate the object type class.
codesearchnet
def get_event(self, event_key):
    """Get event for the provided event key.

    Args:
        event_key: Event key for which event is to be determined.

    Returns:
        Event corresponding to the provided event key, or None when the
        key is not present in the datafile.
    """
    event = self.event_key_map.get(event_key)
    if not event:
        # Unknown key: log, report through the error handler, and bail.
        self.logger.error(('Event "%s" is not in datafile.' % event_key))
        self.error_handler.handle_error(
            exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY_ERROR))
        return None
    return event
Get event for the provided event key. Args: event_key: Event key for which event is to be determined. Returns: Event corresponding to the provided event key.
codesearchnet
def _events(self, using_url, filters=None, limit=None):
    """Long-polls `using_url` for Syncthing events, yielding each one.

    Args:
        using_url (str): REST HTTP endpoint.
        filters (List[str] or str): event types to subscribe to; a
            comma-separated string is split into a list.
        limit (int): number of events in the history to catch up on.

    Yields:
        dict: one Syncthing event at a time.
    """
    # Coerce an invalid limit to None rather than failing.
    if not isinstance(limit, (int, NoneType)):
        limit = None
    if filters is None:
        filters = []
    # Allow "a,b,c" comma-separated shorthand for the filter list.
    if isinstance(filters, string_types):
        filters = filters.split(',')
    # `blocking` doubles as the loop flag so another thread can stop us.
    if not self.blocking:
        self.blocking = True
    while self.blocking:
        params = {
            'since': self._last_seen_id,
            'limit': limit,
        }
        if filters:
            params['events'] = ','.join(map(str, filters))
        try:
            data = self.get(using_url, params=params, raw_exceptions=True)
        except (ConnectTimeout, ConnectionError) as e:
            # Long-poll timed out or the connection dropped: poll again.
            data = None
        except Exception as e:
            reraise('', e)
        if data:
            # Resume the next poll from the newest event id we have seen.
            self._last_seen_id = data[-1]['id']
            for event in data:
                self._count += 1
                yield event
A long-polling method that queries Syncthing for events.. Args: using_url (str): REST HTTP endpoint filters (List[str]): Creates an "event group" in Syncthing to only receive events that have been subscribed to. limit (int): The number of events to query in the history to catch up to the current state. Returns: generator[dict]
juraj-google-style
def time_pad(x, filter_size, dilations):
    """Pad left across time and pad valid across the spatial components.

    Also concats a binary feature channel that indicates whether a
    position is padded.

    Args:
        x: 5-D Tensor, (NTHWC).
        filter_size: list of 3 ints (time, height, width).
        dilations: list of ints; dilations - 1 specifies the number of
            holes between two filter elements.

    Returns:
        x_pad: 5-D Tensor with padding applied and the indicator channel
        prepended.
    """
    x_shape = common_layers.shape_list(x)
    # 1x1x1 filters need no padding at all.
    if filter_size == [1, 1, 1]:
        return x
    _, h, w = filter_size
    # Effective kernel extents once dilation holes are accounted for.
    eff_h = h + (h - 1)*(dilations[2] - 1)
    eff_w = w + (w - 1)*(dilations[3] - 1)
    a = (eff_h - 1)
    b = (eff_w - 1)
    c = filter_size[0] - 1
    # Causal (left-only) padding on the time axis; symmetric padding on
    # height and width; none on batch or channels.
    padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]
    # Indicator channel: zeros inside the frame, ones in padded regions.
    x_bias = tf.zeros(x_shape[:-1] + [1])
    x_bias = tf.pad(x_bias, padding, constant_values=1)
    x_pad = tf.pad(x, padding)
    x_pad = tf.concat((x_bias, x_pad), axis=-1)
    return x_pad
Pad left across time and pad valid across the spatial components. Also concats a binary feature that indicates if a feature is padded or not. Args: x: 5-D Tensor, (NTHWC) filter_size: list of ints dilations: list of ints, dilations - 1 specifies the number of holes between two filter elements. Returns: x_pad: 5-D Tensor.
juraj-google-style
def client(self, service_name, version, component, **kw):
    """Builds a ServiceClient for the named GCP service and version.

    Args:
        service_name (str): name of the GCP service.
        version (str): the API version for the service.
        component (str): the service component the client targets.
        **kw: optional 'developer_key' and 'cache_discovery' settings.

    Returns:
        ServiceClient: client wired to this object's credentials,
        rate limiter and HTTP transport.
    """
    # Reuse the configured transport when present; otherwise build one.
    http = self._http or _build_http()
    gcp_service = _create_service_api(
        self._credentials,
        service_name,
        version,
        kw.get('developer_key'),
        kw.get('cache_discovery', False),
        http)
    return ServiceClient(
        gcp_service=gcp_service,
        component=component,
        credentials=self._credentials,
        rate_limiter=self._rate_limiter,
        use_cached_http=self._use_cached_http,
        http=self._http)
Safely initialize a service client for the given GCP service. Args: service_name (str): name of the GCP service to build a client for. version (str): the API version of the service. component (str): the component of the service the client targets. **kw: optional 'developer_key' and 'cache_discovery' settings. Returns: ServiceClient: an initialized client for the requested service.
juraj-google-style
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel used to create the RPC stubs below.
    """
    # Each attribute is a unary-unary gRPC callable bound to one RPC of
    # the TranslationService; the serializer/deserializer pairs convert
    # between protobuf messages and the wire format.
    self.TranslateText = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/TranslateText",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.TranslateTextRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.TranslateTextResponse.FromString,
    )
    self.DetectLanguage = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/DetectLanguage",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DetectLanguageRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DetectLanguageResponse.FromString,
    )
    self.GetSupportedLanguages = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/GetSupportedLanguages",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.GetSupportedLanguagesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.SupportedLanguages.FromString,
    )
    # Batch and glossary RPCs return long-running Operations.
    self.BatchTranslateText = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/BatchTranslateText",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.BatchTranslateTextRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.CreateGlossary = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/CreateGlossary",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.CreateGlossaryRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
    self.ListGlossaries = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/ListGlossaries",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.ListGlossariesRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.ListGlossariesResponse.FromString,
    )
    self.GetGlossary = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/GetGlossary",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.GetGlossaryRequest.SerializeToString,
        response_deserializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.Glossary.FromString,
    )
    self.DeleteGlossary = channel.unary_unary(
        "/google.cloud.translation.v3beta1.TranslationService/DeleteGlossary",
        request_serializer=google_dot_cloud_dot_translation__v3beta1_dot_proto_dot_translation__service__pb2.DeleteGlossaryRequest.SerializeToString,
        response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def sendmail(subject, text, mailto, sender=None):
    """Sends an e-mail with unix sendmail.

    Args:
        subject: String with the subject of the mail.
        text: String with the body of the mail.
        mailto: String or list of strings with the recipients.
        sender: String with the sender address. If sender is None,
            username@hostname is used.

    Returns:
        Exit status: -1 when no sendmail binary is found, otherwise the
        length of sendmail's stderr output (0 on success).
    """
    def user_at_host():
        # Default sender: current login at this host.
        from socket import gethostname
        return os.getlogin() + "@" + gethostname()

    try:
        sender = user_at_host() if sender is None else sender
    except OSError:
        # os.getlogin() can fail without a controlling terminal
        # (e.g. under a scheduler); fall back to a fixed address.
        sender = 'abipyscheduler@youknowwhere'
    if is_string(mailto):
        mailto = [mailto]

    from email.mime.text import MIMEText
    mail = MIMEText(text)
    mail["Subject"] = subject
    mail["From"] = sender
    mail["To"] = ", ".join(mailto)
    msg = mail.as_string()

    from subprocess import Popen, PIPE
    import sys

    sendmail = which("sendmail")
    if sendmail is None:
        return -1
    if sys.version_info[0] < 3:
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE)
    else:
        # universal_newlines lets us pass msg as str on Python 3.
        p = Popen([sendmail, "-t"], stdin=PIPE, stderr=PIPE, universal_newlines=True)
    outdata, errdata = p.communicate(msg)
    return len(errdata)
Sends an e-mail with unix sendmail. Args: subject: String with the subject of the mail. text: String with the body of the mail. mailto: String or list of string with the recipients. sender: string with the sender address. If sender is None, username@hostname is used. Returns: Exit status
juraj-google-style
def match_alphabet(self, pattern):
    """Initialise the alphabet for the Bitap algorithm.

    Each character maps to a bitmask with a 1 bit at every position where
    it occurs; bit 0 corresponds to the last character of the pattern.

    Args:
        pattern: The text to encode.

    Returns:
        Hash of character locations.
    """
    masks = dict.fromkeys(pattern, 0)
    highest = len(pattern) - 1
    for index, char in enumerate(pattern):
        masks[char] |= 1 << (highest - index)
    return masks
Initialise the alphabet for the Bitap algorithm. Args: pattern: The text to encode. Returns: Hash of character locations.
juraj-google-style
def GetHashersInformation(cls):
    """Retrieves the hashers information.

    Returns:
        list[tuple]: containing:
            str: hasher name.
            str: hasher description (empty string when undefined).
    """
    return [
        (hasher_class.NAME, getattr(hasher_class, 'DESCRIPTION', ''))
        for _, hasher_class in cls.GetHasherClasses()]
Retrieves the hashers information. Returns: list[tuple]: containing: str: hasher name. str: hasher description.
codesearchnet
def fft(x, axis=(- 1), padding_samples=0):
    """Apply an FFT along the given dimension with optional zero-padding.

    Args:
        x (ArrayWithUnits): array with one or more TimeDimension axes.
        axis (int): the axis along which the FFT should be applied.
        padding_samples (int): number of padding zeros appended along
            axis before performing the FFT.

    Returns:
        ArrayWithUnits: the transformed coefficients, with axis replaced
        by a FrequencyDimension.
    """
    if (padding_samples > 0):
        # NOTE(review): the zeros block is shaped (len(x), padding_samples),
        # which assumes x is 2-D with axis == -1 — confirm for other axes.
        padded = np.concatenate([x, np.zeros((len(x), padding_samples), dtype=x.dtype)], axis=axis)
    else:
        padded = x
    # Real-input FFT with orthonormal scaling.
    transformed = np.fft.rfft(padded, axis=axis, norm='ortho')
    # Recover the sample rate from the time dimension's frequency so the
    # new frequency axis can be labeled.
    sr = audio_sample_rate(int((Seconds(1) / x.dimensions[axis].frequency)))
    scale = LinearScale.from_sample_rate(sr, transformed.shape[(- 1)])
    new_dimensions = list(x.dimensions)
    new_dimensions[axis] = FrequencyDimension(scale)
    return ArrayWithUnits(transformed, new_dimensions)
Apply an FFT along the given dimension, and with the specified amount of zero-padding Args: x (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits` instance which has one or more :class:`~zounds.timeseries.TimeDimension` axes axis (int): The axis along which the fft should be applied padding_samples (int): The number of padding zeros to apply along axis before performing the FFT
codesearchnet
def FindMessageTypeByName(self, full_name):
    """Loads the named message descriptor from the pool.

    Args:
        full_name: The full name of the descriptor to load.

    Returns:
        The descriptor for the named type.

    Raises:
        KeyError: if the message cannot be found in the pool.
    """
    full_name = _NormalizeFullyQualifiedName(full_name)
    try:
        return self._descriptors[full_name]
    except KeyError:
        # Not cached yet: pull the containing file from the database,
        # which populates self._descriptors, then retry the lookup.
        self._FindFileContainingSymbolInDb(full_name)
        return self._descriptors[full_name]
Loads the named descriptor from the pool. Args: full_name: The full name of the descriptor to load. Returns: The descriptor for the named type. Raises: KeyError: if the message cannot be found in the pool.
juraj-google-style
def list_vmss_vm_instance_view_pg(access_token, subscription_id, resource_group, vmss_name, link=None):
    """Gets one page of a paginated list of scale set VM instance views.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        vmss_name (str): Name of the virtual machine scale set.
        link (str): Optional URI for the next page of a paginated query.

    Returns:
        HTTP response. JSON body of list of VM instance views.
    """
    # A follow-up page already carries its full URI.
    if link is not None:
        return do_get(link, access_token)
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Compute/virtualMachineScaleSets/' + vmss_name
        + '/virtualMachines?$expand=instanceView&$select=instanceView'
        + '&api-version=' + COMP_API)
    return do_get(endpoint, access_token)
Gets one page of a paginated list of scale set VM instance views. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. vmss_name (str): Name of the virtual machine scale set. link (str): Optional link to URI to get list (as part of a paginated API query). Returns: HTTP response. JSON body of list of VM instance views.
juraj-google-style
def archs(self, as_list=False):
    """Return all of the architectures for this target.

    Args:
        as_list (bool): Return a list instead of the default set object.

    Returns:
        set or list: All of the architectures used in this
        TargetSettings object.
    """
    parts = self.arch_list().split('/')
    return parts if as_list else set(parts)
Return all of the architectures for this target. Args: as_list (bool): Return a list instead of the default set object. Returns: set or list: All of the architectures used in this TargetSettings object.
juraj-google-style
def get(self, volume_id):
    """Get a volume.

    Args:
        volume_id (str): Volume name.

    Returns:
        Volume: the inspected volume wrapped as a model.

    Raises:
        docker.errors.NotFound: if the volume does not exist.
        docker.errors.APIError: if the server returns an error.
    """
    raw = self.client.api.inspect_volume(volume_id)
    return self.prepare_model(raw)
Get a volume. Args: volume_id (str): Volume name. Returns: (:py:class:`Volume`): The volume. Raises: :py:class:`docker.errors.NotFound` If the volume does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def get_label(self, label, params=None):
    """Gets a security label from an Indicator/Group/Victim.

    Args:
        label: The name of the Security Label.
        params: Optional query parameters forwarded to the request.

    Returns:
        The response from the underlying label call.
    """
    return self.label(label, params=params, action='GET')
Gets a security label from an Indicator/Group/Victim. Args: label: The name of the Security Label. params (dict, optional): query parameters to send with the request.
juraj-google-style
def get_course_and_course_run(self, course_run_id):
    """Return the course and course run metadata for a course run ID.

    Arguments:
        course_run_id (str): The course run ID.

    Returns:
        tuple: The course metadata and the course run metadata (the
        latter is None when the course or the run is not found).
    """
    course_id = parse_course_key(course_run_id)
    course = self.get_course_details(course_id)
    course_run = None
    if course:
        # Pick the run whose key matches; None when absent.
        course_run = next(
            (run for run in course['course_runs'] if run['key'] == course_run_id),
            None)
    return (course, course_run)
Return the course and course run metadata for the given course run ID. Arguments: course_run_id (str): The course run ID. Returns: tuple: The course metadata and the course run metadata.
codesearchnet
async def send_message(
    self, request: str, response_expected: bool, **kwargs: Any
) -> Response:
    """Transport the message to the server and return the response.

    Args:
        request: The JSON-RPC request string, sent as the POST body.
        response_expected: Whether the request expects a response
            (unused here; the HTTP round-trip always yields a body).
        **kwargs: Extra fetch options; a 'headers' entry overrides the
            transport defaults.

    Returns:
        A Response object wrapping the decoded body and the raw
        response.
    """
    # Start from the transport defaults, then layer caller overrides.
    headers = dict(self.DEFAULT_HEADERS)
    headers.update(kwargs.pop("headers", {}))
    response = await self.client.fetch(
        self.endpoint, method="POST", body=request, headers=headers, **kwargs
    )
    return Response(response.body.decode(), raw=response)
Transport the message to the server and return the response. Args: request: The JSON-RPC request string. response_expected: Whether the request expects a response. Returns: A Response object.
juraj-google-style
def ADOPT_module_key_flags(
    module, flag_values=FLAGS):
    """Declares that all flags key to a module are key to the current module.

    Args:
        module: A module object.
        flag_values: A FlagValues object. This should almost never need
            to be overridden.

    Raises:
        Error: When given an argument that is a module name (a string),
            instead of a module object.
    """
    if not isinstance(module, types.ModuleType):
        raise Error('Expected a module object, not %r.' % (module,))
    _internal_declare_key_flags(
        [f.name for f in flag_values._GetKeyFlagsForModule(module.__name__)],
        flag_values=flag_values)
    # If the module is this flags module itself, also pick up the special
    # flags (help and friends) held in the SPECIAL_FLAGS registry.
    if module == _helpers.GetModuleObjectAndName(globals())[0]:
        _internal_declare_key_flags(
            [f.name for f in six.itervalues(_helpers.SPECIAL_FLAGS.FlagDict())],
            flag_values=_helpers.SPECIAL_FLAGS,
            key_flag_values=flag_values)
Declares that all flags key to a module are key to the current module. Args: module: A module object. flag_values: A FlagValues object. This should almost never need to be overridden. Raises: Error: When given an argument that is a module name (a string), instead of a module object.
juraj-google-style
def calculate_dill_dG(seq_len, temp):
    """Get free energy of unfolding (dG) using the Dill method, in J/mol.

    Args:
        seq_len (int): Length of amino acid sequence.
        temp (float): Temperature in degrees C.

    Returns:
        float: Free energy of unfolding dG (J/mol).
    """
    # Reference temperatures in kelvin: Th for enthalpy, Ts for entropy.
    enthalpy_ref_temp = 373.5
    entropy_ref_temp = 385
    kelvin = temp + 273.15
    # Chain-length-dependent thermodynamic parameters (J/mol, J/(mol K)).
    delta_h = (4.0 * seq_len + 143) * 1000
    delta_s = 13.27 * seq_len + 448
    delta_cp = (0.049 * seq_len + 0.85) * 1000
    return (delta_h
            + delta_cp * (kelvin - enthalpy_ref_temp)
            - kelvin * delta_s
            - kelvin * delta_cp * math.log(float(kelvin) / entropy_ref_temp))
Get free energy of unfolding (dG) using Dill method in units J/mol. Args: seq_len (int): Length of amino acid sequence temp (float): Temperature in degrees C Returns: float: Free energy of unfolding dG (J/mol)
juraj-google-style
def price(self, instrument, **kwargs):
    """Fetch a price for an instrument.

    Accounts are not associated in any way with this endpoint.

    Args:
        instrument: Name of the Instrument.
        time: (kwarg) The time at which the desired price is in effect.
            The current price is returned if no time is provided.

    Returns:
        v20.response.Response containing the results from submitting
        the request.
    """
    request = Request('GET', '/v3/instruments/{instrument}/price')
    request.set_path_param('instrument', instrument)
    request.set_param('time', kwargs.get('time'))
    response = self.ctx.request(request)
    # Non-JSON (or typeless) responses are returned unparsed.
    if (response.content_type is None):
        return response
    if (not response.content_type.startswith('application/json')):
        return response
    jbody = json.loads(response.raw_body)
    parsed_body = {}
    status = str(response.status)
    if status == '200':
        if jbody.get('price') is not None:
            parsed_body['price'] = self.ctx.pricing_common.Price.from_dict(
                jbody['price'], self.ctx)
    elif status in ('400', '401', '404', '405'):
        # All documented error statuses carry the same payload shape;
        # the original repeated this block verbatim four times.
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    else:
        # Unexpected status: surface the raw JSON body unchanged.
        parsed_body = jbody
    response.body = parsed_body
    return response
Fetch a price for an instrument. Accounts are not associated in any way with this endpoint. Args: instrument: Name of the Instrument time: The time at which the desired price is in effect. The current price is returned if no time is provided. Returns: v20.response.Response containing the results from submitting the request
codesearchnet