code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def receive(self, sequence, args): if (not self._reorder): self._callback(*args) return if ((self._next_expected is not None) and (sequence < self._next_expected)): print(('Dropping out of order packet, seq=%d' % sequence)) return self._out_of_order.append((sequence, args)) ...
Receive one packet If the sequence number is one we've already seen before, it is dropped. If it is not the next expected sequence number, it is put into the _out_of_order queue to be processed once the holes in sequence number are filled in. Args: sequence (int): The sequence number of the received packet args (lis...
codesearchnet
def get_energy(self, composition, strict=True): if strict and set(composition.keys()) > set(self.keys()): s = set(composition.keys()) - set(self.keys()) raise ValueError("Potentials not specified for {}".format(s)) return sum(self.get(k, 0) * v for k, v in composition.it...
Calculates the energy of a composition. Args: composition (Composition): input composition strict (bool): Whether all potentials must be specified
juraj-google-style
def add_request(self, input_ids: List[int], request_id: Optional[str]=None, max_new_tokens: Optional[int]=None) -> str: if request_id is None: with self._request_lock: request_id = f'req_{self._request_counter}' self._request_counter += 1 max_new_tokens = self.generation_config.m...
Add a new generation request to the queue. Args: input_ids: Input token IDs to use as prompt request_id: Optional custom request ID (auto-generated if None) **kwargs: Additional generation parameters Returns: str: The request ID
github-repos
def recipe_ga360_segmentology(config, auth_write, auth_read, view, recipe_slug): dataset(config, {'description': 'Create a dataset for bigquery tables.', 'hour': [4], 'auth': auth_write, 'dataset': recipe_slug}) bigquery(config, {'auth': auth_write, 'function': 'Pearson Significance Test', 'to': {'dataset': rec...
GA360 funnel analysis using Census data. Args: auth_write (authentication) - Authorization used for writing data. auth_read (authentication) - Authorization for reading GA360. view (string) - View Id recipe_slug (string) - Name of Google BigQuery dataset to create.
github-repos
def custom_returnvalue(self, printer, desc=None):
    """Use a custom function to print the return value.

    Args:
        printer (callable): A function that takes the return value and
            converts it to a string.
        desc (str, optional): A description of the return value.
    """
    info = ReturnInfo(None, printer, True, desc)
    self.return_info = info
Use a custom function to print the return value. Args: printer (callable): A function that should take in the return value and convert it to a string. desc (str): An optional description of the return value.
codesearchnet
def __init__(self, start, end):
    """Initializes a TimeRange.

    Args:
        start: An RDFDatetime marking the beginning of the time-range.
        end: An RDFDatetime marking the end of the time-range.

    Raises:
        ValueError: If `start` is later than `end`.
    """
    if start > end:
        message = "Invalid time-range: %s > %s." % (
            start.AsMicrosecondsSinceEpoch(),
            end.AsMicrosecondsSinceEpoch())
        raise ValueError(message)
    self._start = start
    self._end = end
Initializes a TimeRange. Args: start: An RDFDatetime that indicates the beginning of the time-range. end: An RDFDatetime that indicates the end of the time-range. Raises: ValueError: If the beginning of the time range is at a future time as compared to the end of the time-range.
juraj-google-style
def inplace_add(x, i, v):
    """Applies an inplace add on input x at index i with value v.

    NOTE: not actually in-place -- a deep copy of `x` is made first, so
    the utility is in expressing a sparse update, not avoiding a copy.

    Args:
        x: tensor to update.
        i: index -- None, scalar, or vector; semantics follow
            `alias_inplace_add` (see its documentation; presumably
            None means x and v must share shape).
        v: value(s) to add at the indexed position(s).

    Returns:
        The updated copy of `x`.
    """
    return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)
Applies an inplace add on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y += v; If i is a scalar, x has a rank ...
github-repos
def get_ecommerce_client(url_postfix='', site_code=None): ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code) signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code) issuer = get_configuration('JWT_ISSUER', site_code=site_code) service_username = get_con...
Get client for fetching data from ecommerce API. Arguments: site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value. Returns: EdxRestApiClient object
juraj-google-style
def unstack(df, level=-1, reset_index=True):
    """pd.DataFrame.unstack adapter.

    Calls `df.unstack` with the indicated level and, when requested,
    resets the index and joins the column names via `_join_names`.

    Args:
        df (pandas.DataFrame): DataFrame to unstack.
        level (str, int or list): Level(s) of index to unstack.
        reset_index (bool): Whether to reset the index afterwards.

    Returns:
        pandas.DataFrame: The unstacked frame.
    """
    unstacked = df.unstack(level=level)
    if not reset_index:
        return unstacked
    unstacked = unstacked.reset_index()
    unstacked.columns = unstacked.columns.map(_join_names)
    return unstacked
pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index a...
juraj-google-style
def get(self, name): interface = name if not interface: raise ValueError("Vrrp.get(): interface must contain a value.") config = self.get_block('interface %s' % interface) if config is None: return config ...
Get the vrrp configurations for a single node interface Args: name (string): The name of the interface for which vrrp configurations will be retrieved. Returns: A dictionary containing the vrrp configurations on the interface. Returns None if no vrrp configurations are defined or if the interface is not configured.
juraj-google-style
def Add(self, rdf_value, mutation_pool=None):
    """Adds an rdf value to the queue by delegating to StaticAdd.

    Does not require that the queue be locked.

    Args:
        rdf_value: The rdf value to add to the queue.
        mutation_pool: A MutationPool object to write to.
    """
    self.StaticAdd(self.urn, rdf_value, mutation_pool=mutation_pool)
Adds an rdf value to the queue. Adds an rdf value to the queue. Does not require that the queue be locked. Args: rdf_value: The rdf value to add to the queue. mutation_pool: A MutationPool object to write to. Raises: ValueError: rdf_value has unexpected type.
juraj-google-style
def get_appliance(self, appliance_id): url = ('https: headers = self.__gen_headers() headers['Content-Type'] = 'application/json' r = requests.get(url, headers=headers) return r.json()
Get the information for a specified appliance Args: appliance_id (string): identifiying string of appliance Returns: list: dictionary object containing information about the specified appliance
codesearchnet
def GetHashType(self, hash_str):
    """Identify the type of hash in a hash string.

    Args:
        hash_str: A string value that may be a hash.

    Returns:
        A string description of the hash type, or "EMPTY" when no
        registered pattern matches.
    """
    matches = (name for name, pattern in self.hashes if pattern.match(hash_str))
    return next(matches, "EMPTY")
Identify the type of hash in a hash string. Args: hash_str: A string value that may be a hash. Returns: A string description of the type of hash.
juraj-google-style
def resume(resume_delay=0):
    """A convenient method that produces a ``ProcessContinuation``.

    Args:
        resume_delay: delay after which processing of the current
            element should be resumed.

    Returns:
        A ``ProcessContinuation`` for signalling the runner that the
        current input element has not been fully processed and should
        be resumed later.
    """
    return ProcessContinuation(resume_delay=resume_delay)
A convenient method that produces a ``ProcessContinuation``. Args: resume_delay: delay after which processing current element should be resumed. Returns: a ``ProcessContinuation`` for signalling the runner that current input element has not been fully processed and should be resumed later.
github-repos
def __init__(self, file_object=None):
    """Initializes the self-feeder mix-in object.

    Args:
        file_object: Optional file-like object to feed from.
    """
    super(SelfFeederMixIn, self).__init__()
    # May be None; presumably assigned later before feeding starts.
    self.file_object = file_object
Initializes the lexer self-feeder mix-in object. Args: file_object: Optional file-like object.
juraj-google-style
def clean_single_dict(indict, prepend_to_keys=None, remove_keys_containing=None): if (not prepend_to_keys): prepend_to_keys = '' outdict = {} for (k, v) in indict.items(): if remove_keys_containing: if (remove_keys_containing in k): continue outdict[(prepe...
Clean a dict with values that contain single item iterators to single items Args: indict (dict): Dictionary to be cleaned prepend_to_keys (str): String to prepend to all keys remove_keys_containing (str): Text to check for in keys to ignore Returns: dict: Cleaned dictionary Examples: >>> clean_single_dict(indict={'t...
codesearchnet
def interconnect_link_topologies(self):
    """Gets the InterconnectLinkTopologies API client.

    The client is created lazily on first access and cached on the
    instance for subsequent calls.

    Returns:
        InterconnectLinkTopologies: the cached API client instance.
    """
    if (not self.__interconnect_link_topologies):
        self.__interconnect_link_topologies = InterconnectLinkTopologies(self.__connection)
    return self.__interconnect_link_topologies
Gets the InterconnectLinkTopologies API client. Returns: InterconnectLinkTopologies:
codesearchnet
def check_termination(self) -> None:
    """Verify the checked thread was joined and did terminate.

    Every checked thread should be joined after starting, and before
    the test tears down; otherwise it may hang and cause flaky
    failures.

    Raises:
        RuntimeError: If the thread was joined but is still running
            when the test finished.
    """
    if not self._is_thread_joined:
        self._testcase.fail('A checked thread was not joined.')
        return
    if self.is_alive():
        raise RuntimeError('Thread was not joined with main thread, and is still running when the test finished.')
Returns whether the checked thread was properly used and did terminate. Every checked thread should be "join"ed after starting, and before the test tears down. If it is not joined, it is possible the thread will hang and cause flaky failures in tests. Raises: self._testcase.failureException: If check_termination was ...
github-repos
def get_pipeline_options(project: str, job_name: str, mode: str, device: str, num_workers: int=cfg.NUM_WORKERS, **kwargs: Any) -> PipelineOptions: job_name = f'{job_name}-{datetime.now().strftime('%Y%m%d%H%M%S')}' staging_bucket = f'gs: dataflow_options = {'runner': 'DirectRunner' if mode == 'local' else 'D...
Function to retrieve the pipeline options. Args: project: GCP project to run on mode: Indicator to run local, cloud or template num_workers: Number of Workers for running the job parallely Returns: Dataflow pipeline options
github-repos
def apply_gradients(self, grads_and_vars, global_step=None, name=None): if distribute_lib.in_cross_replica_context(): raise ValueError('apply_gradients() must be called in a replica context.') if not self._doing_dynamic_loss_scaling(): return self._optimizer.apply_gradients(grads_and_vars, globa...
Apply gradients to variables. This is the second part of `minimize()`. It returns an `Operation` that conditionally applies gradients if all gradient values are finite. Otherwise no update is performed (nor is `global_step` incremented). Args: grads_and_vars: List of (gradient, variable) pairs as returned by `compute...
github-repos
def _copy_attr(self, module, varname, cls, attrname=None): if not hasattr(module, varname): raise RuntimeError("Variable '{}' not found".format(varname)) obj = getattr(module, varname) if not isinstance(obj, cls): raise RuntimeError( "Expecting...
Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname
juraj-google-style
def _escaped_token_to_subtoken_strings(self, escaped_token): ret = [] start = 0 token_len = len(escaped_token) while start < token_len: for end in range( min(token_len, start + self._max_subtoken_len), start, -1): subtoken = escaped_token[start:end] if subt...
Converts an escaped token string to a list of subtoken strings. Args: escaped_token: An escaped token as a unicode string. Returns: A list of subtokens as unicode strings.
juraj-google-style
def get_language_stemmer(language):
    """Retrieves the SnowballStemmer for a particular language.

    Args:
        language (str): ISO-639-1 code of the language.

    Returns:
        An nltk SnowballStemmer built from the language name that
        SUPPORTED_LANGUAGES maps the code to.
    """
    # Local imports keep lunr/nltk optional until this helper is used.
    from lunr.languages import SUPPORTED_LANGUAGES
    from nltk.stem.snowball import SnowballStemmer
    return SnowballStemmer(SUPPORTED_LANGUAGES[language])
Retrieves the SnowballStemmer for a particular language. Args: language (str): ISO-639-1 code of the language.
codesearchnet
def get_cached_item(cache_key, func, *func_args, **func_kwargs):
    """Retrieve the cached item for a key created via get_cache_key.

    Not a decorator, but a helper function.

    Args:
        cache_key: the specific cache key used when the function was
            cached, or None if none was used.
        func: the function that was cached.
        *func_args: positional arguments of the function.
        **func_kwargs: keyword arguments of the function.

    Returns:
        The cached value for the derived key, or None on a cache miss.
    """
    # BUG FIX: the body referenced `func`, but the second parameter was
    # named `alternative_cache_key`, so every call raised NameError.
    # Renamed to `func` to match both the body and the docstring.
    key = get_cache_key(cache_key, func, *func_args, **func_kwargs)
    return cache.get(key)
Not a decorator, but a helper function to retrieve the cached item for a key created via get_cache_key. Args: - cache_key: if there was a specific cache key used to cache the function, it should be provided here. If not this should be None - func: the function which was cache - *func_args: arguments of the function - *...
juraj-google-style
def init_logger(self, log_dir=None, level=logging.INFO): logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=level) logger = logging.getLogger(__name__) if (log_dir and (self.rank == 0)): filename = '{}.log'.format(self.timestamp) log_file = osp.join(log_dir, filena...
Init the logger. Args: log_dir(str, optional): Log file directory. If not specified, no log file will be used. level (int or str): See the built-in python logging module. Returns: :obj:`~logging.Logger`: Python logger.
codesearchnet
def update_args(self, args):
    """Update config dictionary with parsed args, as resolved by argparse.

    Only root positional arguments that already exist are overridden;
    arguments whose value is None are ignored.

    Args:
        args (namespace): args parsed by argparse.
    """
    for name in vars(args):
        value = getattr(args, name)
        if self.get(name) and value is not None:
            self._config[self.root_section][name] = value
Update config dictionary with parsed args, as resolved by argparse. Only root positional arguments that already exist will overridden. Args: args (namespace): args parsed by argparse
juraj-google-style
def getThumbnailForItem(self, itemId, fileName, filePath): admin = None item = None try: admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler) item = admin.content.getItem(itemId = itemId) return item.saveThumbnail(fileName=...
Gets an item's thumbnail and saves it to disk. Args: itemId (str): The item's ID. fileName (str): The name of the output image. filePath (str): The directory on disk where to save the thumbnail. Returns: dict: The result from :py:func:`arcrest.manageorg._content.UserItem.saveThumbnail`
juraj-google-style
def distribute_data_input(per_process_batch, layout, batch_dim_name):
    """Distribute a local worker batch with the corresponding layout.

    Args:
        per_process_batch: `jax.Array` already sharded to a local
            process size.
        layout: `TensorLayout` (or a raw jax backend layout) for the
            distributed batch.
        batch_dim_name: unused in this body -- presumably kept for API
            compatibility with callers; confirm before removing.

    Returns:
        A global `jax.Array` assembled from the process-local data.
    """
    from keras.src.distribution import TensorLayout
    if isinstance(layout, TensorLayout):
        # Unwrap the Keras layout to the underlying jax backend layout.
        layout = layout.backend_layout
    return jax.make_array_from_process_local_data(layout, per_process_batch)
Distribute the input data with the corresponding layout. Note that the inputs here is a local worker batch. Within the local worker, the data need to be further partitioned to map to each of the devices. Args: inputs: `jax.Array` that is already sharded to a local process size. layout: `TensorLayout` for the distribu...
github-repos
def GetUnavailableBonus(self): height = (Blockchain.Default().Height + 1) unspents = self.FindUnspentCoinsByAsset(Blockchain.SystemShare().Hash) refs = [coin.Reference for coin in unspents] try: unavailable_bonus = Blockchain.CalculateBonus(refs, height_end=height) return unavailable_bon...
Gets the total claimable amount of Gas in the wallet that is not available to claim because it has not yet been spent. Returns: Fixed8: the amount of Gas unavailable to claim.
codesearchnet
def cross_section(verts, tris, plane_orig, plane_normal, **kwargs):
    """Compute the planar cross section of a mesh; returns polylines.

    Args:
        verts: Nx3 array of the vertices positions.
        tris: Nx3 array of the faces, containing vertex indices.
        plane_orig: 3-vector indicating the plane origin.
        plane_normal: 3-vector indicating the plane normal.
        **kwargs: forwarded to cross_section_mesh.

    Returns:
        Whatever cross_section_mesh returns -- per its contract, a list
        of Nx3 arrays (one polyline per connected section).
    """
    mesh = TriangleMesh(verts, tris)
    plane = Plane(plane_orig, plane_normal)
    return cross_section_mesh(mesh, plane, **kwargs)
Compute the planar cross section of a mesh. This returns a set of polylines. Args: verts: Nx3 array of the vertices position faces: Nx3 array of the faces, containing vertex indices plane_orig: 3-vector indicating the plane origin plane_normal: 3-vector indicating the plane normal Returns: A list of Nx3 arrays, each ...
codesearchnet
def set_position(self, x, y):
    """Sets the shape position.

    Args:
        x (int): the x coordinate.
        y (int): the y coordinate.
    """
    # Attribute values are stored as strings.
    for key, value in (('x', x), ('y', y)):
        self.attributes[key] = str(value)
Sets the shape position. Args: x (int): the x coordinate y (int): the y coordinate
codesearchnet
def _convert(value, dtype=None): result = numpy_compat.np_asarray(value, dtype=dtype, order='C') if result.dtype.char == 'S' and result is not value: return numpy_compat.np_asarray(value, order='C', dtype=object) elif result.dtype.char == 'U' and result is not value: value = np.vectorize(lam...
Converts an arg to numpy, avoiding dangerous string and unicode dtypes. Numpy pads with zeros when using string and unicode dtypes if different components of a tensor have different lengths. This is bad: ignoring the padding is wrong for text data, and removing the padding is wrong for binary data. To avoid this bug...
github-repos
def _verify_pair(prev, curr): if prev._dimension != 2: raise ValueError("Curve not in R^2", prev) end = prev._nodes[:, -1] start = curr._nodes[:, 0] if not _helpers.vector_close(end, start): raise ValueError( "Not sufficiently close", ...
Verify a pair of sides share an endpoint. .. note:: This currently checks that edge endpoints match **exactly** but allowing some roundoff may be desired. Args: prev (.Curve): "Previous" curve at piecewise junction. curr (.Curve): "Next" curve at piecewise junction. Raises: ValueError: If the previous side is not i...
juraj-google-style
def apply_rules(self, rules, recursive=True): if recursive: new_args = [_apply_rules(arg, rules) for arg in self.args] new_kwargs = {key: _apply_rules(val, rules) for (key, val) in self.kwargs.items()} else: new_args = self.args new_kwargs = self.kwargs simplified = self.crea...
Rebuild the expression while applying a list of rules The rules are applied against the instantiated expression, and any sub-expressions if `recursive` is True. Rule application is best though of as a pattern-based substitution. This is different from the *automatic* rules that :meth:`create` uses (see :meth:`add_rule...
codesearchnet
def create_checksum_object_from_iterator(
    itr, algorithm=d1_common.const.DEFAULT_CHECKSUM_ALGORITHM
):
    """Calculate the checksum of an iterator and wrap it in a PyXB object.

    Args:
        itr: iterable object which supports the iterator protocol.
        algorithm: str checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``.

    Returns:
        Populated Checksum PyXB object.
    """
    checksum_str = calculate_checksum_on_iterator(itr, algorithm)
    checksum_pyxb = d1_common.types.dataoneTypes.checksum(checksum_str)
    checksum_pyxb.algorithm = algorithm
    return checksum_pyxb
Calculate the checksum of an iterator. Args: itr: iterable Object which supports the iterator protocol. algorithm: str Checksum algorithm, ``MD5`` or ``SHA1`` / ``SHA-1``. Returns: Populated Checksum PyXB object.
juraj-google-style
def service_configuration_check(config): ipv4_enabled = config.getboolean('daemon', 'ipv4') ipv6_enabled = config.getboolean('daemon', 'ipv6') services = config.sections() services.remove('daemon') ip_prefixes = [] for service in services: for option, getter in SERVICE_OPTIONS...
Perform a sanity check against options for each service check. Arguments: config (obj): A configparser object which holds our configuration. Returns: None if all sanity checks are successfully passed otherwise raises a ValueError exception.
juraj-google-style
def make_tests(self, sdkobject, testcase): tests = dict() attributes = sdkobject.get_attributes() for attribute in attributes: if attribute.local_name in self.IGNORED_ATTRIBUTES: continue for function_name, conditions in self._attributes_regist...
Make all tests that should be run for the given object in the specified testcase Args: sdkobject: the sdk object testcase: the test case Returns: It returns a dictionary of all tests to run
juraj-google-style
def ones(shape, dtype=None, **kwargs):
    """Create an array of given shape and type, filled with ones.

    Args:
        shape (sequence of ints): 2D shape of the array.
        dtype (data-type, optional): Desired data-type for the array.
        **kwargs: Other arguments of the array (coords, attrs, name).

    Returns:
        array (decode.array): Decode array filled with ones.
    """
    filled = np.ones(shape, dtype)
    return dc.array(filled, **kwargs)
Create an array of given shape and type, filled with ones. Args: shape (sequence of ints): 2D shape of the array. dtype (data-type, optional): Desired data-type for the array. kwargs (optional): Other arguments of the array (*coords, attrs, and name). Returns: array (decode.array): Decode array filled with ones.
juraj-google-style
def flatten_top_level_keys(data, top_level_keys): flattened_data = {} for top_level_key in top_level_keys: if data[top_level_key] is None: flattened_data[top_level_key] = None else: for key in data[top_level_key]: flattened_data['{}_-_{}'.format(top_...
Helper method to flatten a nested dict of dicts (one level) Example: {'a': {'b': 'bbb'}} becomes {'a_-_b': 'bbb'} The separator '_-_' gets formatted later for the column headers Args: data: the dict to flatten top_level_keys: a list of the top level keys to flatten ('a' in the example above)
juraj-google-style
def AsDict(self): sources = [] for source in self.sources: source_definition = {'type': source.type_indicator, 'attributes': source.AsDict()} if source.supported_os: source_definition['supported_os'] = source.supported_os if source.conditions: source_definition['c...
Represents an artifact as a dictionary. Returns: dict[str, object]: artifact attributes.
codesearchnet
def update_port_monitor(self, resource, timeout=-1):
    """Updates the port monitor configuration of a logical interconnect.

    Args:
        resource: Port monitor configuration.
        timeout: Timeout in seconds (-1 uses the default wait).

    Returns:
        dict: Port monitor configuration.
    """
    payload = dict(resource)
    # Default the resource type when the caller did not set one.
    payload.setdefault('type', 'port-monitor')
    uri = self.data["uri"] + self.PORT_MONITOR_PATH
    return self._helper.update(payload, uri=uri, timeout=timeout)
Updates the port monitor configuration of a logical interconnect. Args: resource: Port monitor configuration. Returns: dict: Port monitor configuration.
juraj-google-style
def get_signature_request_list(self, page=1, ux_version=None): request = self._get_request() parameters = { "page": page } if ux_version is not None: parameters['ux_version'] = ux_version return request.get(self.SIGNATURE_REQUEST_LIST_URL, para...
Get a list of SignatureRequest that you can access This includes SignatureRequests you have sent as well as received, but not ones that you have been CCed on. Args: page (int, optional): Which page number of the SignatureRequest list to return. Defaults to 1. ux_version (int): UX version, either 1 (default)...
juraj-google-style
def consume(self, source): manifest = OrderedDict() rules = parse_stylesheet( source, skip_comments=True, skip_whitespace=True, ) for rule in rules: name = self.digest_prelude(rule) if not n...
Parse source and consume tokens from tinycss2. Arguments: source (string): Source content to parse. Returns: dict: Retrieved rules.
juraj-google-style
def _validate_isvalid_orcid(self, isvalid_orcid, field, value): if (isvalid_orcid and ('ORCID' in value)): try: res = search_orcid(value['ORCID']) except ConnectionError: warn('network not available, ORCID not validated.') return except HTTPError: ...
Checks for valid ORCID if given. Args: isvalid_orcid (`bool`): flag from schema indicating ORCID to be checked. field (`str`): 'author' value (`dict`): dictionary of author metadata. The rule's arguments are validated against this schema: {'isvalid_orcid': {'type': 'bool'}, 'field': {'type': 'str'}, 'value': {'type':...
codesearchnet
def parse_variable(self, variable): data = None if (variable is not None): variable = variable.strip() if re.match(self._variable_match, variable): var = re.search(self._variable_parse, variable) data = {'root': var.group(0), 'job_id': var.group(2), 'name': var.group(3), ...
Method to parse an input or output variable. **Example Variable**:: #App:1234:output!String Args: variable (string): The variable name to parse. Returns: (dictionary): Result of parsed string.
codesearchnet
def distribute_equally(daily_data, divide=False):
    """Obtains hourly values by equally distributing the daily values.

    Args:
        daily_data: daily values (pandas object with a datetime index;
            presumably a Series/DataFrame -- confirm against callers).
        divide: if True, divide resulting values by 24 in order to
            preserve the daily sum (required e.g. for precipitation).

    Returns:
        Equally distributed hourly values.
    """
    index = hourly_index(daily_data.index)
    hourly_data = daily_data.reindex(index)
    # Forward-fill each day's value across its remaining 23 hours.
    hourly_data = hourly_data.groupby(hourly_data.index.day).transform((lambda x: x.fillna(method='ffill', limit=23)))
    if divide:
        hourly_data /= 24
    return hourly_data
Obtains hourly values by equally distributing the daily values. Args: daily_data: daily values divide: if True, divide resulting values by the number of hours in order to preserve the daily sum (required e.g. for precipitation). Returns: Equally distributed hourly values.
codesearchnet
def get_parameter_names(self, include_frozen=False):
    """Get the parameter names.

    Args:
        include_frozen (Optional[bool]): Should the frozen parameters
            be included in the returned value? (default: ``False``)

    Returns:
        All parameter names when ``include_frozen`` is True, otherwise
        a tuple of only the unfrozen ones.
    """
    if include_frozen:
        return self.parameter_names
    unfrozen = []
    for name, keep in zip(self.parameter_names, self.unfrozen_mask):
        if keep:
            unfrozen.append(name)
    return tuple(unfrozen)
Get a list of the parameter names Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
codesearchnet
def redraw(self, reset_camera=False): self.ren.RemoveAllViewProps() self.picker = None self.add_picker_fixed() self.helptxt_mapper = vtk.vtkTextMapper() tprops = self.helptxt_mapper.GetTextProperty() tprops.SetFontSize(14) tprops.SetFontFamilyToTimes() tprops.SetColor(0, 0, 0) if (se...
Redraw the render window. Args: reset_camera: Set to True to reset the camera to a pre-determined default for each structure. Defaults to False.
codesearchnet
def Convert(self, metadata, stat_entry, token=None):
    """Converts a single StatEntry by delegating to BatchConvert.

    Args:
        metadata: ExportedMetadata to be used for conversion.
        stat_entry: StatEntry to be converted.
        token: Security token.

    Returns:
        List or generator with resulting RDFValues; per BatchConvert's
        contract, empty if the StatEntry corresponds to a registry
        entry rather than a file.
    """
    return self.BatchConvert([(metadata, stat_entry)], token=token)
Converts StatEntry to ExportedFile. Does nothing if StatEntry corresponds to a registry entry and not to a file. Args: metadata: ExportedMetadata to be used for conversion. stat_entry: StatEntry to be converted. token: Security token. Returns: List or generator with resulting RDFValues. Empty list if StatEntry corre...
juraj-google-style
def port_get_tag(port):
    """Lists tags of the port.

    Args:
        port: A string - port name.

    Returns:
        List of tags (or empty list), False on failure -- as produced
        by _stdout_list_split from the command's retcode and stdout.

    CLI Example::

        salt '*' openvswitch.port_get_tag tap0
    """
    cmd = 'ovs-vsctl get port {0} tag'.format(port)
    result = __salt__['cmd.run_all'](cmd)
    retcode = result['retcode']
    stdout = result['stdout']
    return _stdout_list_split(retcode, stdout)
Lists tags of the port. Args: port: A string - port name. Returns: List of tags (or empty list), False on failure. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.port_get_tag tap0
codesearchnet
def _process_rules(self, rules): cidr = [] non_cidr = [] for rule in rules: if ('.' in rule['app']): self.log.debug('Custom CIDR rule: %s', rule) self._validate_cidr(rule) cidr.append(rule) else: self.log.debug('SG reference rule: %s', rule) ...
Process rules into cidr and non-cidr lists. Args: rules (list): Allowed Security Group ports and protocols. Returns: (list, list): Security Group reference rules and custom CIDR rules.
codesearchnet
def Dump(obj):
    """Stringifies a Python object into its YAML representation.

    Args:
        obj: A Python object to convert to YAML.

    Returns:
        A unicode YAML representation of the given object.
    """
    text = yaml.safe_dump(obj, default_flow_style=False, allow_unicode=True)
    if compatibility.PY2:
        # PyYAML returns bytes on Python 2; normalize to unicode.
        text = text.decode('utf-8')
    return text
Stringifies a Python object into its YAML representation. Args: obj: A Python object to convert to YAML. Returns: A YAML representation of the given object.
codesearchnet
def score_braycurtis(self, term1, term2, **kwargs):
    """Compute a weighting score based on the Bray-Curtis distance
    between the kernel density estimates of two terms.

    Args:
        term1 (str)
        term2 (str)
        **kwargs: forwarded to self.kde.

    Returns:
        float: 1 minus the Bray-Curtis distance of the two KDEs
            (identical estimates score 1, disjoint ones score 0).
    """
    kdes = [self.kde(term, **kwargs) for term in (term1, term2)]
    return 1 - distance.braycurtis(*kdes)
Compute a weighting score based on the "City Block" distance between the kernel density estimates of two terms. Args: term1 (str) term2 (str) Returns: float
juraj-google-style
def dagify_min_edge(g): while (not nx.is_directed_acyclic_graph(g)): cycle = next(nx.simple_cycles(g)) scores = [] edges = [] for (i, j) in zip(cycle[:1], cycle[:1]): edges.append((i, j)) scores.append(g[i][j]['weight']) (i, j) = edges[scores.index(min...
Input a graph and output a DAG. The heuristic is to reverse the edge with the lowest score of the cycle if possible, else remove it. Args: g (networkx.DiGraph): Graph to modify to output a DAG Returns: networkx.DiGraph: DAG made out of the input graph.
codesearchnet
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): values_dict = {} if registry_key.number_of_values == 0: values_dict['Value'] = 'No values stored in key.' else: for registry_value in registry_key.GetValues(): value_name = registry_value.name or '(default)' ...
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None): min_patches = images_kwargs.get('min_patches', None) or self.min_patches max_patches = images_kwargs.get('max_patches', None) or self.max_patches patch_size = images_kwargs.get('size', None) or self.size crop_to_patches ...
A utility that returns number patches for a given image size. Args: height (`int`): Height of the input image. width (`int`): Width of the input image. images_kwargs (`dict`, *optional*) Any kwargs to override defaults of the image processor. Returns: `int`: Number of patches per image.
github-repos
def add_relationship(self, txn_id, predecessors):
    """Add a predecessor-successor relationship between one txn id and
    a set of predecessors.

    The stored set is transitively closed: it contains the given
    predecessors plus all of their previously recorded predecessors.

    Args:
        txn_id (str): The transaction id of the transaction.
        predecessors (set): The transaction ids of the transaction's
            predecessors.

    Returns:
        None
    """
    closure = set(predecessors)
    closure.update(*(self._predecessors_by_id[pred] for pred in predecessors))
    self._predecessors_by_id[txn_id] = closure
Add a predecessor-successor relationship between one txn id and a set of predecessors. Args: txn_id (str): The transaction id of the transaction. predecessors (set): The transaction ids of the transaction's predecessors Returns: None
juraj-google-style
def hpo_terms(self, query=None, hpo_term=None, text=None, limit=None): query_dict = {} search_term = None if query: query_dict = {'$or': [{'hpo_id': {'$regex': query, '$options': 'i'}}, {'description': {'$regex': query, '$options': 'i'}}]} search_term = query elif text: new_strin...
Return all HPO terms If a query is sent hpo_terms will try to match with regex on term or description. Args: query(str): Part of a hpoterm or description hpo_term(str): Search for a specific hpo term limit(int): the number of desired results Returns: result(pymongo.Cursor): A cursor with hpo terms
codesearchnet
def eval(self, expr, **kwargs): columns = self.index if self._is_transposed else self.columns index = self.columns if self._is_transposed else self.index columns_copy = pandas.DataFrame(columns=self.columns) columns_copy = columns_copy.eval(expr, inplace=False...
Returns a new QueryCompiler with expr evaluated on columns. Args: expr: The string expression to evaluate. Returns: A new QueryCompiler with new columns after applying expr.
juraj-google-style
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize an API XML doc to an ElementTree element.

    Args:
        xml_str: bytes XML document.
        encoding: str decoder to use when converting the XML doc
            ``bytes`` to a Unicode str.

    Returns:
        The root Element of the parsed document.
    """
    xml_parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    root = xml.etree.ElementTree.fromstring(xml_str, parser=xml_parser)
    return root
Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc.
codesearchnet
def summarize_variables(var_list=None, tag=None):
    """Summarize the variables with histogram summaries.

    Args:
        var_list: a list of variables; defaults to trainable_variables.
        tag: name scope of the summary; defaults to
            "training_variables/".
    """
    if var_list is None:
        var_list = tf.trainable_variables()
    if tag is None:
        tag = "training_variables/"
    name_to_var = {v.name: v for v in var_list}
    for v_name in list(name_to_var):
        v = name_to_var[v_name]
        # One histogram summary per variable, keyed by tag + name.
        tf.summary.histogram(tag + v_name, v)
Summarize the variables. Args: var_list: a list of variables; defaults to trainable_variables. tag: name scope of the summary; defaults to training_variables/.
juraj-google-style
def assertAllGreater(self, a, comparison_target):
    """Assert element values are all greater than a target value.

    Args:
        a: The numpy `ndarray`, or anything that can be converted into
            a numpy `ndarray` (including Tensor).
        comparison_target: The target value of comparison.
    """
    a, comparison_target = self.evaluate_if_both_tensors(a, comparison_target)
    a = self._GetNdArray(a)
    # Comparing the minimum suffices: min(a) > t implies all elements > t.
    self.assertGreater(np.min(a), comparison_target)
Assert element values are all greater than a target value. Args: a: The numpy `ndarray`, or anything that can be converted into a numpy `ndarray` (including Tensor). comparison_target: The target value of comparison.
github-repos
def requested_test_names_dict(self):
    """Gets the requested test names of a test run in a dict format.

    Note a test can be requested multiple times, so there can be
    duplicated values.

    Returns:
        A dict mapping 'Requested Tests' to a deep copy of the
        requested test names.
    """
    requested_copy = copy.deepcopy(self.requested)
    return {'Requested Tests': requested_copy}
Gets the requested test names of a test run in a dict format. Note a test can be requested multiple times, so there can be duplicated values Returns: A dict with a key and the list of strings.
github-repos
def _truncate(self, new_rank: int) -> 'DynamicRaggedShape.Spec': if self.rank is None: return self._set_rank_if_unknown(new_rank)._truncate(new_rank) if new_rank == 0: return DynamicRaggedShape.Spec._from_tensor_shape([], 0, self.dtype) if new_rank == 1: vector_size = self._dimension...
Truncate a ragged shape spec. For example, if the original spec s was for a shape: [3, [4, 1], 2, 7] Then truncate_dynamic_ragged_shape_spec(s, 3) is a spec for: [3, [4, 1], 2] Args: new_rank: the new rank Returns: A truncated DynamicRaggedShape.Spec.
github-repos
def getKeyName(username, date, blob_key):
    """Returns the internal key for a particular item in the database.

    Items are stored with keys of the form 'user/date/blob_key', where
    '/' stands for the class separator FileMetadata.__SEP.

    Args:
        username: The given user's e-mail address.
        date: A datetime object representing when the input file was
            uploaded.
        blob_key: The blob key of the stored item.

    Returns:
        str: The internal key for the item.
    """
    sep = FileMetadata.__SEP
    return "{0}{1}{2}{1}{3}".format(username, sep, date, blob_key)
Returns the internal key for a particular item in the database. Our items are stored with keys of the form 'user/date/blob_key' ('/' is not the real separator, but __SEP is). Args: username: The given user's e-mail address. date: A datetime object representing the date and time that an input file was uploaded to this...
codesearchnet
def terminate_ec2_instance(client, resource): instance = EC2Instance.get(resource.id) if (instance.state == 'terminated'): return (ActionStatus.IGNORED, {}) client.terminate_instances(InstanceIds=[resource.id]) return (ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': ...
Terminate an EC2 Instance This function will terminate an EC2 Instance. Args: client (:obj:`boto3.session.Session.client`): A boto3 client object resource (:obj:`Resource`): The resource object to terminate Returns: `ActionStatus`
codesearchnet
def with_min_execution_time(self, min_micros=0, min_accelerator_micros=0, min_cpu_micros=0):
    """Only show profiler nodes consuming no less than 'min_micros'.

    Args:
        min_micros: Only show nodes with execution time no less than
            this; it sums accelerator and cpu times.
        min_accelerator_micros: Only show nodes spending no less than
            this time on accelerator (e.g. GPU).
        min_cpu_micros: Only show nodes spending no less than this
            time on cpu.

    Returns:
        self, to allow option chaining.
    """
    self._options.update(
        min_micros=min_micros,
        min_accelerator_micros=min_accelerator_micros,
        min_cpu_micros=min_cpu_micros,
    )
    return self
Only show profiler nodes consuming no less than 'min_micros'. Args: min_micros: Only show profiler nodes with execution time no less than this. It sums accelerator and cpu times. min_accelerator_micros: Only show profiler nodes spend no less than this time on accelerator (e.g. GPU). min_cpu_micros: Only show profiler ...
github-repos
def read(self, size=None): data = b'' while ((size and (len(data) < size)) and (self._current_offset < self.uncompressed_data_size)): member = self._GetMemberForOffset(self._current_offset) member_offset = (self._current_offset - member.uncompressed_data_offset) data_read = member.ReadAt...
Reads a byte string from the gzip file at the current offset. The function will read a byte string up to the specified size or all of the remaining data if no size was specified. Args: size (Optional[int]): number of bytes to read, where None is all remaining data. Returns: bytes: data read. Raises: IOError: if the...
codesearchnet
def rebin(d, n_x, n_y=None): if (d.ndim == 2): if (n_y is None): n_y = 1 if (n_x is None): n_x = 1 d = d[(:(int((d.shape[0] d = d.reshape(((d.shape[0] d = d.mean(axis=3) d = d.mean(axis=1) elif (d.ndim == 1): d = d[:(int((d.shape[...
Rebin data by averaging bins together Args: d (np.array): data n_x (int): number of bins in x dir to rebin into one n_y (int): number of bins in y dir to rebin into one Returns: d: rebinned data with shape (n_x, n_y)
codesearchnet
def load_glove(file):
    """Load GloVe vectors from a text file into numpy arrays.

    Each line is expected to hold a token followed by its space-separated
    float components.

    Args:
        file (str): Path to a GloVe vectors file.

    Returns:
        dict: Mapping from token to its numpy vector.
    """
    vectors = {}
    with open(file, encoding="utf8", errors='ignore') as handle:
        for raw_line in handle:
            token, *components = raw_line.split(' ')
            vectors[token] = np.array([float(part) for part in components])
    return vectors
Loads GloVe vectors in numpy array. Args: file (str): a path to a glove file. Return: dict: a dict of numpy arrays.
juraj-google-style
def get_all_options(self, drop_default=False, add_extra_args_fn: Optional[Callable[[_BeamArgumentParser], None]]=None, retain_unknown_options=False) -> Dict[str, Any]: subset = {} parser = _BeamArgumentParser(allow_abbrev=False) for cls in PipelineOptions.__subclasses__(): subset[str(cls)] = cls ...
Returns a dictionary of all defined arguments. Returns a dictionary of all defined arguments (arguments that are defined in any subclass of PipelineOptions) into a dictionary. Args: drop_default: If set to true, options that are equal to their default values, are not returned as part of the result dictionary. add_ext...
github-repos
def add_user(self, user_obj): LOG.info("Adding user %s to the database", user_obj['email']) if not '_id' in user_obj: user_obj['_id'] = user_obj['email'] try: self.user_collection.insert_one(user_obj) LOG.debug("User inserted") except Dup...
Add a user object to the database Args: user_obj(scout.models.User): A dictionary with user information Returns: user_info(dict): a copy of what was inserted
juraj-google-style
def get_models(self, model, page=None):
    """Get all the models from the server.

    Args:
        model (string): The class as a string.
        page (string, optional): The page number as a string.

    Returns:
        list: A list of instances of the requested model.
    """
    model_class = self._get_model_class(model)
    if page is None:
        return self._store.find_all(model_class)
    return self._store.find_all(model_class, params={'page': int(page)})
Get all the models from the server. Args: model (string): The class as a string. page (string, optional): The page number as a string Returns: list: A list of instances of the requested model.
juraj-google-style
def add_number_parameters(self, number):
    """Add the given number(s) to the internal parameter list.

    Args:
        number: A single int/float, or a list of them; each value is
            appended as a ``{ "value": <number> }`` entry.
    """
    if isinstance(number, list):
        # Recurse once per element so each gets its own entry.
        for item in number:
            self.add_number_parameters(item)
        return
    self._parameters.append('{ "value": %s }' % number)
Add given number parameters to the internal list. Args: number (int, float, or list of int or float): A number or list of numbers to add to the parameters.
juraj-google-style
def aggregate_and_return_name_for_output(self, fused_op_name, output_index, out_graphdef): flattened = self.flatten_nodes() if self.aggregation == OpHint.AGGREGATE_FIRST or self.aggregation == OpHint.AGGREGATE_LAST: assert len(flattened) == 1 if len(flattened) == 1 and self.aggregation != OpHint.AGG...
This adds to `out_graphdef` all the unaggregated outputs. I.e. we are outputting from a fused stub, but we need to make it compatible with the unfused original graph so we insert an unpack. Ideally in a later stage the unpack -> pack sequences will be removed. Args: fused_op_name: The name of the stub we are in the p...
github-repos
def fa_peft_integration_check(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, target_dtype: Optional[torch.dtype]=None): if target_dtype is None: return (query, key, value) input_dtype = query.dtype if input_dtype == torch.float32: logger.warning_once(f'The input hidden states s...
PEFT usually casts the layer norms in float32 for training stability reasons therefore the input hidden states gets silently casted in float32. Hence, we need cast them back in float16 / bfloat16 just to be sure everything works as expected. This might slowdown training & inference so it is recommended to not cast the ...
github-repos
def group_by(self, key, field=lambda x: x.xfer):
    """Select every transaction whose ``field`` value equals ``key``.

    Args:
        key: Value to match against ``field(transaction)``.
        field: Callable extracting the comparison value from each
            transaction; defaults to reading its ``xfer`` attribute.

    Returns:
        A ``Transactions`` object holding the matching transactions.
    """
    matching = [txn for txn in self.trans if field(txn) == key]
    return Transactions(matching)
Returns all transactions whose given ``field`` matches ``key``. Returns: A ``Transactions`` object.
codesearchnet
def add_server(self, name, prefer=False):
    """Add or update an NTP server entry in the node config.

    Args:
        name (string): The IP address or FQDN of the NTP server.
        prefer (bool): Sets the NTP server entry as preferred if True.

    Returns:
        True if the operation succeeds, otherwise False.

    Raises:
        ValueError: If ``name`` is empty or contains only whitespace.
    """
    # Reject empty / whitespace-only names up front; a blank value would
    # produce an invalid "ntp server" command.
    if not name or not name.strip():
        raise ValueError('ntp server name must be specified')
    if prefer:
        name = '%s prefer' % name
    cmd = self.command_builder('ntp server', value=name)
    return self.configure(cmd)
Add or update an NTP server entry to the node config Args: name (string): The IP address or FQDN of the NTP server. prefer (bool): Sets the NTP server entry as preferred if True. Returns: True if the operation succeeds, otherwise False.
codesearchnet
def poll_for_job_completion(runner, result, duration, state_update_callback=None): if result.state == PipelineState.DONE: return last_message_time = None current_seen_messages = set() last_error_rank = float('-inf') last_error_msg = None last_job_state = None final_countdown_timer_se...
Polls for the specified job to finish running (successfully or not). Updates the result with the new job information before returning. Args: runner: DataflowRunner instance to use for polling job state. result: DataflowPipelineResult instance used for job information. duration (int): The time to wait (in milliseconds...
github-repos
def set_conf_str(conf, optstrs): falsy = ['0', 'no', 'n', 'off', 'false', 'f'] bool_actions = ['store_true', 'store_false', internal.Switch] for optstr in optstrs: (opt, val) = optstr.split('=', 1) (sec, opt) = opt.split('.', 1) if (sec not in conf): raise error.SectionEr...
Set options from a list of section.option=value string. Args: conf (:class:`~loam.manager.ConfigurationManager`): the conf to update. optstrs (list of str): the list of 'section.option=value' formatted string.
codesearchnet
def deconstruct_single_qubit_matrix_into_angles(mat: np.ndarray) -> Tuple[(float, float, float)]: right_phase = (cmath.phase((mat[(0, 1)] * np.conj(mat[(0, 0)]))) + math.pi) mat = np.dot(mat, _phase_matrix((- right_phase))) bottom_phase = cmath.phase((mat[(1, 0)] * np.conj(mat[(0, 0)]))) mat = np.dot(_p...
Breaks down a 2x2 unitary into more useful ZYZ angle parameters. Args: mat: The 2x2 unitary matrix to break down. Returns: A tuple containing the amount to phase around Z, then rotate around Y, then phase around Z (all in radians).
codesearchnet
def variable_shape(handle, out_type=None): if out_type is None: if flags.config().tf_shape_default_int64.value(): out_type = dtypes.int64 else: out_type = dtypes.int32 handle_data = get_eager_safe_handle_data(handle) if handle_data is None or not handle_data.is_set: ...
Returns the shape of the variable from the handle. If the output shape dtype is not specified, it will be set to int64 if tf_shape_default_int64 is enabled, otherwise it will be set to int32. Args: handle: The handle of the variable. out_type: The dtype of the output shape. Returns: The shape of the variable.
github-repos
def delete(self, resource, timeout=-1):
    """Deletes a Scope.

    Args:
        resource: dict (or resource URI) of the object to delete.
        timeout: Timeout in seconds. Wait for task completion by default.
            The timeout does not abort the operation in OneView; it just
            stops waiting for its completion.

    Returns:
        bool: Indicates if the resource was successfully deleted.
    """
    # Use the resource's eTag for optimistic locking when available;
    # fall back to the wildcard match otherwise. isinstance (rather than
    # an exact type check) also accepts dict subclasses.
    if isinstance(resource, dict):
        etag = resource.get('eTag', '*')
    else:
        etag = '*'
    return self._client.delete(resource, timeout=timeout,
                               custom_headers={'If-Match': etag})
Deletes a Scope. Args: resource: dict object to delete timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView; it just stops waiting for its completion. Returns: bool: Indicates if the resource was successfully deleted.
codesearchnet
def Gamma(cls, shape: 'TensorFluent', scale: 'TensorFluent', batch_size: Optional[int]=None) -> Tuple[(Distribution, 'TensorFluent')]: if (shape.scope != scale.scope): raise ValueError('Gamma distribution: parameters must have same scope!') concentration = shape.tensor rate = (1 / scale.tensor) ...
Returns a TensorFluent for the Gamma sampling op with given shape and scale parameters. Args: shape: The shape parameter of the Gamma distribution. scale: The scale parameter of the Gamma distribution. batch_size: The size of the batch (optional). Returns: The Gamma distribution and a TensorFluent sample drawn from t...
codesearchnet
def gym_space_spec(gym_space): try: tf_dtype = tf.as_dtype(gym_space.dtype) except TypeError as e: tf.logging.error("Cannot convert space's type [%s] to tf.dtype", gym_space.dtype) raise e if isinstance(gym_space, Box): return box_space_spec(gym_space, tf_dtype) el...
Returns a reading spec of a gym space. NOTE: Only implemented currently for Box and Discrete. Args: gym_space: instance of gym.spaces whose spec we want. Returns: Reading spec for that space. Raises: NotImplementedError: For spaces whose reading spec we haven't implemented.
juraj-google-style
def send(self, config, log, obs_id, beam_id): log.info('Starting Pulsar Data Transfer...') socket = self._ftp.transfercmd('STOR {0}_{1}'.format(obs_id, beam_id)) socket.send(json.dumps(config).encode()) socket.send(bytearray((1000 * 1000))) config['metadata']['name'] = 'candidate_two' socket.sen...
Send the pulsar data to the ftp server Args: config (dict): Dictionary of settings log (logging.Logger): Python logging object obs_id: observation id beam_id: beam id
codesearchnet
def reinit_nested_vars(variables, indices=None): if isinstance(variables, (tuple, list)): return tf.group(*[ reinit_nested_vars(variable, indices) for variable in variables]) if indices is None: return variables.assign(tf.zeros_like(variables)) else: zeros = tf.zeros([tf.shape(indices)[0]] ...
Reset all variables in a nested tuple to zeros. Args: variables: Nested tuple or list of variables. indices: Batch indices to reset, defaults to all. Returns: Operation.
juraj-google-style
def __init__(self, channel): self.SendEvents = channel.stream_stream('/tensorflow.EventListener/SendEvents', request_serializer=tensorflow_dot_core_dot_util_dot_event__pb2.Event.SerializeToString, response_deserializer=tensorflow_dot_core_dot_debug_dot_debug__service__pb2.EventReply.FromString) self.SendTraceba...
Constructor. Args: channel: A grpc.Channel.
github-repos
def update_function_configuration(self, vpc_config): LOG.info('Updating configuration for lambda function: %s', self.app_name) try: self.lambda_client.update_function_configuration( Environment=self.lambda_environment, FunctionName=self.app_name, ...
Update existing Lambda function configuration. Args: vpc_config (dict): Dictionary of SubnetIds and SecurityGroupsIds for using a VPC in lambda
juraj-google-style
async def _try_catch_coro(emitter, event, listener, coro): try: (await coro) except Exception as exc: if (event == emitter.LISTENER_ERROR_EVENT): raise emitter.emit(emitter.LISTENER_ERROR_EVENT, event, listener, exc)
Coroutine wrapper to catch errors after async scheduling. Args: emitter (EventEmitter): The event emitter that is attempting to call a listener. event (str): The event that triggered the emitter. listener (async def): The async def that was used to generate the coro. coro (coroutine): The coroutine that should be trie...
codesearchnet
def _build(self, input_batch, is_training, test_local_stats=True): input_shape = input_batch.get_shape() if (self._axis is not None): if (len(self._axis) > len(input_shape)): raise base.IncompatibleShapeError('Too many indices specified in axis: len({}) > len({}).'.format(self._axis, input_s...
Connects the BatchNorm module into the graph. Args: input_batch: A Tensor of arbitrary dimension. By default, the final dimension is not reduced over when computing the minibatch statistics. is_training: A boolean to indicate if the module should be connected in training mode, meaning the moving averages are updated. ...
codesearchnet
def median(data):
    """Calculate the median of a list of numbers.

    The input is sorted numerically; for an odd-length list the middle
    element is returned, and for an even-length list the mean of the two
    middle elements is returned.

    Args:
        data: A non-empty list of integers or floating point numbers.

    Returns:
        The median value (always a float for even-length input).

    Raises:
        ValueError: If ``data`` is empty.
    """
    if not data:
        raise ValueError('median() requires at least one data point')
    ordered = sorted(data)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        # Even count: average the two middle values.
        return (ordered[mid - 1] + ordered[mid]) / 2.0
    return ordered[mid]
Calculates the median of a list of integers or floating point numbers. Args: data: A list of integers or floating point numbers Returns: Sorts the list numerically and returns the middle number if the list has an odd number of items. If the list contains an even number of items the mean of the two middle numbers is ...
juraj-google-style
def get_permissions(obj_name, principal=None, obj_type='file'):
    """Get the permissions for the passed object.

    Args:
        obj_name (str): The name of or path to the object.
        principal (Optional[str]): The user or group name (a SID also
            works) whose ACEs should be returned. When None, every ACE
            defined on the object is returned. Default is None.
        obj_type (Optional[str]): The type of object. Default is 'file'.

    Returns:
        The ACE(s) found on the object's DACL.
    """
    object_dacl = dacl(obj_name, obj_type)
    if principal is None:
        return object_dacl.list_aces()
    return object_dacl.get_ace(principal)
Get the permissions for the passed object Args: obj_name (str): The name of or path to the object. principal (Optional[str]): The name of the user or group for which to get permissions. Can also pass a SID. If None, all ACEs defined on the object will be returned. Default is None obj_type (Optional[str]): The type ...
codesearchnet
def _shard_counts(layout: layout_lib.Layout, batch_dim: Optional[str]=None) -> List[int]:
    """Computes the number of shards in each dimension of the layout.

    The batch dimension (and any unsharded dimension) counts as a single
    shard, since only local, per-replica slicing is considered here.

    Args:
        layout: Layout whose sharding specs are inspected.
        batch_dim: Optional name of the batch dimension; treated as
            having one local shard.

    Returns:
        One shard count per sharding spec of ``layout``.
    """
    single_shard_specs = (batch_dim, layout_lib.UNSHARDED)
    return [
        1 if spec in single_shard_specs else layout.mesh.dim_size(spec)
        for spec in layout.sharding_specs
    ]
Computes a list of the number of shards in each dimension of the layout. The shard counts are used to slice each dataset element. The batch dimension's count is overridden to 1 since we only consider how many shards to make locally (within each local replica). Sharding across clients is handled by either tf.data.Datas...
github-repos
def sequence_accuracy(labels, outputs):
    """Compute the sequence-level accuracy.

    A sequence counts as correct only when every position is either
    predicted correctly or is padding (label 0).

    Args:
        labels: ground-truth labels, shape=(batch, packed_seq_length)
        outputs: predicted tokens, shape=(batch, seq_length)

    Returns:
        Two ops, one for getting the current average accuracy and
        another for updating the running average estimate.
    """
    correct_or_padding = tf.logical_or(tf.equal(labels, outputs), tf.equal(labels, 0))
    per_sequence_correct = tf.reduce_all(correct_or_padding, axis=-1)
    return tf.metrics.mean(per_sequence_correct)
Compute the sequence-level accuracy. A sequence is only considered correct if all of its entries were predicted correctly. Args: labels: ground-truth labels, shape=(batch, packed_seq_length) outputs: predicted tokens, shape=(batch, seq_length) Returns: Two ops, one for getting the current average accuracy and another...
juraj-google-style
def locked_get(self): query = {self.key_name: self.key_value} entities = self.model_class.objects.filter(**query) if (len(entities) > 0): credential = getattr(entities[0], self.property_name) if (getattr(credential, 'set_store', None) is not None): credential.set_store(self) ...
Retrieve stored credential from the Django ORM. Returns: oauth2client.Credentials retrieved from the Django ORM, associated with the ``model``, ``key_value``->``key_name`` pair used to query for the model, and ``property_name`` identifying the ``CredentialsProperty`` field, all of which are defined in the constructor ...
codesearchnet
def get_timestamp(self, url, xpath=None):
    """Get the timestamp of a cached query result.

    Args:
        url (str): URL whose cached response timestamp is requested.
        xpath (str): xpath that was searched (may be ``None``).

    Returns:
        datetime.datetime: timestamp of the cached response, or None if
        the DB has not been initialized or the url/xpath pair has not
        been queried yet.
    """
    if not path.exists(self.db_path):
        return None
    # Build the query once instead of executing it twice (the original
    # re-ran the same query for the count and for the row fetch).
    cached = self._query(url, xpath)
    if cached.count() > 0:
        return cached.one().queried_on
    return None
Get time stamp of cached query result. If DB has not yet been initialized or url/xpath has not been queried yet, return None. Args: url (str): If given, clear specific item only. Otherwise remove the DB file. xpath (str): xpath to search (may be ``None``) Returns: datetime.datetime: cached response timestamp, None i...
juraj-google-style
def flip_channel_order(image: np.ndarray, data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray: input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format if input_data_format == ChannelDime...
Flips the channel order of the image. If the image is in RGB format, it will be converted to BGR and vice versa. Args: image (`np.ndarray`): The image to flip. data_format (`ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_chan...
github-repos
def expect_false(condition, msg, extras=None):
    """Expect that ``condition`` evaluates to False.

    Unlike an assertion, a failed expectation does not abort the test;
    the failure is recorded and the test is marked as failed after its
    execution finishes.

    Args:
        condition: The expression that is evaluated.
        msg: A string explaining the details in case of failure.
        extras: An optional field for extra information to be included
            in the test result.
    """
    try:
        asserts.assert_false(condition, msg, extras)
    except signals.TestSignal as failure_signal:
        logging.exception('Expected a `False` value, got `True`.')
        recorder.add_error(failure_signal)
Expects an expression to evaluate to False. If the expectation is not met, the test is marked as fail after its execution finishes. Args: condition: The expression that is evaluated. msg: A string explaining the details in case of failure. extras: An optional field for extra information to be included in test result.
codesearchnet
def UserAgentFragment(self): if self.operating_system == OperatingSystem.LINUX: return '({name} {version})'.format(name=self.operating_system.name, version=platform.release()) elif self.operating_system == OperatingSystem.WINDOWS: return '({name} NT {version})'.format(name=self.operating_system....
Generates the fragment of the User-Agent that represents the OS. Examples: (Linux 3.2.5-gg1236) (Windows NT 6.1.7601) (Macintosh; PPC Mac OS X 12.4.0) (Macintosh; Intel Mac OS X 12.4.0) Returns: str, The fragment of the User-Agent string.
github-repos