code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
# Constructor for AdManagerReportError: builds a failure message embedding the
# failed report job ID and stores the ID on the instance as `report_job_id`.
def __init__(self, report_job_id): super(AdManagerReportError, self).__init__( 'Ad Manager report job failed. The ID of the failed report is: %s' % report_job_id) self.report_job_id = report_job_id
Initializes a AdManagerReportError. Args: report_job_id: The ID of the report job which failed.
juraj-google-style
# Writes two histogram summaries -- '<name>/mean' and '<name>/stddev' -- for the
# distribution's parameters under `name_scope`, stepped by the global step.
def summarize_dist_params(dist, name, name_scope='dist_params'): with tf.compat.v1.name_scope(name_scope): tf.compat.v2.summary.histogram(name='{}/{}'.format(name, 'mean'), data=dist.mean(), step=tf.compat.v1.train.get_or_create_global_step()) tf.compat.v2.summary.histogram(name='{}/{}'.format(name, 'stddev'), data=dist.stddev(), step=tf.compat.v1.train.get_or_create_global_step())
Summarize the parameters of a distribution. Args: dist: A Distribution object with mean and standard deviation parameters. name: The name of the distribution. name_scope: The name scope of this summary.
codesearchnet
# Late-binding callback: validates that `target` is an RDFProtoStruct subclass
# (TypeError otherwise), clears the late-bound flag, records the target as the
# field type, and re-registers this descriptor with its owner.
def LateBind(self, target=None): if not issubclass(target, RDFProtoStruct): raise TypeError("Field %s expects a protobuf, but target is %s" % (self, target)) self.late_bound = False self.type = target self.owner.AddDescriptor(self)
Late binding callback. This method is called on this field descriptor when the target RDFValue class is finally defined. It gives the field descriptor an opportunity to initialize after the point of definition. Args: target: The target nested class. Raises: TypeError: If the target class is not of the expected type.
juraj-google-style
# Returns `path` re-cased to match entries that already exist in a
# case-insensitive fake filesystem. Walks components from the root; once a
# component no longer resolves to a real FakeDirectory entry, the inner
# components_to_path() helper appends the remaining raw components unchanged.
# Case-sensitive filesystems (and empty paths) are returned untouched.
def _original_path(self, path): def components_to_path(): if len(path_components) > len(normalized_components): normalized_components.extend( path_components[len(normalized_components):]) sep = self._path_separator(path) normalized_path = sep.join(normalized_components) if path.startswith(sep) and not normalized_path.startswith(sep): normalized_path = sep + normalized_path return normalized_path if self.is_case_sensitive or not path: return path path_components = self._path_components(path) normalized_components = [] current_dir = self.root for component in path_components: if not isinstance(current_dir, FakeDirectory): return components_to_path() dir_name, current_dir = self._directory_content( current_dir, component) if current_dir is None or ( isinstance(current_dir, FakeDirectory) and current_dir._byte_contents is None and current_dir.st_size == 0): return components_to_path() normalized_components.append(dir_name) return components_to_path()
Return a normalized case version of the given path for case-insensitive file systems. For case-sensitive file systems, return path unchanged. Args: path: the file path to be transformed Returns: A version of path matching the case of existing path elements.
juraj-google-style
# Reads MARC XML (filename or raw string), converts OAI records to plain XML,
# adds the MARC namespace, and parses the result with ET.
# NOTE(review): uses Py2-style `StringIO.StringIO`; under Python 3 this needs
# `io.StringIO` -- confirm the file-level import before porting.
def _read_marcxml(xml): marc_xml = _read_content_or_path(xml) marc_xml = _oai_to_xml(marc_xml) marc_xml = _add_namespace(marc_xml) file_obj = StringIO.StringIO(marc_xml) return ET.parse(file_obj)
Read MARC XML or OAI file, convert, add namespace and return XML in required format with all necessities. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: obj: Required XML parsed with ``lxml.etree``.
juraj-google-style
# Thin wrapper over tf.reduce_max using options from `opt` (axis, keep_dims,
# name). NOTE(review): `keep_dims` was renamed `keepdims` in newer TensorFlow
# releases -- confirm the TF version this project pins before modernizing.
def sg_max(tensor, opt): return tf.reduce_max(tensor, axis=opt.axis, keep_dims=opt.keep_dims, name=opt.name)
Computes the maximum of elements across axis of a tensor. See `tf.reduce_max()` in tensorflow. Args: tensor: A `Tensor` (automatically given by chain). opt: axis: A tuple/list of integers or an integer. The axis to reduce. keep_dims: If true, retains reduced dimensions with length 1. name: If provided, replace current tensor's name. Returns: A `Tensor`.
codesearchnet
def get_permissions(self, namespace, explicit=False):
    """Return the permission flags for the given namespace.

    Args:
        namespace: permissioning namespace (str or Namespace instance)
        explicit: require permissions explicitly set on this exact namespace

    Returns:
        int: permissioning flags
    """
    ns = namespace if isinstance(namespace, Namespace) else Namespace(namespace)
    flags, _ = self._check(ns.keys, self.index, explicit=explicit)
    return flags
Returns the permissions level for the specified namespace Arguments: namespace -- permissioning namespace (str) explicit -- require explicitly set permissions to the provided namespace Returns: int -- permissioning flags
juraj-google-style
def entry_point(__func: Callable) -> Callable:
    """Run *__func* and exit when its defining module is executed directly.

    When the module is merely imported the function is returned unchanged,
    allowing normal reuse.

    Args:
        __func: Function to execute when running as ``__main__``.

    Returns:
        The function itself when not running as a script.
    """
    if __func.__module__ != '__main__':
        return __func
    import sys
    sys.exit(__func())
Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run
codesearchnet
def _format_value(value):
    """Return `value` as a literal parseable by `parse_value`, or `None`.

    Guarantees that whenever a string is returned,
    ``parse_value(_format_value(value)) == value`` holds.

    Args:
        value: The value to format.

    Returns:
        A string representation of `value` when it round-trips through
        `parse_value`, otherwise `None`.
    """
    literal = repr(value)
    try:
        round_trip = parse_value(literal)
    except SyntaxError:
        return None
    return literal if round_trip == value else None
Returns `value` in a format parseable by `parse_value`, or `None`. Simply put, this function ensures that when it returns a string value, the following will hold: parse_value(_format_value(value)) == value Args: value: The value to format. Returns: A string representation of `value` when `value` is literally representable, or `None`.
juraj-google-style
# Switches this request to the bulk JSON endpoint ('<api_uri>/json'), enables
# streaming, and -- when ondemand is True -- adds the 'runNow' payload flag so
# the platform triggers on-demand bulk generation.
def json(self, ondemand=False): self._request_entity = 'indicator' self._request_uri = '{}/{}'.format(self._api_uri, 'json') self._stream = True if ondemand: self._request.add_payload('runNow', True)
Update request URI to return JSON data. For onDemand bulk generation to work it must first be enabled in the ThreatConnect platform under System settings. Args: ondemand (boolean): Enable on demand bulk generation.
juraj-google-style
def get(self, accountID, **kwargs):
    """Get pricing information for a list of Instruments within an Account.

    Args:
        accountID: Account Identifier.
        instruments: List of Instruments to get pricing for.
        since: Date/Time filter; only prices and home conversions with a
            time later than this filter are provided.
        includeUnitsAvailable: Include the unitsAvailable field in the
            returned Price objects.
        includeHomeConversions: Include the homeConversions field in the
            returned response.

    Returns:
        v20.response.Response containing the results from submitting the
        request.
    """
    request = Request('GET', '/v3/accounts/{accountID}/pricing')
    request.set_path_param('accountID', accountID)
    request.set_param('instruments', kwargs.get('instruments'))
    request.set_param('since', kwargs.get('since'))
    request.set_param('includeUnitsAvailable', kwargs.get('includeUnitsAvailable'))
    request.set_param('includeHomeConversions', kwargs.get('includeHomeConversions'))
    response = self.ctx.request(request)
    # Non-JSON responses are returned to the caller unparsed.
    if response.content_type is None:
        return response
    if not response.content_type.startswith('application/json'):
        return response
    jbody = json.loads(response.raw_body)
    parsed_body = {}
    status = str(response.status)
    if status == '200':
        if jbody.get('prices') is not None:
            parsed_body['prices'] = [
                self.ctx.pricing.ClientPrice.from_dict(d, self.ctx)
                for d in jbody.get('prices')
            ]
        if jbody.get('homeConversions') is not None:
            parsed_body['homeConversions'] = [
                self.ctx.pricing.HomeConversions.from_dict(d, self.ctx)
                for d in jbody.get('homeConversions')
            ]
        if jbody.get('time') is not None:
            parsed_body['time'] = jbody.get('time')
    elif status in ('400', '401', '404', '405'):
        # All documented error statuses share the same errorCode/errorMessage
        # shape; the original repeated this branch verbatim four times.
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = jbody.get('errorCode')
        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = jbody.get('errorMessage')
    else:
        parsed_body = jbody
    response.body = parsed_body
    return response
Get pricing information for a specified list of Instruments within an Account. Args: accountID: Account Identifier instruments: List of Instruments to get pricing for. since: Date/Time filter to apply to the response. Only prices and home conversions (if requested) with a time later than this filter (i.e. the price has changed after the since time) will be provided, and are filtered independently. includeUnitsAvailable: Flag that enables the inclusion of the unitsAvailable field in the returned Price objects. includeHomeConversions: Flag that enables the inclusion of the homeConversions field in the returned response. An entry will be returned for each currency in the set of all base and quote currencies present in the requested instruments list. Returns: v20.response.Response containing the results from submitting the request
codesearchnet
def recursepath(path, reverse=False):
    """Get all intermediate paths from the root down to the given path.

    Arguments:
        path (str): A PyFilesystem path.
        reverse (bool): Reverse the order of the paths (default `False`).

    Returns:
        list: A list of paths.

    Example:
        >>> recursepath('a/b/c')
        ['/', '/a', '/a/b', '/a/b/c']
    """
    # '' and '/' are both substrings of '/', so this catches the root cases.
    if path in '/':
        return ['/']
    normalized = abspath(normpath(path)) + '/'
    paths = ['/']
    pos = 1
    total = len(normalized)
    while pos < total:
        pos = normalized.find('/', pos)
        paths.append(normalized[:pos])
        pos += 1
    return paths[::-1] if reverse else paths
Get intermediate paths from the root to the given path. Arguments: path (str): A PyFilesystem path reverse (bool): Reverses the order of the paths (default `False`). Returns: list: A list of paths. Example: >>> recursepath('a/b/c') ['/', '/a', '/a/b', '/a/b/c']
codesearchnet
def dispatch(op, args, kwargs):
    """Return the result from the first successful dispatcher for an op.

    Tries every `OpDispatcher` registered on `op`, then every global
    dispatcher, returning the first result that is not `NOT_SUPPORTED`.

    Args:
        op: Python function: the operation to dispatch for.
        args: Positional arguments to the operation.
        kwargs: Keyword arguments to the operation.

    Returns:
        The operation's result, or `OpDispatcher.NOT_SUPPORTED` when no
        registered dispatcher can handle the given arguments.
    """
    for handler in getattr(op, FALLBACK_DISPATCH_ATTR):
        outcome = handler.handle(args, kwargs)
        if outcome is not OpDispatcher.NOT_SUPPORTED:
            return outcome
    for handler in _GLOBAL_DISPATCHERS:
        outcome = handler.handle(op, args, kwargs)
        if outcome is not OpDispatcher.NOT_SUPPORTED:
            return outcome
    return OpDispatcher.NOT_SUPPORTED
Returns the result from the first successful dispatcher for a given op. Calls the `handle` method of each `OpDispatcher` that has been registered to handle `op`, and returns the value from the first successful handler. Args: op: Python function: the operation to dispatch for. args: The arguments to the operation. kwargs: They keyword arguments to the operation. Returns: The result of the operation, or `NOT_SUPPORTED` if no registered dispatcher can handle the given arguments.
github-repos
# Removes a registered analyzer helper keyed by its type indicator, flushing
# the cache for its format categories first; raises KeyError when no helper is
# registered under that indicator.
def DeregisterHelper(cls, analyzer_helper): if (analyzer_helper.type_indicator not in cls._analyzer_helpers): raise KeyError('Analyzer helper object not set for type indicator: {0:s}.'.format(analyzer_helper.type_indicator)) analyzer_helper = cls._analyzer_helpers[analyzer_helper.type_indicator] cls._FlushCache(analyzer_helper.format_categories) del cls._analyzer_helpers[analyzer_helper.type_indicator]
Deregisters a format analyzer helper. Args: analyzer_helper (AnalyzerHelper): analyzer helper. Raises: KeyError: if analyzer helper object is not set for the corresponding type indicator.
codesearchnet
# Sets a configuration option: warns (but still proceeds) when no writeback
# store is attached, refuses to modify constants (ConstModificationError),
# escapes text values, records the value in writeback_data, and flushes the
# cache so the new value is visible.
def Set(self, name, value): if (self.writeback is None): logging.warning('Attempting to modify a read only config object for %s.', name) if (name in self.constants): raise ConstModificationError(('Attempting to modify constant value %s' % name)) writeback_data = self.writeback_data if (value is not None): if isinstance(value, Text): value = self.EscapeString(value) writeback_data[name] = value self.FlushCache()
Update the configuration option with a new value. Note that this forces the value to be set for all contexts. The value is written to the writeback location if Save() is later called. Args: name: The name of the parameter to set. value: The value to set it to. The value will be validated against the option's type descriptor. Raises: ConstModificationError: When attempting to change a constant option.
codesearchnet
# Records the new log prefix in the adapter's `extra` mapping under the
# well-known PrefixLoggerAdapter key, logging the change at debug level first.
def set_log_prefix(self, prefix: str) -> None: self.debug('Setting the log prefix to "%s".', prefix) self.extra[PrefixLoggerAdapter.EXTRA_KEY_LOG_PREFIX] = prefix
Sets the log prefix to the given string. Args: prefix: The new log prefix.
github-repos
def move_to_destination(source, destination, job_name, sagemaker_session):
    """Move `source` to `destination`. Can handle uploading to S3.

    Args:
        source (str): root directory to move.
        destination (str): file:// or s3:// URI that source will be moved to.
        job_name (str): SageMaker job name, appended to the destination path.
        sagemaker_session (sagemaker.Session): session used to interact with
            S3 if needed.

    Returns:
        str: destination URI.

    Raises:
        ValueError: if the destination URI scheme is neither file:// nor s3://.
    """
    parsed_uri = urlparse(destination)
    if parsed_uri.scheme == 'file':
        recursive_copy(source, parsed_uri.path)
        final_uri = destination
    elif parsed_uri.scheme == 's3':
        bucket = parsed_uri.netloc
        path = '%s%s' % (parsed_uri.path.lstrip('/'), job_name)
        # NOTE(review): these strings were truncated in the source at '//';
        # reconstructed as the canonical 's3://<bucket>/<path>' form.
        final_uri = 's3://%s/%s' % (bucket, path)
        sagemaker_session.upload_data(source, bucket, path)
    else:
        raise ValueError(
            'Invalid destination URI, must be s3:// or file://, got: %s' % destination)
    # The source directory is consumed by the move in all successful cases.
    shutil.rmtree(source)
    return final_uri
move source to destination. Can handle uploading to S3 Args: source (str): root directory to move destination (str): file:// or s3:// URI that source will be moved to. job_name (str): SageMaker job name. sagemaker_session (sagemaker.Session): a sagemaker_session to interact with S3 if needed Returns: (str): destination URI
juraj-google-style
def _has_connection(hostname, port): try: host = socket.gethostbyname(hostname) socket.create_connection((host, port), 2) return True except Exception: return False
Checks if internet connection exists to host via specified port. If any exception is raised while trying to open a socket this will return false. Args: hostname (str): Hostname to connect to. port (int): Port to connect to Returns: bool: Has connection or not
juraj-google-style
# Writes one Graphviz .dot file per function and modifier of every contract,
# rendering the SlithIR CFG; files are named
# '<original_filename>-<contract>-<function full name>.dot'.
def output(self, original_filename): for contract in self.contracts: for function in contract.functions + contract.modifiers: filename = "{}-{}-{}.dot".format(original_filename, contract.name, function.full_name) self.info('Export {}'.format(filename)) function.slithir_cfg_to_dot(filename)
Export each contract function's and modifier's SlithIR CFG to a Graphviz .dot file. Args: original_filename(string): base name used to build the exported filenames.
juraj-google-style
# Dumps `dictionary` to a YAML file: pyaml pretty-printer when pretty=True,
# otherwise yamlloader's ordered CDumper (preserves OrderedDict order).
# NOTE(review): with sortkeys=True it only casts to a plain dict and relies on
# the dumper's default key sorting -- confirm that is the intended behavior.
def save_yaml(dictionary, path, pretty=False, sortkeys=False): if sortkeys: dictionary = dict(dictionary) with open(path, 'w') as f: if pretty: pyaml.dump(dictionary, f) else: yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper)
Save dictionary to YAML file preserving order if it is an OrderedDict Args: dictionary (Dict): Python dictionary to save path (str): Path to YAML file pretty (bool): Whether to pretty print. Defaults to False. sortkeys (bool): Whether to sort dictionary keys. Defaults to False. Returns: None
codesearchnet
def relative_probability(self, l1, l2, c1, c2):
    """Relative probability of a jump between two specific sites.

    Combines the site-energy difference (when site energies are defined) with
    a nearest-neighbour energy contribution (when nn_energy is set) and feeds
    the total into the Metropolis acceptance function.

    Args:
        l1 (Str): Site label of the initial site.
        l2 (Str): Site label of the final site.
        c1 (Int): Coordination number of the initial site.
        c2 (Int): Coordination number of the final site.

    Returns:
        (Float): The relative probability of this jump occurring.
    """
    delta_E = 0.0
    if self.site_energies:
        delta_E = self.site_energies[l2] - self.site_energies[l1]
    if self.nn_energy:
        delta_E += (c2 - c1 - 1) * self.nn_energy
    return metropolis(delta_E)
The relative probability for a jump between two sites with specific site types and coordination numbers. Args: l1 (Str): Site label for the initial site. l2 (Str): Site label for the final site. c1 (Int): Coordination number for the initial site. c2 (Int): Coordination number for the final site. Returns: (Float): The relative probability of this jump occurring.
juraj-google-style
# Classmethod that rebuilds a DNA tree from a flat list of decision values by
# consuming them in dna_spec traversal order (spaces recurse into elements,
# single-choice categoricals consume one validated candidate index, multi-
# choice categoricals recurse into their choice specs, everything else takes
# the raw value). Raises ValueError when the list is too short, too long, or
# a categorical index is out of range.
def from_numbers(cls, dna_values: List[Union[int, float, str]], dna_spec: DNASpec) -> 'DNA': context = dict(index=0) def _next_decision(): if context['index'] >= len(dna_values): raise ValueError(f'The input {dna_values!r} is too short for {dna_spec!r}.') decision = dna_values[context['index']] context['index'] += 1 return decision def _bind_decisions(dna_spec): value = None children = None if dna_spec.is_space: children = [_bind_decisions(elem) for elem in dna_spec.elements] elif dna_spec.is_categorical: if dna_spec.num_choices == 1: value = _next_decision() if value < 0 or value >= len(dna_spec.candidates): raise ValueError(f"Candidate index out of range at choice '{dna_spec.name or dna_spec.id}'. Index={value}, Number of candidates={len(dna_spec.candidates)}.") children = [_bind_decisions(dna_spec.candidates[value])] else: children = [_bind_decisions(spec) for spec in dna_spec.choice_specs] else: value = _next_decision() return DNA(value, children, spec=dna_spec) dna = _bind_decisions(dna_spec) if context['index'] != len(dna_values): end_pos = context['index'] raise ValueError(f'The input {dna_values!r} is too long for {dna_spec!r}. Remaining: {dna_values[end_pos:]!r}.') return dna
Create a DNA from a flattened list of dna values. Args: dna_values: A list of DNA values. dna_spec: DNASpec that interprets the dna values. Returns: A DNA object.
github-repos
def set_control_scheme(self, agent_name, control_scheme):
    """Set the control scheme for a specific agent.

    Args:
        agent_name (str): Name of the agent to update. Unknown names are
            reported on stdout and otherwise ignored.
        control_scheme (int): A control scheme value
            (see :obj:`holodeck.agents.ControlSchemes`).
    """
    if agent_name in self.agents:
        self.agents[agent_name].set_control_scheme(control_scheme)
    else:
        print("No such agent %s" % agent_name)
Set the control scheme for a specific agent. Args: agent_name (str): The name of the agent to set the control scheme for. control_scheme (int): A control scheme value (see :obj:`holodeck.agents.ControlSchemes`)
juraj-google-style
def _get_flags_defined_by_module(self, module): if not isinstance(module, str): module = module.__name__ return list(self.flags_by_module_dict().get(module, []))
Returns the list of flags defined by a module. Args: module: module|str, the module to get flags from. Returns: [Flag], a new list of Flag instances. Caller may update this list as desired: none of those changes will affect the internals of this FlagValue instance.
juraj-google-style
# Creates an alias at alias_path pointing to target_path, replacing any
# existing alias first. On Windows this writes a .lnk shortcut via the
# WScript.Shell COM object (appending '.lnk' when missing); elsewhere it
# creates a symlink.
def create_alias(target_path, alias_path): if platform.system() == 'Windows' and (not alias_path.endswith('.lnk')): alias_path += '.lnk' if os.path.lexists(alias_path): os.remove(alias_path) if platform.system() == 'Windows': from win32com import client shell = client.Dispatch('WScript.Shell') shortcut = shell.CreateShortCut(alias_path) shortcut.Targetpath = target_path shortcut.save() else: os.symlink(target_path, alias_path)
Creates an alias at 'alias_path' pointing to the file 'target_path'. On Unix, this is implemented via symlink. On Windows, this is done by creating a Windows shortcut file. Args: target_path: Destination path that the alias should point to. alias_path: Path at which to create the new alias.
github-repos
def point_probability(self, threshold):
    """Probability of exceeding a threshold at each grid point.

    Computed as the fraction of ensemble members meeting or exceeding the
    threshold at every point.

    Args:
        threshold: Member values >= threshold count as 1, otherwise 0.

    Returns:
        EnsembleConsensus
    """
    point_prob = np.zeros(self.data.shape[1:])
    for t in range(self.data.shape[1]):
        # Fixed: the original `self.data[(:, t)]` is invalid index syntax;
        # the mean over axis 0 averages across ensemble members.
        point_prob[t] = np.where(self.data[:, t] >= threshold, 1.0, 0.0).mean(axis=0)
    return EnsembleConsensus(point_prob, 'point_probability', self.ensemble_name,
                             self.run_date,
                             self.variable + '_{0:0.2f}_{1}'.format(
                                 threshold, self.units.replace(' ', '_')),
                             self.start_date, self.end_date, '')
Determine the probability of exceeding a threshold at a grid point based on the ensemble forecasts at that point. Args: threshold: If >= threshold assigns a 1 to member, otherwise 0. Returns: EnsembleConsensus
codesearchnet
# Looks up user_id in the experiment's forcedVariations whitelist; when found,
# resolves the variation key through the project config, logs the forcing, and
# returns the variation. Returns None when the user is not forced or the key
# does not resolve.
def get_forced_variation(self, experiment, user_id): forced_variations = experiment.forcedVariations if (forced_variations and (user_id in forced_variations)): variation_key = forced_variations.get(user_id) variation = self.config.get_variation_from_key(experiment.key, variation_key) if variation: self.logger.info(('User "%s" is forced in variation "%s".' % (user_id, variation_key))) return variation return None
Determine if a user is forced into a variation for the given experiment and return that variation. Args: experiment: Object representing the experiment for which user is to be bucketed. user_id: ID for the user. Returns: Variation in which the user with ID user_id is forced into. None if no variation.
codesearchnet
# Thin pass-through to the base strategy's run(): executes `fn` on each
# compute replica with the given per-replica args/kwargs and run options.
def run(self, fn, args=(), kwargs=None, options=None): return super(CentralStorageStrategy, self).run(fn, args, kwargs, options)
Run `fn` on each replica, with the given arguments. In `CentralStorageStrategy`, `fn` is called on each of the compute replicas, with the provided "per replica" arguments specific to that device. Args: fn: The function to run. The output must be a `tf.nest` of `Tensor`s. args: (Optional) Positional arguments to `fn`. kwargs: (Optional) Keyword arguments to `fn`. options: (Optional) An instance of `tf.distribute.RunOptions` specifying the options to run `fn`. Returns: Return value from running `fn`.
github-repos
def get_if_not_set(self, addresses):
    """Returns the value at each address if it was an input but never set.

    An entry is None when the address was never set in the merkle database
    or is not within the context.

    Args:
        addresses (list of str): The full 70 character addresses.

    Returns:
        (list): bytes at each address, or None where not applicable.
    """
    with self._lock:
        return [self._get_if_not_set(address) for address in addresses]
Returns the value at an address if it was an input to the txn but never set. It returns None if that address was never set in the merkle database, or if the address is not within the context. Args: addresses (list of str): The full 70 character addresses. Returns: (list): bytes at that address but not set within the context
juraj-google-style
# Opens the BDE volume for a path spec: requires a parent spec (PathSpecError
# otherwise), extracts credentials from the spec, opens the parent file
# object, then opens and returns a pybde volume over it.
# NOTE(review): the call is `bde.BDEVolumeOpen` while the module constructing
# the volume is `pybde` -- confirm the helper-module alias at file level.
def _OpenFileObject(self, path_spec): if not path_spec.HasParent(): raise errors.PathSpecError( 'Unsupported path specification without parent.') resolver.Resolver.key_chain.ExtractCredentialsFromPathSpec(path_spec) file_object = resolver.Resolver.OpenFileObject( path_spec.parent, resolver_context=self._resolver_context) bde_volume = pybde.volume() bde.BDEVolumeOpen( bde_volume, path_spec, file_object, resolver.Resolver.key_chain) return bde_volume
Opens the file-like object defined by path specification. Args: path_spec (PathSpec): path specification. Returns: pyvde.volume: BDE volume file-like object. Raises: PathSpecError: if the path specification is incorrect.
juraj-google-style
# Emits a CLI blank line by delegating to generate_output with is_parent=True;
# `line` selects the indentation level and `colorize` toggles ANSI colors.
def generate_output_newline(self, line='0', colorize=True): return generate_output( line=line, is_parent=True, colorize=colorize )
The function for generating a CLI output new line. Args: line (:obj:`str`): The line number (0-4). Determines indentation. Defaults to '0'. colorize (:obj:`bool`): Colorize the console output with ANSI colors. Defaults to True. Returns: str: The generated output.
juraj-google-style
# Fixes the naive adjoint construction: runs Defined and ReachingDefinitions
# analyses over the primal (node.body[0]) and adjoint (node.body[1]), then
# applies the Clean/Fix stack and grad passes so pushes and gradient
# accumulations are valid. Returns (node, defined-at-primal-exit,
# reaching-definitions-at-primal-exit) so split mode can carry state over.
def _fix(node): pri_cfg = cfg.CFG.build_cfg(node.body[0]) defined = cfg.Defined() defined.visit(pri_cfg.entry) reaching = cfg.ReachingDefinitions() reaching.visit(pri_cfg.entry) cfg.forward(node.body[1], cfg.Defined()) cfg.forward(node.body[1], cfg.ReachingDefinitions()) fixes.CleanStack().visit(node) fixes.FixStack().visit(node.body[0]) fixes.CleanGrad().visit(node.body[1]) fixes.FixGrad().visit(node.body[1]) return (node, defined.exit, reaching.exit)
Fix the naive construction of the adjont. See `fixes.py` for details. This function also returns the result of reaching definitions analysis so that `split` mode can use this to carry over the state from primal to adjoint. Args: node: A module with the primal and adjoint function definitions as returned by `reverse_ad`. Returns: node: A module with the primal and adjoint function with additional variable definitions and such added so that pushes onto the stack and gradient accumulations are all valid. defined: The variables defined at the end of the primal. reaching: The variable definitions that reach the end of the primal.
codesearchnet
def dump(self):
    """Serialize the state of this stream walker.

    Returns:
        dict: The serialized state with 'selector' and 'reading' keys;
        'reading' is the reading's dict form, or None when unset.
    """
    serialized_reading = None if self.reading is None else self.reading.asdict()
    return {u'selector': str(self.selector), u'reading': serialized_reading}
Serialize the state of this stream walker. Returns: dict: The serialized state.
codesearchnet
# Coroutine (old `yield from` style): first consults the cached robots.txt
# pool; on NotInPoolError it fetches robots.txt (optionally writing the
# contents to `file`) and re-checks the pool.
def can_fetch(self, request: Request, file=None) -> bool: try: return self.can_fetch_pool(request) except NotInPoolError: pass yield from self.fetch_robots_txt(request, file=file) return self.can_fetch_pool(request)
Return whether the request can fetched. Args: request: Request. file: A file object to where the robots.txt contents are written. Coroutine.
juraj-google-style
# Constructor for AdWordsReportError: passes `message` (or a default embedding
# the HTTP status code) to the base class, then stores code, error, and the
# response body content on the instance.
def __init__(self, code, error, content, message=None): super(AdWordsReportError, self).__init__( message if message else ('AdWords report download failed with HTTP ' 'status code: %s' % code)) self.code = code self.error = error self.content = content
Initializes an AdWordsReportError. Args: code: The HTTP status code number that was returned. error: The urllib2.HTTPError (Python 2) or urllib.error.HTTPError (Python 3) describing the failure. content: The HTTP response body as a string. [optional] message: A user-friendly error message string. If one is not provided, a default message will be used.
juraj-google-style
# Waits until self.notification is an instance of notification_class and
# returns it, with a failure message naming the expected class. When called
# with notification_class=None it instead waits for any open notification to
# be dismissed (self.notification becomes None) and returns None.
def wait_for_notification(self, notification_class=BaseNotification): if notification_class: if (notification_class is BaseNotification): message = 'No notification was shown.' else: message = '{0} was not shown.'.format(notification_class.__name__) self.wait.until((lambda _: isinstance(self.notification, notification_class)), message=message) return self.notification else: self.wait.until((lambda _: (self.notification is None)), message='Unexpected notification shown.')
Wait for the specified notification to be displayed. Args: notification_class (:py:class:`BaseNotification`, optional): The notification class to wait for. If `None` is specified it will wait for any notification to be closed. Defaults to `BaseNotification`. Returns: :py:class:`BaseNotification`: Firefox notification.
codesearchnet
def rescale_data(data, data_min, data_max, out_min=0.0, out_max=100.0):
    """Linearly rescale input data into the [out_min, out_max] range.

    Args:
        data: 2D or 3D ndarray (or scalar) being rescaled.
        data_min: minimum value of the input data for scaling purposes.
        data_max: maximum value of the input data for scaling purposes.
        out_min: minimum value of the scaled data.
        out_max: maximum value of the scaled data.

    Returns:
        Linearly scaled ndarray (or scalar).
    """
    scale = (out_max - out_min) / (data_max - data_min)
    return scale * (data - data_min) + out_min
Rescale your input data so that is ranges over integer values, which will perform better in the watershed. Args: data: 2D or 3D ndarray being rescaled data_min: minimum value of input data for scaling purposes data_max: maximum value of input data for scaling purposes out_min: minimum value of scaled data out_max: maximum value of scaled data Returns: Linearly scaled ndarray
codesearchnet
def run(run_type, module, config):
    """Launch a test suite run and print framing banners around it.

    Args:
        run_type: String representation of the run type (e.g. verification).
        module: The module corresponding to the run; must expose a
            run_suite-compatible entry point consumed by run_quiet.
        config: The configuration for the module.

    Returns:
        The summary produced by run_quiet.
    """
    banner = ' -----------------------------------------------------------------'
    print(banner)
    print(' Beginning ' + run_type.lower() + ' test suite ')
    print(banner)
    print('')
    summary = run_quiet(module, config)
    print(banner)
    print(' ' + run_type.capitalize() + ' test suite complete ')
    print(banner)
    print('')
    return summary
Collects the analyses cases to be run and launches processes for each of them. Args: run_type: A string representation of the run type (eg. verification) module: The module corresponding to the run. Must have a run_suite function config: The configuration for the module
codesearchnet
def _feed_to_dict(self, parse=True): if not self.raw_feed: return None result = [] header = self.raw_feed[0] for line in self.raw_feed[1:]: if line and ''.join(line): i = 0 item = {} for index, column_header in enumerate(header): if index < len(line): item[column_header] = self._parse_value(line[index].strip()) if parse else line[index].strip() else: item[column_header] = '' result.append(item) else: break return result
Turns a raw feed from Google Sheets into a list of dictionaries. Args: raw_feed: List of list of strings representing the feed from Google Sheets. Returns: List of dictionaries with the data from the feed
github-repos
# After the normal marshmallow field binding, points this field's `attribute`
# at the field name with '_id' stripped -- but only when the schema enables
# FK conversion via the 'convert_fks' option (default True).
def _add_to_schema(self, field_name, schema): super(ForeignKeyField, self)._add_to_schema(field_name, schema) if self.get_field_value('convert_fks', default=True): self.attribute = field_name.replace('_id', '')
Set the ``attribute`` attr to the field in question so this always gets deserialzed into the field name without ``_id``. Args: field_name (str): The name of the field (the attribute name being set in the schema). schema (marshmallow.Schema): The actual parent schema this field belongs to.
juraj-google-style
def write_entry_to_file(file_descriptor, entry_comment, entry_key):
    """Writes a localization entry to a .strings file.

    Args:
        file_descriptor (file): The file to write the entry to.
        entry_comment (str): The entry's comment.
        entry_key (str): The entry's key (also used as the default value).
    """
    # Escape double quotes in the key that are not already escaped.
    escaped_key = re.sub('([^\\\\])"', '\\1\\"', entry_key)
    # Fixed: the original format string had lost its placeholder
    # (u'\n' % entry_comment raises TypeError). Reconstructed as the standard
    # /* ... */ comment preceding a .strings entry.
    file_descriptor.write(u'\n/* %s */\n' % entry_comment)
    file_descriptor.write(u'"%s" = "%s";\n' % (escaped_key, escaped_key))
Writes a localization entry to the file Args: file_descriptor (file, instance): The file to write the entry to. entry_comment (str): The entry's comment. entry_key (str): The entry's key.
codesearchnet
def _validate_config(config):
    """Verifies that a config dict for an attenuator device is valid.

    Args:
        config: A dict that is the configuration for an attenuator device.

    Raises:
        Error: if a required key is missing from the config.
    """
    required_keys = [KEY_ADDRESS, KEY_MODEL, KEY_PORT, KEY_PATHS]
    for key in required_keys:
        if key not in config:
            # Fixed: interpolate the message; the original passed the format
            # string and a tuple as two separate constructor arguments, so
            # the message was never formatted.
            raise Error('Required key %s missing from config %s' % (key, config))
Verifies that a config dict for an attenuator device is valid. Args: config: A dict that is the configuration for an attenuator device. Raises: attenuator.Error: A config is not valid.
github-repos
# Splits `results` into consecutive groups of num_consumers elements and
# asserts every group is a run of consecutive integers (group[0]+offset),
# i.e. each group was produced by a single worker; failure lists the
# offending groups alongside all groups.
def checkCoordinatedReadGroups(self, results, num_consumers): groups = [results[start:start + num_consumers] for start in range(0, len(results), num_consumers)] incorrect_groups = [] for group in groups: for offset in range(1, len(group)): if group[0] + offset != group[offset]: incorrect_groups.append(group) break self.assertEmpty(incorrect_groups, 'Incorrect groups: {}.\nAll groups: {}'.format(incorrect_groups, groups))
Validates results from a `make_coordinted_read_dataset` dataset. Each group of `num_consumers` results should be consecutive, indicating that they were produced by the same worker. Args: results: The elements produced by the dataset. num_consumers: The number of consumers.
github-repos
# Returns the task ids of a running workflow: raises WorkflowError when the
# workflow has no id (not running) and NotImplementedError for batch
# workflows; otherwise fetches the workflow record and extracts each task id.
def task_ids(self): if not self.id: raise WorkflowError('Workflow is not running. Cannot get task IDs.') if self.batch_values: raise NotImplementedError("Query Each Workflow Id within the Batch Workflow for task IDs.") wf = self.workflow.get(self.id) return [task['id'] for task in wf['tasks']]
Get the task IDs of a running workflow Args: None Returns: List of task IDs
juraj-google-style
def module_help(self, module):
    """Describes the key flags of a module.

    Args:
        module: module|str, the module to describe the key flags for.

    Returns:
        str, describing the key flags of a module.
    """
    lines = []
    self._render_our_module_key_flags(module, lines)
    return '\n'.join(lines)
Describes the key flags of a module. Args: module: module|str, the module to describe the key flags for. Returns: str, describing the key flags of a module.
juraj-google-style
def _get_nan_block_id(partition_class, n_row=1, n_col=1, transpose=False):
    """A memory efficient way to get a block of NaNs.

    Blocks are cached in the module-level _NAN_BLOCKS dict keyed by shape, so
    repeated requests for the same shape reuse one object.

    Args:
        partition_class (BaseFramePartition): The class used to put the
            object in the remote format.
        n_row (int): The number of rows.
        n_col (int): The number of columns.
        transpose (bool): If True, swap rows and columns.

    Returns:
        ObjectID of the NaN block.
    """
    global _NAN_BLOCKS
    if transpose:
        n_row, n_col = n_col, n_row
    shape = (n_row, n_col)
    if shape not in _NAN_BLOCKS:
        # Fixed: np.NaN was removed in NumPy 2.0; np.nan is the canonical name.
        arr = np.tile(np.array(np.nan), shape)
        _NAN_BLOCKS[shape] = partition_class.put(pandas.DataFrame(data=arr))
    return _NAN_BLOCKS[shape]
A memory efficient way to get a block of NaNs. Args: partition_class (BaseFramePartition): The class to use to put the object in the remote format. n_row(int): The number of rows. n_col(int): The number of columns. transpose(bool): If true, swap rows and columns. Returns: ObjectID of the NaN block.
juraj-google-style
# AWD-LSTM encoder forward pass: resets hidden state when the batch size
# changes, embeds the input with embedding dropout (disabled in eval mode)
# then input dropout, runs the stacked RNNs applying dropouth between layers
# (not after the last), repackages the new hidden state to cut the backprop
# graph, and returns per-layer outputs both before and after hidden dropout.
def forward(self, input): sl,bs = input.size() if bs!=self.bs: self.bs=bs self.reset() with set_grad_enabled(self.training): emb = self.encoder_with_dropout(input, dropout=self.dropoute if self.training else 0) emb = self.dropouti(emb) raw_output = emb new_hidden,raw_outputs,outputs = [],[],[] for l, (rnn,drop) in enumerate(zip(self.rnns, self.dropouths)): current_input = raw_output with warnings.catch_warnings(): warnings.simplefilter("ignore") raw_output, new_h = rnn(raw_output, self.hidden[l]) new_hidden.append(new_h) raw_outputs.append(raw_output) if l != self.n_layers - 1: raw_output = drop(raw_output) outputs.append(raw_output) self.hidden = repackage_var(new_hidden) return raw_outputs, outputs
Invoked during the forward propagation of the RNN_Encoder module. Args: input (Tensor): input of shape (sentence length x batch_size) Returns: raw_outputs (tuple(list (Tensor), list(Tensor)): list of tensors evaluated from each RNN layer without using dropouth, list of tensors evaluated from each RNN layer using dropouth,
juraj-google-style
def _get_value(self, scalar_data_blob, dtype_enum):
    """Obtains the value for a scalar event given its blob and dtype enum.

    Args:
        scalar_data_blob: The blob obtained from the database.
        dtype_enum: The enum representing the dtype.

    Returns:
        The scalar value as a native Python number.
    """
    tensorflow_dtype = tf.DType(dtype_enum)
    buf = np.frombuffer(scalar_data_blob, dtype=tensorflow_dtype.as_numpy_dtype)
    # Fixed: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
    # ndarray.item() is the equivalent for a size-1 array.
    return buf.item()
Obtains value for scalar event given blob and dtype enum. Args: scalar_data_blob: The blob obtained from the database. dtype_enum: The enum representing the dtype. Returns: The scalar value.
juraj-google-style
# Decorator factory: caches the wrapped function's JSON-serializable result at
# CACHE_DIRECTORY + filename. On later calls the cached file is returned and
# the function is not invoked.
# NOTE(review): the cache key ignores call arguments -- all argument values
# share one cached result; confirm that is intended for the wrapped functions.
def cache_json(filename): def cache_decorator(cacheable_function): @wraps(cacheable_function) def cache_wrapper(*args, **kwargs): path = CACHE_DIRECTORY + filename check_create_folder(path) if os.path.exists(path): with open(path) as infile: return json.load(infile) else: function_output = cacheable_function(*args, **kwargs) with open(path, 'w') as outfile: json.dump(function_output, outfile) return function_output return cache_wrapper return cache_decorator
Caches the JSON-serializable output of the function to a given file Args: filename (str) The filename (sans directory) to store the output Returns: decorator, applicable to a function that produces JSON-serializable output
juraj-google-style
def imread(img_or_path, flag='color'):
    """Read an image.

    Args:
        img_or_path (ndarray or str): Either an already-loaded image array
            (returned unchanged) or a path to an image file.
        flag (str or int): Color type of the loaded image; string candidates
            are `color`, `grayscale` and `unchanged`.

    Returns:
        ndarray: Loaded image array.

    Raises:
        TypeError: If `img_or_path` is neither an ndarray nor a string.
    """
    if isinstance(img_or_path, np.ndarray):
        return img_or_path
    if not is_str(img_or_path):
        raise TypeError('"img" must be a numpy array or a filename')
    # Resolve string flags to their cv2 integer codes before touching the file.
    if is_str(flag):
        flag = imread_flags[flag]
    check_file_exist(img_or_path,
                     'img file does not exist: {}'.format(img_or_path))
    return cv2.imread(img_or_path, flag)
Read an image. Args: img_or_path (ndarray or str): Either a numpy array or image path. If it is a numpy array (loaded image), then it will be returned as is. flag (str): Flags specifying the color type of a loaded image, candidates are `color`, `grayscale` and `unchanged`. Returns: ndarray: Loaded image array.
juraj-google-style
def get_unique_directives(ast):
    """Return a dict mapping directive name to directive object for an AST node.

    Directives allowed to appear multiple times are skipped entirely. For all
    other directives, duplicates raise GraphQLCompilationError.

    Args:
        ast: GraphQL AST node, obtained from the graphql library.

    Returns:
        dict of string to directive object.
    """
    if not ast.directives:
        return {}
    result = {}
    for directive_obj in ast.directives:
        directive_name = directive_obj.name.value
        if directive_name in ALLOWED_DUPLICATED_DIRECTIVES:
            continue
        if directive_name in result:
            raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same '
                                          u'location: {} {}'.format(directive_name, ast.directives))
        result[directive_name] = directive_obj
    return result
Return a dict of directive name to directive object for the given AST node. Any directives that are allowed to exist more than once on any AST node are ignored. For any directives that can only exist up to once, we verify that they are not duplicated raising GraphQLCompilationError in case we find them more than once on the AST node. Args: ast: GraphQL AST node, obtained from the graphql library Returns: dict of string to directive object
juraj-google-style
def do_put(endpoint, body, access_token):
    """Do an HTTP PUT request and return the response.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        body (str): JSON body of information to put.
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response with a JSON body.
    """
    headers = {
        "content-type": "application/json",
        "Authorization": 'Bearer ' + access_token,
        'User-Agent': get_user_agent(),
    }
    return requests.put(endpoint, data=body, headers=headers)
Do an HTTP PUT request and return JSON. Args: endpoint (str): Azure Resource Manager management endpoint. body (str): JSON body of information to put. access_token (str): A valid Azure authentication token. Returns: HTTP response. JSON body.
juraj-google-style
def get_quats(self) -> torch.Tensor:
    """Return the underlying rotation as a quaternion tensor.

    If the rotation is stored as rotation matrices, it is converted;
    conversion may involve an eigendecomposition.

    Returns:
        torch.Tensor: The rotation as a quaternion tensor.

    Raises:
        ValueError: If neither representation is populated.
    """
    if self._rot_mats is not None:
        return rot_to_quat(self._rot_mats)
    if self._quats is not None:
        return self._quats
    raise ValueError('Both rotations are None')
Returns the underlying rotation as a quaternion tensor. Depending on whether the Rotation was initialized with a quaternion, this function may call torch.linalg.eigh. Returns: The rotation as a quaternion tensor.
github-repos
def __call__(self, context):
    """Execute the operation: bump the configured counter.

    Args:
        context: mapreduce context as context.Context.
    """
    counters = context._counters
    counters.increment(self.counter_name, self.delta)
Execute operation. Args: context: mapreduce context as context.Context.
juraj-google-style
def instance_default(self, obj):
    """Get the default value that will be used for a specific instance.

    Args:
        obj (HasProps): The instance to get the default value for.

    Returns:
        object: The themed default for this property on `obj`.
    """
    themed = obj.themed_values()
    return self.property.themed_default(obj.__class__, self.name, themed)
Get the default value that will be used for a specific instance. Args: obj (HasProps) : The instance to get the default value for. Returns: object
codesearchnet
def create(self, subject, displayName, issuerToken, expiration, secret):
    """Create a new guest issuer token and exchange it for an access token.

    Args:
        subject(basestring): Unique and public identifier.
        displayName(basestring): Display name of the guest user.
        issuerToken(basestring): Issuer token from the developer hub.
        expiration(basestring): Expiration time as a unix timestamp.
        secret(basestring): The secret used to sign guest issuers.

    Returns:
        GuestIssuerToken: A guest issuer with a valid access token.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    for argument in (subject, displayName, issuerToken, expiration, secret):
        check_type(argument, basestring)
    claims = {
        'sub': subject,
        'name': displayName,
        'iss': issuerToken,
        'exp': expiration,
    }
    signing_key = base64.b64decode(secret)
    jwt_token = jwt.encode(claims, signing_key, algorithm='HS256')
    url = (((self._session.base_url + API_ENDPOINT) + '/') + 'login')
    headers = {'Authorization': ('Bearer ' + jwt_token.decode('utf-8'))}
    response = requests.post(url, headers=headers)
    check_response_code(response, EXPECTED_RESPONSE_CODE['GET'])
    return self._object_factory(OBJECT_TYPE, response.json())
Create a new guest issuer using the provided issuer token. This function returns a guest issuer with an api access token. Args: subject(basestring): Unique and public identifier displayName(basestring): Display Name of the guest user issuerToken(basestring): Issuer token from developer hub expiration(basestring): Expiration time as a unix timestamp secret(basestring): The secret used to sign your guest issuers Returns: GuestIssuerToken: A Guest Issuer with a valid access token. Raises: TypeError: If the parameter types are incorrect ApiError: If the webex teams cloud returns an error.
codesearchnet
def register_read_multiple(self, register_indices): num_regs = len(register_indices) buf = (ctypes.c_uint32 * num_regs)(*register_indices) data = (ctypes.c_uint32 * num_regs)(0) statuses = (ctypes.c_uint8 * num_regs)(0) res = self._dll.JLINKARM_ReadRegs(buf, data, statuses, num_regs) if res < 0: raise errors.JLinkException(res) return list(data)
Retrieves the values from the registers specified. Args: self (JLink): the ``JLink`` instance register_indices (list): list of registers to read Returns: A list of values corresponding one-to-one for each of the given register indices. The returned list of values are the values in order of which the indices were specified. Raises: JLinkException: if a given register is invalid or an error occurs.
juraj-google-style
def _VerifyGradient(self, pool_func, pool_grad_func, input_sizes, ksize, strides, padding, pool_grad_grad_func=None):
    """Verifies the output values of the pooling gradient function.

    Computes the expected (first- and optionally second-order) gradients on
    CPU and compares them against the gradients produced inside the XLA test
    scope.

    Args:
        pool_func: Forward pooling function.
        pool_grad_func: Pooling gradient function for pool_func.
        input_sizes: Input tensor dimensions.
        ksize: The kernel size dimensions.
        strides: The stride dimensions.
        padding: Padding type.
        pool_grad_grad_func: Second-order gradient function, if available.
    """
    # Expand to NDHWC form: batch and channel dims use size/stride 1.
    ksize = [1] + ksize + [1]
    strides = [1] + strides + [1]
    total_size = np.prod(input_sizes)
    # Deterministic ramp input 1..N reshaped to the requested size.
    x = np.arange(1, total_size + 1, dtype=np.float32).reshape(input_sizes)
    with self.session() as sess:
        # Reference computation is pinned to CPU.
        with ops.device('CPU'):
            inputs = array_ops.placeholder(dtypes.float32, shape=input_sizes)
            outputs = pool_func(inputs, ksize=ksize, strides=strides, padding=padding)
        output_vals = np.array(sess.run(outputs, {inputs: x}))
        output_gradient_vals = np.arange(1, output_vals.size + 1, dtype=np.float32)
        output_gradient_vals = output_gradient_vals.reshape(output_vals.shape)
        output_grad_grad_vals = np.arange(1, x.size + 1, dtype=np.float32)
        output_grad_grad_vals = output_grad_grad_vals.reshape(x.shape)
        with ops.device('CPU'):
            output_gradients = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
            expected_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding)
            expected_input_gradient_vals = sess.run(expected_input_gradients, {inputs: x, output_gradients: output_gradient_vals})
            output_grad_gradients = array_ops.placeholder(dtypes.float32, shape=expected_input_gradient_vals.shape)
            if pool_grad_grad_func is not None:
                expected_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NDHWC')
                expected_grad_gradients_vals = sess.run(expected_grad_gradients, {inputs: x, output_grad_gradients: output_grad_grad_vals})
        # Re-run the gradient ops inside the XLA test scope; forward outputs
        # are fed in directly via a placeholder.
        with self.test_scope():
            outputs = array_ops.placeholder(dtypes.float32, shape=output_vals.shape)
            actual_input_gradients = pool_grad_func(inputs, outputs, output_gradients, ksize=ksize, strides=strides, padding=padding)
            if pool_grad_grad_func is not None:
                actual_grad_gradients = pool_grad_grad_func(inputs, outputs, output_grad_gradients, ksize=ksize, strides=strides, padding=padding, data_format='NDHWC')
        actual = sess.run(actual_input_gradients, {inputs: x, outputs: output_vals, output_gradients: output_gradient_vals})
        # First-order gradients must match the CPU reference.
        self.assertAllClose(expected_input_gradient_vals.flatten(), actual.flatten(), rtol=1e-05, atol=1e-06)
        self.assertShapeEqual(actual, inputs)
        if pool_grad_grad_func is not None:
            actual_grad_gradients_vals = sess.run(actual_grad_gradients, {inputs: x, outputs: output_vals, output_grad_gradients: output_grad_grad_vals})
            # Second-order gradients use a looser relative tolerance.
            self.assertAllClose(expected_grad_gradients_vals, actual_grad_gradients_vals, rtol=0.0001, atol=1e-06)
            self.assertShapeEqual(actual_grad_gradients_vals, outputs)
Verifies the output values of the pooling gradient function. Args: pool_func: Forward pooling function pool_grad_func: Pooling gradient function for pool_grad_func input_sizes: Input tensor dimensions. ksize: The kernel size dimensions strides: The stride dimensions padding: Padding type. pool_grad_grad_func: Second-order gradient function, if available.
github-repos
def render(self, time: float, frame_time: float):
    """Render the assigned example.

    Args:
        time (float): Current time in seconds.
        frame_time (float): Delta time from last frame in seconds.
    """
    example = self.example
    example.render(time, frame_time)
Renders the assigned example Args: time (float): Current time in seconds frame_time (float): Delta time from last frame in seconds
juraj-google-style
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs):
    """Compute the median across the DataFrame.

    Args:
        axis (int): The axis to take the median on (defaults to 0).
        skipna (bool): True to skip NA values, false otherwise.

    Returns:
        The median of the DataFrame as a pandas Series.
    """
    axis = 0 if axis is None else self._get_axis_number(axis)
    # Explicit numeric_only=False requires validating that all dtypes are numeric.
    if numeric_only is not None and not numeric_only:
        self._validate_dtypes(numeric_only=True)
    result = self._query_compiler.median(
        axis=axis,
        skipna=skipna,
        level=level,
        numeric_only=numeric_only,
        **kwargs
    )
    return self._reduce_dimension(result)
Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series)
juraj-google-style
def parse_row(schema, data):
    """Parse a row from query results into an equivalent object.

    Args:
        schema: The array of fields defining the schema of the data.
        data: The JSON row from a query result.

    Returns:
        dict: The parsed row object.
    """
    def parse_value(data_type, value):
        # None passes through; the literal string 'null' also means missing.
        if value is None:
            return None
        if value == 'null':
            return None
        if data_type == 'INTEGER':
            return int(value)
        if data_type == 'FLOAT':
            return float(value)
        if data_type == 'TIMESTAMP':
            return datetime.datetime.utcfromtimestamp(float(value))
        if data_type == 'BOOLEAN':
            return value == 'true'
        # Any other type is coerced to str if it is not one already.
        if type(value) != str:
            return str(value)
        return value

    row = {}
    if data is None:
        return row
    for field, schema_field in zip(data['f'], schema):
        raw = field['v']
        name = schema_field['name']
        data_type = schema_field['type']
        repeated = schema_field.get('mode') == 'REPEATED'
        if repeated and raw is None:
            row[name] = []
        elif data_type == 'RECORD':
            sub_schema = schema_field['fields']
            if repeated:
                row[name] = [Parser.parse_row(sub_schema, item['v']) for item in raw]
            else:
                row[name] = Parser.parse_row(sub_schema, raw)
        elif repeated:
            row[name] = [parse_value(data_type, item['v']) for item in raw]
        else:
            row[name] = parse_value(data_type, raw)
    return row
Parses a row from query results into an equivalent object. Args: schema: the array of fields defining the schema of the data. data: the JSON row from a query result. Returns: The parsed row object.
juraj-google-style
def PrintExtractionStatusHeader(self, processing_status):
    """Print the extraction status header.

    Args:
        processing_status (ProcessingStatus): processing status.
    """
    writer = self._output_writer
    writer.Write('Source path\t\t: {0:s}\n'.format(self._source_path))
    writer.Write('Source type\t\t: {0:s}\n'.format(self._source_type))
    if self._artifact_filters:
        writer.Write('Artifact filters\t: {0:s}\n'.format(
            ', '.join(self._artifact_filters)))
    if self._filter_file:
        writer.Write('Filter file\t\t: {0:s}\n'.format(self._filter_file))
    self._PrintProcessingTime(processing_status)
    self._PrintTasksStatus(processing_status)
    writer.Write('\n')
Prints the extraction status header. Args: processing_status (ProcessingStatus): processing status.
juraj-google-style
def populate_native_libraries(version):
    """Populate ``binary-extension.rst`` with release-specific data.

    Args:
        version (str): The current version.
    """
    with open(BINARY_EXT_TEMPLATE, "r") as template_file:
        template = template_file.read()
    rendered = template.format(revision=version)
    with open(BINARY_EXT_FILE, "w") as output_file:
        output_file.write(rendered)
Populates ``binary-extension.rst`` with release-specific data. Args: version (str): The current version.
juraj-google-style
def _wait_for_and_process_task(self, task):
    """Wait for a task to be ready and process the task.

    Args:
        task: The task to execute.
    """
    function_descriptor = FunctionDescriptor.from_bytes_list(task.function_descriptor_list())
    driver_id = task.driver_id()
    # Actor-creation tasks turn this worker into an actor: instantiate the
    # actor class (without running __init__ here) and set up checkpointing.
    if not task.actor_creation_id().is_nil():
        assert self.actor_id.is_nil()
        self.actor_id = task.actor_creation_id()
        self.actor_creation_task_id = task.task_id()
        actor_class = self.function_actor_manager.load_actor_class(driver_id, function_descriptor)
        self.actors[self.actor_id] = actor_class.__new__(actor_class)
        self.actor_checkpoint_info[self.actor_id] = ActorCheckpointInfo(num_tasks_since_last_checkpoint=0, last_checkpoint_timestamp=int((1000 * time.time())), checkpoint_ids=[])
    execution_info = self.function_actor_manager.get_execution_info(driver_id, function_descriptor)
    function_name = execution_info.function_name
    extra_data = {'name': function_name, 'task_id': task.task_id().hex()}
    # Pick a process title reflecting whether this is a plain worker task,
    # an actor-creation task, or an actor method invocation.
    if task.actor_id().is_nil():
        if task.actor_creation_id().is_nil():
            title = 'ray_worker:{}()'.format(function_name)
            next_title = 'ray_worker'
        else:
            actor = self.actors[task.actor_creation_id()]
            title = 'ray_{}:{}()'.format(actor.__class__.__name__, function_name)
            next_title = 'ray_{}'.format(actor.__class__.__name__)
    else:
        actor = self.actors[task.actor_id()]
        title = 'ray_{}:{}()'.format(actor.__class__.__name__, function_name)
        next_title = 'ray_{}'.format(actor.__class__.__name__)
    with profiling.profile('task', extra_data=extra_data):
        with _changeproctitle(title, next_title):
            self._process_task(task, execution_info)
        # Reset per-task state so the next task starts from a clean slate.
        self.task_context.current_task_id = TaskID.nil()
        self.task_context.task_index = 0
        self.task_context.put_index = 1
        if self.actor_id.is_nil():
            self.task_driver_id = DriverID.nil()
        ray_signal.reset()
    self.function_actor_manager.increase_task_counter(driver_id, function_descriptor)
    # Workers exit once the function reached its max_calls budget; the
    # raylet then replaces this process with a fresh worker.
    reached_max_executions = (self.function_actor_manager.get_task_counter(driver_id, function_descriptor) == execution_info.max_calls)
    if reached_max_executions:
        self.raylet_client.disconnect()
        sys.exit(0)
Wait for a task to be ready and process the task. Args: task: The task to execute.
codesearchnet
def split_arg_into_blocks(block_dims, block_dims_fn, arg, axis=-1):
    """Split `arg` into blocks matching the operators' domain dimensions.

    Args:
        block_dims: Iterable of `TensorShape`s.
        block_dims_fn: Callable returning an iterable of `Tensor`s, used when
            any static dimension is unknown.
        arg: `Tensor` to split.
        axis: Python integer axis to split `arg` on.

    Returns:
        A list of `Tensor`s.
    """
    sizes = [dim.value for dim in block_dims]
    # Fall back to dynamic block sizes if any static size is unknown.
    if any(size is None for size in sizes):
        sizes = block_dims_fn()
    return array_ops.split(arg, sizes, axis=axis)
Split `x` into blocks matching `operators`'s `domain_dimension`. Specifically, if we have a blockwise lower-triangular matrix, with block sizes along the diagonal `[M_j, M_j] j = 0,1,2..J`, this method splits `arg` on `axis` into `J` tensors, whose shape at `axis` is `M_j`. Args: block_dims: Iterable of `TensorShapes`. block_dims_fn: Callable returning an iterable of `Tensor`s. arg: `Tensor`. `arg` is split into `J` tensors. axis: Python `Integer` representing the axis to split `arg` on. Returns: A list of `Tensor`s.
github-repos
def __get__(self, instance, owner):
    """Python descriptor protocol `__get__` magic method.

    Args:
        instance (object): The instance with the descriptor attribute.
        owner (object): The instance's class.

    Returns:
        The cached value for the instance (or the default), optionally
        transformed by a `prepare_<alias>` hook on the instance.
    """
    # Class-level access returns the descriptor itself.
    if not instance and owner:
        return self
    cached = self._cache.get(instance)
    value = self.default if cached is None else cached
    hook_name = 'prepare_' + self.alias
    if hasattr(instance, hook_name):
        return getattr(instance, hook_name)(value)
    return value
Python descriptor protocol `__get__` magic method. Args: instance(object): The instance with descriptor attribute. owner(object): Instance class. Returns: The cached value for the class instance or None.
juraj-google-style
def annotate(self, records, **kwargs):
    """Annotate a set of records with stored fields.

    Args:
        records: A list or iterator (can be a Query object).
        chunk_size: The number of records to annotate at once (max 500).

    Returns:
        A generator yielding one annotated record at a time.
    """
    self.annotator_params.update(**kwargs)
    chunk_size = self.annotator_params.get('chunk_size', self.CHUNK_SIZE)
    chunk = []
    for record in records:
        chunk.append(record)
        # Flush a full chunk through the annotator.
        if len(chunk) == chunk_size:
            for annotated in self._execute(chunk):
                yield annotated
            chunk = []
    # Flush any trailing partial chunk.
    if chunk:
        for annotated in self._execute(chunk):
            yield annotated
Annotate a set of records with stored fields. Args: records: A list or iterator (can be a Query object) chunk_size: The number of records to annotate at once (max 500). Returns: A generator that yields one annotated record at a time.
codesearchnet
def get_jwt_dict(jwt_bu64):
    """Parse a Base64 encoded JWT and return it as a dict.

    The JWT's header and payload JSON objects are merged into a single dict
    of Unicode strings, and a SHA1 hash of the signature is added under the
    `_sig_sha1` key for convenience.

    Args:
        jwt_bu64: bytes JWT, encoded using a URL safe flavor of Base64.

    Returns:
        dict: Values embedded in and derived from the JWT.

    Raises:
        JwtException: If the JWT cannot be decoded.
    """
    jwt_tup = get_jwt_tup(jwt_bu64)
    try:
        result = json.loads(jwt_tup[0].decode('utf-8'))
        result.update(json.loads(jwt_tup[1].decode('utf-8')))
        result['_sig_sha1'] = hashlib.sha1(jwt_tup[2]).hexdigest()
    except TypeError as e:
        raise JwtException('Decode failed. error="{}"'.format(e))
    return result
Parse Base64 encoded JWT and return as a dict. - JWTs contain a set of values serialized to a JSON dict. This decodes the JWT and returns it as a dict containing Unicode strings. - In addition, a SHA1 hash is added to the dict for convenience. Args: jwt_bu64: bytes JWT, encoded using a a URL safe flavor of Base64. Returns: dict: Values embedded in and derived from the JWT.
juraj-google-style
def _release(self, lease):
    """Free the given lease.

    Args:
        lease (lago.subnet_lease.Lease): The lease to free.
    """
    if not lease.exist:
        return
    os.unlink(lease.path)
    LOGGER.debug('Removed subnet lease {}'.format(lease.path))
Free the given lease Args: lease (lago.subnet_lease.Lease): The lease to free
codesearchnet
def _MergeSameId(self):
    """Tries to merge entities based on their ids.

    Entities from the old and new schedules that share an id are merged into
    the merged schedule. Entities which do not merge, or which have no
    same-id counterpart, are migrated unchanged (with an id rename if the
    id would collide in the other schedule).

    Returns:
        The number of merged entities.
    """
    a_not_merged = []
    b_not_merged = []
    # Pass 1: try to merge each old-schedule entity with the same-id entity
    # in the new schedule.
    for a in self._GetIter(self.feed_merger.a_schedule):
        try:
            b = self._GetById(self.feed_merger.b_schedule, self._GetId(a))
        except KeyError:
            # No counterpart in b; migrate a as-is in pass 3.
            a_not_merged.append(a)
            continue
        try:
            self._Add(a, b, self._MergeEntities(a, b))
            self._num_merged += 1
        except MergeError as merge_error:
            # Same id but incompatible: both entities will be migrated.
            a_not_merged.append(a)
            b_not_merged.append(b)
            self._ReportSameIdButNotMerged(self._GetId(a), merge_error)
    # Pass 2: collect new-schedule entities that have no same-id counterpart.
    for b in self._GetIter(self.feed_merger.b_schedule):
        try:
            a = self._GetById(self.feed_merger.a_schedule, self._GetId(b))
        except KeyError:
            b_not_merged.append(b)
    # Pass 3: migrate unmerged entities, renaming ids that would collide
    # with an id present in the opposite schedule.
    for a in a_not_merged:
        newid = self._HasId(self.feed_merger.b_schedule, self._GetId(a))
        self._Add(a, None, self._Migrate(a, self.feed_merger.a_schedule, newid))
    for b in b_not_merged:
        newid = self._HasId(self.feed_merger.a_schedule, self._GetId(b))
        self._Add(None, b, self._Migrate(b, self.feed_merger.b_schedule, newid))
    self._num_not_merged_a = len(a_not_merged)
    self._num_not_merged_b = len(b_not_merged)
    return self._num_merged
Tries to merge entities based on their ids. This tries to merge only the entities from the old and new schedules which have the same id. These are added into the merged schedule. Entities which do not merge or do not have the same id as another entity in the other schedule are simply migrated into the merged schedule. This method is less flexible than _MergeDifferentId since it only tries to merge entities which have the same id while _MergeDifferentId tries to merge everything. However, it is faster and so should be used whenever possible. This method makes use of various methods like _Merge and _Migrate which are not implemented in the abstract DataSetMerger class. These method should be overwritten in a subclass to allow _MergeSameId to work with different entity types. Returns: The number of merged entities.
codesearchnet
def op(scalars_layout, collections=None):
    """Create a summary that contains a custom scalars layout.

    Args:
        scalars_layout: The scalars_layout_pb2.Layout proto specifying the
            layout shown on the custom scalars dashboard.
        collections: Optional list of graph collection keys to add the new
            summary op to. Defaults to `[Graph Keys.SUMMARIES]`.

    Returns:
        A tensor summary op that writes the layout to disk.
    """
    # Imported here (not at module level) to keep TF an optional dependency.
    import tensorflow.compat.v1 as tf
    assert isinstance(scalars_layout, layout_pb2.Layout)
    summary_metadata = metadata.create_summary_metadata()
    serialized_layout = tf.constant(scalars_layout.SerializeToString(), dtype=tf.string)
    return tf.summary.tensor_summary(
        name=metadata.CONFIG_SUMMARY_TAG,
        tensor=serialized_layout,
        collections=collections,
        summary_metadata=summary_metadata)
Creates a summary that contains a layout. When users navigate to the custom scalars dashboard, they will see a layout based on the proto provided to this function. Args: scalars_layout: The scalars_layout_pb2.Layout proto that specifies the layout. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[Graph Keys.SUMMARIES]`. Returns: A tensor summary op that writes the layout to disk.
juraj-google-style
def RegisterDecompressor(cls, decompressor):
    """Register a decompressor for a specific compression method.

    Args:
        decompressor (type): decompressor class.

    Raises:
        KeyError: if a decompressor for the method is already registered.
    """
    method_key = decompressor.COMPRESSION_METHOD.lower()
    if method_key in cls._decompressors:
        raise KeyError('Decompressor for compression method: {0:s} already set.'.format(decompressor.COMPRESSION_METHOD))
    cls._decompressors[method_key] = decompressor
Registers a decompressor for a specific compression method. Args: decompressor (type): decompressor class. Raises: KeyError: if the corresponding decompressor is already set.
codesearchnet
def add(reader, writer, column, start, stop, value):
    """Add a value to one column over an inclusive range of rows.

    Args:
        reader: A FileRecordStream object with input data.
        writer: A FileRecordStream object to write output data to.
        column: The column of data to modify.
        start: The first row in the range to modify.
        stop: The last row in the range to modify.
        value: The value to add.
    """
    for index, row in enumerate(reader):
        if start <= index <= stop:
            # Coerce the stored cell to value's type before adding.
            row[column] = type(value)(row[column]) + value
        writer.appendRecord(row)
Adds a value over a range of rows. Args: reader: A FileRecordStream object with input data. writer: A FileRecordStream object to write output data to. column: The column of data to modify. start: The first row in the range to modify. stop: The last row in the range to modify (inclusive). value: The value to add.
juraj-google-style
def setValues(self, values):
    """Set the tuples in this set (valid only for non-indexed sets).

    Args:
        values: A list/set of values or tuples, or a numpy array. Strings are
            dispatched to the string API, reals to the double API, and tuples
            (whose arity must match the set's arity) to the tuple API.
    """
    if isinstance(values, (list, set)):
        # Any string member forces the whole collection through the
        # string-valued API.
        if any(isinstance(value, basestring) for value in values):
            values = list(map(str, values))
            self._impl.setValuesStr(values, len(values))
        elif all(isinstance(value, Real) for value in values):
            values = list(map(float, values))
            self._impl.setValuesDbl(values, len(values))
        elif all(isinstance(value, tuple) for value in values):
            self._impl.setValues(Utils.toTupleArray(values), len(values))
        else:
            # Mixed/unsupported member types are rejected.
            raise TypeError
    else:
        # numpy arrays are converted via DataFrame and re-dispatched.
        if np is not None and isinstance(values, np.ndarray):
            self.setValues(DataFrame.fromNumpy(values).toList())
            return
        # Anything else is delegated to the generic Entity implementation.
        Entity.setValues(self, values)
Set the tuples in this set. Valid only for non-indexed sets. Args: values: A list of tuples or a :class:`~amplpy.DataFrame`. In the case of a :class:`~amplpy.DataFrame`, the number of indexing columns of the must be equal to the arity of the set. In the case of a list of tuples, the arity of each tuple must be equal to the arity of the set. For example, considering the following AMPL entities and corresponding Python objects: .. code-block:: ampl set A := 1..2; param p{i in A} := i+10; set AA; The following is valid: .. code-block:: python A, AA = ampl.getSet('A'), ampl.getSet('AA') AA.setValues(A.getValues()) # AA has now the members {1, 2}
codesearchnet
def _rename_if_arg_found_and_add_loss_reduction_transformer(parent, node, full_name, name, logs, arg_names=None, arg_ok_predicate=None, remove_if_ok=False, message=None):
    """Combination of _rename_if_arg_found and _add_loss_reduction transformers.

    Applies the rename transformer once per argument name, threading the
    (possibly rewritten) node through each application.

    Args:
        parent: Parent of node.
        node: ast.Call node to maybe modify.
        full_name: full name of function to modify.
        name: name of function to modify.
        logs: list of logs to append to.
        arg_names: list of names of the arguments to look for.
        arg_ok_predicate: predicate called with the argument value's AST;
            returns whether the argument value is allowed.
        remove_if_ok: remove the argument if present and ok as determined by
            arg_ok_predicate.
        message: message to print if a non-ok arg is found (and hence the
            function is renamed to its compat.v1 version).

    Returns:
        The (possibly modified) node.
    """
    for arg_name in arg_names:
        updated_node = _rename_if_arg_found_transformer(parent, node, full_name, name, logs, arg_name, arg_ok_predicate, remove_if_ok, message)
        if updated_node:
            node = updated_node
    return node
Combination of _rename_if_arg_found and _add_loss_reduction transformers. Args: parent: Parent of node. node: ast.Call node to maybe modify. full_name: full name of function to modify name: name of function to modify logs: list of logs to append to arg_names: list of names of the argument to look for arg_ok_predicate: predicate callable with the ast of the argument value, returns whether the argument value is allowed. remove_if_ok: remove the argument if present and ok as determined by arg_ok_predicate. message: message to print if a non-ok arg is found (and hence, the function is renamed to its compat.v1 version). Returns: node, if it was modified, else None.
github-repos
def locked_put(self, credentials):
    """Write a Credentials object to the Django datastore.

    Args:
        credentials: Credentials, the credentials to store.
    """
    lookup = {self.key_name: self.key_value}
    entity, _ = self.model_class.objects.get_or_create(**lookup)
    setattr(entity, self.property_name, credentials)
    entity.save()
Write a Credentials to the Django datastore. Args: credentials: Credentials, the credentials to store.
codesearchnet
def tool(self):
    """The tool that was in use during this event.

    Note:
        Physical tool tracking requires hardware support. If unavailable,
        libinput creates one tool per type per tablet.

    Returns:
        ~libinput.define.TabletTool: The tool triggering this event.
    """
    handle = self._libinput.libinput_event_tablet_tool_get_tool(self._handle)
    return TabletTool(handle, self._libinput)
The tool that was in use during this event. If the caller keeps a reference to a tool, the tool object will compare equal to the previously obtained tool object. Note: Physical tool tracking requires hardware support. If unavailable, libinput creates one tool per type per tablet. See `Tracking unique tools`_ for more details. Returns: ~libinput.define.TabletTool: The new tool triggering this event.
codesearchnet
def __init__(self, parent=None, range_offset=None, range_size=None, **kwargs):
    """Initialize a data range path specification.

    Note that the data range path specification must have a parent.

    Args:
        parent (Optional[PathSpec]): parent path specification.
        range_offset (Optional[int]): start offset of the data range.
        range_size (Optional[int]): size of the data range.

    Raises:
        ValueError: when range offset, range size or parent are not set
            (a falsy value such as 0 is also rejected).
    """
    if not parent or not range_offset or not range_size:
        raise ValueError('Missing range offset, range size or parent value.')
    super(DataRangePathSpec, self).__init__(parent=parent, **kwargs)
    self.range_offset = range_offset
    self.range_size = range_size
Initializes a path specification. Note that the data range path specification must have a parent. Args: parent (Optional[PathSpec]): parent path specification. range_offset (Optional[int]): start offset of the data range. range_size (Optional[int]): size of the data range. Raises: ValueError: when range offset, range offset or parent are not set.
juraj-google-style
def get_chunk_size(path):
    """Get the correct chunk size for the FileSystem backing a path.

    Args:
        path: string path to check.

    Returns:
        int: chunk size for parallelization in the FS operations.
    """
    return FileSystems.get_filesystem(path).CHUNK_SIZE
Get the correct chunk size for the FileSystem. Args: path: string path that needs to be checked. Returns: integer size for parallelization in the FS operations.
github-repos
def CreateBudget(client):
    """Create a budget and return its budgetId.

    Args:
        client: An AdWordsClient instance.

    Returns:
        An int budgetId for the created Budget.
    """
    import uuid
    budget_service = client.GetService('BudgetService', version='v201809')
    # NOTE(review): the original dict literal was truncated in this copy of
    # the file; fields reconstructed from the standard AdWords budget example.
    budget = {
        'name': 'Interplanetary Cruise App Budget #%s' % uuid.uuid4(),
        'microAmount': 50000000,
        'isExplicitlyShared': False,
    }
    budget_operations = [{'operator': 'ADD', 'operand': budget}]
    budget_id = budget_service.mutate(budget_operations)['value'][0]['budgetId']
    return budget_id
Creates a budget and returns its budgetId. Args: client: An AdWordsClient instance. Returns: An int budgetId for the created Budget.
codesearchnet
def parse_document(text, options=0):
    """Parse a document and return the root node.

    Args:
        text (str): The text to parse.
        options (int): The cmark options.

    Returns:
        Any: Opaque reference to the root node of the parsed syntax tree.
    """
    data = text.encode('utf-8')
    return _cmark.lib.cmark_parse_document(data, len(data), options)
Parse a document and return the root node. Args: text (str): The text to parse. options (int): The cmark options. Returns: Any: Opaque reference to the root node of the parsed syntax tree.
juraj-google-style
def _extractBoldNumbers(self, out, start_line):
    """Extract all numbers rendered with the bold font attribute.

    Args:
        out: An instance of RichTextLines.
        start_line: 0-based line index to start from.

    Returns:
        A list of floats.
    """
    values = []
    for line_index in range(start_line, len(out.lines)):
        segments = out.font_attr_segs.get(line_index)
        if segments is None:
            continue
        for begin, end, attr_value in segments:
            if attr_value == 'bold':
                values.append(float(out.lines[line_index][begin:end]))
    return values
Extract all numbers that have the bold font attribute. Args: out: An instance of RichTextLines. start_line: 0-based index to start from. Returns: A list of floats.
github-repos
def __init__(self, conf_path, project_key=None, run_asyncore_thread=True):
    """Initialize the object.

    Args:
        conf_path (str): See :attr:`conf_path`.
        project_key (str, default None): See :attr:`project_key`. If not
            set, the root of the database is used (this may cause
            performance issues).
        run_asyncore_thread (bool, default True): Run the external asyncore
            thread which handles connections to the database.
    """
    self.conf_path = conf_path
    super(ZEOConfWrapper, self).__init__(
        project_key=project_key,
        run_asyncore_thread=run_asyncore_thread,
    )
Initialize the object. Args: conf_path (str): See :attr:`conf_path`. project_key (str, default None): See :attr:`project_key`. If not set, the root of the database is used (this may cause performace issues). run_asyncore_thread (bool, default True): Run external asyncore thread, which handles connections to database? Default True.
juraj-google-style
def initialise_site_lookup_table(self):
    """Create a lookup table mapping site numbers to sites in this lattice.

    After this call, `self.site_lookup[n]` returns the site whose
    identifying number is `n`.

    Args:
        None

    Returns:
        None
    """
    self.site_lookup = {site.number: site for site in self.sites}
Create a lookup table allowing sites in this lattice to be queried using `self.site_lookup[n]` where `n` is the identifying site number. Args: None Returns: None
juraj-google-style
def start_raylet(self, use_valgrind=False, use_profiler=False):
    """Start the raylet.

    Args:
        use_valgrind (bool): True if we should start the process in valgrind.
        use_profiler (bool): True if we should start the process in the
            valgrind profiler.
    """
    # Dedicated log files for the raylet's stdout/stderr.
    (stdout_file, stderr_file) = self.new_log_files('raylet')
    process_info = ray.services.start_raylet(
        self._redis_address,
        self._node_ip_address,
        self._raylet_socket_name,
        self._plasma_store_socket_name,
        self._ray_params.worker_path,
        self._temp_dir,
        self._ray_params.num_cpus,
        self._ray_params.num_gpus,
        self._ray_params.resources,
        self._ray_params.object_manager_port,
        self._ray_params.node_manager_port,
        self._ray_params.redis_password,
        use_valgrind=use_valgrind,
        use_profiler=use_profiler,
        stdout_file=stdout_file,
        stderr_file=stderr_file,
        config=self._config,
        include_java=self._ray_params.include_java,
        java_worker_options=self._ray_params.java_worker_options,
        load_code_from_local=self._ray_params.load_code_from_local)
    # Only one raylet may be started per node object.
    assert (ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes)
    self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info]
Start the raylet. Args: use_valgrind (bool): True if we should start the process in valgrind. use_profiler (bool): True if we should start the process in the valgrind profiler.
codesearchnet
def check_onnxruntime_requirements(minimum_version: Version):
    """Check that onnxruntime is installed and recent enough.

    Args:
        minimum_version: The minimum acceptable onnxruntime version.

    Raises:
        ImportError: If onnxruntime is not installed, or the installed
            version is older than `minimum_version`.
    """
    # Keep the try narrow: previously a version-too-old ImportError raised
    # below was swallowed by this handler and misreported as "not installed".
    try:
        import onnxruntime
    except ImportError:
        raise ImportError("onnxruntime doesn't seem to be currently installed. Please install the onnxruntime by running `pip install onnxruntime` and relaunch the conversion.")
    ort_version = parse(onnxruntime.__version__)
    # Compare against the caller-supplied minimum; the parameter was
    # previously ignored in favor of a module-level constant.
    if ort_version < minimum_version:
        raise ImportError(f'We found an older version of onnxruntime ({onnxruntime.__version__}) but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\nPlease update onnxruntime by running `pip install --upgrade onnxruntime`')
Check onnxruntime is installed and if the installed version match is recent enough Raises: ImportError: If onnxruntime is not installed or too old version is found
github-repos
def dispatch_non_api_requests(self, request, start_response):
    """Dispatch this request if it targets a reserved URL.

    Also handles OPTIONS CORS preflight requests.

    Args:
        request: An ApiRequest, the request from the user.
        start_response: A function with semantics defined in PEP-333.

    Returns:
        None if the request doesn't match any reserved URL; otherwise the
        response body.
    """
    for path_regex, dispatch_function in self._dispatchers:
        if path_regex.match(request.relative_url):
            return dispatch_function(request, start_response)
    if request.http_method != 'OPTIONS':
        return None
    cors_handler = self._create_cors_handler(request)
    if cors_handler.allow_cors_request:
        # Respond to the CORS preflight with an empty 200.
        return util.send_wsgi_response('200', [], '', start_response, cors_handler)
    return None
Dispatch this request if this is a request to a reserved URL. If the request matches one of our reserved URLs, this calls start_response and returns the response body. This also handles OPTIONS CORS requests. Args: request: An ApiRequest, the request from the user. start_response: A function with semantics defined in PEP-333. Returns: None if the request doesn't match one of the reserved URLs this handles. Otherwise, returns the response body.
codesearchnet
def join(self, basepath: str, *paths: str) -> str:
    """Join two or more pathname components for the filesystem.

    Args:
        basepath: string path of the first component of the path.
        paths: path components to be added.

    Returns:
        Full path after combining all the passed components.
    """
    # Abstract: concrete filesystem subclasses must override this.
    raise NotImplementedError
Join two or more pathname components for the filesystem Args: basepath: string path of the first component of the path paths: path components to be added Returns: full path after combining all the passed components
github-repos
def DecryptMessage(self, encrypted_response):
    """Decrypt the serialized, encrypted string.

    Args:
        encrypted_response: A serialized and encrypted string.

    Returns:
        A Packed_Message_List rdfvalue.

    Raises:
        DecodingError: If deserialization or decryption fails.
    """
    try:
        # Both deserialization and message decoding failures are mapped to
        # a single DecodingError for the caller.
        comms = rdf_flows.ClientCommunication.FromSerializedString(encrypted_response)
        return self.DecodeMessages(comms)
    except (rdfvalue.DecodeError, type_info.TypeValueError, ValueError, AttributeError) as e:
        raise DecodingError("Error while decrypting messages: %s" % e)
Decrypt the serialized, encrypted string. Args: encrypted_response: A serialized and encrypted string. Returns: a Packed_Message_List rdfvalue
juraj-google-style
class _Call(beam.PTransform[beam.PCollection[RequestT], beam.PCollection[ResponseT]]):
    """(Internal-only) PTransform that invokes a remote function on each
    element of the input PCollection.

    Uses a `Caller` object to perform the actual API calls; setup/teardown of
    clients is delegated to the caller's context-manager protocol inside the
    DoFn. A timeout bounds each call's duration.

    Args:
        caller: a `Caller` object that invokes the API call.
        timeout (float): seconds to wait for a response from the API.
        should_backoff: (Optional) provides methods for backoff.
        repeater: (Optional) provides methods to repeat requests to the API.
        throttler: (Optional) provides methods to pre-throttle a request.
    """

    def __init__(self, caller: Caller[RequestT, ResponseT], timeout: Optional[float]=DEFAULT_TIMEOUT_SECS, should_backoff: Optional[ShouldBackOff]=None, repeater: Repeater=None, throttler: PreCallThrottler=None):
        self._caller = caller
        self._timeout = timeout
        # NOTE(review): should_backoff is stored but not forwarded to the
        # DoFn below — confirm whether that is intentional.
        self._should_backoff = should_backoff
        self._repeater = repeater
        self._throttler = throttler

    def expand(self, requests: beam.PCollection[RequestT]) -> beam.PCollection[ResponseT]:
        # Fan each request through the DoFn that performs the remote call.
        return requests | beam.ParDo(_CallDoFn(self._caller, self._timeout, self._repeater, self._throttler))
(Internal-only) PTransform that invokes a remote function on each element of the input PCollection. This PTransform uses a `Caller` object to invoke the actual API calls, and uses ``__enter__`` and ``__exit__`` to manage setup and teardown of clients when applicable. Additionally, a timeout value is specified to regulate the duration of each call, defaults to 30 seconds. Args: caller: a `Caller` object that invokes API call. timeout (float): timeout value in seconds to wait for response from API. should_backoff: (Optional) provides methods for backoff. repeater: (Optional) provides methods to repeat requests to API. throttler: (Optional) provides methods to pre-throttle a request.
github-repos
def convert_timestamps_to_datetimes(ts: Iterable[Timestamp]) -> List[datetime.datetime]:
    """Convert unix timestamps in seconds to a list of UTC datetimes.

    Example:
    ```python
    > convert_timestamps_to_datetimes([0])
    [datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)]
    ```

    Args:
        ts: Iterable of timestamps, in seconds.

    Returns:
        List of UTC datetimes.
    """
    return list(map(convert_timestamp_to_datetime, ts))
Converts unix timestamps in seconds to a list of datetimes (UTC). Example: ```python > convert_timestamps_to_datetimes([0, 1689791856]) [datetime.datetime(1970, 1, 1, 0, 0, tzinfo=datetime.timezone.utc), datetime.datetime(2023, 7, 19, 18, 37, 36, tzinfo=datetime.timezone.utc)] Args: ts: Iterable of timestamps, in seconds. Returns: List of UTC datetimes.
github-repos
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): for subkey in registry_key.GetSubkeys(): name = subkey.name if not name: continue values_dict = {} values_dict['Volume'] = name label_value = subkey.GetValueByName('_LabelFromReg') if label_value: values_dict['Label'] = label_value.GetDataAsObject() if name.startswith('{'): values_dict['Type'] = 'Volume' elif name.startswith(' values_dict['Type'] = 'Remote Drive' server_name, _, share_name = name[2:].partition(' values_dict['Remote_Server'] = server_name values_dict['Share_Name'] = '\\{0:s}'.format( share_name.replace(' else: values_dict['Type'] = 'Drive' event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = subkey.offset event_data.regvalue = values_dict event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
juraj-google-style
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    """[`FlaxLogitsWarper`] that performs top-k filtering, i.e. restricting
    scores to the k highest-probability vocabulary elements.

    Args:
        top_k (`int`):
            The number of highest probability vocabulary tokens to keep for
            top-k-filtering.
        filter_value (`float`, *optional*, defaults to -inf):
            All filtered values will be set to this float value.
        min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens that cannot be filtered.
    """

    def __init__(self, top_k: int, filter_value: float=-float('Inf'), min_tokens_to_keep: int=1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')
        # Never keep fewer than min_tokens_to_keep tokens.
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        # Cannot keep more tokens than the vocabulary holds.
        k = min(self.top_k, scores.shape[-1])
        topk_scores, topk_indices = lax.top_k(scores, k)
        # Scatter the kept scores back into a flat, filter_value-initialized
        # buffer; per-row offsets map (row, col) into the flat index space.
        row_offsets = (jnp.arange(batch_size) * vocab_size)[:, None]
        flat_positions = (topk_indices + row_offsets).flatten()
        flat_scores = jnp.full(batch_size * vocab_size, self.filter_value)
        flat_scores = flat_scores.at[flat_positions].set(topk_scores.flatten())
        return flat_scores.reshape(batch_size, vocab_size)
[`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to -inf): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered.
github-repos
def _create_controller_info_record(self, controller_module_name):
    """Creates a controller info record for a particular controller type.

    Info is retrieved via the controller module's optional `get_info`
    function, applied to a shallow copy of the spawned controller objects.
    Non-YAML-serializable info is coerced to a string so the record can
    always be dumped.

    Args:
        controller_module_name: string, the name of the controller module
            to retrieve info from.

    Returns:
        A records.ControllerInfoRecord object.
    """
    module = self._controller_modules[controller_module_name]
    controller_info = None
    try:
        # Shallow-copy so `get_info` cannot mutate the tracked object list.
        controller_info = module.get_info(
            copy.copy(self._controller_objects[controller_module_name]))
    except AttributeError:
        # `get_info` is optional for controller modules.
        logging.warning(
            'No optional debug info found for controller %s. '
            'To provide it, implement `get_info`.', controller_module_name)

    try:
        yaml.dump(controller_info)
    except TypeError:
        logging.warning(
            'The info of controller %s in class "%s" is not '
            'YAML serializable! Coercing it to string.',
            controller_module_name, self._class_name)
        controller_info = str(controller_info)

    return records.ControllerInfoRecord(
        self._class_name, module.MOBLY_CONTROLLER_CONFIG_NAME,
        controller_info)
Creates controller info record for a particular controller type. Info is retrieved from all the controller objects spawned from the specified module, using the controller module's `get_info` function. Args: controller_module_name: string, the name of the controller module to retrieve info from. Returns: A records.ControllerInfoRecord object.
juraj-google-style
from dataclasses import dataclass


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    """Data collator that dynamically pads the inputs received.

    Audio features are padded by the processor's feature extractor and the
    label token ids by its tokenizer, since the two need different padding.
    Decorated with @dataclass so the three fields below form the
    constructor (the annotated fields have no __init__ otherwise).

    Args:
        processor ([`WhisperProcessor`])
            The processor used for processing the data.
        decoder_start_token_id (`int`)
            The begin-of-sentence of the decoder.
        forward_attention_mask (`bool`)
            Whether to return attention_mask.
    """

    processor: Any
    decoder_start_token_id: int
    forward_attention_mask: bool

    def __call__(self, features: list[dict[str, Union[list[int], torch.Tensor]]]) -> dict[str, torch.Tensor]:
        # Split inputs and labels since they have to be padded differently.
        model_input_name = self.processor.model_input_names[0]
        input_features = [{model_input_name: feature[model_input_name]} for feature in features]
        label_features = [{'input_ids': feature['labels']} for feature in features]

        batch = self.processor.feature_extractor.pad(input_features, return_tensors='pt')
        if self.forward_attention_mask:
            batch['attention_mask'] = torch.LongTensor([feature['attention_mask'] for feature in features])

        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors='pt')

        # Replace tokenizer padding with -100 so those positions are ignored
        # by the loss.
        labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100)

        # If every sequence starts with the decoder start token it was added
        # during tokenization; cut it, as the model prepends it again.
        if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch['labels'] = labels
        return batch
Data collator that will dynamically pad the inputs received. Args: processor ([`WhisperProcessor`]) The processor used for processing the data. decoder_start_token_id (`int`) The begin-of-sentence of the decoder. forward_attention_mask (`bool`) Whether to return attention_mask.
github-repos
def query_google(point, max_distance, key):
    """Queries google maps API for locations near a point.

    Args:
        point (:obj:`Point`): Point location to query
        max_distance (float): Search radius, in meters
        key (str): Valid google maps api key
    Returns:
        :obj:`list` of :obj:`dict`: List of locations, each with keys
            'label', 'distance', 'types' and 'suggestion_type' ('GOOGLE').
            Empty when no key is given or the request fails.
    """
    if not key:
        return []

    # Query the cache once and reuse the result (the original called
    # from_cache twice: once for the check, once for the return value).
    cached = from_cache(GG_CACHE, point, max_distance)
    if cached:
        return cached

    req = requests.get(GOOGLE_PLACES_URL % (
        point.lat, point.lon, max_distance, key
    ))
    if req.status_code != 200:
        return []

    results = req.json()['results']
    final_results = [
        {
            'label': local['name'],
            'distance': Point(
                local['geometry']['location']['lat'],
                local['geometry']['location']['lng'],
                None,
            ).distance(point),
            'types': local['types'],
            'suggestion_type': 'GOOGLE',
        }
        for local in results
    ]
    google_insert_cache(point, final_results)
    return final_results
Queries google maps API for locations near a point.

Args:
    point (:obj:`Point`): Point location to query
    max_distance (float): Search radius, in meters
    key (str): Valid google maps api key
Returns:
    :obj:`list` of :obj:`dict`: List of locations with the following format:
        {
            'label': 'Coffee house',
            'distance': 120.5,
            'types': ['cafe', 'food'],
            'suggestion_type': 'GOOGLE'
        }
juraj-google-style
def _ni(field, filter_value): valid = False if field not in filter_value: valid = True return valid
Validate field **NOT IN** string or list.

Args:
    field: The value to look for.
    filter_value (string | list): A string or list of values.

Returns:
    (boolean): True if ``field`` is not contained in ``filter_value``.
juraj-google-style
def del_instance(self, obj):
    """Remove any stored instance methods that belong to an object.

    Args:
        obj: The instance object to remove.
    """
    # Collect keys first; deleting while iterating would mutate the
    # mapping under the iterator.
    stale_keys = {
        wrkey for wrkey, instance in self.iter_instances() if instance is obj
    }
    for wrkey in stale_keys:
        del self[wrkey]
Remove any stored instance methods that belong to an object Args: obj: The instance object to remove
juraj-google-style
def _AddExtractionProcessStatusTableRow(self, process_status, table_view): used_memory = self._FormatSizeInUnitsOf1024(process_status.used_memory) sources = '' if (process_status.number_of_produced_sources is not None and process_status.number_of_produced_sources_delta is not None): sources = '{0:d} ({1:d})'.format( process_status.number_of_produced_sources, process_status.number_of_produced_sources_delta) events = '' if (process_status.number_of_produced_events is not None and process_status.number_of_produced_events_delta is not None): events = '{0:d} ({1:d})'.format( process_status.number_of_produced_events, process_status.number_of_produced_events_delta) table_view.AddRow([ process_status.identifier, process_status.pid, process_status.status, used_memory, sources, events, process_status.display_name])
Adds an extraction process status table row. Args: process_status (ProcessStatus): processing status. table_view (CLITabularTableView): table view.
juraj-google-style
def _timesfm_shift_padded_seq(mask: torch.Tensor, seq: torch.Tensor) -> torch.Tensor: batch_size, num_seq, feature_dim = seq.shape new_mask: torch.BoolTensor = mask == 0 indices = new_mask.to(torch.int32).argmax(dim=1) indices[~new_mask.any(dim=1)] = -1 idx_range = torch.arange(num_seq, device=seq.device).view(1, -1, 1).expand(batch_size, -1, feature_dim) shifted_idx = (idx_range - indices[:, None, None]) % num_seq shifted_seq = seq.gather(1, shifted_idx) return shifted_seq
Shifts rows of seq based on the first 0 in each row of the mask. Args: mask: mask tensor of shape [B, N] seq: seq tensor of shape [B, N, P] Returns: The shifted sequence.
github-repos