code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def from_row_limits(cls, row_limits, validate=True, dtype=None, dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `row_limits`.

    Equivalent to building it from `concat([0, row_limits], axis=0)` as
    row splits.

    Args:
        row_limits: A 1-D integer tensor with shape `[nrows]`; must be sorted
            in ascending order.
        validate: If True, add assertion ops checking that `row_limits` forms
            a valid partition. Must be a Python bool.
        dtype: Optional dtype for the RowPartition.
        dtype_hint: Optional soft-preference dtype used when `dtype` is None.

    Returns:
        A `RowPartition`.

    Raises:
        TypeError: if `validate` is not a Python bool.
    """
    if not isinstance(validate, bool):
        raise TypeError('validate must have type bool')
    with ops.name_scope(None, 'RowPartitionFromRowLimits', [row_limits]):
        row_limits = cls._convert_row_partition(row_limits, 'row_limits', dtype_hint=dtype_hint, dtype=dtype)
        row_limits.shape.assert_has_rank(1)
        if validate:
            msg = 'Arguments to from_row_limits do not form a valid RaggedTensor'
            # Only the first element needs a non-negativity check;
            # monotonicity covers the remaining elements.
            checks = [check_ops.assert_rank(row_limits, 1, message=msg), check_ops.assert_non_negative(row_limits[:1], message=msg), _assert_monotonic_increasing(row_limits, message=msg)]
            row_limits = control_flow_ops.with_dependencies(checks, row_limits)
        # Prepend a zero to convert row limits into row splits.
        zero = array_ops.zeros([1], row_limits.dtype)
        row_splits = array_ops.concat([zero, row_limits], axis=0)
        return cls(row_splits=row_splits, internal=_row_partition_factory_key)
Creates a `RowPartition` with rows partitioned by `row_limits`. Equivalent to: `from_row_splits(values, concat([0, row_limits], axis=0))`. Args: row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in ascending order. validate: If true, then use assertions to check that the arguments form a valid `RowPartition`. dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of `row_limits`, dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. Returns: A `RowPartition`.
github-repos
def attribute(*args, **kw):
    """Registers a new attribute-only operator function in the test engine.

    Arguments:
        *args: variadic arguments forwarded to `operator`.
        **kw: variadic keyword arguments forwarded to `operator`.

    Returns:
        function
    """
    return operator(*args, kind=Operator.Type.ATTRIBUTE, **kw)
Registers a new attribute only operator function in the test engine. Arguments: *args: variadic arguments. **kw: variadic keyword arguments. Returns: function
juraj-google-style
def with_min_occurrence(self, min_occurrence):
    """Only show profiler nodes including no less than `min_occurrence` graph nodes.

    A "node" is a profiler output node: a python line (code view), an
    operation type (op view), or a graph node (graph/scope view).

    Args:
        min_occurrence: Only show nodes including no less than this.

    Returns:
        self, to allow chained configuration.
    """
    self._options.update(min_occurrence=min_occurrence)
    return self
Only show profiler nodes including no less than 'min_occurrence' graph nodes. A "node" means a profiler output node, which can be a python line (code view), an operation type (op view), or a graph node (graph/scope view). A python line includes all graph nodes created by that line, while an operation type includes all graph nodes of that type. Args: min_occurrence: Only show nodes including no less than this. Returns: self
github-repos
def _CreateFolder(self, parent, name, visible=True, description=None): folder = ET.SubElement(parent, 'Folder') name_tag = ET.SubElement(folder, 'name') name_tag.text = name if (description is not None): desc_tag = ET.SubElement(folder, 'description') desc_tag.text = description if (not visible): visibility = ET.SubElement(folder, 'visibility') visibility.text = '0' return folder
Create a KML Folder element. Args: parent: The parent ElementTree.Element instance. name: The folder name as a string. visible: Whether the folder is initially visible or not. description: A description string or None. Returns: The folder ElementTree.Element instance.
codesearchnet
def _ReadTableHeader(self, file_object, table_header_offset): data_type_map = self._GetDataTypeMap('keychain_table_header') (table_header, _) = self._ReadStructureFromFileObject(file_object, table_header_offset, data_type_map) return table_header
Reads the table header. Args: file_object (file): file-like object. table_header_offset (int): offset of the tables header relative to the start of the file. Returns: keychain_table_header: table header. Raises: ParseError: if the table header cannot be read.
codesearchnet
def __init__(self, datafile, logger, error_handler):
    """ProjectConfig init method to load and set project config data.

    Args:
        datafile: JSON string representing the project.
        logger: Provides a log message to send log messages to.
        error_handler: Provides a handle_error method to handle exceptions.

    Raises:
        UnsupportedDatafileVersionException: if the datafile version is not
            in SUPPORTED_VERSIONS.
    """
    config = json.loads(datafile)
    self.logger = logger
    self.error_handler = error_handler
    self.version = config.get('version')
    if self.version not in SUPPORTED_VERSIONS:
        raise exceptions.UnsupportedDatafileVersionException(
            enums.Errors.UNSUPPORTED_DATAFILE_VERSION.format(self.version)
        )
    self.account_id = config.get('accountId')
    self.project_id = config.get('projectId')
    self.revision = config.get('revision')
    self.groups = config.get('groups', [])
    self.experiments = config.get('experiments', [])
    self.events = config.get('events', [])
    self.attributes = config.get('attributes', [])
    self.audiences = config.get('audiences', [])
    self.typed_audiences = config.get('typedAudiences', [])
    self.feature_flags = config.get('featureFlags', [])
    self.rollouts = config.get('rollouts', [])
    self.anonymize_ip = config.get('anonymizeIP', False)
    self.bot_filtering = config.get('botFiltering', None)
    # Lookup maps keyed by id/key for O(1) entity access.
    self.group_id_map = self._generate_key_map(self.groups, 'id', entities.Group)
    self.experiment_key_map = self._generate_key_map(self.experiments, 'key', entities.Experiment)
    self.event_key_map = self._generate_key_map(self.events, 'key', entities.Event)
    self.attribute_key_map = self._generate_key_map(self.attributes, 'key', entities.Attribute)
    self.audience_id_map = self._generate_key_map(self.audiences, 'id', entities.Audience)
    # Typed-audience conditions are re-serialized to JSON strings; typed
    # audiences override plain audiences with the same id.
    for typed_audience in self.typed_audiences:
        typed_audience['conditions'] = json.dumps(typed_audience['conditions'])
    typed_audience_id_map = self._generate_key_map(self.typed_audiences, 'id', entities.Audience)
    self.audience_id_map.update(typed_audience_id_map)
    self.rollout_id_map = self._generate_key_map(self.rollouts, 'id', entities.Layer)
    # Rollout experiments are merged into the main experiment key map.
    for layer in self.rollout_id_map.values():
        for experiment in layer.experiments:
            self.experiment_key_map[experiment['key']] = entities.Experiment(**experiment)
    self.audience_id_map = self._deserialize_audience(self.audience_id_map)
    for group in self.group_id_map.values():
        experiments_in_group_key_map = self._generate_key_map(group.experiments, 'key', entities.Experiment)
        # Tag grouped experiments with their group id/policy.
        for experiment in experiments_in_group_key_map.values():
            experiment.__dict__.update({
                'groupId': group.id,
                'groupPolicy': group.policy
            })
        self.experiment_key_map.update(experiments_in_group_key_map)
    self.experiment_id_map = {}
    self.variation_key_map = {}
    self.variation_id_map = {}
    self.variation_variable_usage_map = {}
    for experiment in self.experiment_key_map.values():
        self.experiment_id_map[experiment.id] = experiment
        self.variation_key_map[experiment.key] = self._generate_key_map(
            experiment.variations, 'key', entities.Variation
        )
        self.variation_id_map[experiment.key] = {}
        for variation in self.variation_key_map.get(experiment.key).values():
            self.variation_id_map[experiment.key][variation.id] = variation
            self.variation_variable_usage_map[variation.id] = self._generate_key_map(
                variation.variables, 'id', entities.Variation.VariableUsage
            )
    self.feature_key_map = self._generate_key_map(self.feature_flags, 'key', entities.FeatureFlag)
    self.experiment_feature_map = {}
    for feature in self.feature_key_map.values():
        feature.variables = self._generate_key_map(feature.variables, 'key', entities.Variable)
        for exp_id in feature.experimentIds:
            self.experiment_feature_map[exp_id] = [feature.id]
            # NOTE(review): iteration stops at the first experiment that has
            # a groupId — presumably all experiments of one feature share a
            # mutex group; confirm against upstream behavior.
            experiment_in_feature = self.experiment_id_map[exp_id]
            if experiment_in_feature.groupId:
                feature.groupId = experiment_in_feature.groupId
                break
    self.forced_variation_map = {}
ProjectConfig init method to load and set project config data. Args: datafile: JSON string representing the project. logger: Provides a log message to send log messages to. error_handler: Provides a handle_error method to handle exceptions.
juraj-google-style
def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor:
    """Computes the column logits.

    Args:
        sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length,
            hidden_size)`): last hidden states of the model.
        cell_index (`ProductIndexMap`): index that groups tokens into cells.
        cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows *
            max_num_cols)`): mask for cells that exist in the table (i.e.
            that are not padding).
        allow_empty_column_selection (`bool`): whether to allow not selecting
            any column.

    Returns:
        column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`):
            column logits for every example in the batch.
    """
    # Per-token column logit: projection onto the learned column weights.
    token_logits = tf.einsum('bsj,j->bs', sequence_output, self.column_output_weights) + self.column_output_bias
    # Average token logits within each cell.
    cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)
    column_index = cell_index.project_inner(cell_logits_index)
    # Sum masked cell logits per column, then normalize by the cell count.
    column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)
    cell_count, _ = reduce_sum(cell_mask, column_index)
    column_logits /= cell_count + EPSILON_ZERO_DIVISION
    # Push padding columns (no real cells, excluding the special index 0)
    # towards log-zero so they are never selected.
    is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0))
    column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)
    if not allow_empty_column_selection:
        # Also suppress the "no column" option at index 0.
        column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32)
    return column_logits
Computes the column logits. Args: sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`tf.Tensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch.
github-repos
def generate_password(length=32):
    """Generate a cryptographically secure random string to use for passwords.

    The original line was truncated mid string literal; the character set
    below (letters, digits and common punctuation) is a reconstruction —
    TODO confirm the exact charset against the upstream repository.

    Args:
        length (int): Length of password, defaults to 32 characters.

    Returns:
        str: Randomly generated string of `length` characters.
    """
    charset = string.ascii_letters + string.digits + '!@#$%^&*'
    rng = random.SystemRandom()
    return ''.join(rng.choice(charset) for _ in range(length))
Generate a cryptographically secure random string to use for passwords Args: length (int): Length of password, defaults to 32 characters Returns: Randomly generated string
juraj-google-style
def save_counter(self):
    """An integer variable numbering the checkpoint events.

    Maintained by the underlying tf.train.Checkpoint object; starts at 0 and
    increments for each checkpoint event.

    Returns:
        The save counter variable.
    """
    checkpoint = self.checkpointer()
    return checkpoint.save_counter
An integer variable numbering the checkpoint events. This is maintained by the underlying tf.train.Checkpoint object employed by AsyncCheckpoint class. The number starts at 0 and gets incremented for each checkpoint event. Returns: The save counter variable.
github-repos
def generate_hashfile(directory, blacklist=_BLACKLIST):
    """Compute a hashfile for each file in `directory`, except blacklisted ones.

    Args:
        directory (str): Absolute or relative path to the directory.
        blacklist (list/set/tuple): Blacklisted filenames (filenames only,
            not paths).

    Returns:
        str: Hashfile content, one "<checksum> <filename>" line per file,
            sorted by filename.
    """
    checksums = generate_checksums(directory, blacklist)
    lines = ["%s %s\n" % (digest, filename) for filename, digest in sorted(checksums.items())]
    return "".join(lines)
Compute checksum for each file in `directory`, with exception of files specified in `blacklist`. Args: directory (str): Absolute or relative path to the directory. blacklist (list/set/tuple): List of blacklisted filenames. Only filenames are checked, not paths! Returns: str: Content of hashfile as it is specified in ABNF specification for \ project.
juraj-google-style
def handle_worker_messages(self, timeout):
    """Process messages placed in self.incoming_mailbox.

    Pops up to 20 messages from the backend and dispatches each one to
    self.handle_single_message.

    Args:
        timeout: How long to wait for an incoming message if the mailbox is
            empty right now.

    Returns:
        None
    """
    for message in self.messaging_backend.popn(self.incoming_mailbox, n=20):
        self.handle_single_message(message)
Read messages that are placed in self.incoming_mailbox, and then update the job states corresponding to each message. Args: timeout: How long to wait for an incoming message, if the mailbox is empty right now. Returns: None
codesearchnet
def load_flag_values(self, flags=None):
    """Load flag values given from command line flags.

    Each entry of `flags.config_value` is a "key=value" string; the value is
    parsed with the configured YAML module. Already-set keys are preserved.

    Args:
        flags: An argparse Namespace containing the command line flags;
            defaults to self._flags.
    """
    flags = self._flags if flags is None else flags
    yaml_module = self._modules['yaml']
    for keyval in flags.config_value:
        key, raw = keyval.split('=', 1)
        value = yaml_module.load(raw) if isinstance(raw, str) else raw
        if isinstance(key, bytes):
            key = key.decode()
        if isinstance(value, bytes):
            value = value.decode()
        self._flag_values.setdefault(key, value)
Load flag values given from command line flags. Args: flags: An argparse Namespace containing the command line flags.
juraj-google-style
def get_template_edit_url(self, template_id):
    """Retrieves an embedded template object for editing.

    The embedded object contains a template url that can be opened in an
    iFrame.

    Args:
        template_id (str): The id of the template to get an edit url for.

    Returns:
        An Embedded object.
    """
    edit_url = self.EMBEDDED_TEMPLATE_EDIT_URL + template_id
    return self._get_request().get(edit_url)
Retrieves an embedded template for editing Retrieves an embedded object containing a template url that can be opened in an iFrame. Args: template_id (str): The id of the template to get an edit url for Returns: An Embedded object
juraj-google-style
def get_substring_idxs(substr, string):
    """Return a list of start indexes of `substr` in `string`.

    If `substr` is not found, the list is empty.

    Fix: `substr` is documented as a literal substring, but was previously
    passed to `re.finditer` unescaped, so regex metacharacters (".", "*",
    "(", ...) were interpreted as patterns. `re.escape` makes the match
    literal.

    Arguments:
        substr (str): Substring to match.
        string (str): String to match in.

    Returns:
        list of int: Start indices of substr.
    """
    return [match.start() for match in re.finditer(re.escape(substr), string)]
Return a list of indexes of substr. If substr not found, list is empty. Arguments: substr (str): Substring to match. string (str): String to match in. Returns: list of int: Start indices of substr.
juraj-google-style
def run(self):
    """Configures and enables a CloudTrail trail and logging on one account.

    For every region: creates the trail if missing (the multi-region trail
    is only created in the designated global region), deletes misplaced or
    misnamed trails, promotes the global trail to multi-region if needed,
    and finally validates the surviving trail's settings.

    Fix: boto3's CloudTrail ``delete_trail`` takes the keyword ``Name``;
    the previous lowercase ``name=`` raised a ParamValidationError.

    Returns:
        None
    """
    for aws_region in AWS_REGIONS:
        self.log.debug('Checking trails for {}/{}'.format(self.account.account_name, aws_region))
        ct = self.session.client('cloudtrail', region_name=aws_region)
        trails = ct.describe_trails()
        if len(trails['trailList']) == 0:
            # No trail at all; only the global region gets one created.
            if aws_region == self.global_ct_region:
                self.create_cloudtrail(aws_region)
        else:
            for trail in trails['trailList']:
                if trail['Name'] in ('Default', self.trail_name):
                    if not trail['IsMultiRegionTrail']:
                        if trail['Name'] == self.trail_name and self.global_ct_region == aws_region:
                            # Promote our single-region trail in the global
                            # region to a multi-region trail.
                            ct.update_trail(Name=trail['Name'], IncludeGlobalServiceEvents=True, IsMultiRegionTrail=True)
                            auditlog(event='cloudtrail.update_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'changes': [{'setting': 'IsMultiRegionTrail', 'oldValue': False, 'newValue': True}]})
                        else:
                            # Fix: keyword must be `Name`, not `name`.
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(event='cloudtrail.delete_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'reason': 'Incorrect region, name or not multi-regional'})
                    elif trail['HomeRegion'] == aws_region:
                        if (self.global_ct_region != aws_region) or (trail['Name'] == 'Default'):
                            ct.delete_trail(Name=trail['Name'])
                            auditlog(event='cloudtrail.delete_trail', actor=self.ns, data={'trailName': trail['Name'], 'account': self.account.account_name, 'region': aws_region, 'reason': 'Incorrect name or region for multi-region trail'})
        # Re-read and validate whatever trail remains in this region.
        trails = ct.describe_trails()
        for trail in trails['trailList']:
            if trail['Name'] == self.trail_name and trail['HomeRegion'] == aws_region:
                self.validate_trail_settings(ct, aws_region, trail)
Configures and enables a CloudTrail trail and logging on a single AWS Account. Has the capability to create both single region and multi-region trails. Will automatically create SNS topics, subscribe to SQS queues and turn on logging for the account in question, as well as reverting any manual changes to the trails if applicable. Returns: None
codesearchnet
def get_collection(self, lang=None, task=None):
    """Return the collection that represents a specific language or task.

    Args:
        lang (string): Language code.
        task (string): Task name.

    Raises:
        ValueError: if neither `lang` nor `task` is given.
        LanguageNotSupported / TaskNotSupported: if lookup fails.
    """
    if lang:
        collection_id = '{}{}'.format(Downloader.LANG_PREFIX, lang)
    elif task:
        collection_id = '{}{}'.format(Downloader.TASK_PREFIX, task)
    else:
        raise ValueError('You should pass either the task or the lang')
    try:
        return self.info(collection_id)
    except ValueError as e:
        if lang:
            raise LanguageNotSupported('Language {} is not supported'.format(collection_id))
        if task:
            raise TaskNotSupported('Task {} is not supported'.format(collection_id))
Return the collection that represents a specific language or task. Args: lang (string): Language code. task (string): Task name.
codesearchnet
def remove_duplicate_sg(security_groups):
    """Removes duplicate Security Groups that share a same name alias.

    Args:
        security_groups (list): Security group ids to compare against
            SECURITYGROUP_REPLACEMENTS.

    Returns:
        security_groups (list): The list with duplicate aliases removed
            (mutated in place).
    """
    for preferred, duplicate in SECURITYGROUP_REPLACEMENTS.items():
        if preferred in security_groups and duplicate in security_groups:
            LOG.info('Duplicate SG found. Removing %s in favor of %s.', duplicate, preferred)
            security_groups.remove(duplicate)
    return security_groups
Removes duplicate Security Groups that share a same name alias Args: security_groups (list): A list of security group id to compare against SECURITYGROUP_REPLACEMENTS Returns: security_groups (list): A list of security groups with duplicate aliases removed
codesearchnet
def numeric_columns(self, include_bool=True):
    """Returns the names of the numeric columns of the Manager.

    Args:
        include_bool (bool): Whether boolean columns count as numeric.

    Returns:
        List of column names with a numeric dtype.
    """
    selected = []
    for name, dtype in zip(self.columns, self.dtypes):
        if not is_numeric_dtype(dtype):
            continue
        # Booleans are numeric dtypes; drop them only when excluded.
        if include_bool or dtype != np.bool_:
            selected.append(name)
    return selected
Returns the numeric columns of the Manager. Args: include_bool (bool): Whether to include boolean columns. Returns: List of names of the numeric columns.
codesearchnet
def rematch_entry(envkernel, gamma=0.1, threshold=1e-6):
    """Compute the REMatch global similarity between two structures A and B.

    Uses the Sinkhorn algorithm as reported in Phys. Chem. Chem. Phys.,
    2016, 18, p. 13768.

    Args:
        envkernel: NxM environment-kernel matrix between structure A (N
            atoms) and structure B (M atoms).
        gamma: controls the interpolation between best match (gamma -> 0)
            and average kernel (gamma -> inf).
        threshold: convergence tolerance for the Sinkhorn iteration.

    Returns:
        float: the global similarity value.
    """
    n, m = envkernel.shape
    K = np.exp(-(1 - envkernel) / gamma)
    en = np.ones((n,)) / float(n)
    em = np.ones((m,)) / float(m)
    u = np.ones((n,)) / n
    v = np.ones((m,)) / m
    # Kept for parity with the original implementation; not used below.
    Kp = (1 / en).reshape(-1, 1) * K
    iteration = 0
    err = 1.0
    while err > threshold:
        u_prev, v_prev = u, v
        v = em / K.T.dot(u)
        u = en / K.dot(v)
        # Convergence is only re-evaluated on iterations not divisible by 5.
        if iteration % 5:
            err = ((u - u_prev) ** 2).sum() / (u ** 2).sum() + ((v - v_prev) ** 2).sum() / (v ** 2).sum()
        iteration += 1
    transport = K * u.reshape((-1, 1)) * v
    return (transport * envkernel).sum()
Compute the global similarity between two structures A and B. It uses the Sinkhorn algorithm as reported in: Phys. Chem. Chem. Phys., 2016, 18, p. 13768 Args: envkernel: NxM matrix of structure A with N and structure B with M atoms gamma: parameter to control between best match gamma = 0 and average kernel gamma = inf. threshold: convergence tolerance for the Sinkhorn iteration. Returns: float: the global (REMatch) similarity value.
juraj-google-style
def build(cls, seqs: Iterable[int], uid: bool=False) -> 'SequenceSet':
    """Build a new sequence set that contains the given values using as few
    groups as possible, collapsing consecutive runs into (start, end) ranges.

    Args:
        seqs: The sequence values to build; duplicates are discarded.
        uid: True if the sequences refer to message UIDs.

    Returns:
        A SequenceSet over the grouped values.
    """
    seqs_list = sorted(set(seqs))
    groups: List[Union[(int, Tuple[(int, int)])]] = []
    # `group` is the run currently being extended: a lone int, or an
    # inclusive (start, end) range.  NOTE(review): empty `seqs` raises
    # IndexError here — confirm callers never pass an empty iterable.
    group: Union[(int, Tuple[(int, int)])] = seqs_list[0]
    for i in range(1, len(seqs_list)):
        group_i = seqs_list[i]
        if isinstance(group, int):
            if (group_i == (group + 1)):
                # Consecutive value: promote the lone int to a range.
                group = (group, group_i)
            else:
                groups.append(group)
                group = group_i
        elif isinstance(group, tuple):
            if (group_i == (group[1] + 1)):
                # Consecutive value: extend the current range.
                group = (group[0], group_i)
            else:
                groups.append(group)
                group = group_i
    groups.append(group)
    return SequenceSet(groups, uid)
Build a new sequence set that contains the given values using as few groups as possible. Args: seqs: The sequence values to build. uid: True if the sequences refer to message UIDs.
codesearchnet
def match_global_phase(a: np.ndarray, b: np.ndarray
                       ) -> Tuple[np.ndarray, np.ndarray]:
    """Phases the given matrices so that they agree on the phase of one entry.

    The position with the largest magnitude entry of `b` is used when
    computing the phase difference, to maximize precision.

    Args:
        a: A numpy array.
        b: Another numpy array.

    Returns:
        A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
        If the shapes differ, (a, b) is returned unchanged.
    """
    if a.shape != b.shape:
        return a, b
    # Anchor on the largest-magnitude entry of b.
    k = max(np.ndindex(*a.shape), key=lambda idx: abs(b[idx]))

    def phase_factor(v):
        re_part = np.real(v)
        im_part = np.imag(v)
        if im_part == 0:
            # Purely real: flip sign if negative.
            return -1 if re_part < 0 else 1
        if re_part == 0:
            # Purely imaginary: rotate by +/- pi/2 exactly.
            return 1j if im_part < 0 else -1j
        return np.exp(-1j * np.arctan2(im_part, re_part))

    return a * phase_factor(a[k]), b * phase_factor(b[k])
Phases the given matrices so that they agree on the phase of one entry. To maximize precision, the position with the largest entry from one of the matrices is used when attempting to compute the phase difference between the two matrices. Args: a: A numpy array. b: Another numpy array. Returns: A tuple (a', b') where a' == b' implies a == b*exp(i t) for some t.
juraj-google-style
def _find_index_of_defining_frame(tb):
    """Return index in `tb` of the first 'useful' (non-framework) frame.

    Walks the traceback from innermost to outermost and rejects frames whose
    filename matches the TensorFlow framework pattern.

    Args:
        tb: A list of traceback frames (as from Operation.traceback).

    Returns:
        Integer index into `tb` of the innermost non-framework frame, or 0
        (the outermost frame) if every file came from the framework.
    """
    size = len(tb)
    for offset, frame in enumerate(reversed(tb)):
        if not _is_framework_filename(frame.filename):
            return size - offset - 1
    return 0
Return index in op.traceback with first 'useful' frame. This method reads through the stack stored in op.traceback looking for the innermost frame which (hopefully) belongs to the caller. It accomplishes this by rejecting frames deemed to be part of the TensorFlow framework (by pattern matching the filename). Args: tb: A list of traceback frames (as from Operation.traceback). Returns: Integer index into op.traceback where the first non-TF file was found (innermost to outermost), or 0 (for the outermost stack frame) if all files came from TensorFlow.
github-repos
def authentication(self, username, password):
    """Configures the user authentication for eAPI.

    Stores the base64-encoded "username:password" pair on self._auth for
    authenticating the eAPI connection.

    Args:
        username (str): The username to authenticate the eAPI connection
            with.
        password (str): The password in clear text to authenticate the eAPI
            connection with.
    """
    credentials = '{}:{}'.format(username, password)
    if int(sys.version[0]) > 2:
        # Python 3: encode to bytes, base64, back to str, strip newlines.
        encoded = base64.encodebytes(credentials.encode()).decode()
        self._auth = encoded.replace('\n', '')
    else:
        encoded = base64.encodestring(credentials)
        self._auth = str(encoded).replace('\n', '')
    _LOGGER.debug('Autentication string is: {}:***'.format(username))
Configures the user authentication for eAPI This method configures the username and password combination to use for authenticating to eAPI. Args: username (str): The username to use to authenticate the eAPI connection with password (str): The password in clear text to use to authenticate the eAPI connection with
juraj-google-style
def list_vnets(access_token, subscription_id):
    """List the VNETs in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body of VNets list with properties.
    """
    # The double slash before 'virtualNetworks' mirrors the original URL
    # construction exactly.
    endpoint = '{}/subscriptions/{}/providers/Microsoft.Network//virtualNetworks?api-version={}'.format(
        get_rm_endpoint(), subscription_id, NETWORK_API)
    return do_get(endpoint, access_token)
List the VNETs in a subscription . Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of VNets list with properties.
codesearchnet
def reserve(self, *args, **kwargs):
    """Creates a FloatingIP in a region without assigning it to a Droplet.

    On success, the new ip and region are copied onto this object.

    Returns:
        self, updated with the reserved ip/region.
    """
    data = self.get_data('floating_ips/', type=POST, params={'region': self.region_slug})
    if data:
        floating_ip = data['floating_ip']
        self.ip = floating_ip['ip']
        self.region = floating_ip['region']
    return self
Creates a FloatingIP in a region without assigning it to a specific Droplet. Note: Every argument and parameter given to this method will be assigned to the object. Args: region_slug: str - region's slug (e.g. 'nyc3')
codesearchnet
def convolution_kernel(self, name='convolution_kernel'):
    """Convolution kernel corresponding to `self.spectrum`.

    The `D`-dimensional DFT of this kernel is the frequency-domain spectrum
    of this operator.

    Args:
        name: A name to give this `Op`.

    Returns:
        `Tensor` with `dtype` `self.dtype`.
    """
    with self._name_scope(name):
        # The kernel is the inverse DFT of the (complex-cast) spectrum.
        h = self._ifft(_to_complex(self.spectrum))
        return math_ops.cast(h, self.dtype)
Convolution kernel corresponding to `self.spectrum`. The `D` dimensional DFT of this kernel is the frequency domain spectrum of this operator. Args: name: A name to give this `Op`. Returns: `Tensor` with `dtype` `self.dtype`.
github-repos
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, utc=None, box=True, format=None, exact=True, unit=None, infer_datetime_format=False, origin='unix', cache=False):
    """Convert the arg to datetime format.

    If `arg` is not a (distributed) DataFrame, this falls back on pandas.

    Args:
        errors ('raise' or 'ignore'): If 'ignore', errors are silenced.
        dayfirst (bool): Date format is passed in as day first.
        yearfirst (bool): Date format is passed in as year first.
        utc (bool): returns a UTC DatetimeIndex if True.
        box (bool): If True, returns a DatetimeIndex.
        format (string): strftime to parse time, eg "%d/%m/%Y".
        exact (bool): If True, require an exact format match.
        unit (string, default 'ns'): unit of the arg.
        infer_datetime_format (bool): Whether or not to infer the format.
        origin (string): Define the reference date.

    Returns:
        Type depends on input:
        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp
    """
    if (not isinstance(arg, DataFrame)):
        return pandas.to_datetime(arg, errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache)
    # Dry-run on an empty pandas DataFrame with the same columns so that
    # invalid argument combinations raise the usual pandas errors eagerly.
    pandas.to_datetime(pandas.DataFrame(columns=arg.columns), errors=errors, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, box=box, format=format, exact=exact, unit=unit, infer_datetime_format=infer_datetime_format, origin=origin, cache=cache)
    return arg._query_compiler.to_datetime()
Convert the arg to datetime format. If not Ray DataFrame, this falls back on pandas. Args: errors ('raise' or 'ignore'): If 'ignore', errors are silenced. Pandas blatantly ignores this argument so we will too. dayfirst (bool): Date format is passed in as day first. yearfirst (bool): Date format is passed in as year first. utc (bool): retuns a UTC DatetimeIndex if True. box (bool): If True, returns a DatetimeIndex. format (string): strftime to parse time, eg "%d/%m/%Y". exact (bool): If True, require an exact format match. unit (string, default 'ns'): unit of the arg. infer_datetime_format (bool): Whether or not to infer the format. origin (string): Define the reference date. Returns: Type depends on input: - list-like: DatetimeIndex - Series: Series of datetime64 dtype - scalar: Timestamp
codesearchnet
def _UpdateEtag(self, response): etag = response.headers.get('etag', self.etag) etag_updated = self.etag != etag self.etag = etag return etag_updated
Update the etag from an API response. Args: response: HTTP response with a header field. Returns: bool, True if the etag in the response header updated.
juraj-google-style
def List(self, request, global_params=None):
    """List all `BitbucketServerConfigs` for a given project.

    This API is experimental.

    Args:
        request: (CloudbuildProjectsLocationsBitbucketServerConfigsListRequest)
            input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (ListBitbucketServerConfigsResponse) The response message.
    """
    method_config = self.GetMethodConfig('List')
    return self._RunMethod(method_config, request, global_params=global_params)
List all `BitbucketServerConfigs` for a given project. This API is experimental. Args: request: (CloudbuildProjectsLocationsBitbucketServerConfigsListRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (ListBitbucketServerConfigsResponse) The response message.
github-repos
def _parse_line(cls, line): try: pkg, rest = line.split(None, 1) except ValueError: rpm = cls._parse_package(line.strip()) return rpm rpm = cls._parse_package(pkg) rest = rest.split('\t') for i, value in enumerate(rest): rpm[cls.SOSREPORT_KEYS[i]] = value return rpm
Helper method for parsing package line with or without SOS report information. Args: line (str): package line with or without SOS report information Returns: dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig', 'pgpsig_short' if these are present.
juraj-google-style
def GetPointWithDistanceTraveled(self, shape_dist_traveled):
    """Returns a point on the shape polyline at the given shape_dist_traveled.

    Args:
        shape_dist_traveled: The input shape_dist_traveled.

    Returns:
        The shape point as a tuple (lat, lng, shape_dist_traveled),
        interpolated linearly between the two surrounding shape points.
        Returns None if there is a data error in the shape.
    """
    if not self.distance:
        return None
    # Clamp to the endpoints of the polyline.
    if shape_dist_traveled <= self.distance[0]:
        return self.points[0]
    if shape_dist_traveled >= self.distance[-1]:
        return self.points[-1]
    index = bisect.bisect(self.distance, shape_dist_traveled)
    lat0, lng0, dist0 = self.points[index - 1]
    lat1, lng1, dist1 = self.points[index]
    forward = shape_dist_traveled - dist0
    backward = dist1 - shape_dist_traveled
    total = forward + backward
    if total == 0:
        # Degenerate segment (duplicate distances) — data error.
        return None
    lat = (lat1 * forward + lat0 * backward) / total
    lng = (lng1 * forward + lng0 * backward) / total
    return (lat, lng, shape_dist_traveled)
Returns a point on the shape polyline with the input shape_dist_traveled. Args: shape_dist_traveled: The input shape_dist_traveled. Returns: The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and lng is the location of the shape point, and shape_dist_traveled is an increasing metric representing the distance traveled along the shape. Returns None if there is data error in shape.
juraj-google-style
def SetStorageProfiler(self, storage_profiler):
    """Sets the storage profiler.

    Also propagates the profiler to the underlying storage file, when one
    is attached.

    Args:
        storage_profiler (StorageProfiler): storage profiler.
    """
    self._storage_profiler = storage_profiler
    storage_file = self._storage_file
    if storage_file:
        storage_file.SetStorageProfiler(storage_profiler)
Sets the storage profiler. Args: storage_profiler (StorageProfiler): storage profiler.
juraj-google-style
def refresh(self, id_or_uri, timeout=(- 1)):
    """Reclaims the top-of-rack switches in a logical switch.

    Args:
        id_or_uri: Can be either the Logical Switch ID or URI.
        timeout: Timeout in seconds. Waits for task completion by default.
            The timeout does not abort the operation in OneView, it only
            stops waiting for completion.

    Returns:
        dict: The Logical Switch.
    """
    refresh_uri = '{}/refresh'.format(self._client.build_uri(id_or_uri))
    return self._client.update_with_zero_body(refresh_uri, timeout=timeout)
The Refresh action reclaims the top-of-rack switches in a logical switch. Args: id_or_uri: Can be either the Logical Switch ID or URI timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion. Returns: dict: The Logical Switch
codesearchnet
def generate_flat_data(self):
    """Generates batched data in flat numpy arrays.

    Populates self.flat_data / self.flat_targets with tokenized programs and
    their evaluated results, plus per-sample sequence sizes and the
    start/end tokens.

    Raises:
        ValueError: When too many generate calls are required to produce a
            sample within the allowed maximum sequence length.
    """
    all_statements = []
    all_targets = []
    self.sequence_sizes_in = []
    self.sequence_sizes_out = []
    for _ in six.moves.range(self._batch_size):
        (length, nest) = self.curriculum_obj.fetch()
        seq_size_in = self._max_seq_length
        is_valid_sample = False
        tries_remaining = 10
        # Rejection-sample programs until one fits within max_seq_length.
        while (not is_valid_sample):
            (value, code) = generate_code(length, nest, self._ops)
            (tokens_in, seq_size_in) = self.tokenize(code, self._max_seq_length, self._token_by_char)
            (tokens_out, seq_size_out) = self.tokenize(value, self._max_seq_length, self._token_by_char)
            is_valid_sample = (self._max_seq_length >= seq_size_in)
            if is_valid_sample:
                self.sequence_sizes_in.append(seq_size_in)
                self.sequence_sizes_out.append(seq_size_out)
            if (tries_remaining == 0):
                raise ValueError('Could not generate a sample below the allowable maximum, consider reducing either max_length or max_nest.')
            else:
                tries_remaining -= 1
        all_statements += tokens_in
        all_targets += tokens_out
    # Flatten the batch into single 1-D int64 token arrays.
    self.flat_data = np.array(all_statements, dtype=np.int64)
    self.num_tokens = self.flat_data.shape[0]
    self.flat_targets = np.array(all_targets, dtype=np.int64)
    self.num_tokens_target = self.flat_targets.shape[0]
    self.start_token = np.array(self.tokenize([get_start_token()], 1)[0], dtype=np.int64)
    self.end_token = np.array(self.tokenize([get_end_token()], 1)[0], dtype=np.int64)
Generates batched data in flat numpy arrays. Raises: ValueError: When too many generate calls are required.
codesearchnet
def request_status(r, detailed=False):
    """Returns a formatted string about the response status, useful for logging.

    Fixes: the success check used `range(200, 99)`, which is empty, so the
    success branch was unreachable; and the detailed branch embedded
    `{r.json()}` in a format string, which `str.format` cannot render
    (method calls are not allowed in format fields).

    Args:
        r: a requests.models.Response (anything with .request, .status_code
            and .json()).
        detailed (bool): when True, append the JSON body for 2xx responses.

    Returns:
        str: "HTTP <method> <url>: <status>" plus a suffix for 2xx responses.
    """
    base = 'HTTP {r.request.method} {r.request.url}: {r.status_code}'.format(r=r)
    if 200 <= r.status_code < 300:
        if detailed is True:
            return '{} - {}'.format(base, r.json())
        return base + ' - 👍'
    return base
Returns a formatted string about the status, useful for logging. args: r - takes requests.models.Response
juraj-google-style
def add_observer(self, observer, identify_observed=False):
    """Register an observer to observe me.

    Args:
        observer: The callable to register as an observer.
        identify_observed: If True, the observer will get myself passed as
            an additional first argument whenever it is invoked.

    Returns:
        True if the observer was added, False otherwise (e.g. when it was
        already registered).
    """
    # Bound methods expose __self__; plain functions do not.
    if hasattr(observer, '__self__'):
        return self._add_bound_method(observer, identify_observed)
    return self._add_function(observer, identify_observed)
Register an observer to observe me. Args: observer: The callable to register as an observer. identify_observed: If True, then the observer will get myself passed as an additional first argument whenever it is invoked. See ObserverFunction and ObserverBoundMethod to see how this works. Returns: True if the observer was added, False otherwise. The observing function or method will be called whenever I am called, and with the same arguments and keyword arguments. If a bound method or function has already been registered as an observer, trying to add it again does nothing. In other words, there is no way to sign up an observer to be called back multiple times. This was a conscious design choice which users are invited to complain about if there is a compelling use case where this is inconvenient.
codesearchnet
def retrieve_clang_version(clang_executable):
    """Retrieve the installed clang version.

    Fix: the devnull handle used to suppress stderr was opened but never
    closed; it is now managed by a `with` block.

    Args:
        clang_executable: (String) path to clang executable.

    Returns:
        The clang version string, or None when it cannot be determined.
    """
    with open(os.devnull, 'wb') as stderr:
        curr_version = run_shell([clang_executable, '--version'], allow_non_zero=True, stderr=stderr)
    curr_version_split = curr_version.lower().split('clang version ')
    if len(curr_version_split) > 1:
        curr_version = curr_version_split[1].split()[0].split('git')
        if len(curr_version) > 1:
            print('WARNING: current clang installation is not a release version.\n')
        curr_version = curr_version[0]
    curr_version_int = convert_version_to_int(curr_version)
    if not curr_version_int:
        print('WARNING: current clang installation version unknown.\n')
        return None
    print('You have Clang %s installed.\n' % curr_version)
    return curr_version
Retrieve installed clang version. Args: clang_executable: (String) path to clang executable Returns: The clang version detected.
github-repos
def name(self):
    """Returns the name of the device.

    Args:
        self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance.

    Returns:
        Device name decoded from the C string stored in ``sName``.
    """
    raw = ctypes.cast(self.sName, ctypes.c_char_p)
    return raw.value.decode()
Returns the name of the device. Args: self (JLinkDeviceInfo): the ``JLinkDeviceInfo`` instance Returns: Device name.
codesearchnet
def _GetPathSegmentIndexForValueWeights(self, value_weights): largest_weight = value_weights.GetLargestWeight() if (largest_weight > 0): value_weight_indexes = value_weights.GetIndexesForWeight(largest_weight) else: value_weight_indexes = [] if value_weight_indexes: path_segment_index = value_weight_indexes[0] else: path_segment_index = value_weights.GetFirstAvailableIndex() if (path_segment_index is None): raise RuntimeError('No path segment index found.') return path_segment_index
Retrieves the index of the path segment based on value weights. Args: value_weights: the value weights object (instance of _PathSegmentWeights). Returns: An integer containing the path segment index. Raises: RuntimeError: is no path segment index can be found.
codesearchnet
def matches_all(expected):
    """Matcher used by assert_that to check a set of matchers.

    Args:
        expected: A list of elements or hamcrest matchers used to match the
            elements of a single PCollection, in any order.

    Returns:
        A callable that asserts its argument contains exactly the expected
        elements (in any order).
    """
    def _matches(actual):
        # Imported lazily so the module does not require hamcrest unless
        # the matcher is actually invoked.
        from hamcrest.core import assert_that as hamcrest_assert
        from hamcrest.library.collection import contains_inanyorder
        hamcrest_assert(actual, contains_inanyorder(*list(expected)))
    return _matches
Matcher used by assert_that to check a set of matchers. Args: expected: A list of elements or hamcrest matchers to be used to match the elements of a single PCollection.
github-repos
def transform(self, data, data_type='S3Prefix', content_type=None, compression_type=None, split_type=None, job_name=None):
    """Start a new transform job.

    The two S3 string literals in the extracted source were truncated
    (`'s3:`); they are reconstructed here as the standard `s3://` URI check
    and the default-bucket output path used by the SageMaker SDK — confirm
    against upstream.

    Args:
        data (str): Input data location in S3.
        data_type (str): What the S3 location defines (default: 'S3Prefix').
            Valid values: 'S3Prefix' (key name prefix) or 'ManifestFile'
            (S3 URI of a manifest listing the input objects).
        content_type (str): MIME type of the input data (default: None).
        compression_type (str): Compression type of the input data, if
            compressed (default: None). Valid values: 'Gzip', None.
        split_type (str): The record delimiter for the input object
            (default: 'None'). Valid values: 'None', 'Line', 'RecordIO',
            'TFRecord'.
        job_name (str): job name (default: None). If not specified, one is
            generated.

    Raises:
        ValueError: if `data` is not an s3:// URI and not in local mode.
    """
    local_mode = self.sagemaker_session.local_mode
    if not local_mode and not data.startswith('s3://'):
        raise ValueError('Invalid S3 URI: {}'.format(data))
    if job_name is not None:
        self._current_job_name = job_name
    else:
        base_name = self.base_transform_job_name or base_name_from_image(self._retrieve_image_name())
        self._current_job_name = name_from_base(base_name)
    if self.output_path is None:
        # Default the output location to the session's default bucket.
        self.output_path = 's3://{}'.format(self.sagemaker_session.default_bucket())
    self.latest_transform_job = _TransformJob.start_new(self, data, data_type, content_type, compression_type, split_type)
Start a new transform job. Args: data (str): Input data location in S3. data_type (str): What the S3 location defines (default: 'S3Prefix'). Valid values: * 'S3Prefix' - the S3 URI defines a key name prefix. All objects with this prefix will be used as inputs for the transform job. * 'ManifestFile' - the S3 URI points to a single manifest file listing each S3 object to use as an input for the transform job. content_type (str): MIME type of the input data (default: None). compression_type (str): Compression type of the input data, if compressed (default: None). Valid values: 'Gzip', None. split_type (str): The record delimiter for the input object (default: 'None'). Valid values: 'None', 'Line', 'RecordIO', and 'TFRecord'. job_name (str): job name (default: None). If not specified, one will be generated.
codesearchnet
def make_processor(self, name, mappings, processor_type, **kwargs):
    """Instantiate an RML processor and register it in this manager.

    Args:
        name: name under which to register the processor.
        mappings: one RML mapping definition, or a list of them.
        processor_type: name of the RML processor class to use.
        **kwargs: forwarded to the processor constructor.

    Returns:
        The newly created, registered processor.

    Raises:
        LookupError: if a processor with `name` already exists.
    """
    from .processor import Processor
    if self.processors.get(name):
        raise LookupError("processor has already been created")
    # Normalize to a list of resolved RML definitions.
    if isinstance(mappings, list):
        rml_defs = [self.get_rml(item) for item in mappings]
    else:
        rml_defs = [self.get_rml(mappings)]
    processor = Processor[processor_type](rml_defs, **kwargs)
    processor.name = name
    self.processors[name] = processor
    return processor
Instantiates an RmlProcessor and registers it in the manager Args: ----- name: the name under which to register the processor mappings: the list of RML mapping definitions to use processor_type: the name of the RML processor to use
juraj-google-style
def _compute_elemwise_op_output_shape(self, shape1, shape2):
    """Compute the broadcast output shape of an element-wise operation.

    Args:
        shape1: tuple or None. Shape of the first tensor.
        shape2: tuple or None. Shape of the second tensor.

    Returns:
        The expected output shape (tuple), or None if either input is None.

    Raises:
        ValueError: if the shapes are not broadcast-compatible.
    """
    if None in [shape1, shape2]:
        return None
    if len(shape1) < len(shape2):
        # Normalize so that shape1 is always the longer operand.
        return self._compute_elemwise_op_output_shape(shape2, shape1)
    if not shape2:
        return shape1
    # Leading dims of the longer shape pass through unchanged.
    result = list(shape1[:len(shape1) - len(shape2)])
    for dim_a, dim_b in zip(shape1[-len(shape2):], shape2):
        if dim_a is None or dim_b is None:
            result.append(None)
        elif dim_a == 1:
            result.append(dim_b)
        elif dim_b == 1:
            result.append(dim_a)
        elif dim_a != dim_b:
            raise ValueError('Operands could not be broadcast together with shapes ' + str(shape1) + ' ' + str(shape2))
        else:
            result.append(dim_a)
    return tuple(result)
Computes the shape of the resultant of an elementwise operation. Args: shape1: tuple or None. Shape of the first tensor shape2: tuple or None. Shape of the second tensor Returns: expected output shape when an element-wise operation is carried out on 2 tensors with shapes shape1 and shape2. tuple or None. Raises: ValueError: if shape1 and shape2 are not compatible for element-wise operations.
github-repos
def export(self, path, session):
    """Export the module, with variable values taken from `session`, to `path`.

    The module definition exported is the one from the ModuleSpec used to
    create this module; `session` only supplies variable values.

    Args:
        path: path where to export the module to.
        session: session where to export the variables from.

    Raises:
        RuntimeError: if the default graph or the session graph differs from
            the graph where the module was instantiated.
    """
    default_graph = tf_v1.get_default_graph()
    if self._graph is not default_graph:
        raise RuntimeError('default graph differs from the graph where the module was instantiated.')
    if self._graph is not session.graph:
        raise RuntimeError('session graph differs from the graph where the module was instantiated.')
    self._impl.export(path, session)
Exports the module with the variables from the session in `path`. Note that it is the module definition in the ModuleSpec used to create this module that gets exported. The session is only used to provide the value of variables. Args: path: path where to export the module to. session: session where to export the variables from. Raises: RuntimeError: if there is an issue during the export.
codesearchnet
def reduce_max(x, disable_positional_args=None, output_shape=None, reduced_dim=None, name=None):
    """Max-reduction over one or more axes.

    Args:
        x: a Tensor.
        disable_positional_args: must be None (keyword-only guard).
        output_shape: an optional Shape; must be a subsequence of x.shape.
        reduced_dim: an optional Dimension.
        name: an optional string.

    Returns:
        a Tensor.
    """
    shape = convert_to_shape(output_shape)
    dim = convert_to_dimension(reduced_dim)
    assert disable_positional_args is None
    shape = _reduction_output_shape(x, shape, dim)
    if shape is None:
        shape = Shape([])
    if shape == x.shape:
        # Reducing over no dimensions is the identity.
        return x
    op = ReduceOperation(x, shape, "MAX", name=name or "reduce_max")
    return op.outputs[0]
Reduction on 1 or more axes. Args: x: a Tensor disable_positional_args: None output_shape: an optional Shape. Must be a subsequence of x.shape. reduced_dim: an optional Dimension name: an optional string Returns: a Tensor
juraj-google-style
def __init__(self, reader, genTexts=False):
    """Create a *Borrower* bound to the given reader.

    Args:
        reader: a *reader* object.

    Keyword Args:
        genTexts: whether this borrower should look for transformed MIBs
            that include human-oriented texts; when None, the existing
            default is left untouched.
    """
    self._reader = reader
    if genTexts is not None:
        self.genTexts = genTexts
Creates an instance of the *Borrower* class. Args: reader: a *reader* object Keyword Args: genTexts: indicates whether this borrower should be looking for transformed MIBs that include human-oriented texts
juraj-google-style
def Match(self, encoded):
    """Check whether `encoded` is compatible with this message instance.

    `encoded` is compatible when it decodes successfully and every field
    value equals the corresponding expected value of this instance.

    Args:
        encoded: A string expected to be encoded with the same encoding
            method as this message instance.

    Returns:
        Whether or not `encoded` matches this message instance.
    """
    logging.log(1, 'Decoding %s: %s', self.name, encoded)
    parsed = self.msg.encoding.ParseFromString(encoded, self.msg)
    logging.info('Matching message value:\nExpected: %s\nActual: %s\n', self.value_dict_or_array, parsed)
    return MessageValue._MatchValue(self.value_dict_or_array, parsed)
Whether or not |encoded| is compatible with this message instance. If |encoded| has all required fields, and values of all fields are same to those of this message instance, it is compatible. Otherwise, i.e 1) it doesn't have some required fields 2) it has some values of fields different from specified in |value_dict| of this message instance Args: encoded: A string expected to be encoded with same encoding method of this message instance. Returns: Whether or not |encoded| is compatible with this message instance.
github-repos
def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):
    """Convert linear and rotational force at a point from frame A to frame B.

    Args:
        force_A: 3-dim iterable, linear force expressed in frame A.
        torque_A: 3-dim iterable, rotational force (moment) expressed in A.
        pose_A_in_B: numpy array of shape (4, 4), pose of A in frame B.

    Returns:
        (force_B, torque_B): two numpy arrays of shape (3,) in frame B.
    """
    # Bug fix: the indexing expressions were garbled as `[(:3, 3)]`, which is
    # a syntax error; restored standard multi-axis slicing.
    pos_A_in_B = pose_A_in_B[:3, 3]
    rot_A_in_B = pose_A_in_B[:3, :3]
    skew_symm = _skew_symmetric_translation(pos_A_in_B)
    force_B = rot_A_in_B.T.dot(force_A)
    torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A)
    return force_B, torque_B
Converts linear and rotational force at a point in frame A to the equivalent in frame B. Args: force_A: 3-dim iterable for linear force in A torque_A: 3-dim iterable for rotational force (moment) in A pose_A_in_B: numpy array of shape (4,4) corresponding to the pose of A in frame B Returns: force_B, torque_B: two numpy arrays of shape (3,) for the forces in B
codesearchnet
def remove_item(self, item):
    """Remove the first menu item comparing equal to `item`.

    Args:
        item (MenuItem): the item to be removed.

    Returns:
        bool: True if the item was removed; False otherwise.
    """
    try:
        # list.remove deletes the first element comparing equal with ==,
        # exactly matching the original linear scan.
        self.items.remove(item)
    except ValueError:
        return False
    return True
Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise.
codesearchnet
def box_area(boxes):
    """Compute the area of each bounding box given in (x1, y1, x2, y2) format.

    Args:
        boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`):
            Boxes in (x1, y1, x2, y2) format with `0 <= x1 < x2` and
            `0 <= y1 < y2`.

    Returns:
        `torch.FloatTensor`: the area of each box.
    """
    boxes = _upcast(boxes)
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    return widths * heights
Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box.
github-repos
def parse_cartouche_text(lines):
    """Translate cartouche-format docstring lines into reStructuredText.

    Args:
        lines: A sequence of strings representing the lines of a single
            docstring as read from the source by Sphinx.

    Returns:
        A list of lines containing the transformed docstring as
        reStructuredText.

    Raises:
        RuntimeError: If the docstring cannot be parsed.
    """
    # The transformation is a fixed pipeline of passes applied in order.
    passes = (unindent, pad_blank_lines, first_paragraph_indent,
              gather_lines, group_paragraphs, extract_structure)
    node = lines
    for stage in passes:
        node = stage(node)
    result = node.render_rst()
    ensure_terminal_blank(result)
    return result
Parse text in cartouche format and return a reStructuredText equivalent Args: lines: A sequence of strings representing the lines of a single docstring as read from the source by Sphinx. This string should be in a format that can be parsed by cartouche. Returns: A list of lines containing the transformed docstring as reStructuredText as produced by cartouche. Raises: RuntimeError: If the docstring cannot be parsed.
juraj-google-style
def query_parameters(param_list, defaults=None):
    """Interactively ask the user for each parameter, proposing defaults.

    Args:
        param_list (list): parameters to ask the user for values.
        defaults (list): proposed defaults, one per parameter (same length
            as `param_list`). A None element means no default is proposed
            for the corresponding parameter.

    Returns:
        OrderedDict mapping each parameter name to the literal-evaluated
        value entered by the user.
    """
    script_params = collections.OrderedDict((name, []) for name in param_list)
    for name, default in zip(list(script_params.keys()), defaults):
        raw = click.prompt('%s' % name, default=default)
        # The raw answer is a string; interpret it as a Python literal.
        script_params[name] = ast.literal_eval(raw)
    return script_params
Asks the user for parameters. If available, proposes some defaults. Args: param_list (list): List of parameters to ask the user for values. defaults (list): A list of proposed defaults. It must be a list of the same length as param_list. A value of None in one element of the list means that no default will be proposed for the corresponding parameter.
codesearchnet
def __init__(self, text: str, sctx: SchemaContext):
    """Initialize the parser instance.

    Args:
        text: XPath expression text, handed to the base parser.
        sctx: Schema context for XPath expression parsing.
    """
    super().__init__(text)
    # Retained for resolving schema-qualified names during parsing.
    self.sctx = sctx
Initialize the parser instance. Args: sctx: Schema context for XPath expression parsing.
juraj-google-style
def ExportClientsByKeywords(keywords, filename, token=None):
    r"""Export summaries of clients matched by a keyword search to a CSV file.

    Searches the client index for machines matching all of `keywords` and
    writes a CSV summary of the results to `filename` (replaced if already
    present). Multi-value fields are '\n' separated.

    Args:
        keywords: a list of keywords to search for.
        filename: the name of the file to write to.
        token: datastore token.
    """
    index = client_index.CreateClientIndex(token=token)
    client_list = index.LookupClients(keywords)
    logging.info('found %d clients', len(client_list))
    if (not client_list):
        return
    # NOTE(review): `csv.DictWriter` here appears to be GRR's own CSV wrapper
    # (constructed from column names, with WriteHeader/WriteRow/Content),
    # not the stdlib class — confirm against the project's csv module.
    writer = csv.DictWriter([u'client_id', u'hostname', u'last_seen', u'os', u'os_release', u'os_version', u'users', u'ips', u'macs'])
    writer.WriteHeader()
    for client in aff4.FACTORY.MultiOpen(client_list, token=token):
        s = client.Schema
        writer.WriteRow({u'client_id': client.urn.Basename(), u'hostname': client.Get(s.HOSTNAME), u'os': client.Get(s.SYSTEM), u'os_release': client.Get(s.OS_RELEASE), u'os_version': client.Get(s.OS_VERSION), u'ips': client.Get(s.HOST_IPS), u'macs': client.Get(s.MAC_ADDRESS), u'users': '\n'.join(client.Get(s.USERNAMES, [])), u'last_seen': client.Get(s.PING)})
    # The whole CSV is accumulated in memory and flushed to disk at the end.
    with io.open(filename, 'w') as csv_out:
        csv_out.write(writer.Content())
r"""A script to export clients summaries selected by a keyword search. This script does a client search for machines matching all of keywords and writes a .csv summary of the results to filename. Multi-value fields are '\n' separated. Args: keywords: a list of keywords to search for filename: the name of the file to write to, will be replaced if already present token: datastore token.
codesearchnet
def construct(name, exec_, terminal=False, additional_opts=None):
    """Construct a standards-compliant .desktop file and return it as a string.

    Args:
        name (str): The program's name.
        exec\\_ (str): The command.
        terminal (bool): Whether the program should be run in a terminal
            emulator. Defaults to ``False``.
        additional_opts (dict): Any additional fields; a ``Comment`` entry
            overrides the default comment (which is `name`). Defaults to
            no extra fields.

    Returns:
        str: The constructed .desktop file.
    """
    # Bug fixes: the default was a shared mutable dict, additional_opts was
    # read with .get() *before* the None check (so passing None crashed), and
    # a dead duplicate assignment built the header twice.
    if additional_opts is None:
        additional_opts = {}
    fields = {
        'Name': name,
        'Exec': exec_,
        'Terminal': 'true' if terminal else 'false',
        'Comment': additional_opts.get('Comment', name),
    }
    desktop_file = ('[Desktop Entry]\nName={name}\nExec={exec_}\n'
                    'Terminal={terminal}\nComment={comment}\n').format(
                        name=fields['Name'], exec_=fields['Exec'],
                        terminal=fields['Terminal'], comment=fields['Comment'])
    for option, value in additional_opts.items():
        # Core fields are already emitted above; only append extras.
        if option not in fields:
            desktop_file += '%s=%s\n' % (option, value)
    return desktop_file
Construct a .desktop file and return it as a string. Create a standards-compliant .desktop file, returning it as a string. Args: name (str) : The program's name. exec\_ (str) : The command. terminal (bool): Determine if program should be run in a terminal emulator or not. Defaults to ``False``. additional_opts (dict): Any additional fields. Returns: str: The constructed .desktop file.
juraj-google-style
def get_services_health(self) -> dict:
    """Get the health of all services.

    A service is Healthy when its desired replica count matches the actual
    replica count, Unhealthy otherwise.

    Returns:
        dict mapping service name to 'Healthy' or 'Unhealthy'.
    """
    health = {}
    for service_id in self._get_services():
        name = DC.get_service_name(service_id)
        replicas_match = DC.get_replicas(service_id) == DC.get_actual_replica(service_id)
        health[name] = 'Healthy' if replicas_match else 'Unhealthy'
    return health
Get the health of all services. Returns: dict, services id and health status
codesearchnet
def Delete(self, request, global_params=None):
    """Delete the dataset specified by the datasetId value.

    Before a dataset can be deleted, all its tables must be deleted (either
    manually or by specifying deleteContents). Immediately after deletion,
    another dataset with the same name can be created.

    Args:
        request: (BigqueryDatasetsDeleteRequest) input message.
        global_params: (StandardQueryParameters, default: None) global
            arguments.

    Returns:
        (BigqueryDatasetsDeleteResponse) The response message.
    """
    method_config = self.GetMethodConfig('Delete')
    return self._RunMethod(method_config, request, global_params=global_params)
Deletes the dataset specified by the datasetId value. Before you can delete a dataset, you must delete all its tables, either manually or by specifying deleteContents. Immediately after deletion, you can create another dataset with the same name. Args: request: (BigqueryDatasetsDeleteRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (BigqueryDatasetsDeleteResponse) The response message.
github-repos
class Kosmos2VisionEncoder(nn.Module):
    """Transformer encoder of `config.num_hidden_layers` self-attention layers.

    Each layer is a [`Kosmos2VisionEncoderLayer`].

    Args:
        config: Kosmos2VisionConfig
    """

    def __init__(self, config: Kosmos2VisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([Kosmos2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Off by default; flipped externally (e.g. gradient_checkpointing_enable).
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        """Run the encoder stack over `inputs_embeds`.

        Returns either a `BaseModelOutput` or, when `return_dict` is false,
        a tuple of (last_hidden_state, hidden_states?, attentions?) with
        None entries dropped.
        """
        # Fall back to config-level defaults when flags are not supplied.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            # Hidden states are recorded *before* each layer; the final
            # state is appended after the loop.
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`Kosmos2VisionEncoderLayer`]. Args: config: Kosmos2VisionConfig
github-repos
def depth_november_average_ground_temperature(self, value=None):
    """Set IDD Field `depth_november_average_ground_temperature` (unit: C).

    Args:
        value (float): value for the field. When None it is stored as a
            missing value and not validated.

    Raises:
        ValueError: if `value` cannot be converted to float.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float for field `depth_november_average_ground_temperature`'.format(value))
    self._depth_november_average_ground_temperature = value
Corresponds to IDD Field `depth_november_average_ground_temperature` Args: value (float): value for IDD Field `depth_november_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def consume_json(request):
    """Extract and return oembed content for the requested URLs as JSON.

    Required GET params:
        urls - list of urls to consume
    Optional GET params:
        width - maxwidth attribute for oembed content
        height - maxheight attribute for oembed content
        template_dir - template_dir to use when rendering oembed

    Returns:
        JSON-encoded mapping of url -> {'oembeds': ..., 'rendered': ...}.
    """
    client = OEmbedConsumer()
    urls = request.GET.getlist('urls')
    width = request.GET.get('width')
    height = request.GET.get('height')
    template_dir = request.GET.get('template_dir')
    ctx = RequestContext(request)
    output = {}
    for url in urls:
        try:
            # Only probing for an endpoint; the result itself is unused.
            oembed.site.provider_for_url(url)
        except OEmbedMissingEndpoint:
            oembeds = None
            rendered = None
        else:
            oembeds = url
            rendered = client.parse_text(url, width, height, context=ctx, template_dir=template_dir)
        output[url] = {'oembeds': oembeds, 'rendered': rendered}
    return HttpResponse(simplejson.dumps(output), mimetype='application/json')
Extract and return oembed content for given urls. Required GET params: urls - list of urls to consume Optional GET params: width - maxwidth attribute for oembed content height - maxheight attribute for oembed content template_dir - template_dir to use when rendering oembed Returns: list of dictionaries with oembed metadata and renderings, json encoded
codesearchnet
def __init__(self, graph_view):
    """Configure saving.

    Args:
        graph_view: An `ObjectGraphView` object describing the object graph
            to save.
    """
    self._graph_view = graph_view
    if context.executing_eagerly():
        # Eager mode rebuilds save ops on every call, so caching is moot.
        self._cache = None
        self._saveables_cache = None
    else:
        self._cache = object_identity.ObjectIdentityWeakKeyDictionary()
        self._saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary()
    # Lazily-populated graph-mode state.
    for attr in ('_file_prefix_placeholder', '_object_graph_feed_tensor',
                 '_last_save_object_graph', '_file_prefix_feed_tensor',
                 '_cached_save_operation', '_object_map'):
        setattr(self, attr, None)
    self._restore_op_cache = {}
Configure saving. Args: graph_view: An `ObjectGraphView` object containing a description of the object graph to save.
github-repos
def split_data(*inputs, splits=[0.5, 0.5], shuffle=True, stratify_by=None, index_only=False, seed=None):
    """Split correlated inputs into multiple splits of the requested sizes.

    Args:
        inputs: correlated tuples/lists/arrays/matrices/tensors to split.
        splits: split sizes, either all fractions (summing to 1.0) or all
            counts (summing to len(inputs[0])).
        shuffle: if True, shuffle the data before splitting.
        stratify_by: if not None, labels used to stratify the splits
            (sampling within each label group); overrides shuffle.
        index_only: if True, return only the indices of the splits.
        seed: (int) random seed.
    """
    def fractions_to_counts(fracs, n):
        'Converts a list of fractions to a list of counts that sum to n'
        counts = [int(np.round((n * frac))) for frac in fracs]
        # The final count absorbs rounding error so the total is exactly n.
        counts[(- 1)] = (n - sum(counts[:(- 1)]))
        return counts

    def slice_data(data, indices):
        # Select `indices` from `data`, handling both plain sequences and
        # array-likes that support fancy indexing.
        if (isinstance(data, list) or isinstance(data, tuple)):
            return [d for (i, d) in enumerate(data) if (i in set(indices))]
        else:
            try:
                return data[indices]
            except TypeError:
                raise Exception(f'split_data() currently only accepts inputs of type tuple, list, np.ndarray, scipy.sparse, or torch.Tensor; not {type(data)}')

    if (seed is not None):
        random.seed(seed)
    try:
        n = len(inputs[0])
    except TypeError:
        # Array-likes without __len__ (e.g. sparse matrices) expose .shape.
        n = inputs[0].shape[0]
    num_splits = len(splits)
    # Normalize `splits` to fractions regardless of whether counts or
    # fractions were given.
    if all((isinstance(x, int) for x in splits)):
        if (not (sum(splits) == n)):
            raise ValueError(f'Provided split counts must sum to n ({n}), not {sum(splits)}.')
        fracs = [(count / n) for count in splits]
    elif all((isinstance(x, float) for x in splits)):
        if (not (sum(splits) == 1.0)):
            raise ValueError(f'Split fractions must sum to 1.0, not {sum(splits)}.')
        fracs = splits
    else:
        raise ValueError('Splits must contain all ints or all floats.')
    # Each pool is split independently; with stratification there is one
    # pool per label value, otherwise a single pool of all indices.
    if (stratify_by is None):
        pools = [np.arange(n)]
    else:
        pools = defaultdict(list)
        for (i, val) in enumerate(stratify_by):
            pools[val].append(i)
        pools = list(pools.values())
    assignments = [[] for _ in range(num_splits)]
    for pool in pools:
        if (shuffle or (stratify_by is not None)):
            random.shuffle(pool)
        counts = fractions_to_counts(fracs, len(pool))
        counts.insert(0, 0)
        cum_counts = np.cumsum(counts)
        for i in range(num_splits):
            assignments[i].extend(pool[cum_counts[i]:cum_counts[(i + 1)]])
    if index_only:
        return assignments
    else:
        outputs = []
        for data in inputs:
            data_splits = []
            for split in range(num_splits):
                data_splits.append(slice_data(data, assignments[split]))
            outputs.append(data_splits)
        # With a single input, return its splits directly rather than a
        # one-element list.
        if (len(outputs) == 1):
            return outputs[0]
        else:
            return outputs
Splits inputs into multiple splits of defined sizes Args: inputs: correlated tuples/lists/arrays/matrices/tensors to split splits: list containing split sizes (fractions or counts); shuffle: if True, shuffle the data before splitting stratify_by: (None or an input) if not None, use these labels to stratify the splits (separating the data into groups by these labels and sampling from those, rather than from the population at large); overrides shuffle index_only: if True, return only the indices of the new splits, not the split data itself seed: (int) random seed Example usage: Ls, Xs, Ys = split_data(L, X, Y, splits=[0.8, 0.1, 0.1]) OR assignments = split_data(Y, splits=[0.8, 0.1, 0.1], index_only=True) Note: This is very similar to scikit-learn's train_test_split() method, but with support for more than two splits.
codesearchnet
def parse_mapping(mapping_file: Optional[str]) -> configparser.ConfigParser:
    """Parse the file containing the mappings from hosts to pass entries.

    Args:
        mapping_file: Name of the file to parse. If ``None``, the default
            file from the XDG location is used.

    Raises:
        RuntimeError: if no mapping file is given and none exists at any
            XDG config location.
    """
    LOGGER.debug('Parsing mapping file. Command line: %s', mapping_file)

    def _load(handle):
        parser = configparser.ConfigParser()
        parser.read_file(handle)
        return parser

    if mapping_file is not None:
        LOGGER.debug('Parsing command line mapping file')
        return _load(mapping_file)
    xdg_config_dir = xdg.BaseDirectory.load_first_config('pass-git-helper')
    if xdg_config_dir is None:
        raise RuntimeError(
            'No mapping configured so far at any XDG config location. '
            'Please create {config_file}'.format(
                config_file=DEFAULT_CONFIG_FILE))
    default_path = os.path.join(xdg_config_dir, CONFIG_FILE_NAME)
    LOGGER.debug('Parsing mapping file %s', default_path)
    with open(default_path, 'r') as file_handle:
        return _load(file_handle)
Parse the file containing the mappings from hosts to pass entries. Args: mapping_file: Name of the file to parse. If ``None``, the default file from the XDG location is used.
juraj-google-style
def share(self, group_id, group_access, expires_at=None, **kwargs):
    """Share the project with a group.

    Args:
        group_id (int): ID of the group.
        group_access (int): Access level for the group.
        expires_at: optional expiry date for the share.
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabCreateError: If the server failed to perform the request.
    """
    payload = {'group_id': group_id,
               'group_access': group_access,
               'expires_at': expires_at}
    path = '/projects/%s/share' % self.get_id()
    self.manager.gitlab.http_post(path, post_data=payload, **kwargs)
Share the project with a group. Args: group_id (int): ID of the group. group_access (int): Access level for the group. **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server failed to perform the request
codesearchnet
def collapse_address_list(addresses):
    """Collapse a list of IP objects into the minimal list of networks.

    Example:
        collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')])
        -> [IPv4('1.1.0.0/23')]

    Args:
        addresses: A list of IPv4Network or IPv6Network objects.

    Returns:
        A list of IPv4Network or IPv6Network objects depending on the input.

    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    i = 0
    addrs = []
    ips = []
    nets = []
    # Partition the input: bare addresses and full-length-prefix networks go
    # into `ips` (as address objects), real networks into `nets`. Mixing IP
    # versions is rejected along the way.
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            # A /32 (or /128) network is treated as its single address.
            ips.append(ip.ip)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
            nets.append(ip)
    ips = sorted(set(ips))
    nets = sorted(set(nets))
    # Merge consecutive address runs into summary networks; jumping `i` to
    # just past each run's last address skips the addresses already covered.
    while i < len(ips):
        (first, last) = _find_address_range(ips[i:])
        i = ips.index(last) + 1
        addrs.extend(summarize_address_range(first, last))
    # A final recursive pass collapses adjacent/overlapping networks.
    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
Collapse a list of IP objects. Example: collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')] Args: addresses: A list of IPv4Network or IPv6Network objects. Returns: A list of IPv4Network or IPv6Network objects depending on what we were passed. Raises: TypeError: If passed a list of mixed version objects.
juraj-google-style
def _get_input_to_checker_function(self, flag_values):
    """Given flag values, build the input to be given to the checker.

    Args:
        flag_values: flags.FlagValues instance to read flags from.

    Returns:
        dict keyed by self.flag_names, each value being the value of the
        corresponding flag (string, boolean, etc).
    """
    return {name: flag_values[name].value for name in self.flag_names}
Given flag values, returns the input to be given to checker. Args: flag_values: flags.FlagValues, the FlagValues instance to get flags from. Returns: dict, with keys() being self.flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc).
juraj-google-style
def read(self, vals):
    """Populate the holiday fields from a list of raw string values.

    Empty strings are stored as None.

    Args:
        vals (list): list of strings representing values.
    """
    self.holiday_name = vals[0] if len(vals[0]) > 0 else None
    self.holiday_day = vals[1] if len(vals[1]) > 0 else None
Read values. Args: vals (list): list of strings representing values
juraj-google-style
def _ragged_tensor_apply_loss(loss_fn, y_true, y_pred, y_pred_extra_dim=False):
    """Apply a loss function on a per-batch basis over ragged inputs.

    Args:
        loss_fn: The loss function.
        y_true: truth values (RaggedTensor).
        y_pred: predicted values (RaggedTensor).
        y_pred_extra_dim: whether y_pred has an additional dimension
            compared to y_true.

    Returns:
        Loss-function result: a dense tensor if the output has a single
        dimension (per-batch loss value), a ragged tensor otherwise.
    """
    def rt_is_equiv_dense(rt):
        # A ragged tensor is "effectively dense" when every ragged dimension
        # has uniform row lengths (zero variance).
        return math_ops.reduce_all([math_ops.reduce_all(math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0]))) for row_lens in rt.nested_row_lengths()]) if False else math_ops.reduce_all([math_ops.equal(math_ops.reduce_variance(math_ops.cast(row_lens, backend.floatx())), constant_op.constant([0.0])) for row_lens in rt.nested_row_lengths()])

    def _convert_to_dense(inputs):
        return tuple((rt.to_tensor() if isinstance(rt, ragged_tensor.RaggedTensor) else rt for rt in inputs))

    def _call_loss(inputs, ragged_output):
        # Coerce the loss result to the expected raggedness.
        r = loss_fn(*inputs)
        if ragged_output and (not isinstance(r, ragged_tensor.RaggedTensor)):
            r = ragged_tensor.RaggedTensor.from_tensor(r)
        elif not ragged_output and isinstance(r, ragged_tensor.RaggedTensor):
            r = r.to_tensor()
        return r

    def _wrapper(inputs, ragged_output):
        _, y_pred = inputs
        if isinstance(y_pred, ragged_tensor.RaggedTensor):
            # Prefer the dense path when the ragged tensor is uniform.
            return cond.cond(rt_is_equiv_dense(y_pred), lambda: _call_loss(_convert_to_dense(inputs), ragged_output), lambda: _call_loss(inputs, ragged_output))
        return loss_fn(*inputs)
    if not isinstance(y_true, ragged_tensor.RaggedTensor):
        return loss_fn(y_true, y_pred.to_tensor())
    lshape = y_pred.shape.as_list()[1:-1]
    if len(lshape) > 0:
        spec = ragged_tensor.RaggedTensorSpec(shape=lshape, dtype=y_pred.dtype)
    else:
        spec = tensor_spec.TensorSpec(shape=[], dtype=y_pred.dtype)
    nested_splits_list = [rt.nested_row_splits for rt in (y_true, y_pred)]
    if y_pred_extra_dim:
        # Drop y_pred's innermost splits so both split lists line up.
        rdims = [len(slist) for slist in nested_splits_list]
        if rdims[0] == rdims[1] - 1:
            nested_splits_list[1] = nested_splits_list[1][:-1]
    map_fn = functools.partial(_wrapper, ragged_output=len(lshape) > 1)
    assertion_list = ragged_util.assert_splits_match(nested_splits_list)
    with ops.control_dependencies(assertion_list):
        return ragged_map_ops.map_fn(map_fn, elems=(y_true, y_pred), dtype=spec)
Apply a loss function on a per batch basis. Args: loss_fn: The loss function y_true: truth values (RaggedTensor) y_pred: predicted values (RaggedTensor) y_pred_extra_dim: whether y_pred has an additional dimension compared to y_true Returns: Loss-function result. A dense tensor if the output has a single dimension (per-batch loss value); a ragged tensor otherwise.
github-repos
def dump(self, include_address=True, include_id=True) -> str:
    """Dump the keystore as a JSON string for later disk storage.

    The result inherits 'crypto' and 'version' from the keystore and adds
    'address' and 'id' according to the flags. If the address or id is not
    known, it is omitted even if requested.

    Args:
        include_address: whether the address should be included.
        include_id: whether the id should be included.
    """
    payload = {'crypto': self.keystore['crypto'],
               'version': self.keystore['version']}
    if include_address and self.address is not None:
        payload['address'] = remove_0x_prefix(encode_hex(self.address))
    if include_id and self.uuid is not None:
        payload['id'] = self.uuid
    return json.dumps(payload)
Dump the keystore for later disk storage. The result inherits the entries `'crypto'` and `'version`' from `account.keystore`, and adds `'address'` and `'id'` in accordance with the parameters `'include_address'` and `'include_id`'. If address or id are not known, they are not added, even if requested. Args: include_address: flag denoting if the address should be included or not include_id: flag denoting if the id should be included or not
codesearchnet
def step_preprocess(x, step, hparams):
    """Preprocess the input at the beginning of each step.

    Args:
        x: input tensor.
        step: step index.
        hparams: model hyper-parameters.

    Returns:
        preprocessed input.
    """
    original_channel_size = common_layers.shape_list(x)[-1]
    if hparams.add_position_timing_signal:
        x = add_position_timing_signal(x, step, hparams)
    if hparams.add_step_timing_signal:
        x = add_step_timing_signal(x, step, hparams)
    # Bug fix: the second disjunct previously re-tested
    # add_position_timing_signal, so inputs with only a concatenated *step*
    # timing signal were never projected back to the original channel size.
    if ((hparams.add_position_timing_signal or hparams.add_step_timing_signal)
            and hparams.add_or_concat_timing_signal == 'concat'):
        # Concatenation grew the channel dimension; project it back down.
        x = common_layers.dense(x, original_channel_size, activation=None, use_bias=False)
    if hparams.add_sru:
        x = common_layers.sru(x)
    return x
Preprocess the input at the beginning of each step. Args: x: input tensor step: step hparams: model hyper-parameters Returns: preprocessed input.
codesearchnet
def verify_firebase_token(id_token, request, audience=None):
    """Verifies an ID Token issued by Firebase Authentication.

    Thin wrapper over `verify_token`, pinned to the Google APIs certs URL.

    Args:
        id_token (Union[str, bytes]): The encoded token.
        request (google.auth.transport.Request): The object used to make
            HTTP requests.
        audience (str): The audience this token is intended for, typically
            the Firebase application ID. If None the audience is not
            verified.

    Returns:
        Mapping[str, Any]: The decoded token.
    """
    return verify_token(id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL)
Verifies an ID Token issued by Firebase Authentication. Args: id_token (Union[str, bytes]): The encoded token. request (google.auth.transport.Request): The object used to make HTTP requests. audience (str): The audience that this token is intended for. This is typically your Firebase application ID. If None then the audience is not verified. Returns: Mapping[str, Any]: The decoded token.
codesearchnet
def _CreateTaskStorageWriter(self, path, task):
    """Creates a task storage writer.

    Args:
        path (str): path to the storage file.
        task (Task): task the writer is created for.

    Returns:
        SQLiteStorageFileWriter: storage writer bound to this session with
            task-type storage.
    """
    return SQLiteStorageFileWriter(
        self._session, path,
        storage_type=definitions.STORAGE_TYPE_TASK,
        task=task)
Creates a task storage writer. Args: path (str): path to the storage file. task (Task): task. Returns: SQLiteStorageFileWriter: storage writer.
juraj-google-style
def CreateExtensionSetting(client, feed_items, campaign_feed, feed_item_ids, platform_restrictions=None):
    """Creates the campaign extension setting for a list of Feed Items.

    Args:
        client: an AdWordsClient instance.
        feed_items: the list of all Feed Items.
        campaign_feed: the original Campaign Feed.
        feed_item_ids: the Ids of the feed items for which extension
            settings should be created.
        platform_restrictions: an optional Platform Restriction for the
            Feed items.
    """
    campaign_extension_setting_service = client.GetService(
        'CampaignExtensionSettingService', 'v201809')
    # Bug fix: each feed item was wrapped in a set literal
    # ({CreateSitelinkFeedItem(...)}); sets of dicts raise TypeError
    # (unhashable) and the API expects a plain list of feed items.
    extension_feed_items = [
        CreateSitelinkFeedItem(feed_items, feed_item_id)
        for feed_item_id in feed_item_ids]
    extension_setting = {'extensions': extension_feed_items}
    if platform_restrictions:
        extension_setting['platformRestrictions'] = platform_restrictions
    campaign_extension_setting = {
        'campaignId': campaign_feed['campaignId'],
        'extensionType': 'SITELINK',
        'extensionSetting': extension_setting}
    operation = {'operand': campaign_extension_setting, 'operator': 'ADD'}
    campaign_extension_setting_service.mutate([operation])
Creates the extension setting for a list of Feed Items. Args: client: an AdWordsClient instance. feed_items: the list of all Feed Items. campaign_feed: the original Campaign Feed. feed_item_ids: the Ids of the feed items for which extension settings should be created. platform_restrictions: an optional Platform Restriction for the Feed items.
codesearchnet
def load(cls, path, reader=None):
    """Loads the corpus from the given path, using the given reader.

    If no reader is given, the default reader
    (:py:class:`audiomate.corpus.io.DefaultReader`) is used.

    Args:
        path (str): Path to load the corpus from.
        reader (str, CorpusReader): The reader, or the name of the reader,
            to use.

    Returns:
        Corpus: The loaded corpus.
    """
    if reader is None:
        from . import io
        reader = io.DefaultReader()
    elif isinstance(reader, str):
        # Idiom fix: isinstance instead of `type(reader) == str`, so str
        # subclasses are also accepted as reader names.
        from . import io
        reader = io.create_reader_of_type(reader)
    return reader.load(path)
Loads the corpus from the given path, using the given reader. If no reader is given the :py:class:`audiomate.corpus.io.DefaultReader` is used. Args: path (str): Path to load the corpus from. reader (str, CorpusReader): The reader or the name of the reader to use. Returns: Corpus: The loaded corpus.
juraj-google-style
def _read_data_handler(length, whence, ctx, skip=False, stream_event=ION_STREAM_INCOMPLETE_EVENT):
    """Creates a co-routine for retrieving data up to a requested size.

    Args:
        length (int): The minimum length requested.
        whence (Coroutine): The co-routine to return to after the data is
            satisfied.
        ctx (_HandlerContext): The context for the read.
        skip (Optional[bool]): Whether the requested bytes should be
            skipped rather than buffered.
        stream_event (Optional[IonEvent]): The stream event to return if no
            bytes are read or available.

    Raises:
        IonException: if `length` exceeds the bytes remaining in `ctx`.
    """
    trans = None
    queue = ctx.queue
    if length > ctx.remaining:
        raise IonException('Length overrun: %d bytes, %d remaining' % (length, ctx.remaining))
    queue_len = len(queue)
    # Any data already buffered counts toward the requested length.
    if queue_len > 0:
        stream_event = ION_STREAM_INCOMPLETE_EVENT
        length -= queue_len
        if skip:
            # When skipping, drop either the whole buffer or just enough
            # to satisfy the request (length has gone negative).
            if length >= 0:
                queue.skip(queue_len)
            else:
                queue.skip(queue_len + length)
    while True:
        data_event, self = (yield trans)
        if data_event is not None and data_event.data is not None:
            data = data_event.data
            data_len = len(data)
            if data_len > 0:
                stream_event = ION_STREAM_INCOMPLETE_EVENT
                length -= data_len
                if not skip:
                    queue.extend(data)
                else:
                    # Skipping: only buffer the overshoot beyond the
                    # requested length; advance position past the rest.
                    pos_adjustment = data_len
                    if length < 0:
                        pos_adjustment += length
                        queue.extend(data[length:])
                    queue.position += pos_adjustment
        if length <= 0:
            # Request satisfied; hand control back to `whence`.
            yield Transition(None, whence)
        trans = Transition(stream_event, self)
Creates a co-routine for retrieving data up to a requested size. Args: length (int): The minimum length requested. whence (Coroutine): The co-routine to return to after the data is satisfied. ctx (_HandlerContext): The context for the read. skip (Optional[bool]): Whether the requested number of bytes should be skipped. stream_event (Optional[IonEvent]): The stream event to return if no bytes are read or available.
juraj-google-style
def start(self) -> None:
    """Starts this server.

    Raises:
        tf.errors.OpError: Or one of its subclasses if an error occurs
            while starting the server.
    """
    self._server.start()
Starts this server. Raises: tf.errors.OpError: Or one of its subclasses if an error occurs while starting the server.
github-repos
def _get_type(points, soma_class):
    """Classify the soma type from its points.

    Args:
        points: Soma points.
        soma_class (str): one of 'contour' or 'cylinder'.

    Returns:
        The soma class to instantiate, or None for 0 points (and for
        2-point contours).
    """
    assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)
    npoints = len(points)
    if soma_class == SOMA_CONTOUR:
        if npoints == 0 or npoints == 2:
            return None
        if npoints == 1:
            return SomaSinglePoint
        return SomaSimpleContour
    # Cylinder somas: detect the neuromorpho.org 3-point convention first
    # (root point plus two children of the root).
    if (npoints == 3 and points[0][COLS.P] == -1 and
            points[1][COLS.P] == 1 and points[2][COLS.P] == 1):
        L.warning('Using neuromorpho 3-Point soma')
        return SomaNeuromorphoThreePointCylinders
    if npoints == 0:
        return None
    if npoints == 1:
        return SomaSinglePoint
    return SomaCylinders
get the type of the soma Args: points: Soma points soma_class(str): one of 'contour' or 'cylinder' to specify the type
juraj-google-style
def find_stacks(node, strict=False):
    """Find stack pushes and pops under `node` and annotate them as such.

    Args:
        node: An AST node that might contain stack pushes and pops.
        strict: Whether to stringently verify that each push and pop are
            matched; this is not always possible for higher-order
            derivatives of split-motion code.

    Returns:
        The node passed in, with pushes and pops annotated on AST nodes.
    """
    finder = FindStackOps()
    finder.visit(node)
    AnnotateStacks(finder.push_pop_pairs, strict).visit(node)
    return node
Find pushes and pops to the stack and annotate them as such. Args: node: An AST node that might contain stack pushes and pops. strict: A boolean indicating whether to stringently test whether each push and pop are matched. This is not always possible when taking higher-order derivatives of code generated in split-motion. Returns: node: The node passed in, but with pushes and pops annotated in AST nodes.
juraj-google-style
def Append(self, value=None, **kwarg):
    """Coerce `value` to this array's rdf_type and append it.

    Args:
        value: The new data to append to the array.
        **kwarg: Create a new element from these keywords.

    Returns:
        The value which was added; callers may modify it further and the
        changes will be propagated here.

    Raises:
        ValueError: If the value to add is not allowed.
    """
    expected_type = self.rdf_type
    if expected_type is not None:
        wrong_class = (isinstance(value, rdfvalue.RDFValue)
                       and value.__class__ != expected_type)
        if wrong_class:
            raise ValueError('Can only accept %s' % expected_type)
        try:
            # Re-wrap in the declared type so stored values are uniform.
            value = expected_type(value, **kwarg)
        except (TypeError, ValueError):
            raise ValueError('Unable to initialize %s from type %s' % (self.__class__.__name__, type(value)))
    self.content.Append(DataBlob().SetValue(value))
Add another member to the array. Args: value: The new data to append to the array. **kwarg: Create a new element from these keywords. Returns: The value which was added. This can be modified further by the caller and changes will be propagated here. Raises: ValueError: If the value to add is not allowed.
codesearchnet
def run_from_ufos(self, ufos, output=(), **kwargs):
    """Run the toolchain from UFO sources.

    Args:
        ufos: List of UFO sources, as either paths, a glob string, or
            opened objects.
        output: List of output formats to generate ('ufo', 'otf', 'ttf').
        kwargs: Arguments passed along to save_otfs.

    Raises:
        FontmakeError: if `ufos` is neither a Font, a path/glob string,
            nor a list of these.
    """
    if set(output) == {"ufo"}:
        # UFOs are the inputs; nothing to build.
        return
    ufo_paths = []
    # NOTE(review): `basestring` implies this is Python-2-era code.
    if isinstance(ufos, basestring):
        ufo_paths = glob.glob(ufos)
        ufos = [Font(x) for x in ufo_paths]
    elif isinstance(ufos, list):
        ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos]
        ufo_paths = [x.path for x in ufos]
    else:
        raise FontmakeError(
            "UFOs parameter is neither a defcon.Font object, a path or a glob, "
            "nor a list of any of these.",
            ufos,
        )
    need_reload = False
    if "otf" in output:
        self.build_otfs(ufos, **kwargs)
        need_reload = True
    if "ttf" in output:
        if need_reload:
            # The OTF build may have mutated the in-memory fonts; reload
            # pristine copies from disk before building TTFs.
            ufos = [Font(path) for path in ufo_paths]
        self.build_ttfs(ufos, **kwargs)
        need_reload = True
Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs.
juraj-google-style
def summarize(values, epsilon):
    """Reduce a 1D sequence of values to a quantile summary.

    Based on numpy.quantiles, but structured so that summaries from
    multiple data sets can later be combined. The target number of bins is
    the reciprocal of `epsilon`; elements are sampled from the sorted data
    at the corresponding interval, and each sampled boundary is weighted by
    the number of original elements it stands in for. If the target number
    of bins exceeds the number of values, the whole (sorted) array is
    returned with weights of 1.

    Args:
        values: 1D `np.ndarray` to be summarized.
        epsilon: A `'float32'` that determines the approximate desired
            precision.

    Returns:
        A 2D `np.ndarray`: first row is the sampled partition values,
        second row the corresponding weights (counts).
    """
    flat = np.sort(np.reshape(values, [-1]))
    target_bins = 1.0 / epsilon
    # Spacing between sampled elements; clamp the stride to >= 1 so we
    # always make progress even when there are fewer values than bins.
    spacing = np.size(flat) / target_bins
    stride = max(spacing, 1)
    boundaries = flat[int(spacing)::int(stride)]
    # Each retained boundary represents `stride` original elements.
    weights = np.ones_like(boundaries) * stride
    return np.stack([boundaries, weights])
Reduce a 1D sequence of values to a summary. This algorithm is based on numpy.quantiles but modified to allow for intermediate steps between multiple data sets. It first finds the target number of bins as the reciprocal of epsilon and then takes the individual values spaced at appropriate intervals to arrive at that target. The final step is to return the corresponding counts between those values If the target num_bins is larger than the size of values, the whole array is returned (with weights of 1). Args: values: 1D `np.ndarray` to be summarized. epsilon: A `'float32'` that determines the approximate desired precision. Returns: A 2D `np.ndarray` that is a summary of the inputs. First column is the interpolated partition values, the second is the weights (counts).
github-repos
def pnum_to_group(mesh_shape, group_dims, pnum):
    """Compute the group number for a grouped allreduce.

    A processor's group is identified by its coordinates along all mesh
    dimensions that are NOT being reduced over.

    Args:
        mesh_shape: a Shape.
        group_dims: a list of integers (the dimensions reduced over).
        pnum: an integer processor number.

    Returns:
        an integer group number.
    """
    full_coord = pnum_to_processor_coordinates(mesh_shape, pnum)
    # Drop the reduced dimensions from both the shape and the coordinates;
    # what remains uniquely identifies the group.
    kept_shape = Shape(
        [dim for axis, dim in enumerate(mesh_shape) if axis not in group_dims])
    kept_coord = [c for axis, c in enumerate(full_coord)
                  if axis not in group_dims]
    return processor_coordinates_to_pnum(kept_shape, kept_coord)
Group number for grouped allreduce. Args: mesh_shape: a Shape group_dims: a list of integers (the dimensions reduced over) pnum: an integer Returns: an integer
juraj-google-style
def closest(self, coords=[], **kwargs):
    """Snap coordinate(s) to the closest coordinate in the Dataset.

    Args:
        coords: List of coordinates expressed as tuples
        **kwargs: Coordinates defined as keyword pairs

    Returns:
        List of tuples of the snapped coordinates

    Raises:
        NotImplementedError: Raised if snapping is not supported
    """
    if self.ndims > 1:
        raise NotImplementedError("Closest method currently only "
                                  "implemented for 1D Elements")

    if kwargs:
        # Only a single keyword dimension is supported.
        if len(kwargs) > 1:
            raise NotImplementedError("Closest method currently only "
                                      "supports 1D indexes")
        sample, = kwargs.values()
        coords = sample if isinstance(sample, list) else [sample]

    xs = self.dimension_values(0)
    # String/object dtypes have no meaningful distance metric.
    if xs.dtype.kind in 'SO':
        raise NotImplementedError("Closest only supported for numeric types")

    # For each requested coordinate, pick the data value that minimizes
    # the absolute distance.
    return [xs[np.argmin(np.abs(xs - c))] for c in coords]
Snaps coordinate(s) to closest coordinate in Dataset Args: coords: List of coordinates expressed as tuples **kwargs: Coordinates defined as keyword pairs Returns: List of tuples of the snapped coordinates Raises: NotImplementedError: Raised if snapping is not supported
juraj-google-style
def binary_cross_entropy_with_logits(input_,
                                     target,
                                     name=PROVIDED,
                                     loss_weight=None,
                                     per_example_weights=None,
                                     per_output_weights=None):
  """Calculates the binary cross entropy of the input_ vs inputs.

  Expects unscaled logits. Do not pass in results of sigmoid operation.

  Args:
    input_: A rank 2 Tensor or a Pretty Tensor holding the logits.
    target: A rank 2 tf.float32 or tf.float64 tensor containing class label
      probabilities. Note that binary cross entropy is equivalent to
      logistic loss.
    name: The optional name.
    loss_weight: A scalar multiplier for the loss.
    per_example_weights: A `Tensor` with a weight per example.
    per_output_weights: A weight `Tensor` that is the same shape as the
      input_ that can be used to scale individual prediction losses. See
      `tf.tile` to turn a per-column weight vector into a
      `per_output_weights` `Tensor`.

  Returns:
    Binary cross entropy loss after sigmoid operation.

  Raises:
    ValueError: if target is None or the type is not float or double.
  """
  if (target is None):
    raise ValueError('target must be set')
  target = _convert_and_assert_tensors_compatible(input_, target)
  # Side effects only: record precision/recall and positive-activation-count
  # summaries via the bookkeeper; nothing below depends on these values.
  with tf.name_scope('stats'):
    (selected, sum_retrieved, sum_relevant) = _compute_precision_recall(
        input_, target, 0, per_example_weights)
    precision = (selected / sum_retrieved)
    recall = (selected / sum_relevant)
    # Averages can only be tracked when the shape is statically known.
    if precision.get_shape().is_fully_defined():
      input_.bookkeeper.add_average_summary(
          precision, ('average_precision_%s' % name))
    if recall.get_shape().is_fully_defined():
      input_.bookkeeper.add_average_summary(
          recall, ('average_recall_%s' % name))
    input_.bookkeeper.add_scalar_summary(
        tf.reduce_sum(tf.to_float(tf.greater(input_, 0))), 'activations')
  if (per_output_weights is not None):
    per_output_weights = tf.convert_to_tensor(
        per_output_weights,
        name='per_output_weights',
        dtype=input_.dtype.base_dtype)
    # The per-output weights must be shape-compatible with the logits.
    input_.get_shape().assert_is_compatible_with(
        per_output_weights.get_shape())

  def _batch_sum_bce(x, target, name='binary_cross_entropy'):
    # Per-element BCE on logits, optionally scaled per output, then summed
    # over each example. Captures per_output_weights from the closure.
    logits = functions.binary_cross_entropy_loss_with_logits(
        x, target, name=name)
    if (per_output_weights is not None):
      logits *= per_output_weights
    return functions.reduce_batch_sum(logits)

  return apply_regression(
      input_,
      _batch_sum_bce,
      target,
      [],
      name=('%s_bce_loss' % name),
      loss_weight=loss_weight,
      per_example_weights=per_example_weights)
Calculates the binary cross entropy of the input_ vs inputs. Expects unscaled logits. Do not pass in results of sigmoid operation. Args: input_: A rank 2 Tensor or a Pretty Tensor holding the logits. target: A rank 2 tf.float32 or tf.float64 tensor containing class label probabilities. Note that binary cross entropy is equivalent to logistic loss. name: The optional name. loss_weight: A scalar multiplier for the loss. per_example_weights: A `Tensor` with a weight per example. per_output_weights: A weight `Tensor` that is the same shape as the input_ that can be used to scale individual prediction losses. See `tf.tile` to turn a per-column weight vector into a `per_output_weights` `Tensor`. Returns: Binary cross entropy loss after sigmoid operation. Raises: ValueError: if target is None or the type is not float or double.
codesearchnet
def __init__(self, xid=None, multipart_type=None, flags=0, body=b''):
    """Create a MultipartRequest with the optional parameters below.

    Args:
        xid (int): xid to the header.
        multipart_type (int): One of the OFPMP_* constants.
        flags (int): OFPMPF_REQ_* flags.
        body (bytes): Body of the request.
    """
    # The header xid is handled by the parent class.
    super().__init__(xid)
    # Remaining fields are stored directly on the instance.
    self.multipart_type, self.flags, self.body = multipart_type, flags, body
Create a MultipartRequest with the optional parameters below. Args: xid (int): xid to the header. multipart_type (int): One of the OFPMP_* constants. flags (int): OFPMPF_REQ_* flags. body (bytes): Body of the request.
juraj-google-style
def get_port(self, id_or_uri, port_id_or_uri):
    """Get a single interconnect port.

    Args:
        id_or_uri: Can be either the interconnect id or uri.
        port_id_or_uri: The interconnect port id or uri.

    Returns:
        dict: The interconnect port.
    """
    # Resolve the interconnect/port pair into the full "ports" subresource
    # uri, then fetch it.
    port_uri = self._client.build_subresource_uri(
        id_or_uri, port_id_or_uri, "ports")
    return self._client.get(port_uri)
Gets an interconnect port. Args: id_or_uri: Can be either the interconnect id or uri. port_id_or_uri: The interconnect port id or uri. Returns: dict: The interconnect port.
juraj-google-style
def parse_timers(self):
    """Parse the TIMER sections reported in the ABINIT output files.

    Returns:
        :class:`AbinitTimerParser` object holding the parsed data.
    """
    # Only feed the parser output files that actually exist on disk.
    candidate_paths = [task.output_file.path for task in self]
    existing = [p for p in candidate_paths if os.path.exists(p)]
    parser = AbinitTimerParser()
    parser.parse(existing)
    return parser
Parse the TIMER section reported in the ABINIT output files. Returns: :class:`AbinitTimerParser` object
codesearchnet
def authenticate(self, username, password, attribute=None, base_dn=None,
                 search_filter=None, search_scope=SUBTREE):
    """Attempts to bind a user to the LDAP server.

    If ``username`` is not already a valid DN, it is first resolved to a
    DN by searching ``base_dn`` with ``(attribute=username)``.

    Args:
        username (str): DN or the username to attempt to bind with.
        password (str): The password of the username.
        attribute (str): The LDAP attribute for the username.
        base_dn (str): The LDAP basedn to search on.
        search_filter (str): LDAP searchfilter to attempt the user
            search with.
        search_scope: Scope used for the user search (defaults to
            SUBTREE).

    Returns:
        bool: ``True`` if successful or ``False`` if the credentials are
        invalid.
    """
    # Check whether the supplied username is already a valid DN.
    valid_dn = False
    try:
        parse_dn(username)
        valid_dn = True
    except LDAPInvalidDnError:
        pass
    if (valid_dn is False):
        # Not a DN: look the user up and bind with the DN of the first
        # search result instead.
        user_filter = '({0}={1})'.format(attribute, username)
        if (search_filter is not None):
            user_filter = '(&{0}{1})'.format(user_filter, search_filter)
        try:
            self.connection.search(base_dn, user_filter, search_scope,
                                   attributes=[attribute])
            response = self.connection.response
            # IndexError here means the search returned no entries.
            username = response[0]['dn']
        except (LDAPInvalidDnError, LDAPInvalidFilterError, IndexError):
            return False
    # Attempt the actual bind; unbind immediately since only the
    # success/failure outcome matters.
    try:
        conn = self.connect(username, password)
        conn.unbind()
        return True
    except LDAPBindError:
        return False
Attempts to bind a user to the LDAP server.

Args:
    username (str): DN or the username to attempt to bind with.
    password (str): The password of the username.
    attribute (str): The LDAP attribute for the username.
    base_dn (str): The LDAP basedn to search on.
    search_filter (str): LDAP searchfilter to attempt the user search with.
    search_scope: LDAP scope to use for the user search (defaults to
        SUBTREE).

Returns:
    bool: ``True`` if successful or ``False`` if the credentials are invalid.
codesearchnet
def GetFeedItemIdsForCampaign(campaign_feed):
    """Gets the Feed Item Ids used by a campaign through a given Campaign Feed.

    Args:
        campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.

    Returns:
        A set of Feed Item IDs.
    """
    feed_item_ids = set()
    matching_function = campaign_feed.get('matchingFunction') or {}
    operands = matching_function.get('lhsOperand')
    if not operands:
        return feed_item_ids

    first_operand = operands[0]
    # Only matching functions of the form "FEED_ITEM_ID IN (...)" carry
    # feed item ids on their right-hand side.
    if (first_operand['FunctionArgumentOperand.Type'] == 'RequestContextOperand'
            and first_operand['contextType'] == 'FEED_ITEM_ID'
            and matching_function['operator'] == 'IN'):
        for operand in matching_function['rhsOperand']:
            if operand['xsi_type'] == 'ConstantOperand':
                feed_item_ids.add(operand['longValue'])
    return feed_item_ids
Gets the Feed Item Ids used by a campaign through a given Campaign Feed.

Args:
    campaign_feed: the Campaign Feed we are retrieving Feed Item Ids from.

Returns:
    A set of Feed Item IDs.
juraj-google-style
def _read_single(parser, filepath): from os import path global packages if path.isfile(filepath): parser.readfp(open(filepath))
Reads a single config file into the parser, silently failing if the file does not exist. Args: parser (ConfigParser): parser to read the file into. filepath (str): full path to the config file.
juraj-google-style
def retrieve_pwd_from_config(msg, cfg):
    """Retrieve auth from profile configuration and set it in ``msg.auth``.

    The lookup key is ``"<profile>_<message type>"`` where the message type
    is the lowercased class name of ``msg``.

    Args:
        msg: (Message class) an instance of a message class.
        cfg: (jsonconfig.Config) config instance.
    """
    key = '{}_{}'.format(msg.profile, msg.__class__.__name__.lower())
    parts = cfg.pwd[key].split(' :: ')
    # A single entry is stored as-is; a " :: "-separated entry becomes a
    # tuple of its components.
    msg.auth = parts[0] if len(parts) == 1 else tuple(parts)
Retrieve auth from profile configuration and set in msg.auth attr. Args: :msg: (Message class) an instance of a message class. :cfg: (jsonconfig.Config) config instance.
codesearchnet
def output(self, filename):
    """Output the inheritance relation between contracts.

    Note: ``filename`` is not used; the report is emitted via
    ``self.info``.

    Args:
        filename(string)
    """
    info = 'Inheritance\n'

    if not self.contracts:
        return

    # First section: every contract followed by its base contracts -
    # immediate bases first, then the transitive-only ones in brackets.
    info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')
    info += green(' [Not_Immediate_Base_Contracts]')
    for child in self.contracts:
        info += blue(f'\n+ {child.name}')
        if child.inheritance:
            immediate = child.immediate_inheritance
            not_immediate = [i for i in child.inheritance if i not in immediate]
            info += ' -> ' + green(", ".join(map(str, immediate)))
            if not_immediate:
                info += ", [" + green(", ".join(map(str, not_immediate))) + "]"

    # Second section: the reverse view - every contract followed by the
    # contracts deriving from it, immediate children first.
    info += green('\n\nBase_Contract -> ') + blue('Immediate_Child_Contracts')
    info += blue(' [Not_Immediate_Child_Contracts]')
    for base in self.contracts:
        info += green(f'\n+ {base.name}')
        children = list(self._get_child_contracts(base))
        if children:
            immediate = [child for child in children if base in child.immediate_inheritance]
            not_immediate = [child for child in children if not child in immediate]
            info += ' -> ' + blue(", ".join(map(str, immediate)))
            if not_immediate:
                info += ', [' + blue(", ".join(map(str, not_immediate))) + ']'
    self.info(info)
Output the inheritance relation between contracts.

The filename argument is not used.

Args:
    filename(string)
juraj-google-style
def _get_ami_file(region='us-east-1'):
    """Fetch the AMI lookup file for a region from Gitlab.

    Args:
        region (str): AWS Region to find AMI ID.

    Returns:
        str: Contents of the lookup file, in json format.
    """
    LOG.info("Getting AMI from Gitlab")
    lookup = FileLookup(git_short='devops/ansible')
    # AMI ids live per-region under scripts/<region>.json on master.
    ami_file = 'scripts/{0}.json'.format(region)
    contents = lookup.remote_file(filename=ami_file, branch='master')
    LOG.debug('AMI file contents in %s: %s', ami_file, contents)
    return contents
Get file from Gitlab. Args: region (str): AWS Region to find AMI ID. Returns: str: Contents in json format.
juraj-google-style
def search(self, query, results=10, suggestion=False):
    """Search the wiki for pages with similar titles.

    Args:
        query (str): Page title to search for.
        results (int): Number of pages to return.
        suggestion (bool): Also request a spelling suggestion.

    Returns:
        tuple or list: tuple ``(list results, suggestion)`` if
        *suggestion* is **True**; list of results otherwise.
    """
    self._check_query(query, 'Query must be specified')

    params = {
        'list': 'search',
        'srprop': '',
        'srlimit': results,
        'srsearch': query,
    }
    if suggestion:
        # Ask the API to include a "did you mean" suggestion.
        params['srinfo'] = 'suggestion'

    raw = self.wiki_request(params)
    self._check_error_response(raw, query)

    titles = [entry['title'] for entry in raw['query']['search']]
    if not suggestion:
        return titles

    info = raw['query'].get('searchinfo')
    return (titles, info['suggestion'] if info else None)
Search for similar titles Args: query (str): Page title results (int): Number of pages to return suggestion (bool): Use suggestion Returns: tuple or list: tuple (list results, suggestion) if \ suggestion is **True**; list of results \ otherwise
codesearchnet
def disconnect(self, container, *args, **kwargs):
    """Disconnect a container from this network.

    Args:
        container (str): Container to disconnect from this network, as
            either an ID, name, or
            :py:class:`~docker.models.containers.Container` object.
        force (bool): Force the container to disconnect from a network.
            Default: ``False``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Accept a Container object as well as a raw id/name string.
    container_id = container.id if isinstance(container, Container) else container
    return self.client.api.disconnect_container_from_network(
        container_id, self.id, *args, **kwargs)
Disconnect a container from this network. Args: container (str): Container to disconnect from this network, as either an ID, name, or :py:class:`~docker.models.containers.Container` object. force (bool): Force the container to disconnect from a network. Default: ``False`` Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def SetDefault(self, name, value):
    """Change the default value (and current value) of the named flag.

    Call this method at the top level of a module to avoid overwriting
    the value passed at the command line.

    Args:
        name: A string, the name of the flag to modify.
        value: The new default value.

    Raises:
        UnrecognizedFlagError: When there is no registered flag named name.
        IllegalFlagValueError: When value is not valid.
    """
    flags_by_name = self.FlagDict()
    if name not in flags_by_name:
        # Unknown flags are routed through the dedicated handler.
        self._SetUnknownFlag(name, value)
        return
    if self.IsParsed():
        # Changing a default after parsing overrides whatever the user
        # passed on the command line - warn loudly about it.
        logging.warn(
            'FLAGS.SetDefault called on flag "%s" after flag parsing. Call this '
            'method at the top level of a module to avoid overwriting the value '
            'passed at the command line.', name)
    flag = flags_by_name[name]
    flag._set_default(value)
    # Re-run the validators so an invalid new default is rejected.
    self._AssertValidators(flag.validators)
Changes the default value (and current value) of the named flag object. Call this method at the top level of a module to avoid overwriting the value passed at the command line. Args: name: A string, the name of the flag to modify. value: The new default value. Raises: UnrecognizedFlagError: When there is no registered flag named name. IllegalFlagValueError: When value is not valid.
juraj-google-style
def format_formula(formula):
    """Convert a chemical formula string into latex for labelling purposes.

    Runs of digits become latex subscripts, e.g. ``"Fe2O3"`` becomes
    ``"$Fe_{2}O_{3}$"``.

    Args:
        formula (str): Chemical formula

    Returns:
        str: latex-formatted formula wrapped in ``$...$``.
    """
    parts = []
    digits = ""
    for ch in formula:
        if ch.isdigit():
            # Accumulate consecutive digits into one subscript group.
            digits += ch
        else:
            if digits:
                parts.append("_{%s}" % digits)
                digits = ""
            parts.append(ch)
    # Flush a trailing digit run (formula ending in a number).
    if digits:
        parts.append("_{%s}" % digits)
    return r"$%s$" % "".join(parts)
Converts str of chemical formula into latex format for labelling purposes Args: formula (str): Chemical formula
juraj-google-style
def _serialize_normalized_array(array, fmt='png', quality=70):
    """Encode a normalized uint array as image bytes.

    Args:
        array: NumPy array of dtype uint8 and range 0 to 255
        fmt: string describing desired file format, defaults to 'png'
        quality: specifies compression quality from 0 to 100 for lossy
            formats

    Returns:
        image data as BytesIO buffer
    """
    dtype = array.dtype
    # Guard against accidentally passing float or signed data.
    assert np.issubdtype(dtype, np.unsignedinteger)
    assert np.max(array) <= np.iinfo(dtype).max
    # A trailing axis of size 1 is not a valid channel dimension here.
    assert array.shape[-1] > 1

    buffer = BytesIO()
    PIL.Image.fromarray(array).save(buffer, fmt, quality=quality)
    return buffer.getvalue()
Given a normalized array, returns byte representation of image encoding. Args: array: NumPy array of dtype uint8 and range 0 to 255 fmt: string describing desired file format, defaults to 'png' quality: specifies compression quality from 0 to 100 for lossy formats Returns: image data as BytesIO buffer
juraj-google-style
def validate(data):
    """Validate data against the schema.

    Args:
        data (dict): data structure to validate.

    Returns:
        dict: data as provided with defaults applied where defined in the
        schema, or ``None`` when validation fails.
    """
    try:
        return Schema(Validator.SCHEMA).validate(data)
    except SchemaError as exc:
        # Log the validation failure instead of propagating it; callers
        # treat a None result as "invalid".
        logging.getLogger(__name__).error(exc)
        return None
Validate data against the schema. Args: data(dict): data structure to validate. Returns: dict: data as provided and defaults where defined in schema.
codesearchnet
def remove_token(self, *, payer_id, credit_card_token_id):
    """Delete a tokenized credit card register.

    Args:
        payer_id: id of the payer the token belongs to.
        credit_card_token_id: id of the credit card token to delete.

    Returns:
        The response of the REMOVE_TOKEN request.
    """
    client = self.client
    payload = {
        'language': client.language.value,
        'command': PaymentCommand.REMOVE_TOKEN.value,
        'merchant': {
            'apiLogin': client.api_login,
            'apiKey': client.api_key,
        },
        'removeCreditCardToken': {
            'payerId': payer_id,
            'creditCardTokenId': credit_card_token_id,
        },
        'test': client.is_test,
    }
    return client._post(self.url, json=payload)
This feature allows you to delete a tokenized credit card register.

Args:
    payer_id: id of the payer the credit card token belongs to.
    credit_card_token_id: id of the credit card token to delete.

Returns:
    The response of the REMOVE_TOKEN request.
codesearchnet