code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def GetDefinitionByName(self, name):
    """Retrieves a specific data type definition by name.

    Args:
        name (str): name of the data type definition.

    Returns:
        DataTypeDefinition: data type definition or None if not available.
    """
    key = name.lower()
    if key in self._definitions:
        return self._definitions.get(key, None)
    # Fall back to the alias table; note the alias lookup uses the original
    # (non-lowercased) name, mirroring the existing behavior.
    alias_key = self._aliases.get(name, None)
    return self._definitions.get(alias_key, None)
Retrieves a specific data type definition by name. Args: name (str): name of the data type definition. Returns: DataTypeDefinition: data type definition or None if not available.
juraj-google-style
def grabEmails(emails=None, emailsFile=None, nicks=None, nicksFile=None, domains=EMAIL_DOMAINS, excludeDomains=None):
    """Generates a list of candidate email addresses.

    Exactly one source is used, checked in this order: emails, emailsFile,
    nicks, nicksFile.

    Args:
        emails: Any premade list of emails.
        emailsFile: Filepath to the emails file (one per line).
        nicks: A list of aliases.
        nicksFile: Filepath to the aliases file (one per line).
        domains: Domains where the aliases will be tested.
        excludeDomains: Domains to be excluded from the created list.
            Defaults to no exclusions.  (The original used a mutable
            default `[]`, which is a shared-state hazard.)

    Returns:
        list: the list of emails that will be verified.
    """
    if excludeDomains is None:
        excludeDomains = []

    def _expand(aliases):
        # Build alias@domain for every domain not excluded; this logic was
        # previously duplicated for the nicks and nicksFile branches.
        return [n + "@" + d for n in aliases for d in domains if d not in excludeDomains]

    email_candidates = []
    if emails is not None:
        email_candidates = emails
    elif emailsFile is not None:
        with open(emailsFile, "r") as iF:
            email_candidates = iF.read().splitlines()
    elif nicks is not None:
        email_candidates = _expand(nicks)
    elif nicksFile is not None:
        with open(nicksFile, "r") as iF:
            email_candidates = _expand(iF.read().splitlines())
    return email_candidates
Method that generates a list of emails. Args: ----- emails: Any premade list of emails. emailsFile: Filepath to the emails file (one per line). nicks: A list of aliases. nicksFile: Filepath to the aliases file (one per line). domains: Domains where the aliases will be tested. excludeDomains: Domains to be excluded from the created list. Returns: -------- list: the list of emails that will be verified.
juraj-google-style
def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[dict]=None) -> list[str]:
    """Save the module files defining a custom model/configuration/tokenizer in `folder`.

    Copies the file that defines `obj`'s class (plus its relative imports) into
    `folder`, and optionally registers an `auto_map` entry in `config`.

    Args:
        obj (`Any`): The object for which to save the module files.
        folder (`str` or `os.PathLike`): The folder where to save.
        config (`PretrainedConfig` or dictionary, `optional`): A config (or a
            list/tuple of configs) in which to register the auto_map
            corresponding to this custom object.

    Returns:
        `list[str]`: The list of files saved, or None if `obj` was defined in
        `__main__` (in which case only a warning is emitted).
    """
    # Objects defined in __main__ have no importable module file to copy.
    if obj.__module__ == '__main__':
        logger.warning(f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put this code in a separate module so we can include it in the saved folder and make it easier to share via the Hub.")
        return

    def _set_auto_map_in_config(_config):
        # Register "<module>.<ClassName>" (or a slow/fast tokenizer pair) under
        # obj._auto_class in the config's auto_map.
        module_name = obj.__class__.__module__
        last_module = module_name.split('.')[-1]
        full_name = f'{last_module}.{obj.__class__.__name__}'
        if 'Tokenizer' in full_name:
            # Tokenizers are registered as a (slow, fast) pair; only one of the
            # two slots may be filled depending on the class at hand.
            slow_tokenizer_class = None
            fast_tokenizer_class = None
            if obj.__class__.__name__.endswith('Fast'):
                fast_tokenizer_class = f'{last_module}.{obj.__class__.__name__}'
                if getattr(obj, 'slow_tokenizer_class', None) is not None:
                    slow_tokenizer = getattr(obj, 'slow_tokenizer_class')
                    slow_tok_module_name = slow_tokenizer.__module__
                    last_slow_tok_module = slow_tok_module_name.split('.')[-1]
                    slow_tokenizer_class = f'{last_slow_tok_module}.{slow_tokenizer.__name__}'
            else:
                slow_tokenizer_class = f'{last_module}.{obj.__class__.__name__}'
            full_name = (slow_tokenizer_class, fast_tokenizer_class)
        if isinstance(_config, dict):
            auto_map = _config.get('auto_map', {})
            auto_map[obj._auto_class] = full_name
            _config['auto_map'] = auto_map
        elif getattr(_config, 'auto_map', None) is not None:
            _config.auto_map[obj._auto_class] = full_name
        else:
            _config.auto_map = {obj._auto_class: full_name}
    # `config` may be a single config or a sequence of them.
    if isinstance(config, (list, tuple)):
        for cfg in config:
            _set_auto_map_in_config(cfg)
    elif config is not None:
        _set_auto_map_in_config(config)
    result = []
    # Copy the defining module file, then every file it relatively imports.
    object_file = sys.modules[obj.__module__].__file__
    dest_file = Path(folder) / Path(object_file).name
    shutil.copy(object_file, dest_file)
    result.append(dest_file)
    for needed_file in get_relative_import_files(object_file):
        dest_file = Path(folder) / Path(needed_file).name
        shutil.copy(needed_file, dest_file)
        result.append(dest_file)
    return result
Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally adds the proper fields in a config. Args: obj (`Any`): The object for which to save the module files. folder (`str` or `os.PathLike`): The folder where to save. config (`PretrainedConfig` or dictionary, `optional`): A config in which to register the auto_map corresponding to this custom object. Returns: `List[str]`: The list of files saved.
github-repos
def gc_velocity_update(particle, social, state):
    """Guaranteed convergence (GC) velocity update.

    Args:
        particle: cipy.algorithms.pso.Particle: Particle to update the
            velocity for.
        social: cipy.algorithms.pso.Particle: The social best for the
            particle.
        state: cipy.algorithms.pso.State: The state of the PSO algorithm.

    Returns:
        numpy.ndarray: the calculated velocity.
    """
    best_position = state.swarm[gbest_idx(state.swarm)].position
    # Only the global-best particle uses the GC update equation; every other
    # particle falls back to the standard velocity update.
    if not np.array_equal(best_position, particle.position):
        return std_velocity(particle, social, state)

    params = state.params
    dimension = particle.position.size
    random_component = state.rng.uniform(0.0, 1.0, dimension)
    new_velocity = __gc_velocity_equation__(params['inertia'], params['rho'],
                                            random_component, particle,
                                            best_position)
    # Clamp each component to the configured maximum velocity.
    return __clamp__(new_velocity, params['v_max'])
Guaranteed convergence velocity update. Args: particle: cipy.algorithms.pso.Particle: Particle to update the velocity for. social: cipy.algorithms.pso.Particle: The social best for the particle. state: cipy.algorithms.pso.State: The state of the PSO algorithm. Returns: numpy.ndarray: the calculated velocity.
juraj-google-style
def parseTree(self, root, state: ParseState) -> List[Dict]:
    """Parses the XML AST tree recursively into a JSON-serializable AST.

    Args:
        root: The current root of the tree.
        state: The current state of the tree defined by an object of the
            ParseState class.

    Returns:
        A list of dicts describing the structure of the Fortran file.
    """
    handler = self.AST_TAG_HANDLERS.get(root.tag)
    if handler is not None:
        return handler(root, state)
    if root.tag in self.libRtns:
        return self.process_libRtn(root, state)
    # No dedicated handler for this tag: recurse into the children and
    # concatenate their results.
    prog = []
    for child in root:
        prog.extend(self.parseTree(child, state))
    return prog
Parses the XML ast tree recursively to generate a JSON AST which can be ingested by other scripts to generate Python scripts. Args: root: The current root of the tree. state: The current state of the tree defined by an object of the ParseState class. Returns: ast: A JSON ast that defines the structure of the Fortran file.
juraj-google-style
def ParseArguments(args):
    """Parses the command line arguments.

    This may set the output format and verbosity level as side-effects, and
    mutates several module-level settings (_root, _repository, _line_length,
    _excludes, _valid_extensions, _header_extensions, _quiet).

    Args:
        args: The command line arguments.

    Returns:
        The list of filenames to lint.
    """
    try:
        (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 'counting=', 'filter=', 'root=', 'repository=', 'linelength=', 'extensions=', 'exclude=', 'headers=', 'quiet', 'recursive'])
    except getopt.GetoptError:
        # PrintUsage exits the program; control does not return here.
        PrintUsage('Invalid arguments.')
    verbosity = _VerboseLevel()
    output_format = _OutputFormat()
    filters = ''
    counting_style = ''
    recursive = False
    for (opt, val) in opts:
        if (opt == '--help'):
            PrintUsage(None)
        elif (opt == '--output'):
            if (val not in ('emacs', 'vs7', 'eclipse', 'junit')):
                PrintUsage('The only allowed output formats are emacs, vs7, eclipse and junit.')
            output_format = val
        elif (opt == '--verbose'):
            verbosity = int(val)
        elif (opt == '--filter'):
            filters = val
            # An empty --filter prints the available categories and exits.
            if (not filters):
                PrintCategories()
        elif (opt == '--counting'):
            if (val not in ('total', 'toplevel', 'detailed')):
                PrintUsage('Valid counting options are total, toplevel, and detailed')
            counting_style = val
        elif (opt == '--root'):
            global _root
            _root = val
        elif (opt == '--repository'):
            global _repository
            _repository = val
        elif (opt == '--linelength'):
            global _line_length
            try:
                _line_length = int(val)
            except ValueError:
                PrintUsage('Line length must be digits.')
        elif (opt == '--exclude'):
            global _excludes
            if (not _excludes):
                _excludes = set()
            # Excludes are glob patterns, expanded immediately.
            _excludes.update(glob.glob(val))
        elif (opt == '--extensions'):
            global _valid_extensions
            try:
                _valid_extensions = set(val.split(','))
            except ValueError:
                PrintUsage('Extensions must be comma seperated list.')
        elif (opt == '--headers'):
            global _header_extensions
            try:
                _header_extensions = set(val.split(','))
            except ValueError:
                PrintUsage('Extensions must be comma seperated list.')
        elif (opt == '--recursive'):
            recursive = True
        elif (opt == '--quiet'):
            global _quiet
            _quiet = True
    if (not filenames):
        PrintUsage('No files were specified.')
    if recursive:
        filenames = _ExpandDirectories(filenames)
    if _excludes:
        filenames = _FilterExcludedFiles(filenames)
    _SetOutputFormat(output_format)
    _SetVerboseLevel(verbosity)
    _SetFilters(filters)
    _SetCountingStyle(counting_style)
    return filenames
Parses the command line arguments. This may set the output format and verbosity level as side-effects. Args: args: The command line arguments. Returns: The list of filenames to lint.
codesearchnet
def return_item_count_on_page(self, page=1, total_items=1):
    """Return the number of items on the given page.

    Args:
        page: The page to test for.
        total_items: The total item count.

    Returns:
        Integer - the calculated number of items on the page (0 if the page
        lies beyond the last item).
    """
    up_to_page = (page - 1) * self.page_items
    remaining = total_items - up_to_page
    if remaining <= 0:
        # Page lies past the end of the items: nothing on it.  The original
        # implementation fell through here and implicitly returned None.
        return 0
    return min(remaining, self.page_items)
Return the number of items on page. Args: * page = The Page to test for * total_items = the total item count Returns: * Integer - Which represents the calculated number of items on page.
juraj-google-style
def setPANID(self, xPAN):
    """Set the Thread Network PAN ID.

    Args:
        xPAN: a given PAN ID, as an int or a hex-format string.

    Returns:
        True: successful to set the Thread Network PAN ID.
        False: fail to set the Thread Network PAN ID.
        None: on unexpected error (logged to the debug logger).
    """
    # Python 3 fix: the original used Python 2 print statements and the
    # `except Exception, e` syntax, which are syntax errors on Python 3.
    print('%s call setPANID' % self.port)
    print(xPAN)
    try:
        if not isinstance(xPAN, str):
            panid = str(hex(xPAN))
        else:
            # Bug fix: a string input previously left `panid` empty,
            # producing malformed wpanctl commands.
            panid = xPAN
        print(panid)
        cmd = WPANCTL_CMD + 'setprop -s Network:PANID %s' % panid
        datasetCmd = WPANCTL_CMD + 'setprop Dataset:PanId %s' % panid
        self.hasActiveDatasetToCommit = True
        return self.__sendCommand(cmd)[0] != 'Fail' and self.__sendCommand(datasetCmd)[0] != 'Fail'
    except Exception as e:
        ModuleHelper.WriteIntoDebugLogger('setPANID() Error: ' + str(e))
set Thread Network PAN ID Args: xPAN: a given PAN ID in hex format Returns: True: successful to set the Thread Network PAN ID False: fail to set the Thread Network PAN ID
juraj-google-style
def _transition_instrumentation_block(self, instrumentation_block, new_state=_InstrumentationBlockStates.UNKNOWN):
    """Transitions and finishes the current instrumentation block.

    Args:
        instrumentation_block: _InstrumentationBlock, the current
            instrumentation block to finish.
        new_state: _InstrumentationBlockState, the next state for the
            parser to transition to.

    Returns:
        The new instrumentation block to use for storing parsed
        instrumentation output.
    """
    for formatter in self._create_formatters(instrumentation_block, new_state):
        record = formatter.create_test_record(self.TAG)
        # Formatters may legitimately produce no record; skip those.
        if not record:
            continue
        self.results.add_record(record)
        self.summary_writer.dump(record.to_dict(),
                                 records.TestSummaryEntryType.RECORD)
    return instrumentation_block.transition_state(new_state=new_state)
Transitions and finishes the current instrumentation block. Args: instrumentation_block: _InstrumentationBlock, the current instrumentation block to finish. new_state: _InstrumentationBlockState, the next state for the parser to transition to. Returns: The new instrumentation block to use for storing parsed instrumentation output.
github-repos
def store_to_file(self, filename):
    """Write the vocabulary to disk, one token per line.

    The file ends in a newline. Reserved tokens are written to the vocab
    file as well.

    Args:
        filename: Full path of the file to store the vocab to.
    """
    with tf.gfile.Open(filename, 'w') as vocab_file:
        for token_id in range(len(self._id_to_token)):
            vocab_file.write(self._id_to_token[token_id] + '\n')
Write vocab file to disk. Vocab files have one token per line. The file ends in a newline. Reserved tokens are written to the vocab file as well. Args: filename: Full path of the file to store the vocab to.
codesearchnet
def broadcast_to_rank(self, rank):
    """Adds leading size-1 dimensions to broadcast `self` to the given rank.

    E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)`
    is `[1, 1, 3, (D2), 4]`.

    Args:
        rank: The rank for the returned shape.

    Returns:
        A RaggedTensorDynamicShape with `rank` dimensions, whose inner
        dimensions have the same size as `self` and whose outer dimensions
        have size `1`.

    Raises:
        ValueError: If `self.rank` is unknown or greater than `rank`.
    """
    if self.rank is None:
        raise ValueError('Unable to broadcast: self.rank is unknown')
    dims_to_add = rank - self.rank
    if dims_to_add < 0:
        raise ValueError('Unable to broadcast: rank=%d must be greater than self.rank=%d.' % (rank, self.rank))
    elif dims_to_add == 0:
        # Already at the requested rank; nothing to do.
        return self
    elif self._partitioned_dim_sizes:
        # Prepend size-1 entries to the partitioned (possibly ragged) dims.
        partitioned_dims = (1,) * dims_to_add + self._partitioned_dim_sizes
        return RaggedTensorDynamicShape(partitioned_dims, self.inner_dim_sizes, self.dim_size_dtype)
    else:
        # No partitioned dims: prepend ones to the dense inner dimensions.
        inner_dims = array_ops.concat([array_ops.ones([dims_to_add], self.dim_size_dtype), self.inner_dim_sizes], axis=0)
        return RaggedTensorDynamicShape([], inner_dims, self.dim_size_dtype)
Adds leading size-1 dimensions to broadcast `self` to the given rank. E.g., if `shape1` is `[3, (D2), 4]`, then `shape1.broadcast_to_rank(5)` is `[1, 1, 3, (D2), 4]`. Args: rank: The rank for the returned shape. Returns: A RaggedTensorDynamicShape with `rank` dimensions, whose inner dimensions have the same size as `self` and whose outer dimensions have size `1`. Raises: ValueError: If `self.rank` is unknown or greater than `rank`.
github-repos
def modify_user_power_levels(self, users=None, users_default=None):
    """Modify the power level for a subset of users.

    Args:
        users (dict): Power levels to assign to specific users, e.g.
            {"@name0:host0": 10, "@name1:host1": 100, "@name3:host3": None}.
            A level of None causes the user to revert to the default level
            as specified by users_default.
        users_default (int): Default power level for users in the room.

    Returns:
        True if successful, False if not.
    """
    try:
        content = self.client.api.get_power_levels(self.room_id)
        if users_default is not None:
            # Bug fix: the original used `if users_default:`, which silently
            # ignored a legitimate default level of 0.
            content['users_default'] = users_default
        if users:
            if 'users' in content:
                content['users'].update(users)
            else:
                content['users'] = users
            # Iterate over a snapshot so entries can be deleted while looping.
            for user, power_level in list(content['users'].items()):
                if power_level is None:
                    del content['users'][user]
        self.client.api.set_power_levels(self.room_id, content)
        return True
    except MatrixRequestError:
        return False
Modify the power level for a subset of users Args: users(dict): Power levels to assign to specific users, in the form {"@name0:host0": 10, "@name1:host1": 100, "@name3:host3", None} A level of None causes the user to revert to the default level as specified by users_default. users_default(int): Default power level for users in the room Returns: True if successful, False if not
codesearchnet
def _InsertNodeAt(new_node, target, after=False): if new_node.parent is not None: raise RuntimeError('inserting node which already has a parent', (new_node, new_node.parent)) parent_of_target = target.parent if parent_of_target is None: raise RuntimeError('expected target node to have a parent', (target,)) for i, child in enumerate(parent_of_target.children): if child is target: insertion_index = i + 1 if after else i parent_of_target.insert_child(insertion_index, new_node) return raise RuntimeError('unable to find insertion point for target node', (target,))
Underlying implementation for node insertion. Arguments: new_node: a new node to insert (this node should not be in the tree). target: the target node. after: if True, new_node is inserted after target. Otherwise, it's inserted before target. Returns: nothing Raises: RuntimeError: if the tree is corrupted, or the insertion would corrupt it.
github-repos
def _restore_resources(resources): resources = deepcopy(resources) for resource in resources: schema = resource['schema'] for fk in schema.get('foreignKeys', []): (_, name) = _restore_path(fk['reference']['resource']) fk['reference']['resource'] = name return resources
Restore schemas from being compatible with storage schemas. Foreign keys related operations. Args: list: resources from storage Returns: list: restored resources
codesearchnet
def _get_sorted_methods(self, methods): if (not methods): return methods def _sorted_methods_comparison(method_info1, method_info2): "Sort method info by path and http_method.\n\n Args:\n method_info1: Method name and info for the first method to compare.\n method_info2: Method name and info for the method to compare to.\n\n Returns:\n Negative if the first method should come first, positive if the\n first method should come after the second. Zero if they're\n equivalent.\n " def _score_path(path): "Calculate the score for this path, used for comparisons.\n\n Higher scores have priority, and if scores are equal, the path text\n is sorted alphabetically. Scores are based on the number and location\n of the constant parts of the path. The server has some special handling\n for variables with regexes, which we don't handle here.\n\n Args:\n path: The request path that we're calculating a score for.\n\n Returns:\n The score for the given path.\n " score = 0 parts = path.split('/') for part in parts: score <<= 1 if ((not part) or (part[0] != '{')): score += 1 score <<= (31 - len(parts)) return score path_score1 = _score_path(method_info1[1].get('path', '')) path_score2 = _score_path(method_info2[1].get('path', '')) if (path_score1 != path_score2): return (path_score2 - path_score1) path_result = cmp(method_info1[1].get('path', ''), method_info2[1].get('path', '')) if (path_result != 0): return path_result method_result = cmp(method_info1[1].get('httpMethod', ''), method_info2[1].get('httpMethod', '')) return method_result return sorted(methods.items(), _sorted_methods_comparison)
Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server.
codesearchnet
def port_remove(br, port, if_exists=True):
    """Deletes a port from a bridge.

    Args:
        br: A string - bridge name. If bridge is None, the port is removed
            from whatever bridge contains it.
        port: A string - port name.
        if_exists: Bool; if False, attempting to delete a port that does
            not exist returns False. (Default True)

    Returns:
        True on success, else False.
    """
    prefix = _param_if_exists(if_exists)
    if port and not br:
        # No bridge given: let ovs-vsctl find whichever bridge owns the port.
        cmd = 'ovs-vsctl {1}del-port {0}'.format(port, prefix)
    else:
        cmd = 'ovs-vsctl {2}del-port {0} {1}'.format(br, port, prefix)
    result = __salt__['cmd.run_all'](cmd)
    return _retcode_to_bool(result['retcode'])
Deletes port. Args: br: A string - bridge name (If bridge is None, port is removed from whatever bridge contains it) port: A string - port name. if_exists: Bool, if False - attempting to delete a port that does not exist returns False. (Default True) Returns: True on success, else False. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' openvswitch.port_remove br0 8080
juraj-google-style
def file_path(path=None, payload=None, objectInput=None):
    """Resolve the on-disk path for the given input.

    When `path` is given it is used directly; otherwise the payload or file
    object is written to a temp file first.

    Args:
        path (string): path of real file.
        payload (string): payload in base64 of file.
        objectInput (object): file object/standard input to analyze.

    Returns:
        Path of file.

    Raises:
        TikaAppFilePathError: if the resolved path does not exist.
    """
    resolved = path if path else write_payload(payload, objectInput)
    if not os.path.exists(resolved):
        msg = "File {!r} does not exist".format(resolved)
        log.exception(msg)
        raise TikaAppFilePathError(msg)
    return resolved
Given a file path, payload or file object, it writes file on disk and returns the temp path. Args: path (string): path of real file payload(string): payload in base64 of file objectInput (object): file object/standard input to analyze Returns: Path of file
juraj-google-style
def load(path):
    """Loads an analytics report from the JSON file at `path`.

    Args:
        path (str): path to json file with analytics report.

    Returns:
        Analytics: the loaded report.
    """
    with open(path, 'r') as fobj:
        analytics = Analytics(info=json.load(fobj))
    # The report file is single-use: remove it after a successful load.
    os.unlink(path)
    return analytics
Loads analytics report from json file specified by path. Args: path (str): path to json file with analytics report.
codesearchnet
def _compute_version_info():
    """Compute the versions of Ray, Python, and pyarrow.

    Returns:
        A (ray_version, python_version, pyarrow_version) tuple of strings.
    """
    # Render e.g. sys.version_info (3, 7, 4, ...) as "3.7.4".
    python_version = '.'.join(str(part) for part in sys.version_info[:3])
    return (ray.__version__, python_version, pyarrow.__version__)
Compute the versions of Python, pyarrow, and Ray. Returns: A tuple containing the version information.
codesearchnet
def create_position_ids_from_input_ids(self, input_ids):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: tf.Tensor

    Returns:
        tf.Tensor
    """
    # 1 where the token is real, 0 where it is padding.
    not_padding = tf.math.not_equal(input_ids, self.padding_idx)
    mask = tf.cast(not_padding, dtype=input_ids.dtype)
    # Cumulative count of non-padding tokens; re-zeroed at padding slots.
    incremental_indices = tf.math.cumsum(mask, axis=1) * mask
    return incremental_indices + self.padding_idx
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: tf.Tensor Returns: tf.Tensor
github-repos
def _parse_field_value(self, field: descriptor.FieldDescriptor, json_value: Any) -> message.Message:
    """Returns a new Message described by the FieldDescriptor and json_value.

    Args:
        field: The FieldDescriptor of the Message instance to create.
        json_value: The JSON value representation to merge into the newly
            created Message.

    Returns:
        A new Message as described by the provided FieldDescriptor merged
        with the contents of json_value.

    Raises:
        ValueError: If `field` is not a message-typed field.
    """
    if field.type != descriptor.FieldDescriptor.TYPE_MESSAGE:
        raise ValueError(f'Error in FHIR proto definition, field: {field.full_name} is not a message.')
    if field.message_type.full_name == any_pb2.Any.DESCRIPTOR.full_name:
        # Any-typed field: parse the JSON into a ContainedResource, then
        # pack that resource into a new Any message.
        contained = self.primitive_handler.new_contained_resource()
        self._merge_contained_resource(json_value, contained)
        any_message = any_pb2.Any()
        any_message.Pack(contained)
        return any_message
    else:
        # Regular message field: instantiate from the descriptor and merge.
        target = proto_utils.create_message_from_descriptor(field.message_type)
        self.merge_value(json_value, target)
        return target
Returns a new Message described by the FieldDescriptor and json_value. Args: field: The FieldDescriptor of the Message instance to create. json_value: The JSON value representation to merge into the newly created Message. Returns: A new Message as described by the provided FieldDescriptor merged with the contents of json_value.
github-repos
def center_crop(self, image: 'torch.Tensor', crop_size: dict[str, int], size: dict[str, int], **kwargs) -> 'torch.Tensor':
    """Center crop an image, scaled by the ratio of `size` to `crop_size`.

    Crops to `(size.height / crop_size.height * min_dim,
    size.width / crop_size.width * min_dim)` where
    `min_dim = min(image height, image width)`.

    Args:
        image (`torch.Tensor`): Image to center crop (H and W are the last
            two dimensions).
        crop_size: Desired output size after applying the center crop.
        size: Size of the output image.

    Returns:
        `torch.Tensor`: The center cropped image.
    """
    # NOTE(review): `size` and `crop_size` are annotated as dicts but are
    # accessed via attributes (.height/.width) while the error message uses
    # .keys() — presumably these are SizeDict-like objects supporting both;
    # confirm against callers.
    if size.height is None or size.width is None:
        raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
    height, width = image.shape[-2:]
    min_dim = min(height, width)
    # Scale the crop window by size/crop_size relative to the shortest edge.
    cropped_height = int(size.height / crop_size.height * min_dim)
    cropped_width = int(size.width / crop_size.width * min_dim)
    return F.center_crop(image, (cropped_height, cropped_width))
Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] * min_dim)`. Where `min_dim = min(size["height"], size["width"])`. If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then center cropped. Args: image (`"torch.Tensor"`): Image to center crop. crop_size (`Dict[str, int]`): Desired output size after applying the center crop. size (`Dict[str, int]`): Size of the output image. Returns: `torch.Tensor`: The center cropped image.
github-repos
async def remember(self, request, user_id):
    """Called to store the userid for a request.

    Creates a ticket from the request and user_id, then delegates storage
    to the abstract remember_ticket() implementation.

    Args:
        request: aiohttp Request object.
        user_id: String representing the user_id to remember.
    """
    new_ticket = self._new_ticket(request, user_id)
    await self.remember_ticket(request, new_ticket)
Called to store the userid for a request. This function creates a ticket from the request and user_id, and calls the abstract function remember_ticket() to store the ticket. Args: request: aiohttp Request object. user_id: String representing the user_id to remember
juraj-google-style
def read_handler(Model, name=None, **kwds):
    """Factory for an action handler answering read requests via GraphQL.

    The returned handler resolves the payload as a GraphQL query against the
    service's internal schema and publishes the result (or the error) on the
    service's event broker.

    Args:
        Model (nautilus.BaseModel): The model whose read action this handler
            responds to.
        name: Optional override for the action name; defaults to Model.

    Returns:
        function(service, action_type, payload, props, **kwds): The async
        action handler for this model.
    """
    async def action_handler(service, action_type, payload, props, **kwds):
        # Only respond to the read CRUD action for this model/name.
        if (action_type == get_crud_action('read', (name or Model))):
            # Propagate the correlation id, if any, so the reply can be
            # matched to the original request.
            message_props = {}
            if ('correlation_id' in props):
                message_props['correlation_id'] = props['correlation_id']
            try:
                # Resolve the payload as a GraphQL query against the schema.
                resolved = service.schema.execute(payload)
                response = json.dumps({'data': {key: value for (key, value) in resolved.data.items()}, 'errors': resolved.errors})
                # Publish the successful result with a success action type.
                (await service.event_broker.send(payload=response, action_type=change_action_status(action_type, success_status()), **message_props))
            except Exception as err:
                # Publish the failure with an error action type instead of
                # letting the exception escape the handler.
                (await service.event_broker.send(payload=str(err), action_type=change_action_status(action_type, error_status()), **message_props))
    return action_handler
This factory returns an action handler that responds to read requests by resolving the payload as a graphql query against the internal schema. Args: Model (nautilus.BaseModel): The model to delete when the action received. Returns: function(type, payload): The action handler for this model
codesearchnet
def set_contrast(self, contrast):
    """Adjusts the image contrast.

    Contrast refers to the rate of change of color with color level. At low
    contrast, color changes gradually over many intensity levels, while at
    high contrast it can change rapidly within a few levels.

    Args:
        contrast: float between 0 and 1 (the colormap default is 0.5).

    Returns:
        void
    """
    self._contrast = contrast
    spread = 2 * (1.0 - contrast)
    self.x_spread = spread
    self.y_spread = 2.0 - spread
    # Rebuild the color dictionary so the new contrast takes effect.
    self._build_cdict()
Adjusts the image contrast. Contrast refers to the rate of change of color with color level. At low contrast, color changes gradually over many intensity levels, while at high contrast it can change rapidly within a few levels Args: contrast: float A number between 0 and 1. Note that upon initialization the colormap has a default contrast value of 0.5. Returns: void
codesearchnet
def all_reduce(self, input_tensor: core.TensorLike, control_input: Optional[Union[core.TensorLike, ops.Operation]]=None, options: Optional[collective_util.Options]=None) -> core.Tensor:
    """All-reduce a dense tensor.

    Args:
        input_tensor: a dense tensor. It must have the same shape on all
            replicas.
        control_input: if not None, add control edges between control_input
            and the all-reduce.
        options: an optional tf.distribute.experimental.CommunicationOptions.
            If provided, it overrides the default options.

    Returns:
        The reduced tensor.
    """
    # Each collective op instance needs a fresh instance key.
    instance_key = self._next_instance_key()
    options = self._options.merge(options)
    ordering_token = self._get_ordering_token()
    # Pin the op to this launcher's device and thread the optional control
    # dependency through before issuing the collective.
    with ops.device(self._device), self._control_input(control_input):
        return collective_ops.all_reduce_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)
All-reduce a dense tensor. Args: input_tensor: a dense tensor. It must have the same shape on all replicas. control_input: if not None, add control edges between control_input and the all-reduce. options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: The reduced tensor.
github-repos
def _PromptUserForEncryptedVolumeCredential(self, scan_context, locked_scan_node, output_writer):
    """Prompts the user to provide a credential for an encrypted volume.

    Loops until the volume is unlocked or the user selects 'skip'.

    Args:
        scan_context (SourceScannerContext): the source scanner context.
        locked_scan_node (SourceScanNode): the locked scan node.
        output_writer (StdoutWriter): the output writer.
    """
    credentials = credentials_manager.CredentialsManager.GetCredentials(locked_scan_node.path_spec)
    # Describe the volume type to the user.
    if locked_scan_node.type_indicator == (definitions.TYPE_INDICATOR_APFS_CONTAINER):
        line = 'Found an APFS encrypted volume.'
    elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_BDE:
        line = 'Found a BitLocker encrypted volume.'
    elif locked_scan_node.type_indicator == definitions.TYPE_INDICATOR_FVDE:
        line = 'Found a CoreStorage (FVDE) encrypted volume.'
    else:
        line = 'Found an encrypted volume.'
    output_writer.WriteLine(line)
    # Offer the supported credential types plus an explicit 'skip' option.
    credentials_list = list(credentials.CREDENTIALS)
    credentials_list.append('skip')
    output_writer.WriteLine('Supported credentials:')
    output_writer.WriteLine('')
    for index, name in enumerate(credentials_list):
        output_writer.WriteLine(' {0:d}. {1:s}'.format(index + 1, name))
    output_writer.WriteLine('')
    result = False
    while not result:
        output_writer.WriteString('Select a credential to unlock the volume: ')
        input_line = sys.stdin.readline()
        input_line = input_line.strip()
        # Accept either the credential name or its 1-based list index.
        if input_line in credentials_list:
            credential_identifier = input_line
        else:
            try:
                credential_identifier = int(input_line, 10)
                credential_identifier = credentials_list[credential_identifier - 1]
            except (IndexError, ValueError):
                output_writer.WriteLine('Unsupported credential: {0:s}'.format(input_line))
                continue
        if credential_identifier == 'skip':
            break
        getpass_string = 'Enter credential data: '
        # Python 2 on Windows requires the prompt to be byte-encoded.
        if sys.platform.startswith('win') and sys.version_info[0] < 3:
            getpass_string = self._EncodeString(getpass_string)
        credential_data = getpass.getpass(getpass_string)
        output_writer.WriteLine('')
        result = self._source_scanner.Unlock(scan_context, locked_scan_node.path_spec, credential_identifier, credential_data)
        if not result:
            output_writer.WriteLine('Unable to unlock volume.')
            output_writer.WriteLine('')
Prompts the user to provide a credential for an encrypted volume. Args: scan_context (SourceScannerContext): the source scanner context. locked_scan_node (SourceScanNode): the locked scan node. output_writer (StdoutWriter): the output writer.
juraj-google-style
def __init__(self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze: Optional[bool]=False, dtype=None, **kwargs) -> None:
    """Embedding layer with an optional trainable additional block.

    Args:
        num_embeddings (`int`): Size of the dictionary of embeddings.
        num_additional_embeddings (`int`): Number of additional embeddings.
            Only useful when `partially_freeze=True`.
        embedding_dim (`int`): The size of each embedding vector.
        partially_freeze (`bool`, *optional*, defaults to `False`): If
            `True`, the regular weight is frozen; the additional embedding
            is never frozen.
        dtype: Passed through to the underlying Keras Embedding layers.
        **kwargs: Other standard `tf.keras.layers.Embedding` parameters
            (e.g. `mask_zero`, `input_length`, `embeddings_initializer`).
    """
    super().__init__(input_dim=num_embeddings, output_dim=embedding_dim, dtype=dtype, **kwargs)
    self.num_embeddings = num_embeddings
    self.num_additional_embeddings = num_additional_embeddings
    self.partially_freeze = partially_freeze
    if partially_freeze:
        # Freeze the base embedding; the additional embedding below is a
        # separate layer and stays trainable.
        self.trainable = False
    if self.num_additional_embeddings > 0:
        self.additional_embedding = tf.keras.layers.Embedding(input_dim=self.num_additional_embeddings, output_dim=embedding_dim, dtype=dtype, name='additional_embedding')
Args: num_embeddings (`int`): Size of the dictionary of embeddings num_additional_embeddings (`int`): Number of additional embeddings. Only useful when you `partially_freeze=True`. embedding_dim (`int`): The size of each embedding vector partially_freeze: (`bool`, *optional*, defaults to `False`): If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen. Note: there are a lot of other parameters to initialize a standard `tf.keras.layers.Embedding` such as `mask_zero`, `input_length` or `embeddings_initializer`. We are not supporting these.
github-repos
def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:
    """Strips debug ops (Assert/PrintV2/CheckNumerics/Print) from a node.

    Assert and PrintV2 become NoOp: regular attributes are erased and every
    data input is rewritten as a control dependency. CheckNumerics and Print
    become Identity: non-`_` attributes other than T are pruned, the first
    (data) input is kept, and the remaining inputs become control
    dependencies.

    Args:
        node: The node to potentially strip; mutated in place.
    """
    if node.op in ('Assert', 'PrintV2'):
        node.op = 'NoOp'
        erase_regular_node_attributes(node)
        # Demote every data input to a control dependency.
        rewritten = [inp if is_control_input(inp) else as_control_dep(inp)
                     for inp in node.input]
        node.ClearField('input')
        node.input.extend(rewritten)
    elif node.op in ('CheckNumerics', 'Print'):
        node.op = 'Identity'
        prune_all_non_t_attributes(node)
        # Identity takes one input: keep input 0, demote the rest.
        for i in range(1, len(node.input)):
            if not is_control_input(node.input[i]):
                node.input[i] = as_control_dep(node.input[i])
Strips the graph from Assert and CheckNumerics ops. For Assert ops, this function also rewrites all of the inputs to the nodes that were transformed by making them into control dependencies. It also removes all of the regular node attributes, that is all node attributes that do not start with `_`. For CheckNumerics ops, this function turns the op into an Identity op, which will be pruned later (according to the original implementation in grappler's `debug_stripper.cc`. Then, since Identity ops only take one input, it leaves the first input as is while transforming the other ones into control dependencies. Args: node: The node to potentally strip.
github-repos
def address_to_ip(address):
    """Convert the hostname in an address to a numerical IP address.

    This is a no-op if the address already contains a numerical IP.

    Args:
        address: Either "host:port" or just a host/IP string.

    Returns:
        The same address with the hostname replaced by a numerical IP.
    """
    host, *rest = address.split(':')
    ip_address = socket.gethostbyname(host)
    if ip_address == '127.0.0.1':
        # Loopback is meaningless to other machines; use this node's IP.
        ip_address = get_node_ip_address()
    return ':'.join([ip_address] + rest)
Convert a hostname to a numerical IP addresses in an address. This should be a no-op if address already contains an actual numerical IP address. Args: address: This can be either a string containing a hostname (or an IP address) and a port or it can be just an IP address. Returns: The same address but with the hostname replaced by a numerical IP address.
codesearchnet
def args_to_dict(args):
    """Convert comma separated command line arguments to a dictionary.

    Args:
        args (str): Command line arguments of the form "key1=val1,key2=val2".

    Returns:
        DictUpperBound[str,str]: Dictionary of arguments.
    """
    arguments = dict()
    for arg in args.split(','):
        # Split on the first '=' only, so values may themselves contain '='
        # (the original split on every '=' and crashed on such values).
        key, value = arg.split('=', 1)
        arguments[key] = value
    return arguments
Convert command line arguments in a comma separated string to a dictionary Args: args (str): Command line arguments Returns: DictUpperBound[str,str]: Dictionary of arguments
juraj-google-style
def is_valid_embedding(emb, source, target):
    """A simple (bool) diagnostic for minor embeddings.

    See :func:`diagnose_embedding` for a more detailed diagnostic / more
    information.

    Args:
        emb (dict): a dictionary mapping source nodes to arrays of target
            nodes.
        source (graph or edgelist): the graph to be embedded.
        target (graph or edgelist): the graph being embedded into.

    Returns:
        bool: True if `emb` is valid.
    """
    # Valid iff the detailed diagnostic yields no problems at all.
    return all(False for _ in diagnose_embedding(emb, source, target))
A simple (bool) diagnostic for minor embeddings. See :func:`diagnose_embedding` for a more detailed diagnostic / more information. Args: emb (dict): a dictionary mapping source nodes to arrays of target nodes source (graph or edgelist): the graph to be embedded target (graph or edgelist): the graph being embedded into Returns: bool: True if `emb` is valid.
juraj-google-style
def __init__(self, kwargs):
    """Initialize the error message from the kwarg names.

    The message is the comma separated list of keys in `kwargs`.

    Args:
        kwargs (dict): Key-word arguments.
    """
    message = ', '.join(kwargs.keys())
    PanCloudError.__init__(self, '{}'.format(message))
Convert kwargs to CSV string. Args: kwargs (dict): Key-word arguments.
juraj-google-style
def _get_fbeta_score(true_positives, selected, relevant, beta=1): precision = 1 if selected > 0: precision = true_positives / selected if beta == 0: return precision recall = 1 if relevant > 0: recall = true_positives / relevant if precision > 0 and recall > 0: beta2 = beta * beta return (1 + beta2) * precision * recall / (beta2 * precision + recall) else: return 0
Compute Fbeta score. Args: true_positives: Number of true positive ngrams. selected: Number of selected ngrams. relevant: Number of relevant ngrams. beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only. Returns: Fbeta score.
juraj-google-style
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
    """Read and decode the data encoding the ObjectDefaults structure.

    Args:
        input_buffer (stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP version
            with which the object will be decoded. Optional, defaults to
            KMIP 2.0.

    Raises:
        InvalidKmipEncoding: Raised if the object type or attributes are
            missing from the encoding.
        VersionNotSupported: Raised when a KMIP version is provided that
            does not support the ObjectDefaults structure.
    """
    # ObjectDefaults only exists in KMIP 2.0 and later.
    if (kmip_version < enums.KMIPVersion.KMIP_2_0):
        raise exceptions.VersionNotSupported('KMIP {} does not support the ObjectDefaults object.'.format(kmip_version.value))
    super(ObjectDefaults, self).read(input_buffer, kmip_version=kmip_version)
    # Read this structure's payload into a local buffer of exactly
    # self.length bytes (set by the superclass read above).
    local_buffer = utils.BytearrayStream(input_buffer.read(self.length))
    # Object type enumeration is required.
    if self.is_tag_next(enums.Tags.OBJECT_TYPE, local_buffer):
        self._object_type = primitives.Enumeration(enums.ObjectType, tag=enums.Tags.OBJECT_TYPE)
        self._object_type.read(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidKmipEncoding('The ObjectDefaults encoding is missing the object type enumeration.')
    # Attributes structure is required.
    if self.is_tag_next(enums.Tags.ATTRIBUTES, local_buffer):
        self._attributes = Attributes()
        self._attributes.read(local_buffer, kmip_version=kmip_version)
    else:
        raise exceptions.InvalidKmipEncoding('The ObjectDefaults encoding is missing the attributes structure.')
    self.is_oversized(local_buffer)
Read the data encoding the ObjectDefaults structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the object type or attributes are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ObjectDefaults structure.
codesearchnet
def _GetLineNumberPrefix(self, error, show_number=True): prefix_with_number = ' {} | '.format(error.position.line) if show_number: return prefix_with_number return re.sub('\\d', ' ', prefix_with_number)
Returns a prefix to annotate a line with a line number. Args: error: The ErrorInfo to get the line number from. show_number: Whether to show or hide the number (hiding the number is useful to get a prefix with the same width and formatting).
github-repos
def describe_field(field_definition):
    """Build a FieldDescriptor describing a Field instance.

    Args:
        field_definition: Field instance to provide a descriptor for.

    Returns:
        Initialized FieldDescriptor describing the Field instance.
    """
    descriptor = FieldDescriptor()
    descriptor.name = field_definition.name
    descriptor.number = field_definition.number
    descriptor.variant = field_definition.variant
    # Enum and message fields also carry the definition name of their type.
    if isinstance(field_definition, messages.EnumField):
        descriptor.type_name = field_definition.type.definition_name()
    if isinstance(field_definition, messages.MessageField):
        descriptor.type_name = field_definition.message_type.definition_name()
    # Defaults are serialized via a per-field-type string converter.
    if field_definition.default is not None:
        to_string = _DEFAULT_TO_STRING_MAP[type(field_definition)]
        descriptor.default_value = to_string(field_definition.default)
    # Collapse the repeated/required flags into a single label value.
    if field_definition.repeated:
        descriptor.label = FieldDescriptor.Label.REPEATED
    elif field_definition.required:
        descriptor.label = FieldDescriptor.Label.REQUIRED
    else:
        descriptor.label = FieldDescriptor.Label.OPTIONAL
    return descriptor
Build descriptor for Field instance. Args: field_definition: Field instance to provide descriptor for. Returns: Initialized FieldDescriptor instance describing the Field instance.
codesearchnet
def _ParseMRUListValue(self, registry_key):
    """Parses the MRUList value in a given Registry key.

    Args:
        registry_key (dfwinreg.WinRegistryKey): Windows Registry key that
            contains the MRUList value.

    Returns:
        mrulist_entries: MRUList entries or None if not available.
    """
    mrulist_value = registry_key.GetValueByName('MRUList')
    # Not every key contains an MRUList value.
    if (not mrulist_value):
        return None
    mrulist_entries_map = self._GetDataTypeMap('mrulist_entries')
    # The dtfabric map needs the total data size to determine how many
    # entries to read from the byte stream.
    context = dtfabric_data_maps.DataTypeMapContext(values={'data_size': len(mrulist_value.data)})
    return self._ReadStructureFromByteStream(mrulist_value.data, 0, mrulist_entries_map, context=context)
Parses the MRUList value in a given Registry key. Args: registry_key (dfwinreg.WinRegistryKey): Windows Registry key that contains the MRUList value. Returns: mrulist_entries: MRUList entries or None if not available.
codesearchnet
def _Execute(statements, context, callback, trace):
    """Execute a list of template statements in a ScopedContext.

    This is called in a mutually recursive fashion.

    Args:
        statements: sequence of either literal strings or (func, args)
            pairs to be executed.
        context: the ScopedContext the statements execute against.
        callback: literal strings are "written" to this callable.
        trace: Trace object, or None.
    """
    if trace:
        trace.exec_depth += 1
    for (i, statement) in enumerate(statements):
        if isinstance(statement, six.string_types):
            # Literal template text is emitted as-is.
            callback(statement)
        else:
            try:
                (func, args) = statement
                func(args, context, callback, trace)
            except UndefinedVariable as e:
                # Attach a window of nearby statements and the trace for
                # debugging before re-raising.
                start = max(0, (i - 3))
                end = (i + 3)
                e.near = statements[start:end]
                e.trace = trace
                raise
Execute a bunch of template statements in a ScopedContext. Args: callback: Strings are "written" to this callback function. trace: Trace object, or None This is called in a mutually recursive fashion.
codesearchnet
def _AddParentDirectories(self, path):
    """Add the parent directories of a path to the fake file system.

    Args:
        path (str): path of the file within the fake file system.

    Raises:
        ValueError: if an existing parent file entry is not a directory.
    """
    segments = self.file_system.SplitPath(path)
    parent_paths = [self.file_system.JoinPath(segments[:index]) for index in range(len(segments))]
    # First pass: validate that no parent already exists as a
    # non-directory before creating anything.
    for parent_path in parent_paths:
        entry = self.file_system.GetFileEntryByPath(parent_path)
        if entry and not entry.IsDirectory():
            raise ValueError(
                'Non-directory parent file entry: {0:s} already exists.'.format(
                    parent_path))
    # Second pass: create every parent directory that is missing.
    for parent_path in parent_paths:
        if not self.file_system.FileEntryExistsByPath(parent_path):
            self.file_system.AddFileEntry(
                parent_path, file_entry_type=definitions.FILE_ENTRY_TYPE_DIRECTORY)
Adds the parent directories of a path to the fake file system. Args: path (str): path of the file within the fake file system. Raises: ValueError: if a parent directory is already set and is not a directory.
juraj-google-style
def _seed2key(a):
    """Converts an RNG seed to an RNG key.

    Args:
        a: an RNG seed, a tensor of shape [2] and dtype `tf.int32`.

    Returns:
        an RNG key, an ndarray of shape [] and dtype `np.int64`.
    """
    def int32s_to_int64(a):
        # Pack the two int32 words into one 64-bit value: a[0] is the low
        # word, a[1] the high word; go through uint64 to avoid sign issues.
        a = math_ops.bitwise_or(math_ops.cast(a[0], dtypes.uint64), math_ops.left_shift(math_ops.cast(a[1], dtypes.uint64), constant_op.constant(32, dtypes.uint64)))
        a = math_ops.cast(a, dtypes.int64)
        return a
    return tf_np.asarray(int32s_to_int64(a))
Converts an RNG seed to an RNG key. Args: a: an RNG seed, a tensor of shape [2] and dtype `tf.int32`. Returns: an RNG key, an ndarray of shape [] and dtype `np.int64`.
github-repos
def run_board(args):
    """Run main entry for AutoMLBoard.

    Args:
        args: args parsed from the command line.
    """
    init_config(args)
    # Imported lazily so init_config can set up the environment first.
    from backend.collector import CollectorService
    service = CollectorService(
        args.logdir,
        args.reload_interval,
        standalone=False,
        log_level=args.log_level)
    service.run()
    logger.info("Try to start automlboard on port %s\n" % args.port)
    # Hand off to Django's runserver to serve the dashboard UI.
    command = [
        os.path.join(root_path, "manage.py"), "runserver",
        "0.0.0.0:%s" % args.port, "--noreload"
    ]
    execute_from_command_line(command)
Run main entry for AutoMLBoard. Args: args: args parsed from command line
juraj-google-style
def _tokenize_field_path(path):
    """Lex a field path into tokens (including dots).

    Args:
        path (str): field path to be lexed.

    Yields:
        str: successive tokens of the path.

    Raises:
        ValueError: if part of the path cannot be tokenized.
    """
    pos = 0
    match = TOKENS_REGEX.match(path)
    while match is not None:
        yield match.group(match.lastgroup)
        pos = match.end()
        match = TOKENS_REGEX.match(path, pos)
    # Anything left over means the path contained an invalid token.
    if pos != len(path):
        raise ValueError("Path {} not consumed, residue: {}".format(path, path[pos:]))
Lex a field path into tokens (including dots). Args: path (str): field path to be lexed. Returns: List(str): tokens
juraj-google-style
def __init__(self, shape, dtype, minimum, maximum, name=None):
    """Initializes a new `BoundedTensorSpec`.

    Args:
        shape: Value convertible to `tf.TensorShape`. The shape of the
            tensor.
        dtype: Value convertible to `tf.DType`. The type of the tensor
            values.
        minimum: Number or sequence specifying the minimum element bounds
            (inclusive). Must be broadcastable to `shape`.
        maximum: Number or sequence specifying the maximum element bounds
            (inclusive). Must be broadcastable to `shape`.
        name: Optional string containing a semantic name for the
            corresponding array. Defaults to `None`.

    Raises:
        ValueError: If `minimum` or `maximum` are not provided or not
            broadcastable to `shape`.
    """
    super(BoundedTensorSpec, self).__init__(shape, dtype, name)
    if minimum is None:
        raise ValueError('`minimum` can not be None.')
    if maximum is None:
        raise ValueError('`maximum` can not be None.')
    # Validate that each bound broadcasts against the spec's shape; wrap
    # the broadcast failure in a more descriptive error.
    try:
        minimum_shape = np.shape(minimum)
        common_shapes.broadcast_shape(tensor_shape.TensorShape(minimum_shape), self.shape)
    except ValueError as exception:
        raise ValueError(f'`minimum` {minimum} is not compatible with shape {self.shape}.') from exception
    try:
        maximum_shape = np.shape(maximum)
        common_shapes.broadcast_shape(tensor_shape.TensorShape(maximum_shape), self.shape)
    except ValueError as exception:
        raise ValueError(f'`maximum` {maximum} is not compatible with shape {self.shape}.') from exception
    # Store bounds as read-only arrays in the spec's dtype.
    self._minimum = np.array(minimum, dtype=self.dtype.as_numpy_dtype)
    self._minimum.setflags(write=False)
    self._maximum = np.array(maximum, dtype=self.dtype.as_numpy_dtype)
    self._maximum.setflags(write=False)
Initializes a new `BoundedTensorSpec`. Args: shape: Value convertible to `tf.TensorShape`. The shape of the tensor. dtype: Value convertible to `tf.DType`. The type of the tensor values. minimum: Number or sequence specifying the minimum element bounds (inclusive). Must be broadcastable to `shape`. maximum: Number or sequence specifying the maximum element bounds (inclusive). Must be broadcastable to `shape`. name: Optional string containing a semantic name for the corresponding array. Defaults to `None`. Raises: ValueError: If `minimum` or `maximum` are not provided or not broadcastable to `shape`. TypeError: If the shape is not an iterable or if the `dtype` is an invalid numpy dtype.
github-repos
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
    """Gets a query for multiple objects from the data source.

    Args:
        type: The type of the objects being requested.
        query: The query being requested (contains a request for multiple
            objects).
        context: The context for the extraction (mutable).

    Returns:
        The requested objects.
    """
    pass
Gets a query from the data source, which contains a request for multiple objects. Args: query: The query being requested (contains a request for multiple objects). context: The context for the extraction (mutable). Returns: The requested objects.
juraj-google-style
def Register(self, a, b, migrated_entity): if (a is not None): self.a_merge_map[a] = migrated_entity a._migrated_entity = migrated_entity if (b is not None): self.b_merge_map[b] = migrated_entity b._migrated_entity = migrated_entity
Registers a merge mapping. If a and b are both not None, this means that entities a and b were merged to produce migrated_entity. If only one of a or b is not None, then that entity was not merged but simply migrated. The effect of a call to Register is to update a_merge_map and b_merge_map according to the merge. Also the private attribute _migrated_entity of a and b is set to migrated_entity. Args: a: The entity from the old feed or None. b: The entity from the new feed or None. migrated_entity: The migrated entity.
codesearchnet
def save_summaries_secs(self):
    """Return the delay between summary computations.

    Returns:
        The configured interval (presumably in seconds, per the attribute
        name -- confirm with the setter/constructor).
    """
    return self._save_summaries_secs
Return the delay between summary computations. Returns: The interval, in seconds, between summary computations.
github-repos
def maybe_get_static_value(x, dtype=None):
    """Helper which tries to return a static value.

    Given `x`, extract its value statically, optionally casting to a
    specific dtype. If this is not possible, None is returned.

    Args:
        x: `Tensor` for which to extract a value statically.
        dtype: Optional dtype to cast to.

    Returns:
        Statically inferred value if possible, otherwise None.
    """
    if x is None:
        return None
    try:
        static = tf.get_static_value(x)
    except TypeError:
        # `x` is not something TF can inspect; use the value itself.
        static = x
    if static is None or dtype is None:
        return static
    return np.array(static, dtype)
Helper which tries to return a static value. Given `x`, extract it's value statically, optionally casting to a specific dtype. If this is not possible, None is returned. Args: x: `Tensor` for which to extract a value statically. dtype: Optional dtype to cast to. Returns: Statically inferred value if possible, otherwise None.
codesearchnet
def keywords(self):
    """Get the plot keywords for a specific movie id.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_id_path('keywords'))
    # Mirror the response payload onto this object's attributes.
    self._set_attrs_to_values(response)
    return response
Get the plot keywords for a specific movie id. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def get(self, key, **ctx_options):
    """Return a Model instance given the entity key.

    Uses the context cache if the cache policy for the given key is
    enabled, and memcache as a secondary cache (with a locking protocol
    to avoid caching stale values around datastore reads).

    Args:
        key: Key instance.
        **ctx_options: Context options.

    Returns:
        A Model instance if the key exists in the datastore; None
        otherwise.
    """
    options = _make_ctx_options(ctx_options)
    use_cache = self._use_cache(key, options)
    if use_cache:
        # May satisfy the request directly from the in-context cache.
        self._load_from_cache_if_available(key)
    use_datastore = self._use_datastore(key, options)
    # Memcache is bypassed inside transactions that hit the datastore.
    if (use_datastore and
        isinstance(self._conn, datastore_rpc.TransactionalConnection)):
        use_memcache = False
    else:
        use_memcache = self._use_memcache(key, options)
    ns = key.namespace()
    memcache_deadline = None
    if use_memcache:
        mkey = self._memcache_prefix + key.urlsafe()
        memcache_deadline = self._get_memcache_deadline(options)
        # for_cas=True when we may later write back with compare-and-set.
        mvalue = yield self.memcache_get(mkey, for_cas=use_datastore,
                                         namespace=ns, use_cache=True,
                                         deadline=memcache_deadline)
        # A concurrent tasklet may have populated the cache meanwhile.
        if use_cache:
            self._load_from_cache_if_available(key)
        if mvalue not in (_LOCKED, None):
            cls = model.Model._lookup_model(key.kind(),
                                            self._conn.adapter.default_model)
            pb = entity_pb.EntityProto()
            try:
                pb.MergePartialFromString(mvalue)
            except ProtocolBuffer.ProtocolBufferDecodeError:
                # Treat a corrupt entry as a miss and fall through.
                logging.warning('Corrupt memcache entry found '
                                'with key %s and namespace %s' % (mkey, ns))
                mvalue = None
            else:
                entity = cls._from_pb(pb)
                # The protobuf may not have the key; set it explicitly.
                entity._key = key
                if use_cache:
                    self._cache[key] = entity
                raise tasklets.Return(entity)
        if mvalue is None and use_datastore:
            # Write a lock marker so other readers do not cache a stale
            # value while we read from the datastore; the gets() primes
            # the CAS id for the later memcache_cas().
            yield self.memcache_set(mkey, _LOCKED, time=_LOCK_TIME,
                                    namespace=ns, use_cache=True,
                                    deadline=memcache_deadline)
            yield self.memcache_gets(mkey, namespace=ns, use_cache=True,
                                     deadline=memcache_deadline)
    if not use_datastore:
        raise tasklets.Return(None)
    if use_cache:
        entity = yield self._get_batcher.add_once(key, options)
    else:
        entity = yield self._get_batcher.add(key, options)
    if entity is not None:
        if use_memcache and mvalue != _LOCKED:
            # Only write back if the value fits and our lock still holds
            # (CAS fails if another writer intervened).
            pbs = entity._to_pb(set_key=False).SerializePartialToString()
            if len(pbs) <= memcache.MAX_VALUE_SIZE:
                timeout = self._get_memcache_timeout(key, options)
                yield self.memcache_cas(mkey, pbs, time=timeout,
                                        namespace=ns,
                                        deadline=memcache_deadline)
        if use_cache:
            self._cache[key] = entity
    raise tasklets.Return(entity)
Return a Model instance given the entity key. It will use the context cache if the cache policy for the given key is enabled. Args: key: Key instance. **ctx_options: Context options. Returns: A Model instance if the key exists in the datastore; None otherwise.
juraj-google-style
def allreduce_grads(all_grads, average):
    """All-reduce average the gradients among K devices with NCCL.

    Results are broadcasted to all devices.

    Args:
        all_grads (K x N): List of list of gradients. N is the number of
            variables.
        average (bool): average gradients or not.

    Returns:
        K x N: same as input, but each grad is replaced by the reduction
        over K devices.
    """
    # The nccl module moved out of contrib after TF 1.12.
    if (get_tf_version_tuple() <= (1, 12)):
        from tensorflow.contrib import nccl
    else:
        from tensorflow.python.ops import nccl_ops as nccl
    nr_tower = len(all_grads)
    # Nothing to reduce with a single device.
    if (nr_tower == 1):
        return all_grads
    new_all_grads = []
    # Iterate per-variable across devices: one all_sum per variable.
    for grads in zip(*all_grads):
        summed = nccl.all_sum(grads)
        grads_for_devices = []
        for g in summed:
            # Scale each result on its own device to get the average.
            with tf.device(g.device):
                if average:
                    g = tf.multiply(g, (1.0 / nr_tower))
                grads_for_devices.append(g)
        new_all_grads.append(grads_for_devices)
    # Transpose back to per-device lists of gradients.
    ret = list(zip(*new_all_grads))
    return ret
All-reduce average the gradients among K devices. Results are broadcasted to all devices. Args: all_grads (K x N): List of list of gradients. N is the number of variables. average (bool): average gradients or not. Returns: K x N: same as input, but each grad is replaced by the average over K devices.
codesearchnet
def tensor_dim_to_mesh_dim_size(layout, mesh_shape, tensor_dim):
    """Return how many ways a tensor dimension gets split.

    This is used to "cheat" when building the mtf graph and peek at how a
    tensor dimension will be split. Returns 1 if the tensor dimension is
    not split.

    Args:
        layout: an input to convert_to_layout_rules.
        mesh_shape: an input to convert_to_shape.
        tensor_dim: a Dimension.

    Returns:
        an integer.
    """
    rules = convert_to_layout_rules(layout)
    shape = convert_to_shape(mesh_shape)
    mesh_axis = rules.tensor_dimension_to_mesh_axis(tensor_dim, shape)
    # A dimension not mapped to any mesh axis is not split.
    return 1 if mesh_axis is None else shape.dims[mesh_axis].size
How many ways does a tensor dimension get split. This is used to "cheat" when building the mtf graph and peek at how a tensor dimension will be split. Returns 1 if the tensor dimension is not split. Args: layout: an input to convert_to_layout_rules mesh_shape: an input to convert_to_shape tensor_dim: a Dimension Returns: an integer
juraj-google-style
def _MakeSavedModelV1(self, run_params):
    """Write the saved model as an input for testing.

    In addition to creating a SavedModel like its parent method, this
    method rewrites the SavedModel so that TF-TRT conversion parameters
    are attached as function attributes to each function.

    Args:
        run_params: The current test run parameters.

    Returns:
        The directory of the saved model.
    """
    saved_model_dir = trt_test.TfTrtIntegrationTestBase._MakeSavedModelV1(self, run_params)
    saved_model_proto = loader_impl.parse_saved_model(saved_model_dir)
    new_saved_model = saved_model_pb2.SavedModel()
    new_saved_model.CopyFrom(saved_model_proto)
    new_meta_graph_def = new_saved_model.meta_graphs[0]
    # Mark every function no-inline and copy the test attributes so the
    # converter can see them per-function.
    for func_def in new_meta_graph_def.graph_def.library.function:
        func_def.attr['_noinline'].CopyFrom(attr_value_pb2.AttrValue(b=True))
        self._copy_test_attributes_to_func_def(func_def)
    # Replace the serialized SavedModel on disk with the rewritten one.
    old_saved_model_file = os.path.join(saved_model_dir, constants.SAVED_MODEL_FILENAME_PB)
    if os.path.exists(old_saved_model_file):
        os.remove(old_saved_model_file)
    path = os.path.join(compat.as_bytes(saved_model_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
    file_io.write_string_to_file(path, new_saved_model.SerializeToString(deterministic=True))
    return saved_model_dir
Write the saved model as an input for testing. In addition to creating a SavedModel like its parent method, this method replaces this SavedModel by adding TF-TRT conversion parameters as function attributes to each function in the SavedModel. Args: run_params: The current test run parameters. Returns: The directory of the saved model.
github-repos
def _CreateQueryAccessHelper(self):
    """Creates a CheckAccessHelper for controlling query access.

    This function and _CreateReadAccessHelper essentially define GRR's
    ACL policy. Query access gives the ability to find objects in the
    tree without knowing their URN, using ListChildren; granting query
    access also requires granting read access.

    Returns:
        CheckAccessHelper for controlling query access.
    """
    h = CheckAccessHelper('query')
    # User home directories (checked against the requesting user).
    h.Allow('aff4:/users/*', self._IsHomeDir)
    h.Allow('aff4:/cron')
    h.Allow('aff4:/cron/*')
    h.Allow('aff4:/hunts')
    h.Allow('aff4:/hunts/*')
    h.Allow('aff4:/ACL')
    h.Allow('aff4:/ACL/*')
    h.Allow(self.CLIENT_URN_PATTERN)
    h.Allow((self.CLIENT_URN_PATTERN + '/*'))
    h.Allow('aff4:/index')
    h.Allow('aff4:/index/*')
    h.Allow('aff4:/config')
    h.Allow('aff4:/config/*')
    h.Allow('aff4:/flows/*')
    # Hash-addressed file store entries (64 hex chars of SHA-256).
    h.Allow(('aff4:/files/hash/generic/sha256/' + ('[a-z0-9]' * 64)))
    # NOTE: the artifact_store pair was previously registered twice; the
    # duplicate registrations have been removed.
    h.Allow('aff4:/artifact_store')
    h.Allow('aff4:/artifact_store/*')
    h.Allow('aff4:/audit/logs')
    h.Allow('aff4:/audit/logs/*')
    return h
Creates a CheckAccessHelper for controlling query access. This function and _CreateReadAccessHelper essentially define GRR's ACL policy. Please refer to these 2 functions to either review or modify GRR's ACLs. Query access gives you the ability to find objects in the tree without knowing their URN, using ListChildren. If you grant query access, you will also need read access. Returns: CheckAccessHelper for controlling query access.
codesearchnet
def _parse_ports(port_values: dict) -> dict:
    """Parse the ports configuration into a Docker endpoint specification.

    Each entry is either a single port ("8080"), published to the same
    port number, or a "published:target" pair ("8080:80") mapping the
    published (host) port to the target (container) port.

    Args:
        port_values (dict): ports configuration values.

    Returns:
        docker.types.EndpointSpec with the published ports.
    """
    endpoints = {}
    for port_element in port_values:
        parts = str(port_element).split(':')
        if len(parts) == 2:
            # "published:target": map host port -> container port.
            # (Previously both ports were incorrectly mapped to
            # themselves.)
            published, target = parts
            endpoints[int(published)] = int(target)
        else:
            # Single port: publish it to the same port number.
            endpoints[int(parts[0])] = int(parts[0])
    endpoint_spec = docker.types.EndpointSpec(ports=endpoints)
    return endpoint_spec
Parse ports key. Args: port_values (dict): ports configuration values Returns: docker.types.EndpointSpec, endpoint specification containing the exposed/published ports
codesearchnet
class FuzzedExponentialIntervals(object):
    """Iterable of exponentially spaced retry intervals, with fuzzing.

    On iteration, yields retry interval lengths in seconds. Every
    iteration over this iterable yields differently fuzzed interval
    lengths, as long as fuzz is nonzero.

    Args:
        initial_delay_secs: The delay before the first retry, in seconds.
        num_retries: The total number of times to retry.
        factor: The exponential factor to use on subsequent retries.
            Default is 2 (doubling).
        fuzz: A value between 0 and 1, the fraction of fuzz. For a given
            delay d, the fuzzed delay is drawn from [(1 - fuzz) * d, d].
        max_delay_secs: Maximum single delay (in seconds). Defaults to
            1 hour.
        stop_after_secs: Caps the sum of yielded intervals (in seconds).
            Defaults to disabled (None).
    """

    def __init__(self, initial_delay_secs, num_retries, factor=2, fuzz=0.5,
                 max_delay_secs=60 * 60 * 1, stop_after_secs=None):
        if num_retries > 10000:
            raise ValueError('num_retries parameter cannot exceed 10000.')
        if not 0 <= fuzz <= 1:
            raise ValueError('fuzz parameter expected to be in [0, 1] range.')
        self._initial_delay_secs = initial_delay_secs
        self._num_retries = num_retries
        self._factor = factor
        self._fuzz = fuzz
        self._max_delay_secs = max_delay_secs
        self._stop_after_secs = stop_after_secs

    def __iter__(self):
        delay = min(self._max_delay_secs, self._initial_delay_secs)
        elapsed = 0
        for _ in range(self._num_retries):
            # Draw the fuzzed delay uniformly from [(1 - fuzz) * d, d].
            fuzzed = delay * (1 - self._fuzz + random.random() * self._fuzz)
            elapsed += fuzzed
            if (self._stop_after_secs is not None
                    and elapsed > self._stop_after_secs):
                break
            yield fuzzed
            delay = min(self._max_delay_secs, delay * self._factor)
Iterable for intervals that are exponentially spaced, with fuzzing. On iteration, yields retry interval lengths, in seconds. Every iteration over this iterable will yield differently fuzzed interval lengths, as long as fuzz is nonzero. Args: initial_delay_secs: The delay before the first retry, in seconds. num_retries: The total number of times to retry. factor: The exponential factor to use on subsequent retries. Default is 2 (doubling). fuzz: A value between 0 and 1, indicating the fraction of fuzz. For a given delay d, the fuzzed delay is randomly chosen between [(1 - fuzz) * d, d]. max_delay_secs: Maximum delay (in seconds). After this limit is reached, further tries use max_delay_sec instead of exponentially increasing the time. Defaults to 1 hour. stop_after_secs: Places a limit on the sum of intervals returned (in seconds), such that the sum is <= stop_after_secs. Defaults to disabled (None). You may need to increase num_retries to effectively use this feature.
github-repos
def text_pb(tag, data, description=None):
    """Create a text tf.Summary protobuf.

    Args:
        tag: String tag for the summary.
        data: A Python bytestring (of type bytes), a Unicode string, or a
            numpy data array of those types.
        description: Optional long-form description for this summary, as
            a `str`. Markdown is supported. Defaults to empty.

    Raises:
        TypeError: If the type of the data is unsupported.

    Returns:
        A `tf.Summary` protobuf object.
    """
    try:
        # Use the builtin `object` dtype: `np.object` was deprecated in
        # NumPy 1.20 and removed in NumPy 1.24 (it was a plain alias).
        tensor = tensor_util.make_tensor_proto(data, dtype=object)
    except TypeError as e:
        raise TypeError('tensor must be of type string', e)
    summary_metadata = metadata.create_summary_metadata(display_name=None, description=description)
    summary = summary_pb2.Summary()
    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
    return summary
Create a text tf.Summary protobuf. Arguments: tag: String tag for the summary. data: A Python bytestring (of type bytes), a Unicode string, or a numpy data array of those types. description: Optional long-form description for this summary, as a `str`. Markdown is supported. Defaults to empty. Raises: TypeError: If the type of the data is unsupported. Returns: A `tf.Summary` protobuf object.
codesearchnet
def summarize(self, highlight=None):
    """Get a text summary of the config.

    Args:
        highlight: A property name to highlight in the output.

    Returns:
        A `RichTextLines` output.
    """
    lines = [RL('Command-line configuration:', 'bold'), RL('')]
    for name, val in self._config.items():
        # Bold the requested property, if any.
        attr = 'bold' if name == highlight else None
        entry = RL(' ')
        entry += RL(name, ['underline', attr])
        entry += RL(': ')
        entry += RL(str(val), font_attr=attr)
        lines.append(entry)
    return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
Get a text summary of the config. Args: highlight: A property name to highlight in the output. Returns: A `RichTextLines` output.
github-repos
class MeanMetricWrapper(Mean):
    """Wrap a stateless metric function with the `Mean` metric.

    Use this class to quickly build a mean metric from a function. The
    function needs the signature `fn(y_true, y_pred)` and must return a
    per-sample loss array. `MeanMetricWrapper.result()` returns the
    average metric value across all samples seen so far.

    Args:
        fn: The metric function to wrap, with signature
            `fn(y_true, y_pred, **kwargs)`.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        **kwargs: Keyword arguments to pass on to `fn`.
    """

    def __init__(self, fn, name=None, dtype=None, **kwargs):
        super().__init__(name=name, dtype=dtype)
        self._fn = fn
        self._fn_kwargs = kwargs
        # If the wrapped fn is a known loss (function or class), smaller
        # values are better, so mark the direction accordingly.
        if self._fn in losses.ALL_OBJECTS or (hasattr(self._fn, '__class__') and self._fn.__class__ in losses.ALL_OBJECTS):
            self._direction = 'down'

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulate per-sample metric values, honoring mask and weights."""
        mask = backend.get_keras_mask(y_pred)
        values = self._fn(y_true, y_pred, **self._fn_kwargs)
        if sample_weight is not None and mask is not None:
            # Fold the Keras mask into the sample weights before averaging.
            sample_weight = losses.loss.apply_mask(sample_weight, mask, dtype=self.dtype, reduction='sum')
        return super().update_state(values, sample_weight=sample_weight)

    def get_config(self):
        """Return a serializable config including the wrapped fn."""
        base_config = super().get_config()
        config = {'fn': serialization_lib.serialize_keras_object(self._fn)}
        config.update(serialization_lib.serialize_keras_object(self._fn_kwargs))
        return {**base_config, **config}

    @classmethod
    def from_config(cls, config):
        """Rebuild the metric from a config produced by `get_config`."""
        if 'fn' in config:
            config = serialization_lib.deserialize_keras_object(config)
        return cls(**config)
Wrap a stateless metric function with the `Mean` metric. You could use this class to quickly build a mean metric from a function. The function needs to have the signature `fn(y_true, y_pred)` and return a per-sample loss array. `MeanMetricWrapper.result()` will return the average metric value across all samples seen so far. For example: ```python def mse(y_true, y_pred): return (y_true - y_pred) ** 2 mse_metric = MeanMetricWrapper(fn=mse) ``` Args: fn: The metric function to wrap, with signature `fn(y_true, y_pred, **kwargs)`. name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: Keyword arguments to pass on to `fn`.
github-repos
def _data_format_resolver(data_format, resolver_dict):
    """Resolve a value from `resolver_dict` based on `data_format`.

    Args:
        data_format (DataFormat or str): the data format; must be a
            member of DataFormat or a string equivalent.
        resolver_dict (dict): the resolving dict; may be keyed by
            DataFormat members or their string values.

    Returns:
        The value in `resolver_dict` matching `data_format`.

    Raises:
        ValueError: if `data_format` is not a valid DataFormat.
    """
    try:
        fmt = DataFormat(data_format)
    except ValueError:
        supported_formats = ', '.join(
            "'{}'".format(f.value) for f in DataFormat)
        raise ValueError(("'data_format' must be one of {formats}. Given "
                          "'{value}'.").format(formats=supported_formats,
                                               value=data_format))
    # Keys may be enum members or their string values.
    return (resolver_dict.get(fmt) or resolver_dict.get(fmt.value))
Resolve a value from :attr:`resolver_dict` based on the :attr:`data_format`. Args: data_format (:class:`~.DataFormat` or str): The data format; must be a member of :class:`~.DataFormat` or a string equivalent. resolver_dict (dict): the resolving dict. Can hold any value for any of the valid :attr:`data_format` strings Returns: The value of the key in :attr:`resolver_dict` that matches :attr:`data_format`
juraj-google-style
def assemble_config(partition: Partition, manifest: Manifest) -> Config:
    """Assemble the configuration for a single partition.

    Overlays the partition's selection parameters onto its output config,
    schedules the download target in the manifest, and returns the
    completed config.

    Args:
        partition: (name, params, out) triple describing one partition.
        manifest: manifest used to schedule the partition's target.

    Returns:
        A `Config` assembled out of subsection parameters and config
        shards.
    """
    name, params, out = partition
    # Merge the partition-specific parameters into the shared config.
    out.kwargs.update(params)
    out.subsection_name = name
    location = prepare_target_name(out)
    user = out.user_id
    manifest.schedule(out.config_name, out.dataset, out.selection, location, user)
    logger.info(f'[{name}] Created partition {location!r}.')
    beam.metrics.Metrics.counter('Subsection', name).inc()
    return out
Assemble the configuration for a single partition. For each cross product of the 'selection' sections, the output dictionary will overwrite parameters from the extra param subsections, evenly cycling through each subsection. For example: { 'parameters': {... 'api_key': KKKKK1, ... }, ... } { 'parameters': {... 'api_key': KKKKK2, ... }, ... } { 'parameters': {... 'api_key': KKKKK3, ... }, ... } { 'parameters': {... 'api_key': KKKKK1, ... }, ... } { 'parameters': {... 'api_key': KKKKK2, ... }, ... } { 'parameters': {... 'api_key': KKKKK3, ... }, ... } ... Returns: An `Config` assembled out of subsection parameters and config shards.
github-repos
def word_error_rate(ref: Sequence[T], hyp: Sequence[T]) -> float:
    """Calculate the word error rate of a sequence against a reference.

    Args:
        ref: The gold-standard reference sequence.
        hyp: The hypothesis to be evaluated against the reference.

    Returns:
        The word error rate (as a percentage) of the supplied hypothesis
        with respect to the reference sequence.

    Raises:
        EmptyReferenceException: If the length of the reference sequence
            is 0.
    """
    if not ref:
        # A zero-length reference would divide by zero below.
        # (Message grammar fixed: was "Cannot calculating ...".)
        raise EmptyReferenceException('Cannot calculate word error rate against a length 0 reference sequence.')
    distance = min_edit_distance(ref, hyp)
    return (100 * float(distance)) / len(ref)
Calculate the word error rate of a sequence against a reference. Args: ref: The gold-standard reference sequence hyp: The hypothesis to be evaluated against the reference. Returns: The word error rate of the supplied hypothesis with respect to the reference string. Raises: persephone.exceptions.EmptyReferenceException: If the length of the reference sequence is 0.
codesearchnet
def pars_in_groups(self):
    """Return a dictionary of parameter names in each parameter group.

    Returns:
        dict: group name -> list of parameter names in that group.
    """
    grouped = {}
    for group in self.par_groups:
        in_group = self.parameter_data.pargp == group
        grouped[group] = list(self.parameter_data.loc[in_group, 'parnme'])
    return grouped
return a dictionary of parameter names in each parameter group. Returns: dictionary
codesearchnet
def update_failover_dns_record(env, zone_id, **kwargs):
    """Create a Failover Route53 alias record in the _env_ zone.

    Args:
        env (str): Deployment environment / AWS profile name.
        zone_id (str): Route53 zone id.

    Keyword Args:
        dns_name (str): FQDN of the application's DNS entry to
            add/update.
        dns_ttl (int): DNS time-to-live (ttl).
        elb_aws_dns (str): DNS A record of the ELB from AWS.
        elb_dns_zone_id (str): Zone ID of the ELB DNS.
        failover_state (str): whether the record is primary or secondary.
        primary_region (str): Primary AWS region for DNS.

    Raises:
        PrimaryDNSRecordNotFound: when creating a non-primary record and
            no primary failover record exists yet.
    """
    client = boto3.Session(profile_name=env).client('route53')
    response = {}
    hosted_zone_info = client.get_hosted_zone(Id=zone_id)
    zone_name = hosted_zone_info['HostedZone']['Name'].rstrip('.')
    dns_name = kwargs.get('dns_name')
    failover_state = kwargs.get('failover_state')
    # A secondary record is only valid if a PRIMARY record already exists.
    if (failover_state.lower() != 'primary'):
        primary_record = find_existing_record(env, zone_id, dns_name, check_key='Failover', check_value='PRIMARY')
        if (not primary_record):
            raise PrimaryDNSRecordNotFound('Primary Failover DNS record not found: {}'.format(dns_name))
    # Only create the record when the DNS name belongs to this zone.
    if (dns_name and dns_name.endswith(zone_name)):
        dns_json = get_template(template_file='infrastructure/dns_failover_upsert.json.j2', **kwargs)
        LOG.info('Attempting to create DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)
        try:
            # A plain CNAME with the same name would conflict with the
            # failover alias record; remove it first.
            delete_existing_cname(env, zone_id, dns_name)
            response = client.change_resource_record_sets(HostedZoneId=zone_id, ChangeBatch=json.loads(dns_json))
            LOG.info('Upserted DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)
        except botocore.exceptions.ClientError as error:
            # Best-effort: log and continue rather than failing the deploy.
            LOG.info('Error creating DNS Failover record %s (%s) in Hosted Zone %s (%s)', dns_name, kwargs['elb_aws_dns'], zone_id, zone_name)
            LOG.debug(error)
    else:
        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)
    LOG.debug('Route53 JSON Response: \n%s', pformat(response))
Create a Failover Route53 alias record in _env_ zone. Args: env (str): Deployment environment. zone_id (str): Route53 zone id. Keyword Args: dns_name (str): FQDN of application's dns entry to add/update. dns_ttl (int): DNS time-to-live (ttl) elb_aws_dns (str): DNS A Record of ELB from AWS elb_dns_zone_id (str): Zone ID of ELB DNS failover_state (str): if the record is primary or secondary primary_region (str): Primary AWS region for DNS
codesearchnet
def GetContainingCondContext(ctxt):
    """Return the first ancestor CondContext of `ctxt`.

    Returns `ctxt` if `ctxt` is itself a CondContext, or None if `ctxt`
    is not nested inside any cond.

    Args:
        ctxt: ControlFlowContext or None.

    Returns:
        The most nested enclosing CondContext, or None.
    """
    current = ctxt
    while current:
        if current.IsCondContext():
            return current
        # Walk outward through the enclosing contexts.
        current = current.outer_context
    return None
Returns the first ancestor CondContext of `ctxt`. Returns `ctxt` if `ctxt` is a CondContext, or None if `ctxt` is not in a cond. Args: ctxt: ControlFlowContext Returns: `ctxt` if `ctxt` is a CondContext, the most nested CondContext containing `ctxt`, or None if `ctxt` is not in a cond.
github-repos
def space(self, newlines=1):
    """Create a vertical space of blank lines.

    Args:
        newlines (int): number of empty lines.

    Returns:
        self, for chaining.
    """
    blank = Space()
    for _ in range(newlines):
        blank.add_line('\n')
    # Insert the blank block at the current position and advance past it.
    self._container.structure.insert(self._idx, blank)
    self._idx += 1
    return self
Creates a vertical space of newlines Args: newlines (int): number of empty lines Returns: self for chaining
codesearchnet
def get_figure(new_fig=True, subplot='111', params=None):
    """Initialize a matplotlib figure and axes for plotting.

    Args:
        new_fig (bool): create a new figure if True; otherwise reuse the
            current figure.
        subplot (tuple or str): matplotlib subplot specifier.
        params (dict): extra options passed to add_subplot().

    Returns:
        Matplotlib Figure and Axes.
    """
    _get_plt()
    fig = plt.figure() if new_fig else plt.gcf()
    params = dict_if_none(params)
    # Tuple/list specifiers are splatted; strings are passed directly.
    if isinstance(subplot, (tuple, list)):
        ax = fig.add_subplot(*subplot, **params)
    else:
        ax = fig.add_subplot(subplot, **params)
    return (fig, ax)
Function to be used for viewing - plotting, to initialize the matplotlib figure - axes. Args: new_fig(bool): Defines if a new figure will be created, if false current figure is used subplot (tuple or matplolib subplot specifier string): Create axes with these parameters params (dict): extra options passed to add_subplot() Returns: Matplotlib Figure and Axes
codesearchnet
def _get_descending_key(gettime=time.time):
    """Return a key name lexically ordered by time descending.

    Scanning such keys in lexically ascending order returns rows in time
    descending order, which bypasses index building for descending
    indexes.

    Args:
        gettime: time function, injectable for testing.

    Returns:
        A string with a time descending key.
    """
    # Centiseconds remaining until _FUTURE_TIME: a later "now" produces a
    # smaller (lexically earlier) number.
    now_descending = int((_FUTURE_TIME - gettime()) * 100)
    suffix = os.environ.get('REQUEST_ID_HASH')
    if not suffix:
        # Outside a request context, fall back to a random suffix to keep
        # keys unique.
        suffix = str(random.getrandbits(32))
    return '%d%s' % (now_descending, suffix)
Returns a key name lexically ordered by time descending. This lets us have a key name for use with Datastore entities which returns rows in time descending order when it is scanned in lexically ascending order, allowing us to bypass index building for descending indexes. Args: gettime: Used for testing. Returns: A string with a time descending key.
codesearchnet
def clipped_zoom(img, zoom_factor):
    """Zoom image with clipping.

    Zoom the central part of the image and clip extra pixels.

    Args:
        img: numpy array, uncorrupted image.
        zoom_factor: zoom factor (values > 1 zoom in).

    Returns:
        numpy array, zoomed image after clipping back to the input size.
    """
    h = img.shape[0]
    # Size of the central crop that maps back onto the full height.
    ch = int(np.ceil(h / float(zoom_factor)))
    # Center the crop (the offsets previously lacked the // 2, which
    # cropped the bottom-right corner instead of the documented center).
    top_h = (h - ch)
    w = img.shape[1]
    cw = int(np.ceil(w / float(zoom_factor)))
    top_w = (w - cw)
    img = tfds.core.lazy_imports.scipy.ndimage.zoom(
        img[top_h:top_h + ch, top_w:top_w + cw], (zoom_factor, zoom_factor, 1),
        order=1)
    # Trim the zoomed result symmetrically back to the original size.
    trim_top_h = (img.shape[0] - h)
    trim_top_w = (img.shape[1] - w)
    return img[trim_top_h:trim_top_h + h, trim_top_w:trim_top_w + w]
Zoom image with clipping. Zoom the central part of the image and clip extra pixels. Args: img: numpy array, uncorrupted image. zoom_factor: numpy array, a sequence of float numbers for zoom factor. Returns: numpy array, zoomed image after clipping.
juraj-google-style
def get_torch_dataloader(self):
    """Get a Torch `DataLoader` for the `DataAdapter`.

    Returns:
        A Torch `DataLoader`.

    Raises:
        NotImplementedError: subclasses must override this method.
    """
    raise NotImplementedError
Get a Torch `DataLoader` for the `DataAdapter`. Returns: A Torch `DataLoader`.
github-repos
def str_to_etree(xml_str, encoding='utf-8'):
    """Deserialize an API XML doc to an ElementTree element.

    Args:
        xml_str: bytes or str holding the XML document.
        encoding: str decoder used when converting XML doc bytes to a
            Unicode str.

    Returns:
        xml.etree.ElementTree.Element: root of the parsed document.
    """
    xml_parser = xml.etree.ElementTree.XMLParser(encoding=encoding)
    return xml.etree.ElementTree.fromstring(xml_str, parser=xml_parser)
Deserialize API XML doc to an ElementTree. Args: xml_str: bytes DataONE API XML doc encoding: str Decoder to use when converting the XML doc ``bytes`` to a Unicode str. Returns: ElementTree: Matching the API version of the XML doc.
juraj-google-style
def __init__(self, dtensor_components: Tuple[tensor.Tensor], global_element_spec: tensor_spec.TensorSpec, layouts: Any):
    """Initializes a distributed iterator for DTensor datasets.

    This iterator encapsulates the per-device tf.data iterators as a
    packed DTensor of iterator resource tensors.

    Args:
        dtensor_components: a tuple containing the underlying iterator
            resources packed into a DTensor; expected to hold a single
            element.
        global_element_spec: the underlying dataset's element spec from a
            global view.
        layouts: a structure of DTensor layouts to be applied to the
            elements returned by the underlying iterators; must match the
            structure of the iterator elements.
    """
    # Unpack the single packed iterator resource tensor.
    [self._iterator_resource_dtensor] = dtensor_components
    self._global_element_spec = global_element_spec
    self._layouts = layouts
    # Keep a parallel structure of layout strings (e.g. for attaching to
    # ops or debugging).
    self._layouts_str = nest.map_structure(lambda layout: layout.to_string(), layouts)
    super().__init__(components=dtensor_components, element_spec=global_element_spec)
Initializes a distributed iterator for DTensor datasets. This iterator encapsulates tf.data iterators for the underlying devices, and treats it as a packed DTensor of iterator resource tensors. Args: dtensor_components: a tuple containing the underlying iterator resources packed into a DTensor. This is expected to be a tuple with a single element. global_element_spec: the underlying dataset's element spec from a global view. layouts: a structure of DTensor layouts to be applied to the elements returned by the underlying iterators. This can be a single layout or (possibly nested) tuples or dictionaries of layouts, and the structure must match the structure of the iterator elements.
github-repos
def dict_take(dict_, keys, default=util_const.NoParam):
    """Generate values from a dictionary for the given keys.

    Args:
        dict_ (Mapping): a dictionary to take from.
        keys (Iterable): the keys to take.
        default (object, optional): if specified, used for missing keys;
            when omitted, a missing key raises KeyError.

    Yields:
        The value for each key.
    """
    # Choose the lookup strategy once, outside the loop.
    if default is util_const.NoParam:
        lookup = dict_.__getitem__
    else:
        lookup = lambda key: dict_.get(key, default)
    for key in keys:
        yield lookup(key)
r""" Generates values from a dictionary Args: dict_ (Mapping): a dictionary to take from keys (Iterable): the keys to take default (object, optional): if specified uses default if keys are missing CommandLine: python -m ubelt.util_dict dict_take_gen Example: >>> import ubelt as ub >>> dict_ = {1: 'a', 2: 'b', 3: 'c'} >>> keys = [1, 2, 3, 4, 5] >>> result = list(ub.dict_take(dict_, keys, None)) >>> assert result == ['a', 'b', 'c', None, None] Example: >>> import ubelt as ub >>> dict_ = {1: 'a', 2: 'b', 3: 'c'} >>> keys = [1, 2, 3, 4, 5] >>> try: >>> print(list(ub.dict_take(dict_, keys))) >>> raise AssertionError('did not get key error') >>> except KeyError: >>> print('correctly got key error')
codesearchnet
def Log(self, format_str, *args):
    """Logs the message using the hunt's standard logging.

    Args:
        format_str: Format string.
        *args: arguments to the format string.
    """
    format_str = utils.SmartUnicode(format_str)
    status = format_str
    if args:
        try:
            status = (format_str % args)
        except TypeError:
            # Tolerate a bad format string rather than failing the hunt.
            logging.error('Tried to log a format string with the wrong number of arguments: %s', format_str)
    logging.info('%s: %s', self.session_id, status)
    self.context.status = utils.SmartUnicode(status)
    # Persist the log entry into the hunt's logs collection.
    log_entry = rdf_flows.FlowLog(client_id=None, urn=self.session_id, flow_name=self.hunt_obj.__class__.__name__, log_message=status)
    logs_collection_urn = self.hunt_obj.logs_collection_urn
    with data_store.DB.GetMutationPool() as pool:
        grr_collections.LogCollection.StaticAdd(logs_collection_urn, log_entry, mutation_pool=pool)
Logs the message using the hunt's standard logging. Args: format_str: Format string *args: arguments to the format string Raises: RuntimeError: on parent missing logs_collection
codesearchnet
def sell(self, product_id, order_type, **kwargs):
    """Place a sell order.

    Kept for backwards compatibility with older versions of this client;
    for maximum support from docstrings and function signatures see the
    order-type-specific functions place_limit_order, place_market_order,
    and place_stop_order.

    Args:
        product_id (str): Product to order (eg. 'BTC-USD').
        order_type (str): Order type ('limit', 'market', or 'stop').
        **kwargs: Additional order-type-specific arguments.

    Returns:
        dict: Order details, as returned by place_order.
    """
    return self.place_order(product_id, 'sell', order_type, **kwargs)
Place a sell order. This is included to maintain backwards compatibility with older versions of cbpro-Python. For maximum support from docstrings and function signatures see the order type-specific functions place_limit_order, place_market_order, and place_stop_order. Args: product_id (str): Product to order (eg. 'BTC-USD') order_type (str): Order type ('limit', 'market', or 'stop') **kwargs: Additional arguments can be specified for different order types. Returns: dict: Order details. See `place_order` for example.
codesearchnet
def __init__(self, getter, verbose=False):
    """Initializes a contextual switch for a custom getter.

    Args:
        getter: The custom getter which we may want to switch on.
        verbose: Log out every time a variable is fetched, and whether or
            not `getter` is used.
    """
    # Number of times the getter has been invoked so far.
    self._count = 0
    self._getter = getter
    self._verbose = verbose
Initializes a contextual switch for a custom getter. Args: getter: The custom getter which we may want to switch on. verbose: Log out every time a variable is fetched, and whether or not `getter` is used. Returns: A custom getter which can also be used as a context manager. Entering the context enables the custom getter.
juraj-google-style
def get_request_data(self, path, action, body=None):
    """Get the default response data for a path + action request.

    Args:
        path: path of the request.
        action: HTTP action of the request (get, post, delete, ...).
        body: request body; defaults to ''.

    Returns:
        dict mapping status code (int where parseable, otherwise the raw
        key such as 'default') to an example response body. Falls back to
        {400: ''} when the spec defines no responses for this action.
    """
    body = body or ''
    path_name, path_spec = self.get_path_spec(path)
    response = {}
    # Only specs that actually define this action contribute examples.
    if path_spec is not None and action in path_spec:
        for status_code, resp_spec in path_spec[action]['responses'].items():
            # Build the example once (the original recomputed it when the
            # status key was non-numeric).
            example = self.get_response_example(resp_spec)
            try:
                # Numeric codes are keyed as ints ('200' -> 200).
                response[int(status_code)] = example
            except ValueError:
                # Non-numeric keys such as 'default' are kept verbatim.
                response[status_code] = example
    if not response:
        response[400] = ''
    return response
Get the default data and status code of the given path + action request. Args: path: path of the request. action: action of the request (get, post, delete, ...). body: body sent, used to send it back for post requests. Returns: A tuple with the default response data and status code. In case of default status_code, use 0.
juraj-google-style
def authenticate(self, user, password):
    """Obtain a Simplenote (Simperium) auth token.

    Args:
        user: simplenote email address.
        password: simplenote password.

    Returns:
        Simplenote API token as a string, or None on network (IOError)
        failure.

    Raises:
        SimplenoteLoginFailed: when the API rejects the credentials.
    """
    request = Request(AUTH_URL)
    request.add_header('X-Simperium-API-Key', API_KEY)
    # Request.add_data() was removed in Python 3.3; assign .data (bytes) there.
    if sys.version_info < (3, 3):
        request.add_data(json.dumps({'username': user, 'password': password}))
    else:
        request.data = json.dumps({'username': user, 'password': password}).encode()
    try:
        res = urllib2.urlopen(request).read()
        token = json.loads(res.decode('utf-8'))["access_token"]
    except HTTPError:
        raise SimplenoteLoginFailed('Login to Simplenote API failed!')
    except IOError:
        # Network-level failure is reported as "no token" rather than raising.
        token = None
    return token
Method to get simplenote auth token Arguments: - user (string): simplenote email address - password (string): simplenote password Returns: Simplenote API token as string
juraj-google-style
def get_file_diff(tree, files_to_diff):
    """Determine which of *files_to_diff* are missing from Kolibri Studio.

    Args:
        tree (ChannelManager): manager handling communication with Kolibri Studio.
        files_to_diff: candidate files to check against the server.

    Returns:
        list of files that are not yet on Kolibri Studio.
    """
    config.LOGGER.info("\nChecking if files exist on Kolibri Studio...")
    return tree.get_file_diff(files_to_diff)
get_file_diff: Download files from nodes Args: tree (ChannelManager): manager to handle communication to Kolibri Studio Returns: list of files that are not on Kolibri Studio
juraj-google-style
def read_samples(self, sr=None, offset=0, duration=None):
    """Return the samples from the track stored in the container.

    Args:
        sr (int): target sampling rate; if None the native rate is kept,
            otherwise the samples are resampled with librosa.
        offset (float): seconds from the file start at which to begin reading.
        duration (float): length in seconds to read; None reads to the end.

    Returns:
        np.ndarray: float time series of the selected samples.
    """
    with self.container.open_if_needed(mode='r') as cnt:
        (samples, native_sr) = cnt.get(self.key)
        # Convert the time window to sample indices at the native rate.
        start_sample_index = int((offset * native_sr))
        if (duration is None):
            end_sample_index = samples.shape[0]
        else:
            end_sample_index = int(((offset + duration) * native_sr))
        samples = samples[start_sample_index:end_sample_index]
        # Resample only when a different target rate was requested.
        if ((sr is not None) and (sr != native_sr)):
            samples = librosa.core.resample(samples, native_sr, sr, res_type='kaiser_best')
        return samples
Return the samples from the track in the container. Uses librosa for resampling, if needed. Args: sr (int): If ``None``, uses the sampling rate given by the file, otherwise resamples to the given sampling rate. offset (float): The time in seconds, from where to start reading the samples (rel. to the file start). duration (float): The length of the samples to read in seconds. Returns: np.ndarray: A numpy array containing the samples as a floating point (numpy.float32) time series.
codesearchnet
def is_tensor_final(self, tensor_name):
    """Report whether the named tensor is a final output of the computation.

    Args:
        tensor_name: a string, name of a tensor in the graph.

    Returns:
        bool: True when the resolved tensor is one of the final outputs.
    """
    return self._name_to_tensor(tensor_name) in self._final_tensors
Whether a tensor is a final output of the computation. Args: tensor_name: a string, name of a tensor in the graph. Returns: a boolean indicating whether the tensor was a final output.
juraj-google-style
def numeric_function_clean_dataframe(self, axis):
    """Preprocess a numeric reduction: drop non-numeric columns, short-circuit trivial cases.

    Args:
        axis: 0 for a column-wise reduction, 1 for row-wise.

    Returns:
        Tuple of (precomputed result or None, query compiler restricted to
        the numeric columns).
    """
    result = None
    query_compiler = self
    # Column-wise reduction of an empty frame: answer is an empty int64 Series.
    if ((not axis) and (len(self.index) == 0)):
        result = pandas.Series(dtype=np.int64)

    nonnumeric = [col for (col, dtype) in zip(self.columns, self.dtypes) if (not is_numeric_dtype(dtype))]
    if (len(nonnumeric) == len(self.columns)):
        # Every column is non-numeric: short-circuit with NaN (rows) or 0.
        # NOTE(review): the axis=0 branch builds a Series sized by the index,
        # not the columns — confirm this matches the caller's expectation.
        if axis:
            result = pandas.Series([np.nan for _ in self.index])
        else:
            result = pandas.Series([0 for _ in self.index])
    else:
        query_compiler = self.drop(columns=nonnumeric)
    return (result, query_compiler)
Preprocesses numeric functions to clean dataframe and pick numeric indices. Args: axis: '0' if columns and '1' if rows. Returns: Tuple with return value(if any), indices to apply func to & cleaned Manager.
codesearchnet
def execute(self, asm_instr):
    """Execute one assembler instruction in the IR emulator.

    Args:
        asm_instr (X86Instruction): instruction to execute.

    Returns:
        int: address of the next instruction to execute.

    Raises:
        Syscall: when the instruction is a system call; execution is
            delegated to the caller instead of the emulator.
    """
    # Advance the emulated instruction pointer past this instruction first,
    # matching hardware semantics (IP points at the *next* instruction).
    self.ir_emulator.registers[self.ip] = (asm_instr.address + asm_instr.size)
    if self.arch_info.instr_is_syscall(asm_instr):
        raise Syscall()
    return self.__execute(asm_instr)
Execute an assembler instruction. Args: asm_instr (X86Instruction): An instruction to execute. Returns: An int. The address of the next instruction to execute.
codesearchnet
def get_member_slackuid(self, slack):
    """Look up a CSHMember by Slack UID.

    Args:
        slack: the Slack UID of the member.

    Returns:
        CSHMember instance, or None when no LDAP entry carries this slackuid.
    """
    # NOTE(review): `slack` is interpolated into the LDAP filter unescaped;
    # consider ldap.filter.escape_filter_chars if inputs are untrusted.
    members = self.__con__.search_s(
        CSHMember.__ldap_user_ou__,
        ldap.SCOPE_SUBTREE,
        "(slackuid=%s)" % slack,
        ['ipaUniqueID'])
    if members:
        # Use the first match; entries are (dn, attrs) pairs.
        return CSHMember(
            self,
            members[0][1]['ipaUniqueID'][0].decode('utf-8'),
            False)
    return None
Get a CSHMember object. Arguments: slack -- the Slack UID of the member Returns: None if the Slack UID provided does not correspond to a CSH Member
juraj-google-style
def detect(self, text):
    """Detect which language *text* is written in.

    Tries a reliable detection first; falls back to cld2's best-effort
    mode when that fails. Longer snippets give more reliable results.

    Args:
        text (string): snippet of text to classify.

    Returns:
        The top Language choice (also stored on self.language).

    Raises:
        UnknownLanguage: when even best-effort detection is unreliable
            and self.quiet is False.
    """
    t = text.encode('utf-8')
    (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=False)
    if (not reliable):
        self.reliable = False
        # Retry in best-effort mode when the strict pass is unreliable.
        (reliable, index, top_3_choices) = cld2.detect(t, bestEffort=True)
        if (not self.quiet):
            if (not reliable):
                raise UnknownLanguage('Try passing a longer snippet of text')
            else:
                logger.warning('Detector is not able to detect the language reliably.')
    self.languages = [Language(x) for x in top_3_choices]
    self.language = self.languages[0]
    return self.language
Decide which language is used to write the text. The method tries first to detect the language with high reliability. If that is not possible, the method switches to best effort strategy. Args: text (string): A snippet of text, the longer it is the more reliable we can detect the language used to write the text.
codesearchnet
def created(cls, data=None):
    """Build an HTTP 201 `Created` JSON response.

    Args:
        data (object): response key/value payload.

    Returns:
        The JSON form of a 201 WSResponse instance.
    """
    status = 201
    if cls.expose_status:
        # Expose the status on the underlying response object as well.
        cls.response.content_type = 'application/json'
        cls.response._status_line = '201 Created'
    return cls(status, data=data).to_json
Shortcut API for HTTP 201 `Created` response. Args: data (object): Response key/value data. Returns: WSResponse Instance.
codesearchnet
def search_users(self, user):
    """Search the JSS for LDAP users matching *user*.

    It is not entirely clear how the JSS interprets the query (regex,
    globbing, or exact match) — confirm against the server docs.

    Args:
        user: user string to search for.

    Returns:
        LDAPUsersResults object wrapping the server response.

    Raises:
        JSSGetError: when no results are found.
    """
    user_url = "{}/{}/{}".format(self.url, "user", user)
    response = self.jss.get(user_url)
    return LDAPUsersResults(self.jss, response)
Search for LDAP users. Args: user: User to search for. It is not entirely clear how the JSS determines the results- are regexes allowed, or globbing? Returns: LDAPUsersResult object. Raises: Will raise a JSSGetError if no results are found.
juraj-google-style
def create_volume(self, name=None, driver=None, driver_opts=None, labels=None):
    """Create and register a named volume.

    Args:
        name (str): name of the volume.
        driver (str): name of the driver used to create the volume.
        driver_opts (dict): driver options as a key-value dictionary.
        labels (dict): labels to set on the volume (requires API >= 1.23).

    Returns:
        dict: the created volume reference object.

    Raises:
        TypeError: when driver_opts or labels is not a dictionary.
        docker.errors.InvalidVersion: labels requested on an API < 1.23.
        docker.errors.APIError: when the server returns an error.
    """
    url = self._url('/volumes/create')
    if driver_opts is not None and not isinstance(driver_opts, dict):
        raise TypeError('driver_opts must be a dictionary')

    payload = {'Name': name, 'Driver': driver, 'DriverOpts': driver_opts}
    if labels is not None:
        # Volume labels only exist from API version 1.23 onwards.
        if utils.compare_version('1.23', self._version) < 0:
            raise errors.InvalidVersion('volume labels were introduced in API 1.23')
        if not isinstance(labels, dict):
            raise TypeError('labels must be a dictionary')
        payload['Labels'] = labels
    return self._result(self._post_json(url, data=payload), True)
Create and register a named volume Args: name (str): Name of the volume driver (str): Name of the driver used to create the volume driver_opts (dict): Driver options as a key-value dictionary labels (dict): Labels to set on the volume Returns: (dict): The created volume reference object Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> volume = cli.create_volume(name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'}, labels={"key": "value"}) >>> print(volume) {u'Driver': u'local', u'Labels': {u'key': u'value'}, u'Mountpoint': u'/var/lib/docker/volumes/foobar/_data', u'Name': u'foobar', u'Scope': u'local'}
codesearchnet
def add(TargetGroup, NewMember, Config=None, Args=None):
    r"""Add a member (Task or Group) to an existing group.

    Args:
        TargetGroup (Group): the target group for the addition.
        NewMember (Group / Task): the member to be added; plain functions
            become Tasks, anything else is wrapped as a Group.
        Config (dict): the config for the member.
        Args (OrderedDict): ArgConfig for NewMember, if it's a task (optional).
    """
    # Fix: the original body began with a stray bare `r` expression (the
    # severed raw-docstring prefix), which raised NameError at call time.
    if isfunction(NewMember):
        Member = Task(NewMember, Args or {}, Config or {})
    else:
        Member = Group(NewMember, Config or {})
    ParentMembers = TargetGroup.__ec_member__.Members
    ParentMembers[Member.Config['name']] = Member
    # Also register the member under its alias, when one is configured.
    alias = Member.Config.get('alias')
    if alias:
        ParentMembers[alias] = Member
r"""Adds members to an existing group. Args: TargetGroup (Group): The target group for the addition. NewMember (Group / Task): The member to be added. Config (dict): The config for the member. Args (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).
juraj-google-style
def blocksearch(block, name):
    """Recursively search *block* and its inner blocks for *name*.

    Args:
        block: node whose ``tokens[1]`` (when present) holds child nodes.
        name (str): search term compared against each child's ``raw()``.

    Returns:
        The matching block, or False when nothing matches.
    """
    if not hasattr(block, 'tokens'):
        return False
    for child in block.tokens[1]:
        if hasattr(child, 'raw') and child.raw() == name:
            hit = child
        else:
            hit = blocksearch(child, name)
        if hit:
            return hit
    return False
Recursive search for name in block (inner blocks) Args: name (str): search term Returns: Block OR False
juraj-google-style
def print_all_configs(configs, missing, warning): print_text = '' llen = 65 for i, row in enumerate(configs): if i != 0: print_text += '-' * llen + '\n' if isinstance(row[1], list): val = ', '.join(row[1]) else: val = row[1] print_text += ' {: <28}'.format(row[0]) + ' {: <25}'.format(val) + '\n' print_text += '=' * llen print('\n\n {: ^32} {: ^25}'.format('Configuration(s)', 'Detected value(s)')) print('=' * llen) print(print_text) if missing: print('\n * ERROR: The following configurations are missing:') for m in missing: print(' ', *m) if warning: print('\n * WARNING: The following configurations could cause issues:') for w in warning: print(' ', *w) if not missing and (not warning): print('\n * INFO: Successfully found all configurations.') print('\n')
Prints the status and info on all configurations in a table format. Args: configs: List of all configurations found. missing: List of all configurations that are missing. warning: List of all configurations found with warnings.
github-repos
def matches(node, pattern):
    """Basic AST pattern matcher; '_' Name nodes in *pattern* are wildcards.

    A node matches when every node in the tree has a same-typed node in
    the pattern, or a Name node with id='_'.

    Args:
        node: ast.AST to test.
        pattern: ast.AST, or a source string parsed via parser.parse_str.

    Returns:
        bool: True when *node* matches *pattern*.
    """
    tree = parser.parse_str(pattern) if isinstance(pattern, str) else pattern
    pattern_matcher = PatternMatcher(tree)
    pattern_matcher.visit(node)
    return pattern_matcher.matches
Basic pattern matcher for AST. The pattern may contain wildcards represented by the symbol '_'. A node matches a pattern if for every node in the tree, either there is a node of the same type in pattern, or a Name node with id='_'. Args: node: ast.AST pattern: ast.AST Returns: bool
github-repos
def _set_auditpol_data(option, value):
    """Apply an audit setting via auditpol so live state matches audit.csv.

    Used instead of running `gpupdate` after editing the csv files.

    Args:
        option (str): name of the audit option to set.
        value (str): one of 'None', '0', '1', '2', '3'.

    Returns:
        bool: ``True`` if successful, otherwise ``False``.
    """
    # Map csv values to the names auditpol understands; an unknown value
    # raises KeyError, matching the original behavior.
    value_names = {
        'None': 'No Auditing',
        '0': 'No Auditing',
        '1': 'Success',
        '2': 'Failure',
        '3': 'Success and Failure',
    }
    defaults = _get_audit_defaults(option)
    return __utils__['auditpol.set_setting'](
        name=defaults['Auditpol Name'],
        value=value_names[value])
Helper function that updates the current applied settings to match what has just been set in the audit.csv files. We're doing it this way instead of running `gpupdate` Args: option (str): The name of the option to set value (str): The value to set. ['None', '0', '1', '2', '3'] Returns: bool: ``True`` if successful, otherwise ``False``
juraj-google-style
def xml(self, xml):
    """Register an XML body expectation on this mock request.

    Args:
        xml (str|regex): XML body to match against.

    NOTE(review): upstream docs claim this returns self for chaining, but
    there is no return statement here — confirm whether a `return self`
    is intended.
    """
    self._request.xml = xml
    self.add_matcher(matcher('XMLMatcher', xml))
Defines a XML body value to match. Arguments: xml (str|regex): body XML to match. Returns: self: current Mock instance.
codesearchnet
def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func=None):
    """Constructor for the SavedModel-based TFLite converter.

    Args:
        saved_model_dir: directory of the SavedModel.
        saved_model_tags: set of tags identifying the MetaGraphDef to load;
            all tags in the set must be present.
        saved_model_exported_names: signature names to export; exactly one
            signature key is supported.
        experimental_debug_info_func: experimental function retrieving graph
            debug info for a set of nodes from the graph_def.

    Raises:
        ValueError: when more than one signature key is given.
    """
    super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func)
    self.saved_model_dir = saved_model_dir
    self._saved_model_tags = saved_model_tags
    self._saved_model_exported_names = saved_model_exported_names
    if len(self._saved_model_exported_names) != 1:
        raise ValueError('Only supports a single signature key.')
    signature_key = self._saved_model_exported_names[0]
    # Freeze the SavedModel eagerly; the result is (graph_def, inputs, outputs).
    result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key)
    self._graph_def = result[0]
    self._input_tensors = result[1]
    self._output_tensors = result[2]
    self._parse_saved_model_args()
Constructor for TFLiteConverter. Args: saved_model_dir: Directory of the SavedModel. saved_model_tags: Set of tags identifying the MetaGraphDef within the SavedModel to analyze. All tags in the tag set must be present. (default {tf.saved_model.SERVING}). saved_model_exported_names: Names to be exported when the saved model import path is on. experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the `graph_def`. Raises: ValueError: Invalid arguments.
github-repos
class PatchTSMixerBlock(nn.Module):
    """The main computing framework of the `PatchTSMixer` model.

    Args:
        config (`PatchTSMixerConfig`): Configuration.
    """

    def __init__(self, config: PatchTSMixerConfig):
        super().__init__()
        self.mixers = nn.ModuleList([PatchTSMixerLayer(config=config) for _ in range(config.num_layers)])

    def forward(self, hidden_state, output_hidden_states: bool = False):
        # Collect per-layer embeddings only when requested.
        collected = [] if output_hidden_states else None
        embedding = hidden_state
        for mixer in self.mixers:
            embedding = mixer(embedding)
            if collected is not None:
                collected.append(embedding)
        return (embedding, collected)
The main computing framework of the `PatchTSMixer` model. Args: config (`PatchTSMixerConfig`): Configuration.
github-repos
def from_text_vision_configs(cls, text_config: XCLIPTextConfig, vision_config: XCLIPVisionConfig, **kwargs):
    """Instantiate an [`XCLIPConfig`] (or derived class) from text and vision sub-configs.

    Args:
        text_config: the xclip text model configuration.
        vision_config: the xclip vision model configuration.
        **kwargs: forwarded to the constructor.

    Returns:
        [`XCLIPConfig`]: an instance of the configuration object.
    """
    text_dict = text_config.to_dict()
    vision_dict = vision_config.to_dict()
    return cls(text_config=text_dict, vision_config=vision_dict, **kwargs)
Instantiate a [`XCLIPConfig`] (or a derived class) from xclip text model configuration and xclip vision model configuration. Returns: [`XCLIPConfig`]: An instance of a configuration object
github-repos
def dimension_values(self, dimension, expanded=True, flat=True):
    """Return the values along the requested dimension.

    Args:
        dimension: the dimension to return values for.
        expanded (bool, optional): whether to expand values.
        flat (bool, optional): whether to flatten the array.

    Returns:
        NumPy array of values along the requested dimension.
    """
    index = self.get_dimension_index(dimension)
    # Dimensions 0 and 1 correspond to the arrow's x and y position.
    if index in (0, 1):
        return np.array([self.x if index == 0 else self.y])
    return super(Arrow, self).dimension_values(dimension)
Return the values along the requested dimension. Args: dimension: The dimension to return values for expanded (bool, optional): Whether to expand values flat (bool, optional): Whether to flatten array Returns: NumPy array of values along the requested dimension
juraj-google-style
def from_string(key_pem, is_x509_cert):
    """Construct an OpenSSLVerifier from a PEM string.

    Args:
        key_pem: string, public key in PEM format.
        is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it
            is expected to be an RSA key in PEM format.

    Returns:
        OpenSSLVerifier instance wrapping the parsed key.

    Raises:
        OpenSSL.crypto.Error: if the key_pem can't be parsed.
    """
    key_pem = _helpers._to_bytes(key_pem)
    if is_x509_cert:
        pubkey = crypto.load_certificate(crypto.FILETYPE_PEM, key_pem)
    else:
        # NOTE(review): this branch parses via load_privatekey even though
        # the docstring says "public key" — confirm the expected PEM type.
        pubkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key_pem)
    return OpenSSLVerifier(pubkey)
Construct a Verified instance from a string. Args: key_pem: string, public key in PEM format. is_x509_cert: bool, True if key_pem is an X509 cert, otherwise it is expected to be an RSA key in PEM format. Returns: Verifier instance. Raises: OpenSSL.crypto.Error: if the key_pem can't be parsed.
juraj-google-style
def write(self, data):
    """Buffer some bytes for writing, flushing once the threshold is hit.

    Args:
        data (str): data to append to the write buffer.

    Raises:
        TypeError: if data is not of type str.
    """
    self._check_open()
    if not isinstance(data, str):
        raise TypeError('Expected str but got %s.' % type(data))
    if not data:
        return
    size = len(data)
    self._buffer.append(data)
    self._buffered += size
    self._offset += size
    # Flush once enough data has accumulated.
    if self._buffered >= self._flushsize:
        self._flush()
Write some bytes. Args: data: data to write. str. Raises: TypeError: if data is not of type str.
codesearchnet