code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _apply_conv(self, inputs, w):
    """Apply a depthwise-separable 2-D convolution to `inputs`.

    Args:
      inputs: Input tensor laid out according to this module's data format,
        of type `tf.float16`, `tf.bfloat16` or `tf.float32`.
      w: Pair `(w_dw, w_pw)` of the depthwise and pointwise filter tensors,
        of the same type as `inputs`.

    Returns:
      The tensor produced by `tf.nn.separable_conv2d` on `inputs`.
    """
    depthwise_filter, pointwise_filter = w
    return tf.nn.separable_conv2d(
        inputs,
        depthwise_filter,
        pointwise_filter,
        rate=self._rate,
        strides=self.stride,
        padding=self._conv_op_padding,
        data_format=self._data_format)
Apply a `separable_conv2d` operation on `inputs` using `w`. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. w: A tuple of weight matrices of the same type as `inputs`, the first being the depthwise weight matrix, and the second being the pointwise weight matrix. Returns: outputs: The result of the convolution operation on `inputs`.
juraj-google-style
def is_storage(url, storage=None):
    """Check if a file is a local file or a storage file.

    A file is considered local if:
      - the URL is a plain local path (no scheme), or
      - the URL starts with "file://".
    If a `storage` name is provided, the file is always treated as a
    storage file.

    Note: the original split the URL on a truncated separator; this splits
    on the full scheme separator "://" (at most once, so any later "://"
    in the path cannot confuse the check).

    Args:
        url (str): file path or URL.
        storage (str): Storage name.

    Returns:
        bool: True if the file is a storage file, False if it is local.
    """
    if storage:
        return True
    split_url = url.split('://', 1)
    if len(split_url) == 2 and split_url[0].lower() != 'file':
        return True
    return False
Check if file is a local file or a storage file. File is considered local if: - URL is a local path. - URL starts by "file://" - a "storage" is provided. Args: url (str): file path or URL storage (str): Storage name. Returns: bool: return True if file is local.
juraj-google-style
def association(self, group_xid):
    """Associate this Indicator with a Group, referenced by its xid.

    Args:
        group_xid (str): The external id of the Group to associate.
    """
    # Create the group list on first use, then append the association.
    associated_groups = self._indicator_data.setdefault('associatedGroups', [])
    associated_groups.append({'groupXid': group_xid})
Add association using xid value. Args: group_xid (str): The external id of the Group to associate.
codesearchnet
def hint_for_accuracy(self, accuracy="normal"):
    """Return a :class:`Hint` with the suggested ecut [Ha] and pawecutdg [Ha].

    ecut and pawecutdg are set to zero if no hint is available.

    Args:
        accuracy: One of ["low", "normal", "high"].
    """
    if self.has_dojo_report:
        # Prefer the validated "hints" entry; fall back to the
        # pseudopotential-generator hints when it is absent.
        for key in ("hints", "ppgen_hints"):
            if key in self.dojo_report:
                return Hint.from_dict(self.dojo_report[key][accuracy])
    # No report or no hint entry: return a zero-valued hint.
    return Hint(ecut=0., pawecutdg=0.)
Returns a :class:`Hint` object with the suggested value of ecut [Ha] and pawecutdg [Ha] for the given accuracy. ecut and pawecutdg are set to zero if no hint is available. Args: accuracy: ["low", "normal", "high"]
juraj-google-style
def incident(self, name, **kwargs):
    """Add Incident data to this Batch object.

    Args:
        name (str): The name for this Group.
        **kwargs: Optional Group fields such as ``date_added``,
            ``event_date``, ``status`` and ``xid``.

    Returns:
        Incident: The Incident instance registered with this batch.
    """
    incident_group = Incident(name, **kwargs)
    return self._group(incident_group)
Add Incident data to Batch object. Args: name (str): The name for this Group. date_added (str, kwargs): The date timestamp the Indicator was created. event_date (str, kwargs): The event datetime expression for this Group. status (str, kwargs): The status for this Group. xid (str, kwargs): The external id for this Group. Returns: obj: An instance of Incident.
juraj-google-style
def set_logging_verbosity(level):
    """Sets the verbosity level for logging.

    Supported levels, from least to most verbose:
    `"FATAL"`, `"ERROR"`, `"WARNING"`, `"INFO"`, `"DEBUG"`.

    Args:
        level: A string naming the desired verbosity level.

    Raises:
        ValueError: If `level` is not one of the supported names.
    """
    valid_levels = {
        'FATAL': logging.FATAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }
    verbosity = valid_levels.get(level)
    if verbosity is None:
        raise ValueError(f'Please pass a valid level for logging verbosity. Expected one of: {set(valid_levels.keys())}. Received: {level}')
    logging.set_verbosity(verbosity)
Sets the verbosity level for logging. Supported log levels are as follows: - `"FATAL"` (least verbose) - `"ERROR"` - `"WARNING"` - `"INFO"` - `"DEBUG"` (most verbose) Args: level: A string corresponding to the level of verbosity for logging.
github-repos
def _check_keyword_only_parameters(method_signature, base_signature, is_subtype): base_kwonly_params = set(base_signature.kwonly_params) method_kwonly_params = set(method_signature.kwonly_params) method_defaults = set(method_signature.defaults) if not base_signature.kwargs_name: for method_param_name in method_kwonly_params.difference(base_kwonly_params).difference(method_defaults): return SignatureError(SignatureErrorType.DEFAULT_PARAMETER_MISMATCH, f"Parameter '{method_param_name}' must have a default value.") for base_param_name in base_kwonly_params.difference(method_kwonly_params): try: method_param_index = method_signature.param_names.index(base_param_name) except ValueError: if not method_signature.kwargs_name: return SignatureError(SignatureErrorType.KWONLY_PARAMETER_NAME_MISMATCH, f"Parameter '{base_param_name}' not found in overriding method.") else: if method_param_index < method_signature.posonly_count: return SignatureError(SignatureErrorType.KWONLY_PARAMETER_NAME_MISMATCH, f"Keyword-only parameter '{base_param_name}' of the overridden method has the same name as a positional-only parameterof the overriding method.") for base_param_name in base_signature.kwonly_params: try: base_param_type = base_signature.annotations[base_param_name] except KeyError: continue if base_param_name in method_kwonly_params or base_param_name in method_signature.param_names: method_param_name = base_param_name elif method_signature.kwargs_name: method_param_name = method_signature.kwargs_name else: continue try: method_param_type = method_signature.annotations[method_param_name] except KeyError: continue if method_param_name == method_signature.kwargs_name: if isinstance(method_param_type, abstract.ParameterizedClass): method_param_type = method_param_type.get_formal_type_parameter(abstract_utils.V) else: continue if not is_subtype(base_param_type, method_param_type): return SignatureError(SignatureErrorType.KWONLY_PARAMETER_TYPE_MISMATCH, f"Type mismatch for parameter 
'{base_param_name}'.") return None
Checks that the keyword-only parameters of the overriding method match. Args: method_signature: signature of the overriding method. base_signature: signature of the overridden method. is_subtype: a binary function to compare types. Returns: SignatureError if a mismatch is detected. Otherwise returns None.
github-repos
def read_tree_nexml(nexml):
    """Read tree(s) from a NeXML string or file.

    Args:
        ``nexml`` (``str``): Either a NeXML string or the path to a NeXML
            file (plain-text or gzipped).

    Returns:
        ``dict`` of ``Tree``: The trees represented by ``nexml``, keyed by
            tree id (``str``).

    Raises:
        TypeError: if ``nexml`` is not a string.
        ValueError: if the NeXML content is malformed.
    """
    if not isinstance(nexml, str):
        raise TypeError("nexml must be a str")
    # Decide input source: gzipped file, plain-text file, or raw string.
    if nexml.lower().endswith('.gz'):
        f = gopen(expanduser(nexml))
    elif isfile(expanduser(nexml)):
        f = open(expanduser(nexml))
    else:
        f = nexml.splitlines()
    trees = dict(); id_to_node = dict(); tree_id = None
    for line in f:
        if isinstance(line, bytes):
            l = line.decode().strip()
        else:
            l = line.strip()
        l_lower = l.lower()
        # Opening <tree ...> tag: start a new tree keyed by its id attribute.
        if l_lower.startswith('<tree '):
            if tree_id is not None:
                raise ValueError(INVALID_NEXML)  # nested <tree> not allowed
            parts = l.split()
            for part in parts:
                if '=' in part:
                    k, v = part.split('='); k = k.strip()
                    if k.lower() == 'id':
                        tree_id = v.split('"')[1]; break
            if tree_id is None:
                raise ValueError(INVALID_NEXML)  # <tree> without an id
            trees[tree_id] = Tree(); trees[tree_id].root = None
        # Closing </tree> tag: reset per-tree state.
        elif l_lower.replace(' ', '').startswith('</tree>'):
            if tree_id is None:
                raise ValueError(INVALID_NEXML)
            id_to_node = dict(); tree_id = None
        # <node .../> tag: parse its attributes character by character so
        # quoted values may contain '=' and spaces.
        elif l_lower.startswith('<node '):
            if tree_id is None:
                raise ValueError(INVALID_NEXML)
            node_id = None; node_label = None; is_root = False
            k = ''; v = ''; in_key = True; in_quote = False
            for i in range(6, len(l)):  # 6 skips the '<node ' prefix
                if l[i] == '"' or l[i] == "'":
                    in_quote = not in_quote
                if not in_quote and in_key and l[i] == '=':
                    in_key = False  # finished reading an attribute name
                elif not in_quote and not in_key and (l[i] == '"' or l[i] == "'"):
                    # Closing quote: commit the (key, value) pair.
                    k = k.strip()
                    if k.lower() == 'id':
                        node_id = v
                    elif k.lower() == 'label':
                        node_label = v
                    elif k.lower() == 'root' and v.strip().lower() == 'true':
                        is_root = True
                    in_key = True; k = ''; v = ''
                elif in_key and not (l[i] == '"' or l[i] == "'"):
                    k += l[i]
                elif not in_key and not (l[i] == '"' or l[i] == "'"):
                    v += l[i]
            if node_id is None or node_id in id_to_node:
                raise ValueError(INVALID_NEXML)  # missing or duplicate id
            id_to_node[node_id] = Node(label=node_label)
            if is_root:
                if trees[tree_id].root is not None:
                    raise ValueError(INVALID_NEXML)  # two roots in one tree
                trees[tree_id].root = id_to_node[node_id]
        # <edge .../> tag: connect previously declared nodes.
        elif l_lower.startswith('<edge '):
            if tree_id is None:
                raise ValueError(INVALID_NEXML)
            source = None; target = None; length = None
            parts = l.split()
            for part in parts:
                if '=' in part:
                    k, v = part.split('='); k = k.strip(); k_lower = k.lower()
                    if k_lower == 'source':
                        source = v.split('"')[1]
                    elif k_lower == 'target':
                        target = v.split('"')[1]
                    elif k_lower == 'length':
                        length = float(v.split('"')[1])
            if source is None or target is None or length is None:
                raise ValueError(INVALID_NEXML)
            if source not in id_to_node:
                raise ValueError(INVALID_NEXML)
            if target not in id_to_node:
                raise ValueError(INVALID_NEXML)
            id_to_node[source].add_child(id_to_node[target])
            id_to_node[target].edge_length = length
        # <rootedge .../> tag: sets the root's incoming edge length.
        elif l_lower.startswith('<rootedge '):
            if tree_id is None:
                raise ValueError(INVALID_NEXML)
            root_node = None; length = None
            parts = l.split()
            for part in parts:
                if '=' in part:
                    k, v = part.split('='); k = k.strip(); k_lower = k.lower()
                    if k_lower == 'target':
                        root_node = id_to_node[v.split('"')[1]]
                    elif k_lower == 'length':
                        length = float(v.split('"')[1])
            if trees[tree_id].root is None:
                raise ValueError(INVALID_NEXML)
            if root_node is not None and trees[tree_id].root != root_node:
                raise ValueError(INVALID_NEXML)
            trees[tree_id].root.edge_length = length
    if hasattr(f, 'close'):
        f.close()
    return trees
Read a tree from a NeXML string or file Args: ``nexml`` (``str``): Either a NeXML string or the path to a NeXML file (plain-text or gzipped) Returns: ``dict`` of ``Tree``: A dictionary of the trees represented by ``nexml``, where keys are tree names (``str``) and values are ``Tree`` objects
juraj-google-style
def forward_pass(self, vector, layer_index, is_transpose=False, is_abs=False):
    """Performs a forward (or transposed) pass through one layer's weights.

    Args:
        vector: tensor to be passed through the layer.
        layer_index: index of the layer whose weights are used.
        is_transpose: whether the layer's weights are applied transposed.
        is_abs: whether to use the elementwise absolute value of the weights.

    Returns:
        Column tensor corresponding to the pass through the layer.

    Raises:
        ValueError: if layer_index is negative or more than num hidden layers.
        NotImplementedError: if the layer type is unsupported.
    """
    if layer_index < 0 or layer_index > self.num_hidden_layers:
        raise ValueError('Invalid layer index')

    layer_type = self.layer_types[layer_index]
    weight = self.weights[layer_index]
    if is_abs:
        weight = tf.abs(weight)

    # A transposed pass consumes something shaped like the layer's output
    # and produces something shaped like its input.
    input_shape = self.input_shapes[layer_index]
    output_shape = self.output_shapes[layer_index]
    vector = tf.reshape(vector, output_shape if is_transpose else input_shape)

    if layer_type in {'ff', 'ff_relu'}:
        if is_transpose:
            weight = tf.transpose(weight)
        result = tf.matmul(weight, vector)
    elif layer_type in {'conv', 'conv_relu'}:
        stride = self.cnn_params[layer_index]['stride']
        strides = [1, stride, stride, 1]
        padding = self.cnn_params[layer_index]['padding']
        if is_transpose:
            result = tf.nn.conv2d_transpose(
                vector, weight, output_shape=input_shape,
                strides=strides, padding=padding)
        else:
            result = tf.nn.conv2d(vector, weight, strides=strides,
                                  padding=padding)
    else:
        raise NotImplementedError('Unsupported layer type: {0}'.format(layer_type))

    # Flatten back to a column: input size for transposed passes, the next
    # layer's size otherwise.
    if is_transpose:
        flat_size = self.sizes[layer_index]
    else:
        flat_size = self.sizes[layer_index + 1]
    return tf.reshape(result, (flat_size, 1))
Performs forward pass through the layer weights at layer_index. Args: vector: vector that has to be passed through in forward pass layer_index: index of the layer is_transpose: whether the weights of the layer have to be transposed is_abs: whether to take the absolute value of the weights Returns: tensor that corresponds to the forward pass through the layer Raises: ValueError: if the layer_index is negative or more than num hidden layers
codesearchnet
def in_batches(iterable, batch_size):
    """Yield consecutive batches (lists) of at most ``batch_size`` items.

    Args:
        iterable (Iterable[Any]): The iterable to split into batches.
        batch_size (int): The size of each batch. The last batch may be
            smaller if the number of elements cannot be equally divided.

    Yields:
        list[Any]: The next batch of items.
    """
    items = list(iterable)
    for start in range(0, len(items), batch_size):
        # Slicing clamps at the end of the list, so no min() is needed.
        yield items[start:start + batch_size]
Split the given iterable into batches. Args: iterable (Iterable[Any]): The iterable you want to split into batches. batch_size (int): The size of each batch. The last batch will probably be smaller (if the number of elements cannot be equally divided). Returns: Generator[list[Any]]: Will yield all items in batches of **batch_size** size. Example: >>> from peltak.core import util >>> >>> batches = util.in_batches([1, 2, 3, 4, 5, 6, 7], 3) >>> batches = list(batches) # so we can query for length >>> len(batches) 3 >>> batches [[1, 2, 3], [4, 5, 6], [7]]
codesearchnet
def _stdout_list_split(retcode, stdout='', splitstring='\n'): if retcode == 0: ret = stdout.split(splitstring) return ret else: return False
Evaluates the Open vSwitch command's retcode value. Args: retcode: Value of the retcode field from the response; should be 0, 1 or 2. stdout: Value of the stdout field from the response. splitstring: String used to split the stdout; defaults to newline. Returns: List or False.
juraj-google-style
def ApprovalCreateRaw(aff4_path, reason='', expire_in=(((60 * 60) * 24) * 7), token=None, approval_type='ClientApproval'):
    """Creates an approval with raw datastore access.

    Args:
        aff4_path: The aff4 path or client id the approval is created for.
        reason: The reason to put in the token (used only when `token` is
            not supplied).
        expire_in: Token expiry in seconds; defaults to one week.
        token: The token to use. If provided, `reason` and `expire_in`
            are ignored.
        approval_type: The approval type to create, as a class or its
            registered name.

    Raises:
        RuntimeError: If the token has an empty reason.

    NOTE(review): older docs say this returns the token, but the function
    returns None — confirm whether callers expect a `return token`.
    """
    # Client approvals are keyed by ClientURN; everything else by plain URN.
    if (approval_type in ['ClientApproval', security.ClientApproval]):
        urn = rdf_client.ClientURN(aff4_path)
    else:
        urn = rdfvalue.RDFURN(aff4_path)
    # Build a token from reason/expiry when none was supplied.
    if (not token):
        expiry = (time.time() + expire_in)
        token = access_control.ACLToken(reason=reason, expiry=expiry)
    if (not token.reason):
        raise RuntimeError('Cannot create approval with empty reason')
    if (not token.username):
        token.username = getpass.getuser()
    approval_urn = security.ApprovalRequestor.ApprovalUrnBuilder(urn.Path(), token.username, token.reason)
    # A supervisor token bypasses the usual ACL checks for the raw write.
    super_token = access_control.ACLToken(username='raw-approval-superuser')
    super_token.supervisor = True
    # Accept either a registered class name or the class itself.
    if isinstance(approval_type, string_types):
        approval_type_cls = aff4.AFF4Object.classes[approval_type]
    else:
        approval_type_cls = approval_type
    approval_request = aff4.FACTORY.Create(approval_urn, approval_type_cls, mode='rw', token=super_token)
    # Two synthetic approvers are added to satisfy the approval count.
    approval_request.AddAttribute(approval_request.Schema.APPROVER(('%s1-raw' % token.username)))
    approval_request.AddAttribute(approval_request.Schema.APPROVER(('%s-raw2' % token.username)))
    approval_request.Close()
Creates an approval with raw access. This method requires raw datastore access to manipulate approvals directly. This currently doesn't work for hunt or cron approvals, because they check that each approver has the admin label. Since the fake users don't exist the check fails. Args: aff4_path: The aff4_path or client id the approval should be created for. reason: The reason to put in the token. expire_in: Expiry in seconds to use in the token. token: The token that will be used. If this is specified reason and expiry are ignored. approval_type: The type of the approval to create. Returns: The token. Raises: RuntimeError: On bad token.
codesearchnet
def awscli_defaults(os_type=None):
    """Determine the default local awscli configuration file locations.

    Any OS other than Windows now falls back to the POSIX layout
    (``$HOME/.aws``), so macOS ('Darwin') and other platforms no longer
    raise UnboundLocalError as the Linux/Windows/Java-only branches did.

    Args:
        os_type (str): Operating system identifier as returned by
            ``platform.system()`` ('Linux', 'Windows', 'Darwin', 'Java',
            ...). Auto-detected when None.

    Returns:
        dict: {'awscli_defaults': {'awscli_credentials', 'awscli_config',
            'alt_credentials'}} describing the configuration file paths.

    Raises:
        OSError: If the local os environment cannot be determined.
    """
    try:
        if os_type is None:
            os_type = platform.system()
        if os_type == 'Windows':
            username = os.getenv('username')
            awscli_credentials = 'C:\\Users\\' + username + '\\.aws\\credentials'
            awscli_config = 'C:\\Users\\' + username + '\\.aws\\config'
        else:
            # POSIX-like platforms (Linux, Darwin, ...) share this layout.
            if os_type == 'Java':
                logger.warning('Unsupported OS. No information')
            HOME = os.environ['HOME']
            awscli_credentials = HOME + '/.aws/credentials'
            awscli_config = HOME + '/.aws/config'
        # User override via the standard awscli environment variable.
        alt_credentials = os.getenv('AWS_SHARED_CREDENTIALS_FILE')
    except OSError as e:
        logger.exception(
            '%s: problem determining local os environment %s' %
            (inspect.stack()[0][3], str(e))
        )
        raise e
    return {
        'awscli_defaults': {
            'awscli_credentials': awscli_credentials,
            'awscli_config': awscli_config,
            'alt_credentials': alt_credentials
        }
    }
Summary: Determine the local awscli configuration file locations. Args: os_type (str): Operating system name (e.g. 'Linux', 'Windows'); detected automatically when omitted. Returns: TYPE: dict object containing key, value pairs describing the awscli configuration file locations
juraj-google-style
def get_name_servers(self, id_or_uri):
    """Gets the name servers for an interconnect.

    Args:
        id_or_uri: Either the interconnect id or the interconnect uri.

    Returns:
        dict: the name servers for the interconnect.
    """
    name_servers_uri = "{}/nameServers".format(self._client.build_uri(id_or_uri))
    return self._client.get(name_servers_uri)
Gets the named servers for an interconnect. Args: id_or_uri: Can be either the interconnect id or the interconnect uri. Returns: dict: the name servers for an interconnect.
juraj-google-style
def _client_send(self, msg): try: self._client.write((msg.encode('utf8') + b'\n')) self._client.flush() self.log.debug('Snippet sent %s.', msg) except socket.error as e: raise Error(self._ad, ('Encountered socket error "%s" sending RPC message "%s"' % (e, msg)))
Sends an Rpc message through the connection. Args: msg: string, the message to send. Raises: Error: a socket error occurred during the send.
codesearchnet
def BSearch(a, x, lo=0, hi=None):
    """Return the index of x in the sorted sequence a, or -1 if absent.

    Arguments:
        a -- ordered numeric sequence
        x -- element to search within a
        lo -- lowest index to consider in the search
        hi -- highest index to consider in the search (defaults to len(a))
    """
    if not a:
        return -1
    if hi is None:
        hi = len(a)
    # bisect_left gives the insertion point; it is a hit only when it lands
    # inside the searched range on an equal element.
    idx = bisect_left(a, x, lo, hi)
    if idx != hi and a[idx] == x:
        return idx
    return -1
Returns index of x in a, or -1 if x not in a. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to lose.
juraj-google-style
def _create_datadict(cls, internal_name): if internal_name == "LOCATION": return Location() if internal_name == "DESIGN CONDITIONS": return DesignConditions() if internal_name == "TYPICAL/EXTREME PERIODS": return TypicalOrExtremePeriods() if internal_name == "GROUND TEMPERATURES": return GroundTemperatures() if internal_name == "HOLIDAYS/DAYLIGHT SAVINGS": return HolidaysOrDaylightSavings() if internal_name == "COMMENTS 1": return Comments1() if internal_name == "COMMENTS 2": return Comments2() if internal_name == "DATA PERIODS": return DataPeriods() raise ValueError( "No DataDictionary known for {}".format(internal_name))
Creates an object depending on `internal_name` Args: internal_name (str): IDD name Raises: ValueError: if `internal_name` cannot be matched to a data dictionary object
juraj-google-style
def file(self, owner=None, **kwargs):
    """Create the File TI (Threat Intelligence) object.

    Args:
        owner: The owner for the File object; defaults to None.
        **kwargs: Additional keyword arguments forwarded to File.

    Returns:
        File: A new File TI object bound to this instance's tcex.
    """
    return File(self.tcex, owner=owner, **kwargs)
Create the File TI object. Args: owner: Owner of the File object. **kwargs: Additional keyword arguments passed to File. Return: File: The new File TI object.
juraj-google-style
def __init__(self, direction, edge_name, depth, within_optional_scope=False):
    """Create a new Recurse block traversing the given edge up to "depth" times.

    Args:
        direction: string, 'in' or 'out'.
        edge_name: string obeying variable name rules (see validate_safe_string).
        depth: int, always greater than or equal to 1.
        within_optional_scope: bool, whether the block occurs within an
            optional traversal scope.
    """
    super(Recurse, self).__init__(
        direction, edge_name, depth, within_optional_scope=within_optional_scope)
    # NOTE(review): these assignments presumably duplicate work done by the
    # base-class __init__ (which receives the same arguments) — confirm
    # before removing them.
    self.direction = direction
    self.edge_name = edge_name
    self.depth = depth
    self.within_optional_scope = within_optional_scope
    self.validate()
Create a new Recurse block which traverses the given edge up to "depth" times. Args: direction: string, 'in' or 'out'. edge_name: string obeying variable name rules (see validate_safe_string). depth: int, always greater than or equal to 1. Returns: new Recurse object
juraj-google-style
def ray_get_and_free(object_ids):
    """Call ray.get and then queue the object ids for deletion.

    This function should be used whenever possible in RLlib, to optimize
    memory usage. The only exception is when an object_id is shared among
    multiple readers.

    Args:
        object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.

    Returns:
        The result of ray.get(object_ids).
    """
    # Module-level queue and timestamp used to batch calls to ray.internal.free.
    global _last_free_time
    global _to_free
    result = ray.get(object_ids)
    # Normalize a single id to a list before queueing it for deletion.
    if (type(object_ids) is not list):
        object_ids = [object_ids]
    _to_free.extend(object_ids)
    # Flush the queue when it grows too large or enough time has passed
    # since the last flush.
    now = time.time()
    if ((len(_to_free) > MAX_FREE_QUEUE_SIZE) or ((now - _last_free_time) > FREE_DELAY_S)):
        ray.internal.free(_to_free)
        _to_free = []
        _last_free_time = now
    return result
Call ray.get and then queue the object ids for deletion. This function should be used whenever possible in RLlib, to optimize memory usage. The only exception is when an object_id is shared among multiple readers. Args: object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free. Returns: The result of ray.get(object_ids).
codesearchnet
def chmod(self, path, mode, follow_symlinks=True):
    """Change the permissions of a file as encoded in integer mode.

    Args:
        path: (str) Path to the file.
        mode: (int) Permissions.
        follow_symlinks: If `False` and `path` points to a symlink, the
            link itself is affected instead of the linked object.
    """
    try:
        file_object = self.resolve(path, follow_symlinks, allow_fd=True)
    except IOError as io_error:
        # Map a missing file onto the faked OS error; re-raise anything else.
        if io_error.errno == errno.ENOENT:
            self.raise_os_error(errno.ENOENT, path)
        raise
    if self.is_windows_fs:
        # Windows semantics: only the write permission is honoured. Set or
        # clear the write bits (0o222) for user/group/other; 0o777555 is
        # the full mask with those write bits cleared.
        if mode & PERM_WRITE:
            file_object.st_mode = file_object.st_mode | 0o222
        else:
            file_object.st_mode = file_object.st_mode & 0o777555
    else:
        # POSIX semantics: replace the PERM_ALL bits with those from
        # `mode`, leaving the remaining st_mode bits untouched.
        file_object.st_mode = ((file_object.st_mode & ~PERM_ALL) |
                               (mode & PERM_ALL))
    # Changing permissions updates the inode change time.
    file_object.st_ctime = time.time()
Change the permissions of a file as encoded in integer mode. Args: path: (str) Path to the file. mode: (int) Permissions. follow_symlinks: If `False` and `path` points to a symlink, the link itself is affected instead of the linked object.
juraj-google-style
def __ge__(self, other):
    """Returns True if `self` is known to be greater than or equal to `other`.

    Args:
        other: Another Dimension (or a value convertible to one).

    Returns:
        The value of `self.value >= other.value` if both are known,
        otherwise None.
    """
    other = as_dimension(other)
    # An unknown dimension on either side makes the comparison unknown.
    if self._value is None or other.value is None:
        return None
    return self._value >= other.value
Returns True if `self` is known to be greater than or equal to `other`. Dimensions are compared as follows: ```python (tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(n)) == (m >= n) (tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(None)) == None (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n)) == None (tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value >= other.value` if both are known, otherwise None.
github-repos
def serialize_to_xml(root, block):
    """Serialize the Peer Instruction XBlock's content to XML.

    The `root` element is mutated in place; nothing is returned.

    Args:
        root (etree.Element): The XML root node to update.
        block (PeerInstructionXBlock): The peer instruction block to serialize.
    """
    root.tag = 'ubcpi'
    # Optional rationale size limits become attributes on the root element.
    # (`unicode` implies this module targets Python 2.)
    if (block.rationale_size is not None):
        if block.rationale_size.get('min'):
            root.set('rationale_size_min', unicode(block.rationale_size.get('min')))
        if block.rationale_size.get('max'):
            root.set('rationale_size_max', unicode(block.rationale_size['max']))
    # Algorithm configuration (name and number of responses), if present.
    if block.algo:
        if block.algo.get('name'):
            root.set('algorithm', block.algo.get('name'))
        if block.algo.get('num_responses'):
            root.set('num_responses', unicode(block.algo.get('num_responses')))
    display_name = etree.SubElement(root, 'display_name')
    display_name.text = block.display_name
    # Question text plus any attached image.
    question = etree.SubElement(root, 'question')
    question_text = etree.SubElement(question, 'text')
    question_text.text = block.question_text['text']
    serialize_image(block.question_text, question)
    # Answer options and seeded rationales are serialized by helpers.
    options = etree.SubElement(root, 'options')
    serialize_options(options, block)
    seeds = etree.SubElement(root, 'seeds')
    serialize_seeds(seeds, block)
Serialize the Peer Instruction XBlock's content to XML. Args: block (PeerInstructionXBlock): The peer instruction block to serialize. root (etree.Element): The XML root node to update. Returns: etree.Element
codesearchnet
def __init__(self, name, description, optional=False):
    """Parameter descriptor.

    Args:
        name: 1 word parameter identifier.
        description: short description of the purpose of the parameter;
            what it configures/does.
        optional: flag indicating whether the parameter is optional.
            Defaults to mandatory (False).
    """
    self.name = name
    self.description = description
    self.optional = optional
Parameter descriptor Args: name: 1 word parameter identifier. description: short description of the purpose of the parameter. What does it configure/do. optional: flag indicating whether the parameter is optional. Defaults to mandatory (false).
juraj-google-style
def _ParseIndexTable(
    self, parser_mediator, file_system, file_entry, index_table):
    """Parses a Chrome Cache index table.

    Opens each data block file referenced from the index table (at most
    once per filename), parses it, and finally parses the cache entries,
    closing every opened data block file afterwards.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_system (dfvfs.FileSystem): file system.
        file_entry (dfvfs.FileEntry): file entry of the index file.
        index_table (list[CacheAddress]): the cache addresses which are
            stored in the index file.
    """
    # Data block files live next to the index file; reuse its path segments.
    path_segments = file_system.SplitPath(file_entry.path_spec.location)
    data_block_files = {}
    for cache_address in index_table:
        if cache_address.filename not in data_block_files:
            # Swap the last path segment for the data block filename.
            path_segments.pop()
            path_segments.append(cache_address.filename)
            kwargs = {}
            if file_entry.path_spec.parent:
                kwargs['parent'] = file_entry.path_spec.parent
            kwargs['location'] = file_system.JoinPath(path_segments)
            data_block_file_path_spec = path_spec_factory.Factory.NewPathSpec(
                file_entry.path_spec.TYPE_INDICATOR, **kwargs)
            try:
                data_block_file_entry = path_spec_resolver.Resolver.OpenFileEntry(
                    data_block_file_path_spec)
            except RuntimeError as exception:
                message = (
                    'Unable to open data block file: {0:s} with error: '
                    '{1!s}'.format(kwargs['location'], exception))
                parser_mediator.ProduceExtractionWarning(message)
                data_block_file_entry = None
            if not data_block_file_entry:
                # A missing file is recorded as a warning; the filename still
                # maps to None so the lookup is not retried.
                message = 'Missing data block file: {0:s}'.format(
                    cache_address.filename)
                parser_mediator.ProduceExtractionWarning(message)
                data_block_file_object = None
            else:
                data_block_file_object = data_block_file_entry.GetFileObject()
                try:
                    self._data_block_file_parser.ParseFileObject(
                        parser_mediator, data_block_file_object)
                except (IOError, errors.ParseError) as exception:
                    # A file that fails to parse is closed and treated as
                    # unavailable for the cache entry pass below.
                    message = (
                        'Unable to parse data block file: {0:s} with error: '
                        '{1!s}').format(cache_address.filename, exception)
                    parser_mediator.ProduceExtractionWarning(message)
                    data_block_file_object.close()
                    data_block_file_object = None
            data_block_files[cache_address.filename] = data_block_file_object
    try:
        self._ParseCacheEntries(
            parser_mediator, index_table, data_block_files)
    finally:
        # Always close any data block files that were successfully opened.
        for data_block_file_object in iter(data_block_files.values()):
            if data_block_file_object:
                data_block_file_object.close()
Parses a Chrome Cache index table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_system (dfvfs.FileSystem): file system. file_entry (dfvfs.FileEntry): file entry. index_table (list[CacheAddress]): the cache addresses which are stored in the index file.
juraj-google-style
def is_generator(obj):
    """Return True if the object is a generator or generator function.

    The original accessed the Python 2-only ``func_code`` attribute, which
    raises AttributeError on Python 3 functions; this version falls back
    between ``__code__`` (Python 3) and ``func_code`` (Python 2) and uses
    ``inspect.CO_GENERATOR`` instead of a magic constant.

    Args:
        obj: an object to test.

    Returns:
        bool: True if the object is a generator iterator or a generator
        function.
    """
    if isinstance(obj, types.GeneratorType):
        return True
    if not (inspect.isfunction(obj) or inspect.ismethod(obj)):
        return False
    code = getattr(obj, '__code__', None) or getattr(obj, 'func_code', None)
    return bool(code is not None and (code.co_flags & inspect.CO_GENERATOR))
Return true if the object is generator or generator function. Generator function objects provides same attributes as functions. See isfunction.__doc__ for attributes listing. Adapted from Python 2.6. Args: obj: an object to test. Returns: true if the object is generator function.
codesearchnet
def cases(store, case_query, limit=100):
    """Preprocess case objects for the 'cases' view.

    Adds the necessary display information to each case and groups the
    cases by status.

    Args:
        store (adapter.MongoAdapter): database adapter.
        case_query (pymongo.Cursor): cursor over the case documents.
        limit (int): Maximum number of cases to display.

    Returns:
        data (dict): includes the cases grouped by status, how many there
            are in total, and the limit.
    """
    # One bucket per known case status.
    case_groups = {status: [] for status in CASE_STATUSES}
    for case_obj in case_query.limit(limit):
        # Unique analysis types across the case's individuals.
        analysis_types = set((ind['analysis_type'] for ind in case_obj['individuals']))
        case_obj['analysis_types'] = list(analysis_types)
        # Resolve assignee emails to user objects.
        case_obj['assignees'] = [store.user(user_email) for user_email in case_obj.get('assignees', [])]
        case_groups[case_obj['status']].append(case_obj)
        # The fields below are set after the append; the group list holds a
        # reference to the same dict, so they are still visible there.
        case_obj['is_rerun'] = (len(case_obj.get('analyses', [])) > 0)
        case_obj['clinvar_variants'] = store.case_to_clinVars(case_obj['_id'])
        case_obj['display_track'] = TRACKS[case_obj.get('track', 'rare')]
    data = {'cases': [(status, case_groups[status]) for status in CASE_STATUSES], 'found_cases': case_query.count(), 'limit': limit}
    return data
Preprocess case objects. Add the necessary information to display the 'cases' view Args: store(adapter.MongoAdapter) case_query(pymongo.Cursor) limit(int): Maximum number of cases to display Returns: data(dict): includes the cases, how many there are and the limit.
codesearchnet
def _nested_from_proto(nested_proto, process_leafs):
    """Deserializes `nested_proto`.

    Args:
        nested_proto: An instance of `module_pb2.NestedData`.
        process_leafs: A function to be applied to the leaf values of the
            nested structure.

    Returns:
        An instance of `string`, `tuple`, `dict` or `namedtuple`.

    Raises:
        base_errors.ModuleInfoError: If the protobuf is of the wrong type or
            if some of its fields are missing.
    """
    if not isinstance(nested_proto, module_pb2.NestedData):
        raise base_errors.ModuleInfoError("Expected module_pb2.NestedData.")
    if nested_proto.HasField("value"):
        value = nested_proto.value
        if not value:
            # An empty value marks a leaf that could not be serialized.
            value = _UnserializableObject()
        else:
            value = process_leafs(value)
        return value
    elif nested_proto.HasField("list"):
        # Recursively rebuild each container type from its children.
        return [_nested_from_proto(child, process_leafs)
                for child in nested_proto.list.list]
    elif nested_proto.HasField("tuple"):
        return tuple(_nested_from_proto(child, process_leafs)
                     for child in nested_proto.tuple.list)
    elif nested_proto.HasField("dict"):
        return {name: _nested_from_proto(child, process_leafs)
                for name, child in six.iteritems(nested_proto.dict.map)}
    elif nested_proto.HasField("named_tuple"):
        tmp_dict = {name: _nested_from_proto(child, process_leafs)
                    for name, child in six.iteritems(nested_proto.named_tuple.map)}
        # Recreate the namedtuple class from its recorded name and fields.
        NamedTuple = collections.namedtuple(
            nested_proto.named_tuple.name, tmp_dict.keys())
        return NamedTuple(**tmp_dict)
    elif nested_proto.HasField("special_type"):
        # Registered special types carry their own deserializer; anything
        # unknown becomes an unserializable placeholder.
        if nested_proto.special_type.name not in _TO_PROTO_SPECIAL_TYPES:
            return _UnserializableObject()
        type_info = _TO_PROTO_SPECIAL_TYPES[nested_proto.special_type.name]
        return type_info.from_proto(nested_proto.special_type.object, process_leafs)
    else:
        raise base_errors.ModuleInfoError(
            "Cannot deserialize a `ModuleInfo` protobuf with no fields.")
Deserializes `nested_proto`. Args: nested_proto: An instance of `module_pb2.NestedData`. process_leafs: A function to be applied to the leaf values of the nested structure. Returns: An instance of `string`, `tuple`, `dict` or `namedtuple`. Raises: base_errors.ModuleInfoError: If the probobuf is of the wrong type or if some of its fields are missing.
juraj-google-style
def ensure_s3_bucket(s3_client, bucket_name, bucket_region):
    """Ensure an s3 bucket exists; create it when it does not.

    Args:
        s3_client (:class:`botocore.client.Client`): An s3 client used to
            verify and create the bucket.
        bucket_name (str): The bucket being checked/created.
        bucket_region (str, optional): The region to create the bucket in.
            If not provided, will be determined by s3_client's region.
    """
    try:
        s3_client.head_bucket(Bucket=bucket_name)
        return
    except botocore.exceptions.ClientError as e:
        error_message = e.response['Error']['Message']
        if error_message == 'Not Found':
            # The bucket does not exist yet; create it, pinning the region
            # through a location constraint when one applies.
            logger.debug('Creating bucket %s.', bucket_name)
            create_args = {'Bucket': bucket_name}
            location_constraint = s3_bucket_location_constraint(bucket_region)
            if location_constraint:
                create_args['CreateBucketConfiguration'] = {
                    'LocationConstraint': location_constraint}
            s3_client.create_bucket(**create_args)
        elif error_message == 'Forbidden':
            logger.exception('Access denied for bucket %s. Did '
                             'you remember to use a globally unique name?',
                             bucket_name)
            raise
        else:
            logger.exception('Error creating bucket %s. Error %s',
                             bucket_name, e.response)
            raise
Ensure an s3 bucket exists, if it does not then create it. Args: s3_client (:class:`botocore.client.Client`): An s3 client used to verify and create the bucket. bucket_name (str): The bucket being checked/created. bucket_region (str, optional): The region to create the bucket in. If not provided, will be determined by s3_client's region.
codesearchnet
def WrapCFTypeInPython(self, obj):
    """Package a CoreFoundation object in a Python wrapper.

    Args:
        obj: The CoreFoundation object.

    Returns:
        One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray.

    Raises:
        TypeError: If the type is not supported.
    """
    obj_type = self.dll.CFGetTypeID(obj)
    # (type-id getter, wrapper) pairs, checked in order. The wrapper classes
    # are wrapped in lambdas so only the matching one is instantiated.
    dispatch = (
        (self.dll.CFBooleanGetTypeID, lambda o: CFBoolean(o)),
        (self.dll.CFNumberGetTypeID, lambda o: CFNumber(o)),
        (self.dll.CFStringGetTypeID, lambda o: CFString(o)),
        (self.dll.CFDictionaryGetTypeID, lambda o: CFDictionary(o)),
        (self.dll.CFArrayGetTypeID, lambda o: CFArray(o)),
    )
    for get_type_id, wrap in dispatch:
        if obj_type == get_type_id():
            return wrap(obj)
    raise TypeError('Unknown type for object: {0}'.format(obj))
Package a CoreFoundation object in a Python wrapper. Args: obj: The CoreFoundation object. Returns: One of CFBoolean, CFNumber, CFString, CFDictionary, CFArray. Raises: TypeError: If the type is not supported.
juraj-google-style
def _system_parameters(**kwargs): return {key: value for (key, value) in kwargs.items() if ((value is not None) or (value == {}))}
Returns system keyword arguments removing Nones. Args: kwargs: system keyword arguments. Returns: dict: system keyword arguments.
codesearchnet
def make_calls(self, num_calls=1):
    """Adds appropriate sleep to avoid making too many calls.

    Blocks (yielding the CPU) until `num_calls` additional calls fit under
    the per-second cap, then records them.

    Args:
        num_calls: int, the number of calls which will be made.
    """
    self._cull()
    # Spin until enough outstanding calls have been culled to make room.
    while self._outstanding_calls + num_calls > self._max_calls_per_second:
        time.sleep(0)
        self._cull()
    record = self.CallRecord(time=time.time(), num_calls=num_calls)
    self._call_times.append(record)
    self._outstanding_calls += num_calls
Adds appropriate sleep to avoid making too many calls. Args: num_calls: int the number of calls which will be made
juraj-google-style
def restore(self, file_prefix: str, options: Optional[checkpoint_options.CheckpointOptions]=None) -> Dict[str, ops.Operation]:
    """Restore the saveable objects from a checkpoint with `file_prefix`.

    Args:
        file_prefix: A string or scalar string Tensor containing the prefix
            for files to read from.
        options: Optional `CheckpointOptions` object. This is unused in
            DTensor and only validated.

    Returns:
        A dictionary mapping from SaveableObject names to restore operations.

    Raises:
        ValueError: If `options` specifies an experimental_io_device, which
            is not supported for DTensor checkpoints.
    """
    if options is not None and options.experimental_io_device is not None:
        raise ValueError('Specified experimental_io_device in DTensor checkpoint is not supported.')
    del options
    # Collect one restore spec per SaveSpec, remembering the nesting of
    # spec names so the flat restored tensors can be re-packed later.
    restore_specs = []
    tensor_structure = []
    for saveable in self._saveable_objects:
        saveable_tensor_structure = []
        tensor_structure.append(saveable_tensor_structure)
        for spec in saveable.specs:
            saveable_tensor_structure.append(spec.name)
            if isinstance(spec, d_variable.DSaveSpec):
                # DTensor-aware specs carry their own layout and global shape.
                restore_specs.append((spec.name, spec.slice_spec, spec.dtype, spec.layout, spec.global_shape))
            elif isinstance(spec, saveable_object.SaveSpec):
                # Plain SaveSpecs are restored replicated on the host mesh.
                restore_specs.append((spec.name, spec.slice_spec, spec.dtype, layout.Layout.replicated(self._mesh.host_mesh(), spec.tensor.shape.rank).to_string(), spec.tensor.shape.as_list()))
    tensor_names, tensor_slices, tensor_dtypes, layouts, global_shapes = zip(*restore_specs)
    with ops.device(api.device_name()):
        restored_tensors = gen_dtensor_ops.d_tensor_restore_v2(prefix=file_prefix, tensor_names=tensor_names, shape_and_slices=tensor_slices, input_shapes=global_shapes, input_layouts=layouts, dtypes=tensor_dtypes)
    # Re-nest the flat tensor list to mirror the per-saveable structure.
    structured_restored_tensors = nest.pack_sequence_as(tensor_structure, restored_tensors)
    restore_ops = {}
    for saveable, restored_tensors in zip(self._saveable_objects, structured_restored_tensors):
        restore_ops[saveable.name] = saveable.restore(restored_tensors, restored_shapes=None)
    return restore_ops
Restore the saveable objects from a checkpoint with `file_prefix`. Args: file_prefix: A string or scalar string Tensor containing the prefix for files to read from. options: Optional `CheckpointOptions` object. This is unused in DTensor. Returns: A dictionary mapping from SaveableObject names to restore operations.
github-repos
def write(self, message, cur_time=None):
    """Write some text to the pusher.

    Complete lines produced by the line buffer are prefixed (and optionally
    timestamped) before being pushed.

    Args:
        message: a string to push for this file.
        cur_time: used for unit testing; overrides the line timestamp.
    """
    if cur_time is None:
        cur_time = time.time()
    for line in self._line_buffer.add_string(message):
        timestamp = ''
        if self._prepend_timestamp:
            timestamp = datetime.datetime.utcfromtimestamp(cur_time).isoformat() + ' '
        formatted = u'{}{}{}'.format(self._line_prepend, timestamp, line)
        self._fsapi.push(self._filename, formatted)
Write some text to the pusher. Args: message: a string to push for this file. cur_time: used for unit testing. override line timestamp.
codesearchnet
def get_vertex(self, key):
    """Returns or creates the Vertex mapped by key.

    Args:
        key: A string reference for a vertex. May refer to a new Vertex,
            in which case it will be created.

    Returns:
        The Vertex mapped to by key.
    """
    try:
        return self.vertex_map[key]
    except KeyError:
        # First reference to this key: create and remember a new vertex.
        created = self.new_vertex()
        self.vertex_map[key] = created
        return created
Returns or Creates a Vertex mapped by key. Args: key: A string reference for a vertex. May refer to a new Vertex in which case it will be created. Returns: A the Vertex mapped to by key.
codesearchnet
def learning_rate_with_decay(batch_size, batch_denom, num_images, boundary_epochs, decay_rates, base_lr=0.1, enable_lars=False):
    """Get a learning rate that decays step-wise as training progresses.

    Args:
      batch_size: the number of examples processed in each training batch.
      batch_denom: this value will be used to scale the base learning rate.
        `0.1 * batch size` is divided by this number, such that when
        batch_denom == batch_size, the initial learning rate will be 0.1.
      num_images: total number of images that will be used for training.
      boundary_epochs: list of ints representing the epochs at which we decay
        the learning rate.
      decay_rates: list of floats representing the decay rates to be used for
        scaling the learning rate. It should have one more element than
        `boundary_epochs`, and all elements should have the same type.
      base_lr: Initial learning rate scaled based on batch_denom.
      enable_lars: if True, return the LARS polynomial-decay schedule instead
        of the piecewise-constant one.

    Returns:
      A function mapping global_step to the learning rate for the next batch.
    """
    # Linear scaling rule: scale the base LR with the batch size.
    initial_learning_rate = ((base_lr * batch_size) / batch_denom)
    batches_per_epoch = (num_images / batch_size)
    # Epoch boundaries converted to step boundaries for piecewise decay.
    boundaries = [int((batches_per_epoch * epoch)) for epoch in boundary_epochs]
    vals = [(initial_learning_rate * decay) for decay in decay_rates]

    def learning_rate_fn(global_step):
        """Piecewise-constant decay with a 5-epoch linear warmup."""
        lr = tf.train.piecewise_constant(global_step, boundaries, vals)
        warmup_steps = int((batches_per_epoch * 5))
        warmup_lr = ((initial_learning_rate * tf.cast(global_step, tf.float32)) / tf.cast(warmup_steps, tf.float32))
        return tf.cond((global_step < warmup_steps), (lambda: warmup_lr), (lambda: lr))

    def poly_rate_fn(global_step):
        """Handles linear scaling rule, gradual warmup, and LR decay.

        The learning rate starts at 0, then it increases linearly per step.
        After the warmup epochs, we reach the base learning rate (scaled to
        account for batch size). The learning rate is then decayed using a
        polynomial rate decay schedule with power 2.0.

        Args:
          global_step: the current global_step

        Returns:
          returns the current learning rate
        """
        # Peak LR and warmup length are chosen per batch-size bucket.
        if (batch_size < 8192):
            plr = 5.0
            w_epochs = 5
        elif (batch_size < 16384):
            plr = 10.0
            w_epochs = 5
        elif (batch_size < 32768):
            plr = 25.0
            w_epochs = 5
        else:
            plr = 32.0
            w_epochs = 14
        w_steps = int((w_epochs * batches_per_epoch))
        wrate = ((plr * tf.cast(global_step, tf.float32)) / tf.cast(w_steps, tf.float32))
        # NOTE(review): training length is hard-coded to 90 epochs here --
        # confirm this matches the caller's training schedule.
        num_epochs = 90
        train_steps = (batches_per_epoch * num_epochs)
        min_step = tf.constant(1, dtype=tf.int64)
        decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps))
        poly_rate = tf.train.polynomial_decay(plr, decay_steps, ((train_steps - w_steps) + 1), power=2.0)
        return tf.where((global_step <= w_steps), wrate, poly_rate)

    if enable_lars:
        return poly_rate_fn
    return learning_rate_fn
Get a learning rate that decays step-wise as training progresses.

Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
        `0.1 * batch size` is divided by this number, such that when
        batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we decay
        the learning rate.
    decay_rates: list of floats representing the decay rates to be used for
        scaling the learning rate. It should have one more element than
        `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    enable_lars: if True, return the LARS polynomial-decay learning rate
        schedule instead of the piecewise-constant one.

Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step) - and returns the learning rate to be used
    for training the next batch.
codesearchnet
def get_constant_state(self):
    """Return the next stored constant state and advance the read cursor.

    Returns:
        The structure that was written in "first_part" mode.
    """
    cursor = self.next_constant_state
    self.next_constant_state = cursor + 1
    return self.constant_states[cursor]
Read state that was written in "first_part" mode. Returns: a structure
codesearchnet
def download_url(url, root, filename=None, md5=None):
    """Download a file from a url and place it in root.

    Skips the download when a previously-downloaded copy passes the md5 check.
    On failure of an https download, retries once over plain http.

    Args:
        url (str): URL to download the file from.
        root (str): directory to place the downloaded file in.
        filename (str, optional): name to save the file under; defaults to
            the basename of the URL.
        md5 (str, optional): expected MD5 checksum; None disables the check.
    """
    from six.moves import urllib
    root = os.path.expanduser(root)
    target = filename if filename else os.path.basename(url)
    fpath = os.path.join(root, target)
    makedir_exist_ok(root)
    if os.path.isfile(fpath) and check_integrity(fpath, md5):
        print('Using downloaded and verified file: ' + fpath)
        return
    try:
        print('Downloading ' + url + ' to ' + fpath)
        urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())
    except OSError:
        if url[:5] == 'https':
            url = url.replace('https:', 'http:')
            print('Failed download. Trying https -> http instead.'
                  ' Downloading ' + url + ' to ' + fpath)
            urllib.request.urlretrieve(url, fpath, reporthook=gen_bar_updater())
Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL md5 (str, optional): MD5 checksum of the download. If None, do not check
codesearchnet
def ifft2(x):
    """Computes the 2D Inverse FFT along the last two axes of the input.

    Args:
        x: Tuple of the real and imaginary parts of the input tensor. Both
            tensors in the tuple should be of floating type.

    Returns:
        A tuple containing two tensors - the real and imaginary parts of the
        output.
    """
    # Defer to the symbolic op when any input is a symbolic (graph) tensor.
    if any_symbolic_tensors(x):
        return IFFT2().symbolic_call(x)
    return backend.math.ifft2(x)
Computes the 2D Inverse Fast Fourier Transform along the last two axes of input. Args: x: Tuple of the real and imaginary parts of the input tensor. Both tensors in the tuple should be of floating type. Returns: A tuple containing two tensors - the real and imaginary parts of the output. Example: >>> x = ( ... keras.ops.convert_to_tensor([[1., 2.], [2., 1.]]), ... keras.ops.convert_to_tensor([[0., 1.], [1., 0.]]), ... ) >>> ifft2(x) (array([[ 6., 0.], [ 0., -2.]], dtype=float32), array([[ 2., 0.], [ 0., -2.]], dtype=float32))
github-repos
def get_variation(self, experiment_key, user_id, attributes=None):
    """Gets variation where user will be bucketed.

    Args:
      experiment_key: Experiment for which user variation needs to be
        determined.
      user_id: ID for user.
      attributes: Dict representing user attributes.

    Returns:
      Variation key representing the variation the user will be bucketed in.
      None if user is not in experiment or if experiment is not Running.
    """
    if not self.is_valid:
        self.logger.error(enums.Errors.INVALID_DATAFILE.format('get_variation'))
        return None
    if not validator.is_non_empty_string(experiment_key):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('experiment_key'))
        return None
    if not isinstance(user_id, string_types):
        self.logger.error(enums.Errors.INVALID_INPUT_ERROR.format('user_id'))
        return None
    experiment = self.config.get_experiment_from_key(experiment_key)
    variation_key = None
    if not experiment:
        self.logger.info('Experiment key "%s" is invalid. Not activating user "%s".' % (
            experiment_key, user_id
        ))
        return None
    if not self._validate_user_inputs(attributes):
        return None
    variation = self.decision_service.get_variation(experiment, user_id, attributes)
    if variation:
        variation_key = variation.key
    # A decision notification is sent even when no variation was assigned
    # (variation_key stays None); its type depends on whether the experiment
    # backs a feature flag or is a plain A/B test.
    if self.config.is_feature_experiment(experiment.id):
        decision_notification_type = enums.DecisionNotificationTypes.FEATURE_TEST
    else:
        decision_notification_type = enums.DecisionNotificationTypes.AB_TEST
    self.notification_center.send_notifications(
        enums.NotificationTypes.DECISION,
        decision_notification_type,
        user_id,
        attributes or {},
        {
            'experiment_key': experiment_key,
            'variation_key': variation_key
        }
    )
    return variation_key
Gets variation where user will be bucketed. Args: experiment_key: Experiment for which user variation needs to be determined. user_id: ID for user. attributes: Dict representing user attributes. Returns: Variation key representing the variation the user will be bucketed in. None if user is not in experiment or if experiment is not Running.
juraj-google-style
def create_error(msg, cause=None):
    """Creates a ``GaxError`` or subclass wrapping *cause*.

    Args:
        msg (string): describes the error that occurred.
        cause (Exception, optional): the lower-layer exception (e.g. from
            gRPC) that caused this one, or None if it originated in GAX.

    Returns:
        GaxError: an ``InvalidArgumentError`` for INVALID_ARGUMENT statuses,
        otherwise a plain ``GaxError``.
    """
    status_name = config.NAME_STATUS_CODES.get(config.exc_to_code(cause))
    error_cls = InvalidArgumentError if status_name == 'INVALID_ARGUMENT' else GaxError
    return error_cls(msg, cause=cause)
Creates a ``GaxError`` or subclass. Attributes: msg (string): describes the error that occurred. cause (Exception, optional): the exception raised by a lower layer of the RPC stack (for example, gRPC) that caused this exception, or None if this exception originated in GAX. Returns: .GaxError: The exception that wraps ``cause``.
codesearchnet
def markdown_table(data, headers):
    """Create a MarkDown table as a list of text lines.

    Args:
        data: sequence of rows, each a sequence of cell strings,
            e.g. [(cell00, cell01, ...), (cell10, cell11, ...), ...].
        headers: sequence of header strings (one per column).

    Returns:
        list of str: the header row, a separator row, then one line per
        data row, with columns left-aligned and separated by " | ".
    """
    # Column width = widest cell in that column, or the header if longer.
    widths = [max(len(cell) for cell in column) for column in zip(*data)]
    widths = [max(width, len(header)) for width, header in zip(widths, headers)]
    mask = " | ".join("%-{0:d}s".format(width) for width in widths)
    # %-formatting needs a tuple; coerce so list rows/headers work too.
    ret = [mask % tuple(headers)]
    ret.append(" | ".join("-" * width for width in widths))
    for row in data:
        ret.append(mask % tuple(row))
    return ret
Creates MarkDown table. Returns list of strings Arguments: data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...] headers -- sequence of strings: (header0, header1, ...)
juraj-google-style
def escalatees(self, escalatee=None, resource_id=None):
    """Point this request at the Task escalatees endpoint.

    GET/POST/DELETE: /v2/tasks/{uniqueId}/escalatees[/{escalateeId}]

    Args:
        escalatee (Optional[string]): the escalatee name appended to the URI.
        resource_id (Optional[string]): the task ID, set on the request first.
    """
    if resource_id is not None:
        self.resource_id(resource_id)
    suffix = '/escalatees' if escalatee is None else '/escalatees/{}'.format(escalatee)
    self._request_uri = '{}{}'.format(self._request_uri, suffix)
Add an escalatee to a Task GET: /v2/tasks/{uniqueId}/escalatees GET: /v2/tasks/{uniqueId}/escalatees/{escalateeId} POST: /v2/tasks/{uniqueId}/escalatees/{escalateeId} DELETE: /v2/tasks/{uniqueId}/escalatees/{escalateeId} Args: escalatee (Optional [string]): The escalatee name. resource_id (Optional [string]): The task ID.
juraj-google-style
def officers(self, num, **kwargs):
    """Search for a company's registered officers by company number.

    Args:
        num (str): company number to search on.
        **kwargs: additional query parameters forwarded to
            ``requests.session.get`` as *params*.

    Returns:
        The HTTP response object (after error handling).
    """
    endpoint = '{}company/{}/officers'.format(self._BASE_URI, num)
    response = self.session.get(endpoint, params=kwargs)
    self.handle_http_error(response)
    return response
Search for a company's registered officers by company number. Args: num (str): Company number to search on. kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
codesearchnet
def __init__(self, config):
    """ViT-like transformer block applied to bottleneck patch features.

    Args:
        config (`ZoeDepthConfig`): Model configuration class defining the
            model architecture.
    """
    super().__init__()
    in_channels = config.bottleneck_features
    # Stack of transformer encoder layers operating on the patch tokens.
    self.transformer_encoder = nn.ModuleList([ZoeDepthTransformerEncoderLayer(config) for _ in range(config.num_patch_transformer_layers)])
    # 1x1 convolution ("PxP" patch embedding) projecting the bottleneck
    # channels to the transformer hidden size.
    self.embedding_convPxP = nn.Conv2d(in_channels, config.patch_transformer_hidden_size, kernel_size=1, stride=1, padding=0)
ViT-like transformer block Args: config (`ZoeDepthConfig`): Model configuration class defining the model architecture.
github-repos
def datalab(line, cell=None):
    """Implements the datalab cell magic for ipython notebooks.

    Builds the %datalab argument parser (config list/set, project get/set)
    and hands the line/cell off to the generic magic-line handler.

    Args:
      line: the contents of the datalab line.

    Returns:
      The results of executing the cell.
    """
    parser = google.datalab.utils.commands.CommandParser(
        prog='%datalab',
        # NOTE(review): the description string literal appears to have been
        # stripped from this copy of the source -- restore it from upstream.
        description=)
    config_parser = parser.subcommand(
        'config', help='List or set API-specific configurations.')
    config_sub_commands = config_parser.add_subparsers(dest='command')
    config_list_parser = config_sub_commands.add_parser(
        'list', help='List configurations')
    config_list_parser.set_defaults(func=_config_list_fn)
    config_set_parser = config_sub_commands.add_parser(
        'set', help='Set configurations')
    config_set_parser.add_argument(
        '-n', '--name', help='The name of the configuration value', required=True)
    config_set_parser.add_argument(
        '-v', '--value', help='The value to set', required=True)
    config_set_parser.set_defaults(func=_config_set_fn)
    project_parser = parser.subcommand(
        'project', help='Get or set the default project ID')
    project_sub_commands = project_parser.add_subparsers(dest='command')
    project_get_parser = project_sub_commands.add_parser(
        'get', help='Get the default project ID')
    project_get_parser.set_defaults(func=_project_get_fn)
    project_set_parser = project_sub_commands.add_parser(
        'set', help='Set the default project ID')
    project_set_parser.add_argument(
        '-p', '--project', help='The default project ID', required=True)
    project_set_parser.set_defaults(func=_project_set_fn)
    return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
Implements the datalab cell magic for ipython notebooks. Args: line: the contents of the datalab line. Returns: The results of executing the cell.
juraj-google-style
def Failed(self):
    """Record a failed request and return the current backoff interval.

    Each call grows the interval by ``multiplier``, capped at
    ``max_interval_sec``.

    Returns:
        Time interval to wait before retrying (in seconds).
    """
    wait_sec = self._current_interval_sec
    grown = wait_sec * self.multiplier
    self._current_interval_sec = grown if grown < self.max_interval_sec else self.max_interval_sec
    return wait_sec
Indicates that a request has failed. Returns: Time interval to wait before retrying (in seconds).
codesearchnet
def _get_raw_feature_as_tensor(self, key):
    """Gets the raw feature keyed by `key` as a (Sparse)Tensor of rank >= 2.

    For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2)
    if the rank is 1. Dynamic rank is also supported. Rank-0 raw features
    error out as they are not supported.

    Args:
      key: A `str` key to access the raw feature.

    Returns:
      A `Tensor` or `SparseTensor`.

    Raises:
      ValueError: if the raw feature has static rank 0.
    """
    raw_feature = self._features[key]
    feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(raw_feature)

    def expand_dims(input_tensor):
        # Sparse tensors cannot use expand_dims; reshape to (batch, 1) instead.
        if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
            return sparse_ops.sparse_reshape(input_tensor, [array_ops.shape(input_tensor)[0], 1])
        else:
            return array_ops.expand_dims(input_tensor, -1)
    rank = feature_tensor.get_shape().ndims
    if rank is not None:
        # Static rank is known: validate and expand at graph-construction time.
        if rank == 0:
            raise ValueError('Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))
        return feature_tensor if rank != 1 else expand_dims(feature_tensor)
    # Static rank unknown: defer the rank-0 check and the conditional
    # expansion to graph execution time.
    with ops.control_dependencies([check_ops.assert_positive(array_ops.rank(feature_tensor), message='Feature (key: {}) cannot have rank 0. Given: {}'.format(key, feature_tensor))]):
        return cond.cond(math_ops.equal(1, array_ops.rank(feature_tensor)), lambda: expand_dims(feature_tensor), lambda: feature_tensor)
Gets the raw_feature (keyed by `key`) as `tensor`. The raw feature is converted to (sparse) tensor and maybe expand dim. For both `Tensor` and `SparseTensor`, the rank will be expanded (to 2) if the rank is 1. This supports dynamic rank also. For rank 0 raw feature, will error out as it is not supported. Args: key: A `str` key to access the raw feature. Returns: A `Tensor` or `SparseTensor`. Raises: ValueError: if the raw feature has rank 0.
github-repos
def cut_sphere(self, radius=15.0, origin=None, outside_sliced=True, preserve_bonds=False):
    """Cut a sphere specified by origin and radius.

    Args:
        radius (float): sphere radius, in the same units as the coordinates.
        origin (list): center of the sphere; an integer is interpreted as
            the index of the atom to take as origin. Defaults to (0, 0, 0).
        outside_sliced (bool): if True, atoms outside the sphere are cut
            out; otherwise atoms inside are cut out.
        preserve_bonds (bool): do not cut covalent bonds.

    Returns:
        Cartesian: the sliced molecule.
    """
    if (origin is None):
        origin = np.zeros(3)
    elif pd.api.types.is_list_like(origin):
        origin = np.array(origin, dtype='f8')
    else:
        # Scalar origin: use the position of the atom at that index.
        origin = self.loc[(origin, ['x', 'y', 'z'])]
    # get_distance_to adds a 'distance' column relative to `origin`.
    molecule = self.get_distance_to(origin)
    if outside_sliced:
        molecule = molecule[(molecule['distance'] < radius)]
    else:
        molecule = molecule[(molecule['distance'] > radius)]
    if preserve_bonds:
        molecule = self._preserve_bonds(molecule)
    return molecule
Cut a sphere specified by origin and radius. Args: radius (float): origin (list): Please note that you can also pass an integer. In this case it is interpreted as the index of the atom which is taken as origin. outside_sliced (bool): Atoms outside/inside the sphere are cut out. preserve_bonds (bool): Do not cut covalent bonds. Returns: Cartesian:
codesearchnet
def GetQueryValuesFromDict(cls, d, version=sorted(_SERVICE_MAP.keys())[(- 1)]):
    """Converts a dict of python types into a list of PQL types.

    Args:
      d: A dictionary of variable names to python types.
      version: A string identifying the Ad Manager version the values object
        is compatible with. Defaults to the latest version known to
        ``_SERVICE_MAP``.

    Returns:
      A list of ``{'key': name, 'value': PQL value}`` dicts formatted for PQL
      statements compatible with a particular API version.
    """
    # dict.iteritems() exists only on Python 2; items() behaves the same on
    # both Python 2 and 3.
    return [{'key': key, 'value': cls.GetValueRepresentation(value, version)}
            for key, value in d.items()]
Converts a dict of python types into a list of PQL types. Args: d: A dictionary of variable names to python types. version: A string identifying the Ad Manager version the values object is compatible with. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. Returns: A list of variables formatted for PQL statements which are compatible with a particular API version.
codesearchnet
def _example_from_allof(self, prop_spec):
    """Merge example dicts produced from each entry of an ``allOf`` section.

    Args:
        prop_spec: property specification you want an example of.

    Returns:
        dict: the merged example (later entries override earlier keys).
    """
    merged = {}
    for definition in prop_spec['allOf']:
        merged.update(self.get_example_from_prop_spec(definition, True))
    return merged
Get the examples from an allOf section. Args: prop_spec: property specification you want an example of. Returns: An example dict
codesearchnet
def _read_range(self, start, end=0):
    """Read a range of bytes from the remote object.

    Args:
        start (int): start stream position.
        end (int): end stream position; 0 means read to the end.

    Returns:
        bytes: the bytes read (empty when the range is unsatisfiable).
    """
    buffer = _BytesIO()
    end_range = end - 1 if end else None
    try:
        with _handle_azure_exception():
            self._get_to_stream(
                stream=buffer, start_range=start, end_range=end_range,
                **self._client_kwargs)
    except _AzureHttpError as error:
        # 416 = requested range not satisfiable: treat as an empty read.
        if error.status_code != 416:
            raise
        return b''
    return buffer.getvalue()
Read a range of bytes in stream. Args: start (int): Start stream position. end (int): End stream position. 0 To not specify end. Returns: bytes: number of bytes read
juraj-google-style
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs):
    """Plot data points together with a polynomial-fit trend line.

    Args:
        x: sequence of x data.
        y: sequence of y data.
        deg (int): degree of the fitted polynomial. Defaults to 1.
        xlabel (str): label for the x-axis.
        ylabel (str): label for the y-axis.
        **kwargs: keyword args passed to pretty_plot.

    Returns:
        matplotlib.pyplot object.
    """
    plt = pretty_plot(**kwargs)
    coeffs = np.polyfit(x, y, deg)
    sample_x = np.linspace(min(x), max(x), 200)
    plt.plot(sample_x, np.polyval(coeffs, sample_x), 'k--', x, y, 'o')
    for setter, label in ((plt.xlabel, xlabel), (plt.ylabel, ylabel)):
        if label:
            setter(label)
    return plt
Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object.
juraj-google-style
def _logger(self):
    """Create and configure the 'tcrun' logger.

    The logger level comes from ``self.args.logging_level``; all records are
    additionally written to ``log/run.log`` at DEBUG level.

    Returns:
        logging.Logger: the configured logger instance.
    """
    log_level = {'debug': logging.DEBUG,
                 'info': logging.INFO,
                 'warning': logging.WARNING,
                 'error': logging.ERROR,
                 'critical': logging.CRITICAL}
    level = log_level.get(self.args.logging_level.lower())
    tx_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s '
    tx_format += '(%(funcName)s:%(lineno)d)'
    formatter = logging.Formatter(tx_format)
    log = logging.getLogger('tcrun')
    # Create the log directory if missing. The previous os.access('log',
    # os.W_OK) test also returned False for an existing non-writable
    # directory, which made makedirs() raise; check for existence instead.
    if not os.path.isdir('log'):
        os.makedirs('log')
    logfile = os.path.join('log', 'run.log')
    fh = logging.FileHandler(logfile)
    fh.set_name('fh')
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    log.addHandler(fh)
    log.setLevel(level)
    log.info('Logging Level: {}'.format(logging.getLevelName(level)))
    return log
Create logger instance. Returns: logger: An instance of logging
codesearchnet
def _ReadTable(self, tables, file_object, table_offset):
    """Reads a table and dispatches each record to its type-specific reader.

    Args:
      tables (dict[int, KeychainDatabaseTable]): tables per identifier.
      file_object (file): file-like object.
      table_offset (int): offset of the table relative to the start of the
          file.

    Raises:
      ParseError: if the table cannot be read.
    """
    table_header = self._ReadTableHeader(file_object, table_offset)
    for record_offset in table_header.record_offsets:
        # A zero offset marks an unused record slot.
        if record_offset == 0:
            continue
        # Record offsets are relative to the start of the table.
        record_offset += table_offset
        if table_header.record_type == self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INFO:
            self._ReadRecordSchemaInformation(tables, file_object, record_offset)
        elif table_header.record_type == (
            self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_INDEXES):
            self._ReadRecordSchemaIndexes(tables, file_object, record_offset)
        elif table_header.record_type == (
            self._RECORD_TYPE_CSSM_DL_DB_SCHEMA_ATTRIBUTES):
            self._ReadRecordSchemaAttributes(tables, file_object, record_offset)
        else:
            # Generic records carry their type through to the record reader.
            self._ReadRecord(
                tables, file_object, record_offset, table_header.record_type)
Reads the table. Args: tables (dict[int, KeychainDatabaseTable]): tables per identifier. file_object (file): file-like object. table_offset (int): offset of the table relative to the start of the file. Raises: ParseError: if the table cannot be read.
juraj-google-style
def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):
    """Show Matchmaker submission data for a sample and eventual matches.

    Args:
        case_obj(dict): a scout case object
        institute_obj(dict): an institute object
        mme_base_url(str): base url of the MME server
        mme_token(str): auth token of the MME server

    Returns:
        data(dict): data to display in the html template, or None when the
        case has no Matchmaker submission.
    """
    data = {'institute': institute_obj, 'case': case_obj, 'server_errors': []}
    matches = {}
    if (not case_obj.get('mme_submission')):
        return None
    for patient in case_obj['mme_submission']['patients']:
        patient_id = patient['id']
        matches[patient_id] = None
        url = ''.join([mme_base_url, '/matches/', patient_id])
        server_resp = matchmaker_request(url=url, token=mme_token, method='GET')
        # A 'status_code' key means the server answered (even with no
        # matches); otherwise the response carries an error message.
        if ('status_code' in server_resp):
            pat_matches = []
            if server_resp.get('matches'):
                pat_matches = parse_matches(patient_id, server_resp['matches'])
            matches[patient_id] = pat_matches
        else:
            LOG.warning('Server returned error message: {}'.format(server_resp['message']))
            data['server_errors'].append(server_resp['message'])
    data['matches'] = matches
    return data
Show Matchmaker submission data for a sample and eventual matches. Args: case_obj(dict): a scout case object institute_obj(dict): an institute object mme_base_url(str) base url of the MME server mme_token(str) auth token of the MME server Returns: data(dict): data to display in the html template
codesearchnet
def kms_decrypt(kms_client, secret):
    """Decrypt a kms-encrypted, base64 encoded string.

    Args:
        kms_client (boto3 kms client object): instantiated kms client,
            usually created through create_aws_clients.
        secret (string): base64 encoded ciphertext to decrypt.

    Returns:
        The decrypted plaintext.

    Raises:
        SystemExit(1): via fail() on malformed base64 input or any
            boto3/KMS error.
    """
    try:
        decrypted_secret = kms_client.decrypt(CiphertextBlob=base64.b64decode(secret))['Plaintext']
    except TypeError:
        # b64decode raises TypeError on non-base64 input (Python 2 behavior).
        fail("Malformed base64 string data")
    except ClientError as error:
        if error.response["Error"]["Code"] == "InvalidCiphertextException":
            fail("The decrypt request was rejected because the specified ciphertext \
has been corrupted or is otherwise invalid.", error)
        elif error.response["Error"]["Code"] == "NotFoundException":
            fail("The decrypt request was rejected because the specified entity or resource could not be found.", error)
        else:
            fail("boto3 exception occurred while performing kms decrypt operation.", error)
    return decrypted_secret
Decrypt kms-encrypted string Args: kms_client (boto3 kms client object): Instantiated kms client object. Usually created through create_aws_clients. secret (string): base64 encoded value to be decrypted Returns: a populated EFPWContext object Raises: SystemExit(1): If there is an error with the boto3 decryption call (ex. malformed secret)
juraj-google-style
def get_loggable_url(url):
    """Strip taskcluster auth secrets out of *url* for safe logging.

    Everything from the first secret marker onward is dropped and replaced
    by ``<snip>``.

    Args:
        url (str): the url to strip; ``None`` is treated like ''.

    Returns:
        str: the loggable url.
    """
    cleaned = url or ''
    for marker in ('bewit=', 'AWSAccessKeyId=', 'access_token='):
        cleaned, _, _ = cleaned.partition(marker)
    return cleaned if cleaned == url else '{}<snip>'.format(cleaned)
Strip out secrets from taskcluster urls. Args: url (str): the url to strip Returns: str: the loggable url
codesearchnet
def sparse(self, rows: np.ndarray = None, cols: np.ndarray = None, layer: str = None) -> scipy.sparse.coo_matrix:
    """Return the main matrix or a named layer as a sparse COO matrix.

    The dense matrix is never loaded into RAM.

    Args:
        rows: rows to include, or None to include all.
        cols: columns to include, or None to include all.
        layer: layer to return, or None for the default (main) layer.

    Returns:
        Sparse matrix (:class:`scipy.sparse.coo_matrix`).
    """
    # The main matrix is stored under the empty-string layer key.
    target = self.layers["" if layer is None else layer]
    return target.sparse(rows=rows, cols=cols)
Return the main matrix or specified layer as a scipy.sparse.coo_matrix, without loading dense matrix in RAM Args: rows: Rows to include, or None to include all cols: Columns to include, or None to include all layer: Layer to return, or None to return the default layer Returns: Sparse matrix (:class:`scipy.sparse.coo_matrix`)
juraj-google-style
def __init__(self, initial_learning_rate, first_decay_steps, t_mul=2.0, m_mul=1.0, alpha=0.0, name=None):
    """Applies cosine decay with restarts (SGDR) to the learning rate.

    Args:
      initial_learning_rate: A scalar `float32` or `float64` Tensor or a
        Python number. The initial learning rate.
      first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
        number. Number of steps to decay over in the first period.
      t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the number of iterations in the i-th period.
      m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
        Used to derive the initial learning rate of the i-th period.
      alpha: A scalar `float32` or `float64` Tensor or a Python number.
        Minimum learning rate value as a fraction of initial_learning_rate.
      name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
    """
    super(CosineDecayRestarts, self).__init__()
    self.initial_learning_rate = initial_learning_rate
    self.first_decay_steps = first_decay_steps
    self._t_mul = t_mul
    self._m_mul = m_mul
    self.alpha = alpha
    self.name = name
Applies cosine decay with restarts to the learning rate. Args: initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python number. The initial learning rate. first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number. Number of steps to decay over. t_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the number of iterations in the i-th period m_mul: A scalar `float32` or `float64` `Tensor` or a Python number. Used to derive the initial learning rate of the i-th period: alpha: A scalar `float32` or `float64` Tensor or a Python number. Minimum learning rate value as a fraction of the initial_learning_rate. name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
github-repos
def current_missing(**kwargs) -> int:
    """Check the number of trials already made for missing values.

    Returns:
        int: number of trials already tried (0 when the BBG_ROOT
        environment variable is unset).
    """
    root = os.environ.get(BBG_ROOT, '').replace('\\', '/')
    if not root:
        return 0
    log_dir = f'{root}/Logs/{missing_info(**kwargs)}'
    return len(files.all_files(log_dir))
Check number of trials for missing values Returns: int: number of trials already tried
codesearchnet
def get_max_num_classes(self):
    """Compute the maximum number of classes any subtask has.

    This is useful for sizing a softmax shared across the text
    classification sub-tasks of this multi-problem; tasks without a
    ``num_classes`` attribute are ignored.

    Returns:
        int: the highest number of output classes, or 0 when no task
        declares any (matching the original accumulator semantics).
    """
    class_counts = [task.num_classes for task in self.task_list
                    if hasattr(task, 'num_classes')]
    # max over [0] + counts keeps the original floor-at-zero behavior.
    return max([0] + class_counts)
Compute the maximum number of classes any subtask has. This is useful for modifying the size of the softmax to include the output labels for the classification tasks. Currently, labels from different tasks are overloaded. Returns: num: Highest number of output classes in any text classification sub-task within this MultiProblem.
codesearchnet
def create(provider, count=1, name=None, **kwargs):
    """Create one or more cloud servers.

    Args:
        provider (str): cloud provider, e.g. ec2, digitalocean.
        count (int): number of instances. Defaults to 1.
        name (str): name applied to every server. Defaults to None.
        **kwargs: provider-specific flags.

    Returns:
        Whatever the provider's ``create_servers`` returns.
    """
    how_many = int(count)
    backend = provider_by_name(provider)
    options = backend.create_server_defaults
    options.update(kwargs)
    backend.validate_create_options(**options)
    return backend.create_servers(how_many, [name] * how_many, **options)
Create one or more cloud servers.

Args:
    * provider (str): Cloud provider, e.g. ec2, digitalocean
    * count (int) =1: Number of instances
    * name (str) =None: Name of server(s)
    * **kwargs: Provider-specific flags
codesearchnet
def make_padding_config(padding_config: PaddingConfig | Sequence[tuple[int, int, int]]) -> PaddingConfig:
    """Create a PaddingConfig proto from a list of integer triples.

    Args:
        padding_config: either a PaddingConfig (returned unchanged) or a
            sequence of (edge_padding_low, edge_padding_high,
            interior_padding) triples describing the padding operation.

    Returns:
        A `PaddingConfig` object.
    """
    if isinstance(padding_config, PaddingConfig):
        return padding_config
    config = PaddingConfig()
    for low, high, interior in padding_config:
        dimension = PaddingConfigDimension()
        dimension.edge_padding_low = low
        dimension.edge_padding_high = high
        dimension.interior_padding = interior
        config.dimensions.append(dimension)
    return config
Create PaddingConfig proto from list of triples of integers. Args: padding_config: either a PaddingConfig or a list of integer triples (edge_padding_low, edge_padding_high, interior_padding) representing the configuration of the padding operation. Returns: A `PaddingConfig` object.
github-repos
def ProgChunks(list_, chunksize, nInput=None, **kwargs):
    """Yield *list_* in chunks while reporting progress.

    Progress version of ut.ichunks.

    Args:
        list_ (list): items to iterate in chunks.
        chunksize (int): number of items per chunk.
        nInput (int): total item count; defaults to ``len(list_)``.
        **kwargs: forwarded to ``ProgressIter``; ``freq`` defaults to 1 and
            ``length`` is set to the number of chunks.

    Returns:
        ProgressIter: progress-reporting iterator over the chunks.
    """
    total = len(list_) if nInput is None else nInput
    kwargs['length'] = get_num_chunks(total, chunksize)
    kwargs.setdefault('freq', 1)
    return ProgressIter(util_iter.ichunks(list_, chunksize), **kwargs)
Yeilds an iterator in chunks and computes progress Progress version of ut.ichunks Args: list_ (list): chunksize (?): nInput (None): (default = None) Kwargs: length, freq Returns: ProgressIter: progiter_ CommandLine: python -m utool.util_progress ProgChunks --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> import utool as ut >>> list_ = range(100) >>> chunksize = 10 >>> nInput = None >>> progiter_ = ProgChunks(list_, chunksize, nInput) >>> iter_ = iter(progiter_) >>> chunk = six.next(iter_) >>> assert len(chunk) == 10 >>> rest = ut.flatten(list(progiter_)) >>> assert len(rest) == 90
codesearchnet
def whois_domains_history(self, domains):
    """Calls the WHOIS domain-history end point for each domain.

    Args:
        domains: an enumerable of domains.

    Returns:
        A dict of {domain: domain_history_result}.
    """
    return self._multi_get(
        'opendns-whois-domain-history', u'whois/{0}/history', domains)
Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result}
juraj-google-style
def __init__(self, base: ModelHandler[ExampleT, PredictionT, ModelT], preprocess_fn: Callable[[PreProcessT], ExampleT]):
    """A ModelHandler with an associated preprocessing function.

    Args:
        base: an implementation of the underlying model handler.
        preprocess_fn: the preprocessing function applied to each example.
    """
    self._base = base
    self._preprocess_fn = preprocess_fn
    # Propagate environment variables from the wrapped handler, if any.
    self._env_vars = getattr(base, '_env_vars', {})
A ModelHandler that has a preprocessing function associated with it. Args: base: An implementation of the underlying model handler. preprocess_fn: the preprocessing function to use.
github-repos
def create(cls, **kwargs):
    """Create a command object via a POST to the /command endpoint.

    Note - this does not wait for the command to complete.

    Args:
        **kwargs: keyword arguments specific to the command type; a missing
            ``command_type`` defaults to the class name, and a ``tags``
            string is split on commas.

    Returns:
        A command object built from the server response.
    """
    connection = Qubole.agent()
    if kwargs.get('command_type') is None:
        kwargs['command_type'] = cls.__name__
    tags = kwargs.get('tags')
    if tags is not None:
        kwargs['tags'] = tags.split(',')
    return cls(connection.post(cls.rest_entity_path, data=kwargs))
Create a command object by issuing a POST request to the /command endpoint Note - this does not wait for the command to complete Args: `**kwargs`: keyword arguments specific to command type Returns: Command object
juraj-google-style
def show_corrections(self, status=None, nids=None):
    """Show the corrections applied to the flow at run-time.

    Args:
        status: if not None, only the tasks with this status are selected.
        nids: optional list of node identifiers used to filter the tasks.

    Returns:
        The number of corrected tasks found.
    """
    nrows, ncols = get_terminal_size()
    count = 0
    for task in self.iflat_tasks(status=status, nids=nids):
        if task.num_corrections == 0:
            continue
        count += 1
        # One banner per corrected task, sized to the terminal width.
        print(make_banner(str(task), width=ncols, mark="="))
        for corr in task.corrections:
            pprint(corr)
    if not count:
        print("No correction found.")
    return count
Show the corrections applied to the flow at run-time. Args: status: if not None, only the tasks with this status are select. nids: optional list of node identifiers used to filter the tasks. Return: The number of corrections found.
juraj-google-style
def _dispatch(self, event, listener, *args, **kwargs):
    """Route *listener* to the coroutine or plain-function dispatcher.

    Args:
        event (str): the name of the event that triggered this call.
        listener: a def, an async def, or a functools.partial wrapping
            either.
        *args: positional arguments for the listener.
        **kwargs: keyword arguments for the listener.
    """
    # Unwrap partials so async-def detection sees the underlying callable.
    target = listener.func if isinstance(listener, functools.partial) else listener
    if asyncio.iscoroutinefunction(target):
        return self._dispatch_coroutine(event, listener, *args, **kwargs)
    return self._dispatch_function(event, listener, *args, **kwargs)
Dispatch an event to a listener. Args: event (str): The name of the event that triggered this call. listener (def or async def): The listener to trigger. *args: Any number of positional arguments. **kwargs: Any number of keyword arguments. This method inspects the listener. If it is a def it dispatches the listener to a method that will execute that def. If it is an async def it dispatches it to a method that will schedule the resulting coro with the event loop.
codesearchnet
def segmentation_to_mask(polys, height, width):
    """Rasterize polygons into a single binary mask.

    Args:
        polys: a list of nx2 float arrays, each containing (x, y)
            coordinates.
        height: mask height in pixels.
        width: mask width in pixels.

    Returns:
        A binary matrix of shape (height, width).
    """
    flat_polys = [p.flatten().tolist() for p in polys]
    assert len(flat_polys) > 0, 'Polygons are empty!'
    import pycocotools.mask as cocomask
    encoded = cocomask.frPyObjects(flat_polys, height, width)
    return cocomask.decode(cocomask.merge(encoded))
Convert polygons to binary masks. Args: polys: a list of nx2 float array. Each array contains many (x, y) coordinates. Returns: a binary matrix of (height, width)
codesearchnet
async def getTypeNorm(self, name, valu):
    """Get the normalized type value based on the Cortex data model.

    Args:
        name (str): the type to normalize.
        valu: the value to normalize.

    Returns:
        tuple: a two item tuple of the normed value and the info dict.

    Raises:
        s_exc.NoSuchType: if the type does not exist.
        s_exc.BadTypeValu: if the value fails to normalize.
    """
    type_obj = self.model.type(name)
    if type_obj is None:
        raise s_exc.NoSuchType(mesg=f'The type {name} does not exist.', name=name)
    norm, info = type_obj.norm(valu)
    return norm, info
Get the normalized type value based on the Cortex data model. Args: name (str): The type to normalize. valu: The value to normalize. Returns: (tuple): A two item tuple, containing the normed value and the info dictionary. Raises: s_exc.NoSuchType: If the type does not exist. s_exc.BadTypeValu: If the value fails to normalize.
codesearchnet
def closest_point_to(self, point, thr=20.0):
    """Finds the closest point in the segment to a given point.

    Args:
        point (:obj:`Point`): the query point.
        thr (float, optional): distance threshold, in meters, to be
            considered the same point. Defaults to 20.0.

    Returns:
        (int, Point): index of the point (-1 if none is within *thr*) and
        the matched or projected point (None when not found).
    """
    i = 0
    point_arr = point.gen2arr()

    def closest_in_line(pointA, pointB):
        # Projection of the query point onto the segment A-B.
        # NOTE(review): the components are swapped when building the Point --
        # presumably to match closest_point's (lon, lat) return order; verify.
        temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)
        return Point(temp[1], temp[0], None)
    for (p_a, p_b) in pairwise(self.points):
        candidate = closest_in_line(p_a, p_b)
        if candidate.distance(point) <= thr:
            # Prefer snapping to an existing endpoint when one is close enough.
            if p_a.distance(point) <= thr:
                return i, p_a
            elif p_b.distance(point) <= thr:
                return i + 1, p_b
            else:
                return i, candidate
        i = i + 1
    return -1, None
Finds the closest point in the segment to a given point Args: point (:obj:`Point`) thr (float, optional): Distance threshold, in meters, to be considered the same point. Defaults to 20.0 Returns: (int, Point): Index of the point. -1 if doesn't exist. A point is given if it's along the segment
juraj-google-style
def __call__(self, *args, **kwargs) -> Any:
    """Call with late bound arguments.

    Args:
        *args: list arguments.
        **kwargs: keyword arguments.

    Returns:
        Any.

    Raises:
        TypeError: got multiple values for arguments or extra argument name.
    """
    args, kwargs = self._parse_call_time_overrides(*args, **kwargs)
    signature = self.__signature__
    if self.is_subclassed_functor:
        # Subclassed functors take no direct positionals: fold positional
        # args into keyword overrides applied to symbolic members for the
        # duration of the call.
        for arg_spec, arg_value in zip(signature.args, args):
            kwargs[arg_spec.name] = arg_value
        with self._apply_call_time_overrides_to_members(**kwargs):
            return_value = self._call()
    else:
        return_value = self._call(*args, **kwargs)
    # Optionally type-check the return value against the declared spec.
    if signature.return_value and flags.is_type_check_enabled() and (pg_typing.MISSING_VALUE != return_value):
        return_value = signature.return_value.apply(return_value, root_path=self.sym_path + 'returns')
    # Track provenance of symbolic return values when origin tracking is on.
    if flags.is_tracking_origin() and isinstance(return_value, base.Symbolic):
        return_value.sym_setorigin(self, 'return')
    return return_value
Call with late bound arguments. Args: *args: list arguments. **kwargs: keyword arguments. Returns: Any. Raises: TypeError: got multiple values for arguments or extra argument name.
github-repos
def add_catalog_from_URL(self, votable_URL, votable_options=None):
    """Record a VOTable URL (and options) so the widget loads its data.

    Toggling ``votable_from_URL_flag`` signals the widget to (re)load the
    table from the URL.

    Args:
        votable_URL: string url of the VOTable to load.
        votable_options: optional dictionary of per-table options.
    """
    # Avoid the shared mutable-default pitfall: each call without options
    # gets its own fresh dict instead of one shared across all calls.
    if votable_options is None:
        votable_options = {}
    self.votable_URL = votable_URL
    self.votable_options = votable_options
    self.votable_from_URL_flag = not self.votable_from_URL_flag
Load a VOTable from a URL and load its data into the widget.

Args:
    votable_URL: string URL of the VOTable to fetch
    votable_options: dictionary of per-table options
juraj-google-style
def to_timestamp(self, data):
    """Transform the datetime column ``self.col_name`` into linux epoch.

    Null datetimes stay null in the output series.

    Args:
        data (pandas.DataFrame): DataFrame containing a column named
            ``self.col_name``.

    Returns:
        pandas.Series: epoch values (nanoseconds) aligned on ``data.index``.
    """
    result = pd.Series(index=data.index)
    not_null = ~data[self.col_name].isnull()
    result[not_null] = data.loc[not_null, self.col_name].astype('int64')
    return result
Transform a datetime series into linux epoch. Args: data(pandas.DataFrame): DataFrame containins a column named as `self.col_name`. Returns: pandas.Series
codesearchnet
def validate_metadata(train_config):
    """Perform some checks that the training config is correct.

    Verifies that the csv header and csv defaults agree in length, and that
    every header column is accounted for exactly once as a numerical column,
    categorical column, key, or target.

    Args:
        train_config: train config as produced by merge_metadata().

    Raises:
        ValueError: if columns look wrong.
    """
    header = train_config['csv_header']
    if len(header) != len(train_config['csv_defaults']):
        raise ValueError('Unequal number of columns in input features file and '
                         'schema file.')
    expected = sorted(header + [train_config['target_column']])
    declared = sorted(
        train_config['categorical_columns']
        + train_config['numerical_columns']
        + [train_config['key_column']]
        + [train_config['target_column']])
    if declared != expected:
        raise ValueError('Each csv header must be a numerical/categorical type, a '
                         ' key, or a target.')
Perform some checks that the trainig config is correct. Args: train_config: train config as produced by merge_metadata() Raises: ValueError: if columns look wrong.
juraj-google-style
def code_cell(sourcecode):
    r"""Format *sourcecode* as a json ipython-notebook code cell.

    Args:
        sourcecode (str): source text for the cell, or None for an empty
            source list.

    Returns:
        str: json formatted ipython notebook code cell.
    """
    import utool as ut
    sourcecode = ut.remove_codeblock_syntax_sentinals(sourcecode)
    # NOTE(review): the template strings passed to ut.codeblock() appear to
    # have been stripped from this copy of the source -- restore the cell
    # header/footer templates from upstream.
    cell_header = ut.codeblock(
    )
    cell_footer = ut.codeblock(
    )
    if sourcecode is None:
        source_line_repr = ' []\n'
    else:
        # Re-attach newlines to every line except the last, then repr each
        # line for embedding in the json "source" array.
        lines = sourcecode.split('\n')
        line_list = [line + '\n' if count < len(lines) else line for count, line in enumerate(lines, start=1)]
        repr_line_list = [repr_single_for_md(line) for line in line_list]
        source_line_repr = ut.indent(',\n'.join(repr_line_list), ' ' * 2)
        source_line_repr = ' [\n' + source_line_repr + '\n ]\n'
    return (cell_header + source_line_repr + cell_footer)
r""" Args: sourcecode (str): Returns: str: json formatted ipython notebook code cell CommandLine: python -m ibeis.templates.generate_notebook --exec-code_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> sourcecode = notebook_cells.timestamp_distribution[1] >>> sourcecode = notebook_cells.initialize[1] >>> result = code_cell(sourcecode) >>> print(result)
juraj-google-style
def lazy_property(fn):
    """Decorator that makes a property lazy-evaluated whilst preserving docstrings.

    The wrapped getter runs at most once per instance; the result is cached
    on the instance under a private attribute and returned on later access.

    Args:
        fn (function): the property getter to wrap.

    Returns:
        property: evaluated-once version of the property.
    """
    cache_attr = '_lazy_' + fn.__name__

    @wraps(fn)
    def _getter(self):
        # Compute and cache on first access only.
        if not hasattr(self, cache_attr):
            setattr(self, cache_attr, fn(self))
        return getattr(self, cache_attr)

    return property(_getter)
Decorator that makes a property lazy-evaluated whilst preserving docstrings. Args: fn (function): the property in question Returns: evaluated version of the property.
juraj-google-style
def rotate_texture(texture, rotation, x_offset=0.5, y_offset=0.5):
    """Rotates the given texture by a given angle.

    Args:
        texture (texture): the texture to rotate
        rotation (float): the angle of rotation in degrees
        x_offset (float): the x component of the center of rotation (optional)
        y_offset (float): the y component of the center of rotation (optional)

    Returns:
        texture: A texture.
    """
    xs, ys = texture
    # Translate so the rotation center sits at the origin (inputs unchanged).
    xs = xs - x_offset
    ys = ys - y_offset
    theta = np.radians(rotation)
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    # Clockwise rotation matrix applied to every point.
    rotated_x = xs * cos_t + ys * sin_t
    rotated_y = ys * cos_t - xs * sin_t
    # Translate back to the original center.
    return rotated_x + x_offset, rotated_y + y_offset
Rotates the given texture by a given angle. Args: texture (texture): the texture to rotate rotation (float): the angle of rotation in degrees x_offset (float): the x component of the center of rotation (optional) y_offset (float): the y component of the center of rotation (optional) Returns: texture: A texture.
juraj-google-style
def load(cls, path):
    """Create a new MLPipeline from a JSON specification.

    The JSON file format is the same as the one created by the `to_dict` method.

    Args:
        path (str): Path of the JSON file to load.

    Returns:
        MLPipeline: A new MLPipeline instance with the specification found
        in the JSON file.
    """
    with open(path, 'r') as json_file:
        spec = json.load(json_file)
    # Delegate construction to the class' dict-based factory.
    return cls.from_dict(spec)
Create a new MLPipeline from a JSON specification. The JSON file format is the same as the one created by the `to_dict` method. Args: path (str): Path of the JSON file to load. Returns: MLPipeline: A new MLPipeline instance with the specification found in the JSON file.
juraj-google-style
def parse_ped(ped_stream, family_type='ped'):
    """Parse out minimal family information from a PED file.

    Args:
        ped_stream(iterable(str))
        family_type(str): Format of the pedigree information

    Returns:
        family_id(str), samples(list[dict])
    """
    parsed = FamilyParser(ped_stream, family_type=family_type)

    # A scout case maps to exactly one pedigree family.
    if len(parsed.families) != 1:
        raise PedigreeError("Only one case per ped file is allowed")

    family_id = list(parsed.families.keys())[0]
    family = parsed.families[family_id]

    samples = []
    for ind_id, individual in family.individuals.items():
        samples.append({
            'sample_id': ind_id,
            'father': individual.father,
            'mother': individual.mother,
            # Map ped codes to human readable values.
            'sex': SEX_MAP[individual.sex],
            'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],
        })

    return family_id, samples
Parse out minimal family information from a PED file. Args: ped_stream(iterable(str)) family_type(str): Format of the pedigree information Returns: family_id(str), samples(list[dict])
juraj-google-style
def trace_max_buffer_capacity(self):
    """Retrieves the maximum size the trace buffer can be configured with.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        The maximum configurable capacity for the trace buffer.
    """
    capacity = ctypes.c_uint32(0)
    command = enums.JLinkTraceCommand.GET_MAX_CAPACITY
    # The DLL writes the capacity into the referenced uint32.
    status = self._dll.JLINKARM_TRACE_Control(command, ctypes.byref(capacity))
    if status == 1:
        raise errors.JLinkException('Failed to get max trace buffer size.')
    return capacity.value
Retrieves the maximum size the trace buffer can be configured with. Args: self (JLink): the ``JLink`` instance. Returns: The maximum configurable capacity for the trace buffer.
codesearchnet
def SetProtocol(self, protocol):
    """Sets the protocol that will be used to query Viper.

    Args:
        protocol (str): protocol to use to query Viper. Either 'http' or
            'https'.

    Raises:
        ValueError: If an invalid protocol is selected.
    """
    # Normalize before validating so ' HTTPS ' and 'https' are equivalent.
    normalized = protocol.lower().strip()
    if normalized not in ('http', 'https'):
        raise ValueError('Invalid protocol specified for Viper lookup')
    self._analyzer.SetProtocol(normalized)
Sets the protocol that will be used to query Viper. Args: protocol (str): protocol to use to query Viper. Either 'http' or 'https'. Raises: ValueError: If an invalid protocol is selected.
juraj-google-style
def subscribe(self, clock_name: str = None, clock_slots: Iterable[str] = None,
              subscriptions: Dict[(str, Any)] = None):
    """Subscribes this Area to the given Areas and optionally given Slots.

    Must be called before the Area is run.

    Args:
        clock_name: The name of the Area that is used as synchronizing Clock.
        clock_slots: The slots of the Clock relevant to this Area.
        subscriptions: A dictionary containing the relevant Areas names as
            keys and optionally the Slots as values.
    """
    # Bug fix: the original used a mutable default (``subscriptions={}``)
    # and mutated it below, so subscriptions leaked between calls that
    # relied on the default. Use a None sentinel instead.
    if subscriptions is None:
        subscriptions = {}
    for area in subscriptions:
        init_full(self, area, subscriptions[area])
        # Normalize each entry into the {'slots': ...} wire format.
        subscriptions[area] = {'slots': subscriptions[area]}
    if clock_name is not None:
        self.clock_name = clock_name
        self.clock_slots = clock_slots
        # The clock only ever needs the most recent tick buffered.
        subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1}
    self.setup(puller=True, subscriptions=subscriptions)
Subscribes this Area to the given Areas and optionally given Slots. Must be called before the Area is run. Args: clock_name: The name of the Area that is used as synchronizing Clock. clock_slots: The slots of the Clock relevant to this Area. subscriptions: A dictionary containing the relevant Areas names as keys and optionally the Slots as values.
codesearchnet
def __init__(self, executable: _PATH = 'default') -> None:
    """Creates a new instance of the Commands.

    Args:
        executable: Path to the adb executable. On the Windows platform,
            the bundled default ('default') is usually the best choice.

    Raises:
        FileNotFoundError: if an explicit ``*adb.exe`` path does not exist.
        EnvironmentError: if ``adb`` was requested but PATH does not appear
            to contain an Android platform-tools directory.
    """
    # adb.exe bundled alongside this package.
    _default_path = os.path.join(
        os.path.dirname(__file__), 'executable', 'adb.exe')
    if executable == 'default':
        self.path = _default_path
    elif executable.endswith('adb.exe'):
        # NOTE: this branch also catches the bare name 'adb.exe', so the
        # PATH-based branch below only ever sees 'adb'.
        if not os.path.isfile(executable):
            # Bug fix: the original formatted ``self.path`` here, but
            # ``self.path`` has not been assigned yet, which raised
            # AttributeError instead of the intended FileNotFoundError.
            raise FileNotFoundError(f'{executable!r} does not exist.')
        self.path = executable
    elif executable in ['adb', 'adb.exe']:
        PATH = os.environ['PATH']
        # Crude substring heuristic for "adb is reachable via PATH".
        if not ('adb' in PATH or 'android' in PATH or
                'platform-tools' in PATH):
            raise EnvironmentError('PATH does not exist.')
        self.path = executable
    else:
        self.path = _default_path
Creates a new instance of the Commands. Args: executable: Path to the adb executable. On the Windows platform, the best choice is 'default' (the bundled adb.exe).
juraj-google-style
def get_measurements(region, core_info, data, extra_offset=0):
    """Get the complete measurement info from likwid's region info.

    Args:
        region: The region we took a measurement in.
        core_info: The core information.
        data: The raw data.
        extra_offset (int): default = 0

    Returns (list((region, metric, core, value))):
        A list of measurement tuples, a tuple contains the information about
        the region, the metric, the core and the actual value.
    """
    # Only truthy core entries count toward the number of measured cores,
    # but indexing below still uses the original (unfiltered) positions.
    num_cores = sum(1 for entry in core_info if entry)
    skipped = {'1', 'Region Info', 'Event', 'Metric', 'CPU clock'}
    measurements = []
    for metric, slot in data.items():
        if metric in skipped:
            continue
        for i in range(num_cores):
            core = core_info[i]
            # Short-circuit keeps slot untouched for falsy core entries.
            if core and slot[extra_offset + i]:
                measurements.append((region, metric, core,
                                     slot[extra_offset + i]))
    return measurements
Get the complete measurement info from likwid's region info. Args: region: The region we took a measurement in. core_info: The core information. data: The raw data. extra_offset (int): default = 0 Returns (list((region, metric, core, value))): A list of measurement tuples, a tuple contains the information about the region, the metric, the core and the actual value.
codesearchnet
def AddStationDecoration(self, index, color="#f00"):
    """Appends a horizontal highlight line over the given station-line.

    NOTE(review): the default value of ``color`` was truncated in this copy
    of the source (``color='``); it has been restored to ``"#f00"`` per the
    upstream transitfeed schedule viewer.

    Args:
      # Integer, index of stop to be highlighted.
      index: 4
      # An optional string with a html color code
      color: "#fff"
    """
    tmpstr = str()
    num_stations = len(self._stations)
    ind = int(index)
    if self._stations:
        # Only indices strictly inside the station list get a polyline;
        # out-of-range indices still append an empty decorator entry.
        if 0 < ind < num_stations:
            y = self._stations[ind]
            tmpstr = ('<polyline class="Dec" stroke="%s" '
                      'points="%s,%s,%s,%s" />'
                      % (color, 20, 20 + y + 0.5,
                         self._gwidth + 20, 20 + y + 0.5))
    self._decorators.append(tmpstr)
Flushes existing decorations and highlights the given station-line. Args: # Integer, index of stop to be highlighted. index: 4 # An optional string with a html color code color: "#fff"
codesearchnet
def _parse_mtu(self, config):
    """Parses the config block and returns the configured IP MTU value

    The provided configuration block is scanned and the configured value
    for the IP MTU is returned as a dict object. The IP MTU value is
    expected to always be present in the provided config block

    Args:
        config (str): The interface configuration block to parse

    Return:
        dict: A dict object intended to be merged into the resource dict
    """
    # The mtu line is guaranteed present, so .group() is safe here.
    value = re.search(r'mtu (\d+)', config).group(1)
    return {'mtu': int(value)}
Parses the config block and returns the configured IP MTU value The provided configuration block is scanned and the configured value for the IP MTU is returned as a dict object. The IP MTU value is expected to always be present in the provided config block Args: config (str): The interface configuration block to parse Return: dict: A dict object intended to be merged into the resource dict
juraj-google-style
def add(self, rule: 'functions.ReplacementRule') -> None:
    """Add a new rule to the replacer.

    Args:
        rule: The rule to add.
    """
    # Register the rule's pattern together with its replacement callback
    # in the underlying matcher.
    self.matcher.add(rule.pattern, rule.replacement)
Add a new rule to the replacer. Args: rule: The rule to add.
codesearchnet
def parse_config_files_and_bindings(config_files,
                                    bindings,
                                    finalize_config=True,
                                    skip_unknown=False):
    """Parse a list of config files followed by extra Gin bindings.

    This function is equivalent to:

        for config_file in config_files:
            gin.parse_config_file(config_file, skip_configurables)
        gin.parse_config(bindings, skip_configurables)
        if finalize_config:
            gin.finalize()

    Args:
        config_files: A list of paths to the Gin config files.
        bindings: A list of individual parameter binding strings.
        finalize_config: Whether to finalize the config after parsing and
            binding (defaults to True).
        skip_unknown: A boolean indicating whether unknown configurables and
            imports should be skipped instead of causing errors
            (alternatively a list of configurable names to skip if unknown).
            See `parse_config` for additional details.
    """
    # None means "nothing to parse" for both arguments.
    for config_file in (config_files if config_files is not None else []):
        parse_config_file(config_file, skip_unknown)
    parse_config(bindings if bindings is not None else '', skip_unknown)
    if finalize_config:
        finalize()
Parse a list of config files followed by extra Gin bindings. This function is equivalent to: for config_file in config_files: gin.parse_config_file(config_file, skip_configurables) gin.parse_config(bindings, skip_configurables) if finalize_config: gin.finalize() Args: config_files: A list of paths to the Gin config files. bindings: A list of individual parameter binding strings. finalize_config: Whether to finalize the config after parsing and binding (defaults to True). skip_unknown: A boolean indicating whether unknown configurables and imports should be skipped instead of causing errors (alternatively a list of configurable names to skip if unknown). See `parse_config` for additional details.
codesearchnet
def oauth2_callback(request):
    """View that handles the user's return from OAuth2 provider.

    This view verifies the CSRF state and OAuth authorization code, and on
    success stores the credentials obtained in the storage provider, and
    redirects to the return_url specified in the authorize view and stored
    in the session.

    Args:
        request: Django request.

    Returns:
        A redirect response back to the return_url, or an
        HttpResponseBadRequest describing the failure.
    """
    # Provider signalled an error (e.g. user denied access).
    if 'error' in request.GET:
        reason = request.GET.get(
            'error_description', request.GET.get('error', ''))
        # Escape before echoing back to the browser to avoid XSS.
        reason = html.escape(reason)
        return http.HttpResponseBadRequest(
            'Authorization failed {0}'.format(reason))
    # Both state and code are required to complete the flow.
    try:
        encoded_state = request.GET['state']
        code = request.GET['code']
    except KeyError:
        return http.HttpResponseBadRequest(
            'Request missing state or authorization code')
    # The CSRF token stored when the flow was started.
    try:
        server_csrf = request.session[_CSRF_KEY]
    except KeyError:
        return http.HttpResponseBadRequest(
            'No existing session for this flow.')
    # State is a JSON blob carrying the client CSRF token and return URL.
    try:
        state = json.loads(encoded_state)
        client_csrf = state['csrf_token']
        return_url = state['return_url']
    except (ValueError, KeyError):
        return http.HttpResponseBadRequest('Invalid state parameter.')
    # Client and server CSRF tokens must match exactly.
    if client_csrf != server_csrf:
        return http.HttpResponseBadRequest('Invalid CSRF token.')
    flow = _get_flow_for_token(client_csrf, request)
    if not flow:
        return http.HttpResponseBadRequest('Missing Oauth2 flow.')
    # Exchange the authorization code for credentials.
    try:
        credentials = flow.step2_exchange(code)
    except client.FlowExchangeError as exchange_error:
        return http.HttpResponseBadRequest(
            'An error has occurred: {0}'.format(exchange_error))
    # Persist the credentials and notify listeners before redirecting back.
    get_storage(request).put(credentials)
    signals.oauth2_authorized.send(sender=signals.oauth2_authorized,
                                   request=request,
                                   credentials=credentials)
    return shortcuts.redirect(return_url)
View that handles the user's return from OAuth2 provider. This view verifies the CSRF state and OAuth authorization code, and on success stores the credentials obtained in the storage provider, and redirects to the return_url specified in the authorize view and stored in the session. Args: request: Django request. Returns: A redirect response back to the return_url.
juraj-google-style
def _validate_first_message(cls, msg):
    """Check the first message matches the expected handshake.

    Note:
      The handshake is provided as :py:attr:`RTM_HANDSHAKE`.

    Arguments:
      msg (:py:class:`aiohttp.Message`): The message to validate.

    Raises:
      :py:class:`SlackApiError`: If the data doesn't match the expected
        handshake.
    """
    payload = cls._unpack_message(msg)
    logger.debug(payload)
    # Anything other than the canonical handshake means the RTM session
    # did not start correctly.
    if payload != cls.RTM_HANDSHAKE:
        raise SlackApiError('Unexpected response: {!r}'.format(payload))
    logger.info('Joined real-time messaging.')
Check the first message matches the expected handshake. Note: The handshake is provided as :py:attr:`RTM_HANDSHAKE`. Arguments: msg (:py:class:`aiohttp.Message`): The message to validate. Raises: :py:class:`SlackApiError`: If the data doesn't match the expected handshake.
juraj-google-style
def iter_replace_strings(replacements):
    """Create a function that uses replacement pairs to process a string.

    The returned function takes an iterator and yields on each processed
    line.

    Args:
        replacements: Dict containing 'find_string': 'replace_string' pairs

    Returns:
        function with signature: iterator of strings = function(iterable)
    """
    def function_iter_replace_strings(iterable_strings):
        """Yield each string with all replacement pairs applied in order.

        Args:
            iterable_strings: Iterable containing strings. E.g a file-like
                object.

        Returns:
            Yields formatted line.
        """
        for line in iterable_strings:
            # Apply replacements sequentially, in dict insertion order,
            # so later pairs see the output of earlier ones.
            for find_str, replace_str in replacements.items():
                line = line.replace(find_str, replace_str)
            yield line

    return function_iter_replace_strings
Create a function that uses replacement pairs to process a string. The returned function takes an iterator and yields on each processed line. Args: replacements: Dict containing 'find_string': 'replace_string' pairs Returns: function with signature: iterator of strings = function(iterable)
codesearchnet
def diff_commonOverlap(self, text1, text2):
    """Determine if the suffix of one string is the prefix of another.

    Args:
        text1: First string.
        text2: Second string.

    Returns:
        The number of characters common to the end of the first string and
        the start of the second string.
    """
    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    # Eliminate the null case.
    if ((text1_length == 0) or (text2_length == 0)):
        return 0
    # Truncate the longer of the two strings so both have equal length;
    # only the suffix of text1 / prefix of text2 can overlap anyway.
    if (text1_length > text2_length):
        text1 = text1[(- text2_length):]
    elif (text1_length < text2_length):
        text2 = text2[:text1_length]
    text_length = min(text1_length, text2_length)
    # Quick check for the whole-string equality case.
    if (text1 == text2):
        return text_length
    # Start by looking for a single-character match and increase length
    # until no match is found. find() is used to skip ahead over regions
    # that cannot possibly match, instead of testing every length.
    best = 0
    length = 1
    while True:
        pattern = text1[(- length):]
        found = text2.find(pattern)
        if (found == (- 1)):
            return best
        # Jump forward by where the pattern was found in text2.
        length += found
        # found == 0 means the pattern aligned exactly at the start of
        # text2; otherwise re-verify the grown candidate overlap.
        if ((found == 0) or (text1[(- length):] == text2[:length])):
            best = length
            length += 1
Determine if the suffix of one string is the prefix of another. Args: text1 First string. text2 Second string. Returns: The number of characters common to the end of the first string and the start of the second string.
codesearchnet
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

    Standalone usage:
    >>> y_true = [[1], [1], [0], [0]]
    >>> y_pred = [[1], [1], [0], [0]]
    >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
    >>> assert m.shape == (4,)
    >>> m.numpy()
    array([1., 1., 1., 1.], dtype=float32)

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
      threshold: (Optional) Float representing the threshold for deciding
        whether prediction values are 1 or 0.

    Returns:
      Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    predictions = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
    # Cast the threshold into the predictions' dtype before comparing.
    cutoff = math_ops.cast(threshold, predictions.dtype)
    # Binarize: values above the threshold become 1, the rest 0.
    binarized = math_ops.cast(predictions > cutoff, predictions.dtype)
    # Mean match rate over the innermost axis.
    return backend.mean(math_ops.equal(y_true, binarized), axis=-1)
Calculates how often predictions match binary labels. Standalone usage: >>> y_true = [[1], [1], [0], [0]] >>> y_pred = [[1], [1], [0], [0]] >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred) >>> assert m.shape == (4,) >>> m.numpy() array([1., 1., 1., 1.], dtype=float32) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. threshold: (Optional) Float representing the threshold for deciding whether prediction values are 1 or 0. Returns: Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
github-repos
def send(self, message_type, data, callback=None, one_way=False):
    """Sends a message of message_type

    Args:
        message_type (validator_pb2.Message): enum value
        data (bytes): serialized protobuf
        callback (function): a callback function to call when a response
            to this message is received
        one_way (bool): if True, no response is expected and no future is
            registered

    Returns:
        future.Future
    """
    outgoing = validator_pb2.Message(
        correlation_id=_generate_id(),
        content=data,
        message_type=message_type)

    response_future = future.Future(
        outgoing.correlation_id,
        outgoing.content,
        callback,
        timeout=self._connection_timeout)

    # Register the future before sending so the response cannot race
    # past an unregistered correlation id.
    if not one_way:
        self._futures.put(response_future)

    self._send_receive_thread.send_message(outgoing)
    return response_future
Sends a message of message_type Args: message_type (validator_pb2.Message): enum value data (bytes): serialized protobuf callback (function): a callback function to call when a response to this message is received Returns: future.Future
juraj-google-style
def _plot_depth_track(self, ax, md, kind='MD'):
    """Private function. Depth track plotting.

    Args:
        ax (ax): A matplotlib axis.
        md (ndarray): The measured depths of the track.
        kind (str): The kind of track to plot; 'MD' (measured depth) or
            'TVD' (true vertical depth).

    Returns:
        ax.
    """
    # 'bounded' and 'piecewise' are custom matplotlib scales; presumably
    # registered elsewhere in this project -- TODO confirm.
    if kind == 'MD':
        ax.set_yscale('bounded', vmin=md.min(), vmax=md.max())
    elif kind == 'TVD':
        # Convert measured depth to true vertical depth via the well's
        # location survey.
        tvd = self.location.md2tvd(md)
        ax.set_yscale('piecewise', x=tvd, y=md)
    else:
        raise Exception("Kind must be MD or TVD")

    # Grey out the track frame.
    for sp in ax.spines.values():
        sp.set_color('gray')

    # Position the depth labels depending on where the track sits in the
    # grid: right-aligned on the first column, left-aligned on the last,
    # centered (over the track) everywhere else.
    if ax.is_first_col():
        pad = -10
        ax.spines['left'].set_color('none')
        ax.yaxis.set_ticks_position('right')
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('right')
    elif ax.is_last_col():
        pad = -10
        ax.spines['right'].set_color('none')
        ax.yaxis.set_ticks_position('left')
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('left')
    else:
        pad = -30
        for label in ax.get_yticklabels():
            label.set_horizontalalignment('center')

    ax.tick_params(axis='y', colors='gray', labelsize=12, pad=pad)
    # A depth track has no x axis at all.
    ax.set_xticks([])
    ax.set(xticks=[])
    # Tag the axis so other plotting code can recognize depth tracks.
    ax.depth_track = True
    return ax
Private function. Depth track plotting. Args: ax (ax): A matplotlib axis. md (ndarray): The measured depths of the track. kind (str): The kind of track to plot. Returns: ax.
juraj-google-style
def parse_record(cls, vcf_line, sample_names):
    """Alternative constructor that parses VcfRecord from VCF string.

    Aspire to parse/represent the data such that it could be reliably
    round-tripped. (This nicety means INFO fields and FORMAT tags should be
    treated as ordered to avoid shuffling.)

    Args:
        vcf_line: the VCF variant record as a string; tab separated fields,
            trailing newlines are ignored. Must have at least 8 fixed
            fields (through INFO)
        sample_names: a list of sample name strings; these should match the
            VCF header column

    Returns:
        A mutable VcfRecord.
    """
    fields = vcf_line.rstrip('\r\n').split('\t')
    chrom, pos, rid, ref, alt, qual, rfilter, info = fields[:8]
    sample_tag_values = {}
    # FORMAT plus at least one sample column present?
    if len(fields) > 9:
        rformat = fields[8]
        sample_tag_values = VcfRecord._sample_tag_values(sample_names,
                                                         rformat,
                                                         fields[9:])
    return VcfRecord(chrom, pos, ref, alt, rid, qual, rfilter, info,
                     sample_tag_values)
Alternative constructor that parses VcfRecord from VCF string. Aspire to parse/represent the data such that it could be reliably round-tripped. (This nicety means INFO fields and FORMAT tags should be treated as ordered to avoid shuffling.) Args: vcf_line: the VCF variant record as a string; tab separated fields, trailing newlines are ignored. Must have at least 8 fixed fields (through INFO) sample_names: a list of sample name strings; these should match the VCF header column Returns: A mutable VcfRecord.
codesearchnet
def publishMap(self, maps_info, fsInfo=None, itInfo=None):
    """Publishes a list of maps.

    Args:
        maps_info (list): A list of JSON configuration maps to publish.
        fsInfo (list): Optional feature-service results used to resolve
            'Layer' replacement tags.
        itInfo (list): Optional item results used to resolve 'Global'
            replacement tags.

    Returns:
        list: One dict per map containing its 'ReplaceTag' and the
        'MapInfo' returned by ``self._publishMap``, or None when no
        security handler is configured.

    Raises:
        common.ArcRestHelperError: if publishing fails.
    """
    if self.securityhandler is None:
        print ("Security handler required")
        return
    itemInfo = None
    itemId = None
    map_results = None
    replaceInfo = None
    replaceItem = None
    map_info = None
    admin = None
    try:
        admin = arcrest.manageorg.Administration(securityHandler=self._securityHandler)
        map_results = []
        for map_info in maps_info:
            itemInfo = {}
            if 'ReplaceInfo' in map_info:
                replaceInfo = map_info['ReplaceInfo']
            else:
                replaceInfo = None
            if replaceInfo != None:
                for replaceItem in replaceInfo:
                    if replaceItem['ReplaceType'] == 'Layer':
                        # Resolve layer tags against the published feature
                        # services.
                        if fsInfo is not None:
                            for fs in fsInfo:
                                if fs is not None and replaceItem['ReplaceString'] == fs['ReplaceTag']:
                                    replaceItem['ReplaceString'] = fs['FSInfo']['url']
                                    replaceItem['ItemID'] = fs['FSInfo']['itemId']
                                    replaceItem['ItemFolder'] = fs['FSInfo']['folderId']
                                    if 'convertCase' in fs['FSInfo']:
                                        replaceItem['convertCase'] = fs['FSInfo']['convertCase']
                                elif 'ItemID' in replaceItem:
                                    # Bug fix: the original guard read
                                    # `'ItemFolder' in replaceItem == False`,
                                    # which Python chains as
                                    # `('ItemFolder' in replaceItem) and
                                    # (replaceItem == False)` and therefore
                                    # never executed this lookup.
                                    if 'ItemFolder' not in replaceItem:
                                        itemId = replaceItem['ItemID']
                                        itemInfo = admin.content.getItem(itemId=itemId)
                                        if itemInfo.owner:
                                            if itemInfo.owner == self._securityHandler.username and itemInfo.ownerFolder:
                                                replaceItem['ItemFolder'] = itemInfo.ownerFolder
                                            else:
                                                replaceItem['ItemFolder'] = None
                    elif replaceItem['ReplaceType'] == 'Global':
                        # Resolve global tags against previously published
                        # items.
                        if itInfo is not None:
                            for itm in itInfo:
                                if itm is not None:
                                    if replaceItem['ReplaceString'] == itm['ReplaceTag']:
                                        if 'ItemInfo' in itm:
                                            if 'url' in itm['ItemInfo']:
                                                replaceItem['ReplaceString'] = itm['ItemInfo']['url']
            if 'ReplaceTag' in map_info:
                itemInfo = {"ReplaceTag":map_info['ReplaceTag'] }
            else:
                itemInfo = {"ReplaceTag":"{WebMap}" }
            itemInfo['MapInfo'] = self._publishMap(config=map_info, replaceInfo=replaceInfo)
            map_results.append(itemInfo)
            print ("%s webmap created" % itemInfo['MapInfo']['Name'])
        return map_results
    except common.ArcRestHelperError as e:
        raise e
    except Exception as e:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "publishMap",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Null out and delete locals so large responses are collectable
        # immediately (pattern used throughout this module).
        itemInfo = None
        itemId = None
        replaceInfo = None
        replaceItem = None
        map_info = None
        admin = None
        del itemInfo
        del itemId
        del replaceInfo
        del replaceItem
        del map_info
        del admin
        gc.collect()
Publishes a list of maps. Args: maps_info (list): A list of JSON configuration maps to publish. Returns: list: A list of results from :py:meth:`arcrest.manageorg._content.UserItem.updateItem`.
juraj-google-style