code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def greater_than_evaluator(self, index):
    """Evaluate the given greater-than match condition for the user attributes.

    Args:
        index: Index of the condition to be evaluated.

    Returns:
        True if the user attribute value is greater than the condition value,
        False if it is less than or equal to it, or None when either the
        condition value or the attribute value is not a usable finite number.
    """
    name = self.condition_data[index][0]
    threshold = self.condition_data[index][1]
    attribute_value = self.attributes.get(name)

    # The condition itself must carry a finite number to compare against.
    if not validator.is_finite_number(threshold):
        self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(
            self._get_condition_json(index)
        ))
        return None

    # The user attribute must be numeric at all...
    if not self.is_value_a_number(attribute_value):
        self.logger.warning(audience_logs.UNEXPECTED_TYPE.format(
            self._get_condition_json(index), type(attribute_value), name
        ))
        return None

    # ...and finite (NaN/inf are rejected).
    if not validator.is_finite_number(attribute_value):
        self.logger.warning(audience_logs.INFINITE_ATTRIBUTE_VALUE.format(
            self._get_condition_json(index), name
        ))
        return None

    return attribute_value > threshold
Evaluate the given greater than match condition for the user attributes. Args: index: Index of the condition to be evaluated. Returns: Boolean: - True if the user attribute value is greater than the condition value. - False if the user attribute value is less than or equal to the condition value. None: if the condition value isn't finite or the user attribute value isn't finite.
juraj-google-style
def _neg(x, name=None):
    """Compute the numerical negative element-wise, i.e. y = -x.

    Args:
        x: A `Tensor` or `SparseTensor`.
        name: Optional name for the operation.

    Returns:
        A `Tensor` or `SparseTensor` of the same type as `x`.
    """
    # Thin alias that delegates to the public negative op.
    return negative(x, name)
Computes numerical negative value element-wise. I.e., \(y = -x\). Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
github-repos
def RegisterMessage(self, message):
    """Register the given message type in the local database.

    Subsequent calls to GetSymbol() and GetMessages() will return messages
    registered here.

    Args:
        message: a message.Message to be registered.

    Returns:
        The provided message, enabling call chaining.
    """
    descriptor = message.DESCRIPTOR
    # Index the class by its fully-qualified name and make the
    # descriptor pool aware of the descriptor.
    self._classes[descriptor.full_name] = message
    self.pool.AddDescriptor(descriptor)
    return message
Registers the given message type in the local database. Calls to GetSymbol() and GetMessages() will return messages registered here. Args: message: a message.Message, to be registered. Returns: The provided message.
codesearchnet
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """Validate a file against a sha256 or md5 hash.

    Args:
        fpath: Path to the file being validated.
        file_hash: The expected hash string of the file.
        algorithm: Hash algorithm: one of 'auto', 'sha256', or 'md5'.
            The default 'auto' detects the hash algorithm in use.
        chunk_size: Bytes to read at a time, important for large files.

    Returns:
        bool: Whether the file's hash matches `file_hash`.
    """
    hasher = _resolve_hasher(algorithm, file_hash)
    # Return the comparison directly instead of if/else returning
    # True/False; str() normalizes bytes vs str digest representations.
    return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
Validates a file against a sha256 or md5 hash. Args: fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. Returns: Whether the file is valid
github-repos
def copy_script(self, filename, id_=-1):
    """Copy a script to the repo's Scripts subdirectory.

    Scripts are copied as files to a path, or, on a "migrated" JSS, are
    POSTed to the JSS (pass an id to associate the script with an
    existing Script object).

    Args:
        filename: Path of the file to copy.
        id_: Int ID, used only for migrated repos. Default is -1, which
            creates a new Script object.
    """
    connection = self.connection
    is_migrated = "jss" in connection.keys() and connection["jss"].jss_migrated
    if is_migrated:
        self._copy_script_migrated(filename, id_, SCRIPT_FILE_TYPE)
    else:
        destination = os.path.join(
            connection["mount_point"], "Scripts", os.path.basename(filename))
        self._copy(filename, destination)
Copy a script to the repo's Script subdirectory. Scripts are copied as files to a path, or, on a "migrated" JSS, are POSTed to the JSS (pass an id if you wish to associate the script with an existing Script object). Args: filename: Path for file to copy. id_: Int ID, used _only_ for migrated repos. Default is -1, which creates a new Script.
juraj-google-style
def _get_authorization_headers(self) -> dict: auth = base64.encodestring((self.client_id + ':' + self.client_secret).encode('latin-1')).decode('latin-1') auth = auth.replace('\n', '').replace(' ', '') auth = 'Basic {}'.format(auth) headers = {'Authorization': auth} return headers
Constructs and returns the Authorization header for the client app. Args: None Returns: header dict for communicating with the authorization endpoints
juraj-google-style
def query_origin_stack(self):
    """Query the stacks of the origins of the execution calls.

    Returns:
        A `list` with one item per execution call (i.e., per
        `SendTracebacks` request); each item is a traceback built from the
        recorded stack and its id-to-string table.
    """
    return [
        self._code_def_to_traceback(stack, id_to_string)
        for stack, id_to_string in zip(
            self._origin_stacks, self._origin_id_to_strings)
    ]
Query the stack of the origin of the execution call. Returns: A `list` of all tracebacks. Each item corresponds to an execution call, i.e., a `SendTracebacks` request. Each item is a `list` of 3-tuples: (filename, lineno, function_name).
github-repos
def _create_authenticator(a_service):
    """Create an instance of :class:`google.auth.tokens.Authenticator`.

    Args:
        a_service: a servicemanagement `Service` instance.

    Returns:
        An Authenticator, or None when the service has no authentication
        configured (in which case authentication checks are disabled).

    Raises:
        ValueError: if a_service is not a Service instance.
    """
    if not isinstance(a_service, sm_messages.Service):
        raise ValueError(u"service is None or not an instance of Service")

    authentication = a_service.authentication
    if not authentication:
        _logger.info(u"authentication is not configured in service, "
                     u"authentication checks will be disabled")
        return

    provider_ids_by_issuer = {}
    uri_configs_by_issuer = {}
    for provider in authentication.providers:
        jwks_uri = provider.jwksUri
        # A missing jwks URI means OpenID discovery should be used.
        uri_configs_by_issuer[provider.issuer] = suppliers.IssuerUriConfig(
            jwks_uri is None, jwks_uri)
        provider_ids_by_issuer[provider.issuer] = provider.id

    key_uri_supplier = suppliers.KeyUriSupplier(uri_configs_by_issuer)
    jwks_supplier = suppliers.JwksSupplier(key_uri_supplier)
    return tokens.Authenticator(provider_ids_by_issuer, jwks_supplier)
Create an instance of :class:`google.auth.tokens.Authenticator`. Args: a_service (:class:`endpoints_management.gen.servicemanagement_v1_messages.Service`): a service instance
juraj-google-style
def log_sigmoid(x):
    """Logarithm of the sigmoid activation function.

    It is defined as `f(x) = log(1 / (1 + exp(-x)))`.

    Args:
        x: Input tensor.

    Returns:
        Tensor with log-sigmoid applied element-wise.
    """
    return ops.log_sigmoid(x)
Logarithm of the sigmoid activation function. It is defined as `f(x) = log(1 / (1 + exp(-x)))`. Args: x: Input tensor.
github-repos
def _hexdecode(hexstring):
    """Convert a hex encoded string to a byte string.

    For example '4A' will return 'J', and '04' will return '\\x04'
    (which has length 1).

    Args:
        hexstring (str): For example 'A3' or 'A3B4'. Must be of even
            length. Allowed characters are '0'-'9', 'a'-'f' and 'A'-'F'
            (no spaces).

    Returns:
        A string of half the length, with characters corresponding to all
        0-255 values for each byte.

    Raises:
        TypeError, ValueError
    """
    _checkString(hexstring, description='hexstring')

    if len(hexstring) % 2 != 0:
        raise ValueError(
            'The input hexstring must be of even length. Given: {!r}'.format(hexstring))

    if sys.version_info[0] > 2:
        # Python 3: round-trip through bytes and latin1 so every 0-255
        # byte value maps to one character.
        raw = bytes(hexstring, 'latin1')
        try:
            return str(binascii.unhexlify(raw), encoding='latin1')
        except binascii.Error as err:
            raise TypeError(
                'Hexdecode reported an error: {!s}. Input hexstring: {}'.format(
                    err.args[0], hexstring))
    else:
        # Python 2: str objects support the 'hex' codec directly.
        try:
            return hexstring.decode('hex')
        except TypeError as err:
            raise TypeError(
                'Hexdecode reported an error: {}. Input hexstring: {}'.format(
                    err.message, hexstring))
Convert a hex encoded string to a byte string. For example '4A' will return 'J', and '04' will return ``'\\x04'`` (which has length 1). Args: hexstring (str): Can be for example 'A3' or 'A3B4'. Must be of even length. Allowed characters are '0' to '9', 'a' to 'f' and 'A' to 'F' (not space). Returns: A string of half the length, with characters corresponding to all 0-255 values for each byte. Raises: TypeError, ValueError
codesearchnet
def ToJson(self, auto_hex=True):
    """Convert a ContractParameter instance to a json representation.

    Args:
        auto_hex (bool): when True, ByteArray values are hex-encoded;
            otherwise the raw value is emitted unchanged.

    Returns:
        dict: a dictionary representation of the contract parameter.
    """
    jsn = {}
    jsn['type'] = str(ContractParameterType(self.Type))
    # Dispatch on the parameter type; each type serializes 'value'
    # differently.
    if (self.Type == ContractParameterType.Signature):
        jsn['value'] = self.Value.hex()
    elif (self.Type == ContractParameterType.ByteArray):
        if auto_hex:
            jsn['value'] = self.Value.hex()
        else:
            jsn['value'] = self.Value
    elif (self.Type == ContractParameterType.Boolean):
        jsn['value'] = self.Value
    elif (self.Type == ContractParameterType.String):
        jsn['value'] = str(self.Value)
    elif (self.Type == ContractParameterType.Integer):
        jsn['value'] = self.Value
    elif (self.Type == ContractParameterType.PublicKey):
        jsn['value'] = self.Value.ToString()
    elif (self.Type in [ContractParameterType.Hash160, ContractParameterType.Hash256]):
        jsn['value'] = self.Value.ToString()
    elif (self.Type == ContractParameterType.Array):
        # Recurse into nested parameters, skipping falsy entries.
        res = []
        for item in self.Value:
            if item:
                res.append(item.ToJson(auto_hex=auto_hex))
        jsn['value'] = res
    elif (self.Type == ContractParameterType.InteropInterface):
        try:
            jsn['value'] = self.Value.ToJson()
        except Exception as e:
            # Interop values may not be JSON-serializable; omit 'value'.
            pass
    return jsn
Converts a ContractParameter instance to a json representation Returns: dict: a dictionary representation of the contract parameter
codesearchnet
def add(a, b, allow_overflow=False):
    """Adds two instances of `Money`.

    Args:
        a: one money value.
        b: another money value.
        allow_overflow: when True an overflowing sum saturates at the
            extreme representable value instead of raising.

    Returns:
        `Money`: an instance of Money holding the sum.

    Raises:
        ValueError: if the inputs are not Money or do not share a
            currency code.
        OverflowError: if the sum overflows and allow_overflow is not True.
    """
    for m in (a, b):
        if (not isinstance(m, sc_messages.Money)):
            raise ValueError((u'Inputs should be of type %s' % (sc_messages.Money,)))
    if (a.currencyCode != b.currencyCode):
        raise ValueError(u'Money values need the same currency to be summed')
    # Sum the fractional (nano) parts first; the carry feeds the units sum.
    (nano_carry, nanos_sum) = _sum_nanos(a, b)
    units_sum_no_carry = (a.units + b.units)
    units_sum = (units_sum_no_carry + nano_carry)
    # Normalize so that units and nanos carry the same sign.
    if ((units_sum > 0) and (nanos_sum < 0)):
        units_sum -= 1
        nanos_sum += _BILLION
    elif ((units_sum < 0) and (nanos_sum > 0)):
        units_sum += 1
        nanos_sum -= _BILLION
    sign_a = _sign_of(a)
    sign_b = _sign_of(b)
    # Overflow is only possible when both operands have the same sign.
    if ((sign_a > 0) and (sign_b > 0) and (units_sum >= _INT64_MAX)):
        if (not allow_overflow):
            raise OverflowError(u'Money addition positive overflow')
        else:
            # Saturate at the maximum representable amount.
            return sc_messages.Money(units=_INT64_MAX, nanos=MAX_NANOS, currencyCode=a.currencyCode)
    elif ((sign_a < 0) and (sign_b < 0) and ((units_sum_no_carry <= (- _INT64_MAX)) or (units_sum <= (- _INT64_MAX)))):
        if (not allow_overflow):
            raise OverflowError(u'Money addition negative overflow')
        else:
            # Saturate at the minimum representable amount.
            return sc_messages.Money(units=_INT64_MIN, nanos=(- MAX_NANOS), currencyCode=a.currencyCode)
    else:
        return sc_messages.Money(units=units_sum, nanos=nanos_sum, currencyCode=a.currencyCode)
Adds two instances of `Money`. Args: a (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): one money value b (:class:`endpoints_management.gen.servicecontrol_v1_messages.Money`): another money value allow_overflow: determines if the addition is allowed to overflow Return: `Money`: an instance of Money Raises: ValueError: if the inputs do not have the same currency code OverflowError: if the sum overflows and allow_overflow is not `True`
codesearchnet
def _remove_session_callback(self, callback_obj, originator):
    """Remove a callback added earlier with ``add_periodic_callback``,
    ``add_timeout_callback``, or ``add_next_tick_callback``.

    Args:
        callback_obj: the session callback object to remove.
        originator: the user callable the callback object was registered
            under.

    Returns:
        None

    Raises:
        ValueError: if the callback already ran or was already removed.
    """
    try:
        callback_objs = [callback_obj]
        self._session_callbacks.remove(callback_obj)
        # Also drop the callback from the per-callable index, pruning
        # callables whose object list becomes empty.
        for (cb, cb_objs) in list(self._callback_objs_by_callable[originator].items()):
            try:
                cb_objs.remove(callback_obj)
                if (not cb_objs):
                    del self._callback_objs_by_callable[originator][cb]
            except KeyError:
                pass
    except KeyError:
        # The set .remove() raised: the callback is gone already.
        raise ValueError('callback already ran or was already removed, cannot be removed again')
    # Notify listeners about each removed callback object.
    for callback_obj in callback_objs:
        self._trigger_on_change(SessionCallbackRemoved(self, callback_obj))
Remove a callback added earlier with ``add_periodic_callback``, ``add_timeout_callback``, or ``add_next_tick_callback``. Returns: None Raises: ValueError, if the callback already ran or was already removed
codesearchnet
def get_env(key, *default, **kwargs):
    """Return an env var, coerced and defaulted as requested.

    Parent of all the other get_foo functions; unpacks args/kwargs into
    the values that _get_env expects.

    Args:
        key: string, the env var name to look up.
        *default: optional single value used if the env var does not
            exist. When omitted the var is considered required and
            _get_env raises if it is absent.

    Keyword Args:
        coerce: optional callable applied to the raw value (e.g. to cast
            strings to builtin types).

    Returns:
        The env var value, coerced if requested, or the default.
    """
    assert len(default) in (0, 1), 'Too many args supplied.'
    coerce_func = kwargs.get('coerce', lambda value: value)
    is_required = len(default) == 0
    fallback = None if is_required else default[0]
    return _get_env(key, default=fallback, coerce=coerce_func, required=is_required)
Return env var. This is the parent function of all other get_foo functions, and is responsible for unpacking args/kwargs into the values that _get_env expects (it is the root function that actually interacts with environ). Args: key: string, the env var name to look up. default: (optional) the value to use if the env var does not exist. If this value is not supplied, then the env var is considered to be required, and a RequiredSettingMissing error will be raised if it does not exist. Kwargs: coerce: a func that may be supplied to coerce the value into something else. This is used by the default get_foo functions to cast strings to builtin types, but could be a function that returns a custom class. Returns the env var, coerced if required, and a default if supplied.
codesearchnet
def delete_as(access_token, subscription_id, resource_group, as_name):
    """Delete an availability set.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.
        as_name (str): Name of the availability set.

    Returns:
        HTTP response.
    """
    endpoint = (
        '{}/subscriptions/{}/resourceGroups/{}'
        '/providers/Microsoft.Compute/availabilitySets/{}'
        '?api-version={}'
    ).format(get_rm_endpoint(), subscription_id, resource_group, as_name,
             COMP_API)
    return do_delete(endpoint, access_token)
Delete availability set. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. as_name (str): Name of the availability set. Returns: HTTP response.
codesearchnet
def handleresult(self, r):
    """Handle HTTP error codes for the given request result.

    Arguments:
        r: The request result.

    Returns:
        The result unchanged when the status code is OK.

    Raises:
        AuthenticationError: on the appropriate 4xx errors.
        ServerError: if the response is not OK (status code > 300).
    """
    if r.status_code >= 400 and r.status_code < 500:
        msg = r.json()
        raise AuthenticationError(
            str(msg['code']) + ': ' + msg['msg'] + ' (' + msg['ref'] + ')')
    elif r.status_code > 300:
        err = None
        try:
            msg = r.json()
            err = ServerError(
                str(msg['code']) + ': ' + msg['msg'] + ' (' + msg['ref'] + ')')
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while decoding the error payload.
        except Exception:
            raise ServerError(
                'Server returned error, but did not give a valid error message')
        raise err
    return r
Handles HTTP error codes for the given request Raises: AuthenticationError on the appropriate 4** errors ServerError if the response is not an ok (2**) Arguments: r -- The request result
codesearchnet
def check_memo(self, task_id, task):
    """Hash the task and its inputs and look the hash up in the memo table.

    The result is a tuple indicating whether a memo exists and the
    result itself, since a None result is a legitimate cached value and
    would otherwise be ambiguous.

    Args:
        task_id: id of the task, used for logging only.
        task (dict): task record from the dfk.tasks table.

    Returns:
        Tuple of:
            present (bool or None): whether the hash is in the
                memo_lookup_table (None when memoization is disabled).
            result: the memoized result if present, else None.

    Side effect: sets task['hashsum'] to the unique hash for the
    func+inputs (or None when memoization is disabled).
    """
    if not self.memoize or not task['memoize']:
        task['hashsum'] = None
        return None, None

    hashsum = self.make_hash(task)
    present = hashsum in self.memo_lookup_table
    result = None
    if present:
        result = self.memo_lookup_table[hashsum]
        logger.info('Task %s using result from cache', task_id)

    task['hashsum'] = hashsum
    return present, result
Create a hash of the task and its inputs and check the lookup table for this hash. If present, the results are returned. The result is a tuple indicating whether a memo exists and the result, since a Null result is possible and could be confusing. This seems like a reasonable option without relying on a cache_miss exception. Args: - task(task) : task from the dfk.tasks table Returns: Tuple of the following: - present (Bool): Is this present in the memo_lookup_table - Result (Py Obj): Result of the function if present in table This call will also set task['hashsum'] to the unique hashsum for the func+inputs.
codesearchnet
def _filter_match(self, span: span, relations: Dict, patterns: List) -> bool:
    """Filter the match result according to prefix, suffix, min, max, shape.

    Args:
        span: the matched span.
        relations: maps pattern index to the token range within the span.
        patterns: list of patterns whose constraints must hold.

    Returns:
        bool: True when every pattern's constraints are satisfied.
    """
    for pattern_id, a_pattern in enumerate(patterns):
        token_range = relations[pattern_id]
        if not token_range:
            # No tokens matched this pattern; nothing to check.
            continue
        tokens = list(span[token_range[0]:token_range[1]])
        if a_pattern.type == "word":
            if not self._pre_suf_fix_filter(tokens, a_pattern.prefix, a_pattern.suffix):
                return False
        if a_pattern.type == "shape":
            # Short-circuit: affix filter only runs when shape passes.
            if not (self._full_shape_filter(tokens, a_pattern.full_shape)
                    and self._pre_suf_fix_filter(tokens, a_pattern.prefix, a_pattern.suffix)):
                return False
        if a_pattern.type == "number":
            if not self._min_max_filter(tokens, a_pattern.min, a_pattern.max):
                return False
    return True
Filter the match result according to prefix, suffix, min, max ... Args: span: span relations: Dict patterns: List of pattern Returns: bool
juraj-google-style
def GetFileEntryByPathSpec(self, path_spec):
    """Retrieve a file entry for a path specification.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        VShadowFileEntry: file entry or None if not available.
    """
    store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)

    if store_index is None:
        # Without a store index only the virtual root is addressable.
        location = getattr(path_spec, 'location', None)
        if location is None or location != self.LOCATION_ROOT:
            return None
        return vshadow_file_entry.VShadowFileEntry(
            self._resolver_context, self, path_spec, is_root=True,
            is_virtual=True)

    if not 0 <= store_index < self._vshadow_volume.number_of_stores:
        return None
    return vshadow_file_entry.VShadowFileEntry(
        self._resolver_context, self, path_spec)
Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification. Returns: VShadowFileEntry: file entry or None if not available.
juraj-google-style
def _call_partitioner(partitioner, shape, dtype): if not shape.is_fully_defined(): raise ValueError('Shape of a new partitioned variable must be fully defined, but instead was %s.' % (shape,)) if shape.ndims < 1: raise ValueError('A partitioned Variable must have rank at least 1, shape: %s' % shape) slicing = partitioner(shape=shape, dtype=dtype) if not isinstance(slicing, collections_abc.Sequence): raise ValueError('Partitioner must return a sequence, but saw: %s' % slicing) if len(slicing) != shape.ndims: raise ValueError("Partitioner returned a partition list that does not match the Variable's rank: %s vs. %s" % (slicing, shape)) if any((p < 1 for p in slicing)): raise ValueError('Partitioner returned zero partitions for some axes: %s' % slicing) if sum((p > 1 for p in slicing)) > 1: raise ValueError('Can only slice a variable along one dimension: shape: %s, partitioning: %s' % (shape, slicing)) return slicing
Call partitioner validating its inputs/output. Args: partitioner: a function mapping `Tensor` shape and dtype to a list of partitions. shape: shape of the `Tensor` to partition, must have at least two dimensions. dtype: dtype of the elements in the `Tensor`. Returns: A list with elements >=1 and exactly one >1. The index of that element corresponds to the partitioning axis.
github-repos
def save_scan_plot(self, filename="scan.pdf", img_format="pdf", coords=None):
    """Save a matplotlib plot of the potential energy surface to a file.

    Args:
        filename: Filename to write to.
        img_format: Image format to use. Defaults to pdf.
        coords: internal coordinate name to use as abscissa.
    """
    plot = self.get_scan_plot(coords)
    plot.savefig(filename, format=img_format)
Save matplotlib plot of the potential energy surface to a file. Args: filename: Filename to write to. img_format: Image format to use. Defaults to pdf. coords: internal coordinate name to use as abscissa.
juraj-google-style
def RegisterPlugin(cls, plugin_class):
    """Register a preprocess plugin class.

    Args:
        plugin_class (type): preprocess plugin class.

    Raises:
        KeyError: if a plugin class is already set for the corresponding
            name.
    """
    name = getattr(
        plugin_class, 'ARTIFACT_DEFINITION_NAME', plugin_class.__name__)
    name = name.lower()
    if (name in cls._plugins):
        raise KeyError('Artifact plugin class already set for name: {0:s}.'.format(name))

    preprocess_plugin = plugin_class()
    cls._plugins[name] = preprocess_plugin

    # Additionally index the plugin by capability so lookups can target
    # the matching registry directly.
    if isinstance(
            preprocess_plugin, interface.FileSystemArtifactPreprocessorPlugin):
        cls._file_system_plugins[name] = preprocess_plugin
    elif isinstance(
            preprocess_plugin, interface.KnowledgeBasePreprocessorPlugin):
        cls._knowledge_base_plugins[name] = preprocess_plugin
    elif isinstance(
            preprocess_plugin,
            interface.WindowsRegistryKeyArtifactPreprocessorPlugin):
        cls._windows_registry_plugins[name] = preprocess_plugin
Registers an preprocess plugin class. Args: plugin_class (type): preprocess plugin class. Raises: KeyError: if plugin class is already set for the corresponding name. TypeError: if the source type of the plugin class is not supported.
codesearchnet
def Open(self, file_object):
    """Open the Windows Registry file using a file-like object.

    Args:
        file_object (file): file-like object.

    Returns:
        bool: True if successful or False if not.
    """
    # Keep a reference so the file object outlives the open call.
    self._file_object = file_object
    self._regf_file.open_file_object(file_object)
    return True
Opens the Windows Registry file using a file-like object. Args: file_object (file): file-like object. Returns: bool: True if successful or False if not.
codesearchnet
def _get_updated_values(before_values, after_values): assert (before_values.keys() == after_values.keys()) return dict([(k, [before_values[k], after_values[k]]) for k in before_values.keys() if (before_values[k] != after_values[k])])
Get updated values from 2 dicts of values Args: before_values (dict): values before update after_values (dict): values after update Returns: dict: a diff dict with key is field key, value is tuple of (before_value, after_value)
codesearchnet
def _WriteSerializedAttributeContainerList(self, container_type):
    """Writes a serialized attribute container list to the SQLite store.

    Args:
        container_type (str): attribute container type.
    """
    # Events are staged on a timestamp-ordered heap; all other container
    # types are staged on a plain serialized list.
    if container_type == self._CONTAINER_TYPE_EVENT:
        if not self._serialized_event_heap.data_size:
            return
        number_of_attribute_containers = (
            self._serialized_event_heap.number_of_events)
    else:
        container_list = self._GetSerializedAttributeContainerList(container_type)
        if not container_list.data_size:
            return
        number_of_attribute_containers = (
            container_list.number_of_attribute_containers)

    if self._serializers_profiler:
        self._serializers_profiler.StartTiming('write')

    if container_type == self._CONTAINER_TYPE_EVENT:
        query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'
    else:
        query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type)

    # Drain the staged containers into parameter tuples for executemany.
    values_tuple_list = []
    for _ in range(number_of_attribute_containers):
        if container_type == self._CONTAINER_TYPE_EVENT:
            timestamp, serialized_data = self._serialized_event_heap.PopEvent()
        else:
            serialized_data = container_list.PopAttributeContainer()

        if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
            compressed_data = zlib.compress(serialized_data)
            serialized_data = sqlite3.Binary(compressed_data)
        else:
            compressed_data = ''

        if self._storage_profiler:
            self._storage_profiler.Sample(
                'write', container_type, len(serialized_data),
                len(compressed_data))

        if container_type == self._CONTAINER_TYPE_EVENT:
            values_tuple_list.append((timestamp, serialized_data))
        else:
            values_tuple_list.append((serialized_data, ))

    self._cursor.executemany(query, values_tuple_list)

    if self._serializers_profiler:
        self._serializers_profiler.StopTiming('write')

    # Reset the staging structures now that everything is persisted.
    if container_type == self._CONTAINER_TYPE_EVENT:
        self._serialized_event_heap.Empty()
    else:
        container_list.Empty()
Writes a serialized attribute container list. Args: container_type (str): attribute container type.
juraj-google-style
def get_sns_topic_arn(topic_name, account, region):
    """Get an SNS topic ARN by name.

    Args:
        topic_name (str): Name of the topic to look up. May already be a
            full ARN, in which case it is returned unchanged.
        account (str): Environment, e.g. dev.
        region (str): Region name, e.g. us-east-1.

    Returns:
        str: ARN for the requested topic name.

    Raises:
        SNSTopicNotFound: when no topic with that name exists.
    """
    # Already a full ARN: nothing to look up.
    if topic_name.count(':') == 5 and topic_name.startswith('arn:aws:sns:'):
        return topic_name

    session = boto3.Session(profile_name=account, region_name=region)
    topics = session.client('sns').list_topics()['Topics']

    for topic in topics:
        topic_arn = topic['TopicArn']
        # The topic name is the last ':'-separated ARN segment.
        if topic_arn.split(':')[-1] == topic_name:
            return topic_arn

    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))
Get SNS topic ARN. Args: topic_name (str): Name of the topic to lookup. account (str): Environment, e.g. dev region (str): Region name, e.g. us-east-1 Returns: str: ARN for requested topic name
juraj-google-style
def start_listing(self, request: Request) -> ListingResponse:
    """Fetch a file listing.

    Args:
        request: Request.

    Returns:
        A listing response populated with the initial data connection
        reply. Once the response is received, call
        :meth:`download_listing`.

    Coroutine.
    """
    if self._session_state != SessionState.ready:
        raise RuntimeError('Session not ready')
    response = ListingResponse()
    yield from self._prepare_fetch(request, response)
    yield from self._open_data_stream()
    mlsd_command = Command('MLSD', self._request.file_path)
    list_command = Command('LIST', self._request.file_path)
    # Prefer the machine-readable MLSD listing; fall back to LIST when
    # the server rejects MLSD as unknown or unimplemented.
    try:
        yield from self._begin_stream(mlsd_command)
        self._listing_type = 'mlsd'
    except FTPServerError as error:
        if error.reply_code in (ReplyCodes.syntax_error_command_unrecognized, ReplyCodes.command_not_implemented):
            self._listing_type = None
        else:
            raise
    if not self._listing_type:
        # MLSD unsupported: use the legacy LIST command instead.
        yield from self._begin_stream(list_command)
        self._listing_type = 'list'
    _logger.debug('Listing type is %s', self._listing_type)
    self._session_state = SessionState.directory_request_sent
    return response
Fetch a file listing. Args: request: Request. Returns: A listing response populated with the initial data connection reply. Once the response is received, call :meth:`download_listing`. Coroutine.
juraj-google-style
def __init__(self, script_type, default_shell=None, run_dir=None, debug=False):
    """Constructor.

    Args:
        script_type: string, the metadata script type to run.
        default_shell: string, the default shell to execute the script.
        run_dir: string, base directory location of the temporary directory.
        debug: bool, True if debug output should write to the console.
    """
    self.script_type = script_type
    self.default_shell = default_shell
    self.logger = logger.Logger(
        name='%s-script' % self.script_type, debug=debug,
        facility=logging.handlers.SysLogHandler.LOG_DAEMON)
    self.retriever = script_retriever.ScriptRetriever(self.logger, script_type)
    self.executor = script_executor.ScriptExecutor(
        self.logger, script_type, default_shell=default_shell)
    # Script execution is kicked off immediately on construction.
    self._RunScripts(run_dir=run_dir)
Constructor. Args: script_type: string, the metadata script type to run. default_shell: string, the default shell to execute the script. run_dir: string, the base directory location of the temporary directory. debug: bool, True if debug output should write to the console.
juraj-google-style
def preprocess_frame(frame):
    """Preprocess a frame.

    1. Converts [0, 255] pixel values to [-0.5, 0.5].
    2. Adds uniform noise via uniform binning correction.

    Args:
        frame: 3-D Tensor representing pixels.

    Returns:
        frame: 3-D Tensor with values in [-0.5, 0.5].
    """
    centered = common_layers.convert_rgb_to_real(frame) - 0.5
    corrected, _ = glow_ops.uniform_binning_correction(centered)
    return corrected
Preprocess frame. 1. Converts [0, 255] to [-0.5, 0.5] 2. Adds uniform noise. Args: frame: 3-D Tensor representing pixels. Returns: frame: 3-D Tensor with values in between [-0.5, 0.5]
juraj-google-style
def match_date(date):
    """Check whether a string is a well-formed YYYY-MM-DD style date.

    Accepts years 19xx/20xx, months 01-12 and days 01-31, separated by
    '-', ' ', '/' or '.'. This validates the format only, not calendar
    correctness (e.g. '2020-02-31' passes).

    Args:
        date (str): candidate date string.

    Returns:
        bool: True when the whole string matches the date format.
    """
    date_pattern = re.compile(
        r'^(19|20)\d\d[- /.](0[1-9]|1[012])[- /.](0[1-9]|[12][0-9]|3[01])$')
    # The trailing '$' rejects strings with trailing garbage, which the
    # previous unanchored pattern accepted (e.g. '2020-01-01abc').
    return bool(date_pattern.match(date))
Check if a string is a valid date Args: date(str) Returns: bool
codesearchnet
def get_tri_area(pts):
    """Compute the area of the triangle defined by three points.

    Args:
        pts: [a, b, c], three points, each an (x, y, z) coordinate triple.

    Returns:
        float: area of the triangle.
    """
    a, b, c = pts[0], pts[1], pts[2]
    v1 = np.array(b) - np.array(a)
    v2 = np.array(c) - np.array(a)
    # Area is half the magnitude of the cross product. Use numpy directly:
    # scipy no longer re-exports numpy functions such as `sp.cross`.
    return abs(np.linalg.norm(np.cross(v1, v2)) / 2)
Given a list of coords for 3 points, Compute the area of this triangle. Args: pts: [a, b, c] three points
codesearchnet
def element_spec(self):
    """The type specification of an element of this optional.

    Returns:
        A (nested) structure of `tf.TypeSpec` objects matching the
        structure of an element of this optional, specifying the type of
        individual components.

    Raises:
        NotImplementedError: must be overridden by subclasses.
    """
    raise NotImplementedError('Optional.element_spec')
The type specification of an element of this optional. >>> optional = tf.experimental.Optional.from_value(42) >>> print(optional.element_spec) tf.TensorSpec(shape=(), dtype=tf.int32, name=None) Returns: A (nested) structure of `tf.TypeSpec` objects matching the structure of an element of this optional, specifying the type of individual components.
github-repos
def to_json(self, **kwargs) -> JSONValueType:
    """Returns a plain Python value as a representation for this object.

    Plain Python values are the basic types that can be serialized into
    JSON, e.g: ``bool``, ``int``, ``float``, ``str``, ``dict`` (with
    string keys), ``list``, ``tuple``, where container types hold plain
    Python values.

    Args:
        **kwargs: Keyword arguments as flags to control JSON conversion.

    Returns:
        A plain Python value.
    """
Returns a plain Python value as a representation for this object. A plain Python value are basic python types that can be serialized into JSON, e.g: ``bool``, ``int``, ``float``, ``str``, ``dict`` (with string keys), ``list``, ``tuple`` where the container types should have plain Python values as their values. Args: **kwargs: Keyword arguments as flags to control JSON conversion. Returns: A plain Python value.
github-repos
def create_lease_object_from_subnet(self, subnet):
    """Create a Lease from a subnet given in dotted-decimal format.

    The store's CIDR is appended when `subnet` has none, e.g.
    '192.168.200.0' becomes '192.168.200.0/24'.

    Args:
        subnet (str): the subnet, with or without a '/cidr' suffix.

    Returns:
        Lease: object representing the requested subnet.

    Raises:
        LagoSubnetLeaseOutOfRangeException: if the subnet is out of the
            range of the store.
        LagoSubnetLeaseMalformedAddrException: if the subnet is malformed.
    """
    if '/' not in subnet:
        subnet = '{}/{}'.format(subnet, self._cidr)
    # AddrFormatError from the range check signals a malformed address.
    try:
        if not self.is_leasable_subnet(subnet):
            raise LagoSubnetLeaseOutOfRangeException(
                subnet, self.get_allowed_range()
            )
    except AddrFormatError:
        raise LagoSubnetLeaseMalformedAddrException(subnet)
    return Lease(store_path=self.path, subnet=subnet)
Create a lease from ip in a dotted decimal format, (for example `192.168.200.0/24`). the _cidr will be added if not exist in `subnet`. Args: subnet (str): The value of the third octet Returns: Lease: Lease object which represents the requested subnet. Raises: LagoSubnetLeaseOutOfRangeException: If the resultant subnet is malformed or out of the range of the store.
juraj-google-style
def absl_to_standard(level):
    """Converts an integer level from the absl value to the standard value.

    Args:
        level: int, an absl.logging level.

    Raises:
        TypeError: Raised when level is not an integer.

    Returns:
        The corresponding integer level for use in standard logging.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # Clamp anything below the FATAL bound to FATAL.
    level = max(level, ABSL_FATAL)
    if level <= ABSL_DEBUG:
        return ABSL_TO_STANDARD[level]
    # Levels above DEBUG map to values below the standard DEBUG level.
    return STANDARD_DEBUG - level + 1
Converts an integer level from the absl value to the standard value. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in standard logging.
juraj-google-style
def round_model(model: typing.Dict[str, typing.Dict[str, float]], scale: int) -> typing.Dict[str, typing.Dict[str, int]]:
    """Rounds the scores in the model to integers after scaling.

    Entries whose scaled score truncates to 0 are dropped, as are feature
    groups left empty as a result.

    Args:
        model (Dict[str, Dict[str, float]]): The model to round scores.
        scale (int): A scale factor to multiply scores.

    Returns:
        model_rounded (Dict[str, Dict[str, int]]): The rounded model.
    """
    model_rounded: typing.Dict[str, typing.Dict[str, int]] = {}
    for feature_group, features in model.items():
        kept = {
            feature_content: int(score * scale)
            for feature_content, score in features.items()
            if int(score * scale) != 0
        }
        # Groups with only zero-valued scores are omitted entirely.
        if kept:
            model_rounded[feature_group] = kept
    return model_rounded
Rounds the scores in the model to integer after scaling. Args: model (Dict[str, Dict[str, float]]): The model to round scores. scale (int, optional): A scale factor to multiply scores. Returns: model_rounded (Dict[str, Dict[str, int]]) The rounded model.
github-repos
def ed25519_public_key_to_string(key):
    """Convert an ed25519 public key to a base64-encoded string.

    Args:
        key (Ed25519PublicKey): the key to convert.

    Returns:
        str: the key representation as a str.
    """
    raw_bytes = key.public_bytes(
        encoding=serialization.Encoding.Raw,
        format=serialization.PublicFormat.Raw,
    )
    return base64.b64encode(raw_bytes, None).decode('utf-8')
Convert an ed25519 public key to a base64-encoded string. Args: key (Ed25519PublicKey): the key to write to the file. Returns: str: the key representation as a str
juraj-google-style
def variables_accessed(variables):
    """Notify all tapes in the stack that variables have been accessed.

    Only trainable variables are marked as accessed.

    Args:
        variables: iterable of variables to mark as accessed.
    """
    accessed = [
        var
        for variable in variables if variable.trainable
        for var in _variables_override(variable)
    ]
    for var in accessed:
        pywrap_tfe.TFE_Py_TapeVariableAccessed(var)
        pywrap_tfe.TFE_Py_VariableWatcherVariableAccessed(var)
Notifies all tapes in the stack that variables have been accessed. Only trainable variables are marked as accessed. Args: variables: iterable of variables to mark as accessed.
github-repos
def get_speaker_info(self, refresh=False, timeout=None):
    """Get information about the Sonos speaker.

    Arguments:
        refresh (bool): Refresh the speaker info cache.
        timeout: How long to wait for the server to send data before
            giving up, as a float, or a `(connect timeout, read timeout)`
            tuple e.g. (3, 5). Default is no timeout.

    Returns:
        dict: Information about the Sonos speaker, such as the UID,
        MAC Address, and Zone Name, or None when the device description
        cannot be parsed.
    """
    if self.speaker_info and not refresh:
        return self.speaker_info

    # NOTE(review): the URL literal was garbled in the source
    # ('http: ':1400/...'); reconstructed as the standard device
    # description endpoint on the speaker's address -- confirm upstream.
    response = requests.get(
        'http://{}:1400/xml/device_description.xml'.format(self.ip_address),
        timeout=timeout)
    dom = XML.fromstring(response.content)
    device = dom.find('{urn:schemas-upnp-org:device-1-0}device')
    if device is None:
        return None

    def _text(tag):
        # Helper for namespaced findtext lookups on the device element.
        return device.findtext('{urn:schemas-upnp-org:device-1-0}' + tag)

    self.speaker_info['zone_name'] = _text('roomName')
    self.speaker_info['player_icon'] = _text(
        'iconList/{urn:schemas-upnp-org:device-1-0}icon/'
        '{urn:schemas-upnp-org:device-1-0}url')
    self.speaker_info['uid'] = self.uid
    self.speaker_info['serial_number'] = _text('serialNum')
    self.speaker_info['software_version'] = _text('softwareVersion')
    self.speaker_info['hardware_version'] = _text('hardwareVersion')
    self.speaker_info['model_number'] = _text('modelNumber')
    self.speaker_info['model_name'] = _text('modelName')
    self.speaker_info['display_version'] = _text('displayVersion')
    # The first segment of the serial number is the MAC address.
    self.speaker_info['mac_address'] = (
        self.speaker_info['serial_number'].split(':')[0])
    return self.speaker_info
Get information about the Sonos speaker. Arguments: refresh(bool): Refresh the speaker info cache. timeout: How long to wait for the server to send data before giving up, as a float, or a `(connect timeout, read timeout)` tuple e.g. (3, 5). Default is no timeout. Returns: dict: Information about the Sonos speaker, such as the UID, MAC Address, and Zone Name.
juraj-google-style
def _construct_key(self, rule_id: str, spacy_rule_id:int) -> int: hash_key = (rule_id, spacy_rule_id) hash_v = hash(hash_key) + sys.maxsize + 1 self._hash_map[hash_v] = hash_key return hash_v
Use a mapping to store the information about rule_id for each matches, create the mapping key here Args: rule_id: str spacy_rule_id:int Returns: int
juraj-google-style
def update_in_hdx(self):
    """Check if user exists in HDX and if so, update the user.

    The 'capacity' field is not part of the user update payload, so it is
    removed before the call and restored afterwards.

    Returns:
        None
    """
    capacity = self.data.get('capacity')
    if capacity is not None:
        # Temporarily strip 'capacity'; it is restored below.
        del self.data['capacity']
    self._update_in_hdx('user', 'id')
    if capacity is not None:
        self.data['capacity'] = capacity
Check if user exists in HDX and if so, update user Returns: None
codesearchnet
def Scan(self, scan_context, auto_recurse=True, scan_path_spec=None):
    """Scan for supported formats.

    Args:
        scan_context (SourceScannerContext): source scanner context.
        auto_recurse (Optional[bool]): True if the scan should
            automatically recurse as far as possible.
        scan_path_spec (Optional[PathSpec]): path specification indicating
            where the scanner should continue scanning; None means the
            scanner starts with the sources.

    Raises:
        ValueError: if the scan context is invalid.
    """
    if not scan_context:
        raise ValueError('Invalid scan context.')

    scan_context.updated = False

    scan_node = (
        scan_context.GetScanNode(scan_path_spec) if scan_path_spec
        else scan_context.GetUnscannedScanNode())
    if scan_node:
        self._ScanNode(scan_context, scan_node, auto_recurse=auto_recurse)
Scans for supported formats. Args: scan_context (SourceScannerContext): source scanner context. auto_recurse (Optional[bool]): True if the scan should automatically recurse as far as possible. scan_path_spec (Optional[PathSpec]): path specification to indicate where the source scanner should continue scanning, where None indicates the scanner will start with the sources. Raises: ValueError: if the scan context is invalid.
codesearchnet
def _get_backend(filename):
    """Get or create a backend for a credentials file, with thread locking.

    Ensures only one backend is used per-file per-process so that thread
    and process locks are appropriately shared.

    Args:
        filename: The full path to the credential storage file.

    Returns:
        An instance of :class:`_MultiprocessStorageBackend`.
    """
    path = os.path.abspath(filename)
    with _backends_lock:
        if path not in _backends:
            _backends[path] = _MultiprocessStorageBackend(path)
        return _backends[path]
A helper method to get or create a backend with thread locking. This ensures that only one backend is used per-file per-process, so that thread and process locks are appropriately shared. Args: filename: The full path to the credential storage file. Returns: An instance of :class:`_MultiprocessStorageBackend`.
codesearchnet
def all_near_zero(a: Union[(float, complex, Iterable[float], np.ndarray)], *, atol: float=1e-08) -> bool:
    """Checks if the tensor's elements are all near zero.

    Args:
        a: Tensor of elements that could all be near zero.
        atol: Absolute tolerance.

    Returns:
        bool: True when every element's magnitude is <= atol.
    """
    # Cast to a native bool: np.all returns np.bool_, which violates the
    # declared `-> bool` contract and breaks callers using identity
    # checks such as `result is True`.
    return bool(np.all(np.less_equal(np.abs(a), atol)))
Checks if the tensor's elements are all near zero. Args: a: Tensor of elements that could all be near zero. atol: Absolute tolerance.
codesearchnet
def GetSources(self, event):
    """Determines the short and long source for an event object.

    Args:
        event (EventObject): event.

    Returns:
        tuple(str, str): short and long source string.

    Raises:
        WrongFormatter: if the event object cannot be formatted by the
            formatter.
    """
    if event.data_type != self.DATA_TYPE:
        raise errors.WrongFormatter(
            'Unsupported data type: {0:s}.'.format(event.data_type))
    long_source = getattr(event, 'source_long', 'UNKNOWN')
    suffix = getattr(event, 'source_append', None)
    if suffix:
        long_source = ' '.join([long_source, suffix])
    return self.SOURCE_SHORT, long_source
Determines the short and long source for an event object. Args: event (EventObject): event. Returns: tuple(str, str): short and long source string. Raises: WrongFormatter: if the event object cannot be formatted by the formatter.
juraj-google-style
def index(cls):
    """Fetches all records.

    Returns:
        `dict`. The JSON formatted response.

    Raises:
        `requests.exceptions.HTTPError`: The status code is not ok.
    """
    # NOTE(review): verify=False disables TLS certificate verification,
    # which allows man-in-the-middle attacks -- confirm this is intentional.
    res = requests.get(cls.URL, headers=HEADERS, verify=False)
    res.raise_for_status()
    return res.json()
Fetches all records. Returns: `dict`. The JSON formatted response. Raises: `requests.exceptions.HTTPError`: The status code is not ok.
codesearchnet
def fast_cond_v2(pred, true_fn, false_fn, name=None):
    """Like cond_v2, except emits an If op and applies various optimizations.

    Intended for a simple conditional control flow operator; assumes the
    conditional is never differentiated, V1 control flow semantics are not
    relied on, and control dependencies are managed by the caller.

    Args:
        pred: boolean Tensor (must not be a Python bool).
        true_fn: function to execute if pred is true.
        false_fn: function to execute if pred is false.
        name: the name for the If op.

    Returns:
        A list of Tensors which are the outputs of the If op. Does not
        include intermediate outputs.

    Raises:
        TypeError: if `pred` is a Python bool.
    """
    if isinstance(pred, bool):
        raise TypeError('pred must not be a Python bool', pred)
    if not name:
        name = 'fast_cond'
    with ops.name_scope(name) as scope:
        # Branch function names must be unique within the scope.
        true_name = util.unique_fn_name(scope, 'true')
        false_name = util.unique_fn_name(scope, 'false')
        pred = _normalize_pred(pred)
        # Trace each branch into its own FuncGraph; op_return_value=pred
        # makes branches with no outputs still depend on the predicate.
        true_graph = func_graph_module.func_graph_from_py_func(true_name, true_fn, [], {}, func_graph=util.CondBranchFuncGraph(true_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)
        false_graph = func_graph_module.func_graph_from_py_func(false_name, false_fn, [], {}, func_graph=util.CondBranchFuncGraph(false_name, collections=ops.get_default_graph()._collections), add_control_dependencies=False, op_return_value=pred)
        verify_captures(_COND, [true_graph, false_graph])
        # prevent_lowering=True keeps this as a single (Stateless)If op.
        return _build_cond(pred, true_graph, false_graph, true_graph.external_captures, false_graph.external_captures, building_gradient=False, add_identities=False, prevent_lowering=True, name=scope)
Like cond_v2, except emits an If op and applies various optimizations. This function is intended to be used for cases where the cond is used to implement a simple conditional control flow operator. It makes the following assumptions: 1. The conditional is never differentiated. 2. The caller does not rely on V1 control flow semantics, i.e. for cross device execution, pruning subgraphs of the true or false branches, or non-strict evaluation order. 3. The caller manually configures any control dependencies within the graphs. In this case, the cond will be lowered to a single If (or StatelessIf) op and the true and false graphs will be executed as TF functions. Args: pred: boolean Tensor true_fn: function to execute if pred is true false_fn: function to execute if pred is false name: the name for the If op. Returns: A list of Tensors which are the outputs of the If op. Does not include intermediate outputs.
github-repos
def _list_node_attributes(self, node_name):
    """List the attributes of a node.

    Args:
        node_name: Name of the node of which the attributes are to be
            listed.

    Returns:
        A RichTextLines object rendering the attributes.
    """
    attrs = self._debug_dump.node_attributes(node_name)
    output = ['', 'Node attributes:']
    for key in attrs:
        output.append(' %s:' % key)
        # Flatten the repr onto a single line for display.
        value_text = repr(attrs[key]).strip().replace('\n', ' ')
        output.append(' %s' % value_text)
    output.append('')
    return debugger_cli_common.RichTextLines(output)
List the attributes of a node. Args: node_name: Name of the node of which the attributes are to be listed. Returns: A RichTextLines object.
github-repos
def all_reduce_indexed_slices(self, input_slices: indexed_slices.IndexedSlices, options: Optional[collective_util.Options]=None) -> indexed_slices.IndexedSlices:
    """All-reduce an IndexedSlices.

    This method can be called outside tf.function.

    Args:
        input_slices: an IndexedSlices.
        options: an optional tf.distribute.experimental.CommunicationOptions.
            If provided, it overrides the default options.

    Returns:
        The reduced IndexedSlices.
    """
    options = self._options.merge(options)
    with ops.device(self._device):

        def all_gather_indexed_slices(all_gather_fn: Callable[[core.TensorLike, Optional[collective_util.Options]], core.Tensor]) -> indexed_slices.IndexedSlices:
            # Gather values first, then indices. With NCCL the two
            # collectives must be ordered via a control dependency.
            all_values = all_gather_fn(input_slices.values, options)
            if options.implementation == collective_util.CommunicationImplementation.NCCL:
                control = [all_values]
            else:
                control = []
            with ops.control_dependencies(control):
                all_indices = all_gather_fn(input_slices.indices, options)
            return indexed_slices.IndexedSlices(values=all_values, indices=all_indices, dense_shape=input_slices.dense_shape)

        # Gather the per-replica slice lengths to decide whether padding
        # is needed (replicas may hold different numbers of indices).
        length = array_ops.shape(input_slices.indices)
        all_lengths = self._all_gather(length, options)

        def all_gather_with_padding(input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:
            # Pad every replica's tensor to the maximum length, gather,
            # then slice each segment back to its true length.
            max_length = math_ops.reduce_max(all_lengths)
            padded_tensor = _pad_util(input_tensor, max_length)
            all_padded_tensors = self._all_gather(padded_tensor, options)
            split_tensors = []
            for i in range(self._group_size):
                start_pos = i * max_length
                split_tensors.append(all_padded_tensors[start_pos:start_pos + all_lengths[i]])
            return array_ops.concat(split_tensors, 0)

        # Fast path when all replicas have equal lengths; otherwise use
        # the padded gather.
        return cond.cond(math_ops.equal(math_ops.reduce_max(all_lengths), math_ops.reduce_min(all_lengths)), lambda: all_gather_indexed_slices(self._all_gather), lambda: all_gather_indexed_slices(all_gather_with_padding))
All-reduce an IndexedSlices. This method can be called outside tf.function. Args: input_slices: an IndexedSlices. options: an optional tf.distribute.experimental.CommunicationOptions. If provided, it overrides the default options. Returns: The reduced IndexedSlices.
github-repos
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    """Create the Heroku Connect schema.

    Note:
        Only meant for local development. In production the schema is
        created by Heroku Connect itself.

    Args:
        using (str): Alias for database connection.

    Returns:
        bool: ``True`` if the schema was created, ``False`` if the schema
        already exists.
    """
    connection = connections[using]
    with connection.cursor() as cursor:
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False
        # AsIs injects the schema name as an identifier, not a quoted value.
        cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])
    with connection.schema_editor() as editor:
        for model in get_heroku_connect_models():
            editor.create_model(model)
        # hstore is required by the trigger log models below.
        editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')
        # Imported here to avoid a circular import at module load time.
        from heroku_connect.models import (TriggerLog, TriggerLogArchive)
        for cls in [TriggerLog, TriggerLogArchive]:
            editor.create_model(cls)
    return True
Create Heroku Connect schema. Note: This function is only meant to be used for local development. In a production environment the schema will be created by Heroku Connect. Args: using (str): Alias for database connection. Returns: bool: ``True`` if the schema was created, ``False`` if the schema already exists.
juraj-google-style
def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue):
    """Set the setpoint value for a step.

    Args:
        patternnumber (integer): 0-7
        stepnumber (integer): 0-7
        setpointvalue (float): Setpoint value
    """
    # Validate all arguments before touching the instrument.
    _checkPatternNumber(patternnumber)
    _checkStepNumber(stepnumber)
    _checkSetpointValue(setpointvalue, self.setpoint_max)
    address = _calculateRegisterAddress('setpoint', patternnumber, stepnumber)
    # The final argument is the number of decimals used on the wire.
    self.write_register(address, setpointvalue, 1)
Set the setpoint value for a step. Args: * patternnumber (integer): 0-7 * stepnumber (integer): 0-7 * setpointvalue (float): Setpoint value
codesearchnet
def _use_memcache(self, key, options=None):
    """Return whether to use memcache for this key.

    Resolution order: explicit call options, then the per-key memcache
    policy, then the connection-level default, then True.

    Args:
        key: Key instance.
        options: ContextOptions instance, or None.

    Returns:
        True if the key should be cached in memcache, False otherwise.
    """
    flag = ContextOptions.use_memcache(options)
    if flag is not None:
        return flag
    flag = self._memcache_policy(key)
    if flag is not None:
        return flag
    flag = ContextOptions.use_memcache(self._conn.config)
    if flag is not None:
        return flag
    return True
Return whether to use memcache for this key. Args: key: Key instance. options: ContextOptions instance, or None. Returns: True if the key should be cached in memcache, False otherwise.
codesearchnet
def writeline(self, line=b'', sep=b'\n', echo=None):
    """Write a byte sequence to the channel terminated by ``sep``.

    Args:
        line(bytes): The line to send.
        sep(bytes): The separator to use after the line.
        echo(bool): Whether to echo the written data to stdout; None means
            use the channel default -- TODO confirm against writelines().

    Raises:
        EOFError: If the channel was closed before all data was sent.
    """
    # Delegate to writelines() with a single-element batch.
    self.writelines([line], sep, echo)
Write a byte sequences to the channel and terminate it with carriage return and line feed. Args: line(bytes): The line to send. sep(bytes): The separator to use after each line. echo(bool): Whether to echo the written data to stdout. Raises: EOFError: If the channel was closed before all data was sent.
codesearchnet
def transformers_to_megatron_fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permutes a transformers QKV tensor into Megatron-LM layout.

    Input layout is [num_splits * num_heads * hidden_size, :]; output is
    [num_heads * hidden_size * num_splits, :] for checkpoint version 1.0
    and [num_heads * num_splits * hidden_size, :] for version 2.0 and
    later. If `param` is the self-attention weight tensor it must already
    be transposed before calling this function.

    Args:
        param (torch.Tensor): the tensor to permute.
        checkpoint_version (int): the version of the checkpoint.
        num_splits (int): number of projections, usually 3 (Q, K, V).
        num_heads (int): number of attention heads.
        hidden_size (int): hidden size per head.
    """
    original_shape = param.size()
    split_shape = (num_splits, num_heads, hidden_size) + original_shape[1:]
    if checkpoint_version == 1.0:
        reshaped = param.view(*split_shape)
        # [s, h, d, ...] -> [d, s, h, ...] via two transposes.
        reshaped = reshaped.transpose(0, 2).transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        reshaped = param.view(*split_shape)
        # [s, h, d, ...] -> [h, s, d, ...]
        reshaped = reshaped.transpose(0, 1).contiguous()
    else:
        # Versions strictly between 1.0 and 2.0 are returned unchanged.
        reshaped = param
    return reshaped.view(*original_shape)
Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM checkpoint versions. Input is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the self-attention block, the param needs to be already transposed before calling this function. Args: param (torch.Tensor): the tensor to permute checkpoint_version (int): the version of the checkpoint. num_splits (int): the number of projections, usually 3 for (Query, Key, Value) num_heads (int): the number of attention heads hidden_size (int): the hidden size per head
github-repos
def post_async(self, path, params=None):
    """Asynchronously call a function on a child block.

    Args:
        path (list): The path to post to.
        params (dict): Parameters for the call.

    Returns:
        Future: a single Future that will resolve to the result.
    """
    request = Post(self._get_next_id(), path, params)
    # Completed responses are delivered through the internal queue.
    request.set_callback(self._q.put)
    future = self._dispatch_request(request)
    return future
Asynchronously calls a function on a child block Args: path (list): The path to post to params (dict): parameters for the call Returns: Future: as single Future that will resolve to the result
juraj-google-style
def ParseFromUnicode(self, value):
    """Parse a string into a client URN.

    Converts case so that all URNs are of the form C.[0-9a-f].

    Args:
        value: string value to parse.

    Raises:
        TypeValueError: if the URN does not match the client id pattern.
    """
    precondition.AssertType(value, Text)
    value = value.strip()
    super(ClientURN, self).ParseFromUnicode(value)
    match = self.CLIENT_ID_RE.match(self._string_urn)
    if not match:
        raise type_info.TypeValueError("Client urn malformed: %s" % value)
    clientid = match.group("clientid")
    # Normalize: uppercase leading 'C', lowercase hex digits.
    clientid_correctcase = "".join((clientid[0].upper(), clientid[1:].lower()))
    # NOTE(review): replace() rewrites the first occurrence of the client
    # id substring anywhere in the URN -- confirm it cannot match earlier.
    self._string_urn = self._string_urn.replace(clientid, clientid_correctcase, 1)
Parse a string into a client URN. Convert case so that all URNs are of the form C.[0-9a-f]. Args: value: string value to parse
juraj-google-style
def _parse_string(self, xml):
    """Parse a MARC XML document into self.controlfields and self.datafields.

    Also detects whether the input is in OAI MARC format or not (see
    self.oai_marc).

    Args:
        xml (str or HTMLElement): input data.

    Raises:
        ValueError: if the document contains no <record> element.
    """
    if not isinstance(xml, HTMLElement):
        xml = dhtmlparser.parseString(str(xml))
    record = xml.find("record")
    if not record:
        raise ValueError("There is no <record> in your MARC XML document!")
    record = record[0]
    # OAI MARC documents wrap fields in <oai_marc> and use different
    # element/attribute names than plain MARC XML.
    self.oai_marc = len(record.find("oai_marc")) > 0
    if not self.oai_marc:
        leader = record.find("leader")
        if len(leader) >= 1:
            self.leader = leader[0].getContent()
    if self.oai_marc:
        self._parse_control_fields(record.find("fixfield"), "id")
        self._parse_data_fields(record.find("varfield"), "id", "label")
    else:
        self._parse_control_fields(record.find("controlfield"), "tag")
        self._parse_data_fields(record.find("datafield"), "tag", "code")
    # In OAI MARC the leader lives in the LDR control field.
    if self.oai_marc and "LDR" in self.controlfields:
        self.leader = self.controlfields["LDR"]
Parse MARC XML document to dicts, which are contained in self.controlfields and self.datafields. Args: xml (str or HTMLElement): input data Also detect if this is oai marc format or not (see elf.oai_marc).
juraj-google-style
def GetDecoder(cls, encoding_method):
    """Retrieves the decoder object for a specific encoding method.

    Args:
        encoding_method (str): encoding method identifier
            (case-insensitive).

    Returns:
        Decoder: decoder instance, or None if the encoding method does
        not exist.
    """
    decoder_class = cls._decoders.get(encoding_method.lower())
    if decoder_class is None:
        return None
    return decoder_class()
Retrieves the decoder object for a specific encoding method. Args: encoding_method (str): encoding method identifier. Returns: Decoder: decoder or None if the encoding method does not exists.
codesearchnet
def __init__(self, timeseries, loop=None):
    """Construct an iterator over a timeseries.

    Args:
        timeseries: the timeseries to iterate over.
        loop: the asyncio loop to use for iterating; not stored here --
            presumably consumed elsewhere, TODO confirm.
    """
    self.timeseries = timeseries
    # Buffer of fetched items awaiting consumption.
    self.queue = deque()
    # Pagination cursor; starts at the timeseries' base URL.
    self.continuation_url = timeseries._base_url
Construct an iterator. Args: timeseries: the timeseries to iterate over loop: The asyncio loop to use for iterating
juraj-google-style
def singleOrPair(obj):
    """Classify an object as 'Single', 'Pair', or 'Neither'.

    All pairs are single, so what this really detects is whether an
    object is only a Single or also a Pair.

    Args:
        obj (object): Literally anything.

    Returns:
        str: 'Single', 'Pair', or 'Neither'.
    """
    mro = obj.__class__.__mro__
    if len(mro) <= 2:
        # Hierarchy too shallow to descend from Single or Pair.
        return 'Neither'
    if ancestorJr(obj) is Pair:
        return 'Pair'
    if ancestor(obj) is Single:
        return 'Single'
    return 'Neither'
Check whether an object is single, pair, or neither. Of course, all pairs are single, so what the function really detects is whether an object is only single or also a pair. Args: obj (object): Literally anything. Returns: str: 'Single', or 'Pair', or 'Neither'
codesearchnet
def _GetAccessToken(self):
    """Gets an oauth2 access token for the Gitkit API using a service account.

    Returns:
        string, oauth2 access token.
    """
    d = {'assertion': self._GenerateAssertion(), 'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer'}
    # Python 3 uses urllib.parse.urlencode; Python 2 falls back to
    # urllib.urlencode (parse has no urlencode there).
    try:
        body = parse.urlencode(d)
    except AttributeError:
        body = urllib.urlencode(d)
    req = urllib_request.Request(RpcHelper.TOKEN_ENDPOINT)
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    # urlopen requires bytes for the POST body.
    binary_body = body.encode('utf-8')
    raw_response = urllib_request.urlopen(req, binary_body)
    return simplejson.loads(raw_response.read())['access_token']
Gets oauth2 access token for Gitkit API using service account. Returns: string, oauth2 access token.
codesearchnet
def __convertChannelMask(self, channelsArray):
    """Convert a channel array to its bitmask representation.

    Args:
        channelsArray: channel array (i.e. [21, 22]).

    Returns:
        int: bitmask with one bit set per listed channel.
    """
    bitmask = 0
    for channel in channelsArray:
        bitmask |= 1 << channel
    return bitmask
convert channelsArray to bitmask format Args: channelsArray: channel array (i.e. [21, 22]) Returns: bitmask format corresponding to a given channel array
codesearchnet
async def register_service(self, short_name, long_name, allow_duplicate=True):
    """Register a new service with the service manager.

    Args:
        short_name (string): A unique short name for this service that
            functions as an id.
        long_name (string): A user facing name for this service.
        allow_duplicate (boolean): Don't throw an error if this service is
            already registered. This is important if the service is
            preregistered for example.

    Raises:
        ArgumentError: if the short_name is already taken.
    """
    try:
        await self.send_command(OPERATIONS.CMD_REGISTER_SERVICE, dict(name=short_name, long_name=long_name), MESSAGES.RegisterServiceResponse)
    except ArgumentError:
        # Duplicate registration; swallow unless the caller opted out.
        if not allow_duplicate:
            raise
Register a new service with the service manager. Args: short_name (string): A unique short name for this service that functions as an id long_name (string): A user facing name for this service allow_duplicate (boolean): Don't throw an error if this service is already registered. This is important if the service is preregistered for example. Raises: ArgumentError: if the short_name is already taken
juraj-google-style
def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """Perform NMS (Non Maximum Suppression) on the mask outputs.

    Args:
        rle_masks (`torch.Tensor`): binary masks in the RLE format.
        iou_scores (`torch.Tensor` of shape (nb_masks, 1)): iou_scores
            predicted by the model.
        mask_boxes (`torch.Tensor`): bounding boxes corresponding to the
            segmentation masks.
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS
            threshold.
    """
    # All boxes share category index 0, so NMS runs across the whole set.
    keep = batched_nms(boxes=mask_boxes.float(), scores=iou_scores,
                       idxs=torch.zeros(mask_boxes.shape[0]),
                       iou_threshold=amg_crops_nms_thresh)
    kept_scores = iou_scores[keep]
    kept_rles = [rle_masks[i] for i in keep]
    kept_boxes = mask_boxes[keep]
    decoded_masks = [_rle_to_mask(rle) for rle in kept_rles]
    return (decoded_masks, kept_scores, kept_rles, kept_boxes)
Perform NMS (Non Maximum Suppression) on the outputs. Args: rle_masks (`torch.Tensor`): binary masks in the RLE format iou_scores (`torch.Tensor` of shape (nb_masks, 1)): iou_scores predicted by the model mask_boxes (`torch.Tensor`): The bounding boxes corresponding to segmentation masks amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7): NMS threshold.
github-repos
def check(self, instance, format):
    """Check whether the instance conforms to the given format.

    Formats with no registered checker are silently accepted.

    Arguments:
        instance (*any primitive type*, i.e. str, number, bool): The
            instance to check.
        format (str): The format that instance should conform to.

    Raises:
        FormatError: if the instance does not conform to ``format``.
    """
    try:
        func, raises = self.checkers[format]
    except KeyError:
        return
    cause = None
    try:
        result = func(instance)
    except raises as e:
        cause = e
        result = None
    if not result:
        raise FormatError(
            "%r is not a %r" % (instance, format), cause=cause,
        )
Check whether the instance conforms to the given format. Arguments: instance (*any primitive type*, i.e. str, number, bool): The instance to check format (str): The format that instance should conform to Raises: FormatError: if the instance does not conform to ``format``
juraj-google-style
def starting_wall_time(self):
    """Wall timestamp for when the debugged TensorFlow program started.

    Returns:
        Starting wall time as seconds since the epoch, as a `float`.
    """
    return self._reader.starting_wall_time()
Wall timestamp for when the debugged TensorFlow program started. Returns: Stating wall time as seconds since the epoch, as a `float`.
github-repos
def ParseBookmarkRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a bookmark row.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    rev_host = self._GetRowValue(query_hash, row, 'rev_host')
    bookmark_type = self._GetRowValue(query_hash, row, 'type')
    event_data = FirefoxPlacesBookmarkEventData()
    event_data.host = (rev_host or 'N/A')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.places_title = self._GetRowValue(query_hash, row, 'places_title')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'bookmark_title')
    event_data.type = self._BOOKMARK_TYPES.get(bookmark_type, 'N/A')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    event_data.visit_count = self._GetRowValue(query_hash, row, 'visit_count')
    # One event per available timestamp: creation and last modification.
    timestamp = self._GetRowValue(query_hash, row, 'dateAdded')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_ADDED)
        parser_mediator.ProduceEventWithEventData(event, event_data)
    timestamp = self._GetRowValue(query_hash, row, 'lastModified')
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a bookmark row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
codesearchnet
def section(self, section):
    """Create a section block.

    Args:
        section (str or :class:`Section`): name of section or object.

    Returns:
        self for chaining.

    Raises:
        ValueError: wrong insertion level or parameter type.
        DuplicateSectionError: a section of that name already exists.
    """
    if not isinstance(self._container, ConfigUpdater):
        raise ValueError("Sections can only be added at section level!")
    if isinstance(section, str):
        new_section = Section(section, container=self._container)
    elif isinstance(section, Section):
        new_section = section
    else:
        raise ValueError("Parameter must be a string or Section type!")
    existing_names = (block.name for block in self._container
                      if isinstance(block, Section))
    if new_section.name in existing_names:
        raise DuplicateSectionError(new_section.name)
    self._container.structure.insert(self._idx, new_section)
    self._idx += 1
    return self
Creates a section block Args: section (str or :class:`Section`): name of section or object Returns: self for chaining
juraj-google-style
def do_block(args):
    """Runs the block list or block show command, printing to the console.

    Args:
        args: The parsed arguments sent to the command at runtime.

    Raises:
        CliException: a requested key is missing from the block.
        AssertionError: an unknown output format was requested.
    """
    rest_client = RestClient(args.url, args.user)
    if (args.subcommand == 'list'):
        block_generator = rest_client.list_blocks()
        blocks = []
        # Consume at most args.count blocks from the generator.
        left = args.count
        for block in block_generator:
            blocks.append(block)
            left -= 1
            if (left <= 0):
                break
        keys = ('num', 'block_id', 'batches', 'txns', 'signer')
        headers = tuple(((k.upper() if (k != 'batches') else 'BATS') for k in keys))

        def parse_block_row(block):
            # Flatten one block dict into a display tuple matching `keys`.
            batches = block.get('batches', [])
            txns = [t for b in batches for t in b['transactions']]
            return (block['header'].get('block_num', 0), block['header_signature'], len(batches), len(txns), block['header']['signer_public_key'])

        if (args.format == 'default'):
            fmt.print_terminal_table(headers, blocks, parse_block_row)
        elif (args.format == 'csv'):
            fmt.print_csv(headers, blocks, parse_block_row)
        elif ((args.format == 'json') or (args.format == 'yaml')):
            data = [{k: d for (k, d) in zip(keys, parse_block_row(b))} for b in blocks]
            if (args.format == 'yaml'):
                fmt.print_yaml(data)
            elif (args.format == 'json'):
                fmt.print_json(data)
            else:
                raise AssertionError('Missing handler: {}'.format(args.format))
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
    if (args.subcommand == 'show'):
        output = rest_client.get_block(args.block_id)
        if args.key:
            # A key may live either at the top level or inside the header.
            if (args.key in output):
                output = output[args.key]
            elif (args.key in output['header']):
                output = output['header'][args.key]
            else:
                raise CliException('key "{}" not found in block or header'.format(args.key))
        if (args.format == 'yaml'):
            fmt.print_yaml(output)
        elif (args.format == 'json'):
            fmt.print_json(output)
        else:
            raise AssertionError('Missing handler: {}'.format(args.format))
Runs the block list or block show command, printing output to the console Args: args: The parsed arguments sent to the command at runtime
codesearchnet
def construct(cls, name, version=None):
    """Create a VersionedObject directly from an object name and version.

    Args:
        name: Object name string.
        version: Version object; defaults to an empty Version.

    Returns:
        A new VersionedObject.
    """
    # NOTE(review): instantiates VersionedObject rather than cls, so
    # subclasses get a base-class instance -- confirm this is intended.
    obj = VersionedObject(None)
    obj.name_ = name
    if version is None:
        version = Version()
    obj.version_ = version
    return obj
Create a VersionedObject directly from an object name and version. Args: name: Object name string. version: Version object.
juraj-google-style
def authenticate(self, connection_certificate=None, connection_info=None, request_credentials=None):
    """Query the configured SLUGS service with the provided credentials.

    Args:
        connection_certificate (cryptography.x509.Certificate): An X.509
            certificate object obtained from the connection being
            authenticated. Required for SLUGS authentication.
        connection_info (tuple): Connection source IP address and a
            timestamp. Optional; ignored for SLUGS authentication.
        request_credentials (list): KMIP Credential structures. Optional;
            ignored for SLUGS authentication.

    Returns:
        tuple: (user_id, list of group names).

    Raises:
        ConfigurationError: SLUGS URLs missing or unreachable.
        PermissionDenied: unknown user or missing group information.
    """
    if ((self.users_url is None) or (self.groups_url is None)):
        raise exceptions.ConfigurationError('The SLUGS URL must be specified.')
    user_id = utils.get_client_identity_from_certificate(connection_certificate)
    # First request only verifies the user exists.
    try:
        response = requests.get(self.users_url.format(user_id))
    except Exception:
        raise exceptions.ConfigurationError('A connection could not be established using the SLUGS URL.')
    if (response.status_code == 404):
        raise exceptions.PermissionDenied('Unrecognized user ID: {}'.format(user_id))
    # Second request fetches the user's group memberships.
    response = requests.get(self.groups_url.format(user_id))
    if (response.status_code == 404):
        raise exceptions.PermissionDenied('Group information could not be retrieved for user ID: {}'.format(user_id))
    return (user_id, response.json().get('groups'))
Query the configured SLUGS service with the provided credentials. Args: connection_certificate (cryptography.x509.Certificate): An X.509 certificate object obtained from the connection being authenticated. Required for SLUGS authentication. connection_info (tuple): A tuple of information pertaining to the connection being authenticated, including the source IP address and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)). Optional, defaults to None. Ignored for SLUGS authentication. request_credentials (list): A list of KMIP Credential structures containing credential information to use for authentication. Optional, defaults to None. Ignored for SLUGS authentication.
codesearchnet
def he_init(n_inputs, n_outputs, activation_fn, uniform=True):
    """Sets the parameter initialization using He et al. (2015).

    Keeps the scale of gradients roughly the same in all layers with ReLU
    activations. For activations other than ReLU and ReLU6 this falls back
    to Xavier initialization as in xavier_init().

    Args:
        n_inputs: The number of input nodes into each output.
        n_outputs: The number of output nodes for each input.
        activation_fn: Activation function used in this layer.
        uniform: If uniform distribution will be used for Xavier
            initialization. Normal distribution will be used if False.

    Returns:
        An initializer.
    """

    def in_relu_family(activation_fn):
        # An activation may be given as a sequence; the first entry is
        # the function itself.
        if isinstance(activation_fn, collections.Sequence):
            activation_fn = activation_fn[0]
        return (activation_fn in (tf.nn.relu, tf.nn.relu6))

    if in_relu_family(activation_fn):
        # He initialization: stddev = sqrt(2 / fan_in).
        stddev = math.sqrt((2.0 / n_inputs))
        return tf.random_normal_initializer(stddev=stddev)
    else:
        return xavier_init(n_inputs, n_outputs, uniform)
Sets the parameter initialization using the method described. This method is designed to keep the scale of the gradients roughly the same in all layers with ReLU activations. He et al. (2015): Delving deep into rectifiers: surpassing human-level performance on imageNet classification. International Conference on Computer Vision. For activations other than ReLU and ReLU6, this method uses Xavier initialization as in xavier_init(). Args: n_inputs: The number of input nodes into each output. n_outputs: The number of output nodes for each input. activation_fn: Activation function used in this layer. uniform: If uniform distribution will be used for Xavier initialization. Normal distribution will be used if False. Returns: An initializer.
codesearchnet
def Open(self, filename, read_only=False):
    """Opens the database file.

    Since sqlite3 does not support a real read-only mode, ``read_only`` is
    only recorded; callers are expected to restrict themselves to SELECT
    queries.

    Args:
        filename (str): filename of the database.
        read_only (Optional[bool]): True if the database should be treated
            as read-only.

    Returns:
        bool: True if successful.

    Raises:
        RuntimeError: if the database is already opened.
    """
    if self._connection:
        raise RuntimeError('Cannot open database already opened.')
    self.filename = filename
    self.read_only = read_only
    try:
        self._connection = sqlite3.connect(filename)
    except sqlite3.OperationalError:
        return False
    # sqlite3.connect() and cursor() never return falsy objects, so the
    # original `if not ...: return False` checks were dead code.
    self._cursor = self._connection.cursor()
    return True
Opens the database file. Args: filename (str): filename of the database. read_only (Optional[bool]): True if the database should be opened in read-only mode. Since sqlite3 does not support a real read-only mode we fake it by only permitting SELECT queries. Returns: bool: True if successful. Raises: RuntimeError: if the database is already opened.
juraj-google-style
def inner(*args):
    """Inner product of a polynomial set.

    Args:
        args (chaospy.poly.base.Poly): The polynomials to perform the
            inner product on; plain numeric arrays are also accepted.

    Returns:
        (chaospy.poly.base.Poly): Resulting polynomial, or a scalar for
        purely numeric input.
    """
    has_poly = any(isinstance(arg, Poly) for arg in args)
    if not has_poly:
        # Pure numeric fast path: elementwise product, then sum.
        return numpy.sum(numpy.prod(args, 0), 0)
    product = args[0]
    for arg in args[1:]:
        product = product * arg
    return sum(product)
Inner product of a polynomial set. Args: args (chaospy.poly.base.Poly): The polynomials to perform inner product on. Returns: (chaospy.poly.base.Poly): Resulting polynomial. Examples: >>> x,y = cp.variable(2) >>> P = cp.Poly([x-1, y]) >>> Q = cp.Poly([x+1, x*y]) >>> print(cp.inner(P, Q)) q0^2+q0q1^2-1 >>> x = numpy.arange(4) >>> print(cp.inner(x, x)) 14
juraj-google-style
def __register_class(self, parsed_config):
    """Register the class implementing this config, so we only add it once.

    Args:
        parsed_config: The JSON object with the API configuration being
            added.

    Raises:
        ApiConfigurationError: If the class has already been registered.
    """
    methods = parsed_config.get('methods')
    if not methods:
        return
    # Collect the distinct service classes referenced by the methods.
    # NOTE: itervalues() is Python 2 only.
    service_classes = set()
    for method in methods.itervalues():
        rosy_method = method.get('rosyMethod')
        if rosy_method and '.' in rosy_method:
            # rosyMethod has the form "ClassName.method_name".
            method_class = rosy_method.split('.', 1)[0]
            service_classes.add(method_class)
    for service_class in service_classes:
        if service_class in self.__registered_classes:
            raise api_exceptions.ApiConfigurationError(
                'API class %s has already been registered.' % service_class)
        self.__registered_classes.add(service_class)
Register the class implementing this config, so we only add it once. Args: parsed_config: The JSON object with the API configuration being added. Raises: ApiConfigurationError: If the class has already been registered.
juraj-google-style
def set_clang_compiler_path_win(environ_cp):
    """Set CLANG_COMPILER_PATH and related environment variables.

    Loops over user prompts for the clang path until a valid response is
    received; the default is used if no input is given. Writes
    CLANG_COMPILER_PATH, CC and BAZEL_COMPILER to .bazelrc.

    Args:
        environ_cp: (Dict) copy of the os.environ.

    Returns:
        string value for clang_compiler_path.
    """
    default_clang_path = 'C:/Program Files/LLVM/bin/clang.exe'
    # Fall back to whatever clang is on PATH if the LLVM default is absent.
    if not os.path.exists(default_clang_path):
        default_clang_path = shutil.which('clang') or ''
    # NOTE(review): 'nowpreferred' in the error message is a typo in the
    # runtime string; left untouched here since it is program output.
    clang_compiler_path = prompt_loop_or_load_from_env(environ_cp, var_name='CLANG_COMPILER_PATH', var_default=default_clang_path, ask_for_var='Please specify the path to clang executable.', check_success=os.path.exists, resolve_symlinks=True, error_msg='Invalid clang path. %s cannot be found. Note that Clang is nowpreferred compiler. You may use MSVC by removing --config=win_clang')
    write_action_env_to_bazelrc('CLANG_COMPILER_PATH', clang_compiler_path)
    write_to_bazelrc(f'build --repo_env=CC="{clang_compiler_path}"')
    write_to_bazelrc(f'build --repo_env=BAZEL_COMPILER="{clang_compiler_path}"')
    return clang_compiler_path
Set CLANG_COMPILER_PATH and environment variables. Loop over user prompts for clang path until receiving a valid response. Default is used if no input is given. Set CLANG_COMPILER_PATH and write environment variables CC and BAZEL_COMPILER to .bazelrc. Args: environ_cp: (Dict) copy of the os.environ. Returns: string value for clang_compiler_path.
github-repos
def move_to(self, destination_filename: str, alter_if_clash: bool = True) -> None:
    """Move the file to which this class refers to a new location.

    Will not overwrite existing files (but offers the option to rename
    files slightly to avoid a clash).

    Args:
        destination_filename: filename to move to.
        alter_if_clash: if ``True`` (the default), appends numbers to the
            filename if the destination already exists, so that the move
            can proceed.
    """
    if not self.src_filename:
        return
    if alter_if_clash:
        # Append _0, _1, ... before the extension until the name is free.
        # NOTE(review): each retry re-suffixes the already-suffixed name
        # (file_0_1.txt rather than file_1.txt) -- confirm intended.
        counter = 0
        while os.path.exists(destination_filename):
            root, ext = os.path.splitext(destination_filename)
            destination_filename = "{r}_{c}{e}".format(
                r=root, c=counter, e=ext)
            counter += 1
    else:
        if os.path.exists(destination_filename):
            src = self.rescue_filename or self.src_filename
            log.warning("Destination exists; won't move {!r} to {!r}", src, destination_filename)
            return
    if self.rescue_filename:
        # A recovered copy exists: move it, discard the corrupt original.
        shutil.move(self.rescue_filename, destination_filename)
        os.remove(self.src_filename)
        log.info("Moved recovered file {!r} to {!r} and deleted corrupted "
                 "original {!r}", self.rescue_filename, destination_filename, self.src_filename)
        self.rescue_filename = ""
    else:
        shutil.move(self.src_filename, destination_filename)
        log.info("Moved {!r} to {!r}", self.src_filename, destination_filename)
        self.src_filename = ""
Move the file to which this class refers to a new location. The function will not overwrite existing files (but offers the option to rename files slightly to avoid a clash). Args: destination_filename: filename to move to alter_if_clash: if ``True`` (the default), appends numbers to the filename if the destination already exists, so that the move can proceed.
juraj-google-style
def present_weather_codes(self, value=None):
    """Corresponds to IDD Field `present_weather_codes`.

    Args:
        value (int): value for IDD Field `present_weather_codes`. If
            `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            message = ('value {} need to be of type int '
                       'for field `present_weather_codes`'.format(value))
            raise ValueError(message)
    self._present_weather_codes = value
Corresponds to IDD Field `present_weather_codes` Args: value (int): value for IDD Field `present_weather_codes` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def load(tiff_filename):
    """Import a TIFF file into a numpy array.

    Arguments:
        tiff_filename: A string filename of a TIFF datafile.

    Returns:
        A numpy array with data from the TIFF file.

    Raises:
        ValueError: if the file cannot be loaded.
    """
    tiff_filename = os.path.expanduser(tiff_filename)
    try:
        img = tiff.imread(tiff_filename)
    except Exception as e:
        # The original had an unreachable bare `raise` after this
        # statement; chain the cause instead so the real error survives.
        raise ValueError(
            'Could not load file {0} for conversion.'.format(tiff_filename)
        ) from e
    return numpy.array(img)
Import a TIFF file into a numpy array. Arguments: tiff_filename: A string filename of a TIFF datafile Returns: A numpy array with data from the TIFF file
codesearchnet
def nlargest(self, n=None):
    """List the n most common elements and their counts, descending.

    If n is None, all element counts are listed. Run time should be
    O(m log m) where m is len(self).

    Args:
        n (int): The number of elements to return, or None for all.
    """
    by_count = itemgetter(1)
    if n is None:
        return sorted(self.counts(), key=by_count, reverse=True)
    return heapq.nlargest(n, self.counts(), key=by_count)
List the n most common elements and their counts. The list is ordered from the most common to the least. If n is None, list all element counts. Run time should be O(m log m) where m is len(self) Args: n (int): The number of elements to return
juraj-google-style
def duration_to_string(duration):
    """Convert a duration in seconds to an H:MM:SS string.

    Args:
        duration (int): The duration in seconds to convert.

    Returns:
        s (str): The duration as a string.
    """
    minutes, seconds = divmod(duration, 60)
    hours, minutes = divmod(minutes, 60)
    return '%d:%02d:%02d' % (hours, minutes, seconds)
Converts a duration to a string Args: duration (int): The duration in seconds to convert Returns s (str): The duration as a string
codesearchnet
def set_input(self, p_name, value):
    """Set a Step's input variable to a certain value.

    The value comes either from a workflow input or the output of a
    previous step.

    Args:
        p_name (str): the (Python-safe) name of the Step input.
        value (str): the name of the output variable that provides the
            value for this input.

    Raises:
        ValueError: The name provided is not a valid input name for this
            Step.
    """
    name = self.python_names.get(p_name)
    # Bug fix: the original tested `p_name is None` *after* the lookup;
    # the lookup result (`name`) is what signals an unknown input.
    if name is None or name not in self.get_input_names():
        raise ValueError('Invalid input "{}"'.format(p_name))
    self.step_inputs[name] = value
Set a Step's input variable to a certain value. The value comes either from a workflow input or output of a previous step. Args: p_name (str): the name of the Step input value (str): the name of the output variable that provides the value for this input. Raises: ValueError: The name provided is not a valid input name for this Step.
codesearchnet
def create_standalone_context(require=None, **settings) -> 'Context':
    """Create a standalone ModernGL context.

    Example::

        # Create a context with highest possible supported version
        ctx = moderngl.create_context()

        # Require at least OpenGL 4.3
        ctx = moderngl.create_context(require=430)

    Keyword Arguments:
        require (int): OpenGL version code.

    Returns:
        :py:class:`Context` object.

    Raises:
        ValueError: the created context version is below `require`.
    """
    # The environment variable overrides any backend passed in settings.
    backend = os.environ.get('MODERNGL_BACKEND')
    if (backend is not None):
        settings['backend'] = backend
    # __new__ skips Context.__init__; the low-level mgl call fills in
    # the native object and version.
    ctx = Context.__new__(Context)
    (ctx.mglo, ctx.version_code) = mgl.create_standalone_context(settings)
    ctx._screen = None
    ctx.fbo = None
    ctx._info = None
    ctx.extra = None
    if ((require is not None) and (ctx.version_code < require)):
        raise ValueError('Requested OpenGL version {}, got version {}'.format(require, ctx.version_code))
    return ctx
Create a standalone ModernGL context. Example:: # Create a context with highest possible supported version ctx = moderngl.create_context() # Require at least OpenGL 4.3 ctx = moderngl.create_context(require=430) Keyword Arguments: require (int): OpenGL version code. Returns: :py:class:`Context` object
codesearchnet
class PipelineException(Exception):
    """Raised by a [`Pipeline`] when handling __call__.

    Args:
        task (`str`): The task of the pipeline.
        model (`str`): The model used by the pipeline.
        reason (`str`): The error message to display.
    """

    def __init__(self, task: str, model: str, reason: str):
        # Record context first, then initialize Exception with the
        # human-readable reason.
        self.task = task
        self.model = model
        super().__init__(reason)
Raised by a [`Pipeline`] when handling __call__. Args: task (`str`): The task of the pipeline. model (`str`): The model used by the pipeline. reason (`str`): The error message to display.
github-repos
def _make_request(self, verb: str, endpoint: str, **kwargs: dict[str, Any]) -> requests.Response: res = self._session.request(verb, urllib.parse.urljoin('https: res.raise_for_status() return res.json()
Helper method to make a request and raise an HTTPError if one occurred. Arguments: verb: The HTTP verb to use endpoint: The endpoint to make the request to **kwargs: The json that will be sent as the body of the request. Returns: a requests.Response object containing the response from the API. Raises: requests.exceptions.HTTPError
github-repos
def split_code_in_indented_blocks(code: str, indent_level: str='', start_prompt: Optional[str]=None, end_prompt: Optional[str]=None) -> List[str]:
    """Split some code into its indented blocks, starting at a given level.

    Args:
        code (`str`): The code to split.
        indent_level (`str`): The indent (as a string of whitespace) marking
            the start of each block to split on.
        start_prompt (`str`, *optional*): If provided, splitting only starts
            at the first line beginning with this text; everything before it
            is kept as one opaque block.
        end_prompt (`str`, *optional*): If provided, splitting stops at the
            first line beginning with this text; everything from it onward is
            kept as one opaque block.

    Returns:
        `List[str]`: The blocks. Joining them with newlines reconstructs
        `code` exactly — no line is dropped.
    """
    index = 0
    lines = code.split('\n')
    # Skip ahead to the start prompt (if any); the skipped prefix becomes
    # one un-split block so the input can be reconstructed by joining.
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # `current_block` accumulates the lines of the block being built.
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        # A non-empty line at exactly `indent_level` is a block boundary.
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # If the previous line was deeper-indented, this boundary line is
            # treated as the *closer* of the current block (e.g. a trailing
            # `)` / `]`), so it is appended before the block is flushed.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                # Otherwise the boundary line *opens* a new block.
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            # Deeper-indented or empty lines belong to the current block.
            current_block.append(lines[index])
        index += 1
    # Flush whatever block was still being accumulated.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Keep the tail after `end_prompt` as one opaque block.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
Split some code into its indented blocks, starting at a given level. Args: code (`str`): The code to split. indent_level (`str`): The indent level (as string) to use for identifying the blocks to split. start_prompt (`str`, *optional*): If provided, only starts splitting at the line where this text is. end_prompt (`str`, *optional*): If provided, stops splitting at a line where this text is. Warning: The text before `start_prompt` or after `end_prompt` (if provided) is not ignored, just not split. The input `code` can thus be retrieved by joining the result. Returns: `List[str]`: The list of blocks.
github-repos
def make_view(controller, context, data):
    """Make a View subclass containing properties specific for given data.

    Args:
        controller (Controller): The child controller that hosts the data
        context (Context): The context the parent has made that the View
            should use for manipulating the data
        data (Model): The actual data that context will be manipulating

    Returns:
        View: A View subclass instance that provides a user-focused API to
            the given data
    """
    # Order matters here: BlockModel/AttributeModel/MethodModel must be
    # checked before the generic Model case, which they specialize.
    if isinstance(data, BlockModel):
        return _make_view_subclass(Block, controller, context, data)
    if isinstance(data, AttributeModel):
        return Attribute(controller, context, data)
    if isinstance(data, MethodModel):
        return Method(controller, context, data)
    if isinstance(data, Model):
        return _make_view_subclass(View, controller, context, data)
    # Containers are wrapped recursively, preserving dict insertion order.
    if isinstance(data, dict):
        wrapped = OrderedDict()
        for key, value in data.items():
            wrapped[key] = make_view(controller, context, value)
        return wrapped
    if isinstance(data, list):
        return [make_view(controller, context, item) for item in data]
    # Plain scalar values pass through untouched.
    return data
Make a View subclass containing properties specific for given data Args: controller (Controller): The child controller that hosts the data context (Context): The context the parent has made that the View should use for manipulating the data data (Model): The actual data that context will be manipulating Returns: View: A View subclass instance that provides a user-focused API to the given data
juraj-google-style
def fft(x, axis=-1, padding_samples=0):
    """Apply a real FFT along `axis`, with optional zero-padding.

    Args:
        x (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits` instance
            which has one or more :class:`~zounds.timeseries.TimeDimension`
            axes
        axis (int): The axis along which the fft should be applied
        padding_samples (int): The number of zeros to append along `axis`
            before performing the FFT

    Returns:
        ArrayWithUnits: the transform, with `axis` replaced by a
        :class:`FrequencyDimension` on a linear scale.
    """
    if padding_samples > 0:
        # NOTE(review): the zeros are shaped (len(x), padding_samples), which
        # assumes x is 2-D with axis as the last dimension — confirm for
        # higher-rank inputs.
        padded = np.concatenate(
            [x, np.zeros((len(x), padding_samples), dtype=x.dtype)], axis=axis)
    else:
        padded = x
    # rfft since the input is real; 'ortho' normalization keeps the
    # transform unitary.
    transformed = np.fft.rfft(padded, axis=axis, norm='ortho')
    # Recover the sample rate from the time dimension's frequency to build
    # the corresponding linear frequency scale.
    sr = audio_sample_rate(int(Seconds(1) / x.dimensions[axis].frequency))
    scale = LinearScale.from_sample_rate(sr, transformed.shape[-1])
    # Swap the transformed axis's dimension from time to frequency.
    new_dimensions = list(x.dimensions)
    new_dimensions[axis] = FrequencyDimension(scale)
    return ArrayWithUnits(transformed, new_dimensions)
Apply an FFT along the given dimension, and with the specified amount of zero-padding Args: x (ArrayWithUnits): an :class:`~zounds.core.ArrayWithUnits` instance which has one or more :class:`~zounds.timeseries.TimeDimension` axes axis (int): The axis along which the fft should be applied padding_samples (int): The number of padding zeros to apply along axis before performing the FFT
juraj-google-style
def _unique_constraint_name(table: str, field, keys): postfix = '_'.join(keys) return '{table}_{field}_unique_{postfix}'.format(table=table, field=field.column, postfix=postfix)
Gets the name for a UNIQUE INDEX that applies to one or more keys in a hstore field. Arguments: table: The name of the table the field is a part of. field: The hstore field to create a UNIQUE INDEX for. keys: The names of the hstore keys to create the index name for. This can be a single-element or multi-element tuple/list of names. Returns: The name for the UNIQUE index.
codesearchnet
def _ReadEntry(self, parser_mediator, file_object, file_offset):
    """Reads a single utmp entry.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.
        file_offset (int): offset of the data relative to the start of the
            file-like object.

    Returns:
        tuple: containing:
            int: timestamp in microseconds since January 1, 1970 00:00:00 UTC.
            UtmpEventData: event data of the utmp entry read.

    Raises:
        ParseError: if the entry cannot be parsed.
        UnableToParseFile: if the entry type is not supported.
    """
    entry_map = self._GetDataTypeMap('linux_libc6_utmp_entry')
    try:
        (entry, _) = self._ReadStructureFromFileObject(file_object, file_offset, entry_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError('Unable to parse utmp entry at offset: 0x{0:08x} with error: {1!s}.'.format(file_offset, exception))
    if (entry.type not in self._SUPPORTED_TYPES):
        raise errors.UnableToParseFile('Unsupported type: {0:d}'.format(entry.type))
    # Strings are fixed-size NUL-padded buffers; keep only the bytes before
    # the first NUL and decode them with the configured codepage.
    encoding = (parser_mediator.codepage or 'utf-8')
    try:
        username = entry.username.split(b'\x00')[0]
        username = username.decode(encoding)
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to decode username string')
        username = None
    try:
        terminal = entry.terminal.split(b'\x00')[0]
        terminal = terminal.decode(encoding)
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to decode terminal string')
        terminal = None
    # utmp uses '~' as the terminal for boot records.
    if (terminal == '~'):
        terminal = 'system boot'
    try:
        hostname = entry.hostname.split(b'\x00')[0]
        hostname = hostname.decode(encoding)
    except UnicodeDecodeError:
        parser_mediator.ProduceExtractionWarning('unable to decode hostname string')
        hostname = None
    # An empty hostname or the X display ':0' both mean a local login.
    if ((not hostname) or (hostname == ':0')):
        hostname = 'localhost'
    # IPv4 addresses only occupy the first 4 bytes; the remainder is zero.
    if (entry.ip_address[4:] == self._EMPTY_IP_ADDRESS[4:]):
        ip_address = self._FormatPackedIPv4Address(entry.ip_address[:4])
    else:
        ip_address = self._FormatPackedIPv6Address(entry.ip_address)
    event_data = UtmpEventData()
    event_data.hostname = hostname
    event_data.exit_status = entry.exit_status
    event_data.ip_address = ip_address
    event_data.offset = file_offset
    event_data.pid = entry.pid
    event_data.terminal = terminal
    event_data.terminal_identifier = entry.terminal_identifier
    event_data.type = entry.type
    event_data.username = username
    # The on-disk timestamp is split into seconds + microseconds.
    timestamp = (entry.microseconds + (entry.timestamp * definitions.MICROSECONDS_PER_SECOND))
    return (timestamp, event_data)
Reads an utmp entry. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. file_offset (int): offset of the data relative to the start of the file-like object. Returns: tuple: containing: int: timestamp, which contains the number of microseconds since January 1, 1970, 00:00:00 UTC. UtmpEventData: event data of the utmp entry read. Raises: ParseError: if the entry cannot be parsed.
codesearchnet
def log_cdf_laplace(x, name='log_cdf_laplace'):
    """Log Laplace distribution function, computed in a numerically stable way.

    Computes `Log[L(x)]` where `L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt`.
    Two exact branches are used depending on the sign of `x`:

    ```
    x <= 0: Log[L(x)] = Log[0.5] + x
    0 < x:  Log[L(x)] = Log[1 - 0.5 * e^{-x}]
    ```

    Args:
        x: `Tensor` of type `float32`, `float64`.
        name: Python string. A name for the operation
            (default="log_cdf_laplace").

    Returns:
        `Tensor` with `dtype=x.dtype`.

    Raises:
        TypeError: if `x.dtype` is not handled.
    """
    with ops.name_scope(name, values=[x]):
        x = ops.convert_to_tensor(x, name='x')
        # x <= 0 branch: exact closed form, no cancellation possible.
        lower_solution = -np.log(2.0) + x
        # exp(-|x|) cannot overflow; for x > 0 it equals exp(-x). Both
        # branches of where() are evaluated, so the argument must be safe
        # for all x.
        safe_exp_neg_x = math_ops.exp(-math_ops.abs(x))
        # x > 0 branch: log1p avoids catastrophic cancellation in
        # 1 - 0.5 * exp(-x) when x is large.
        upper_solution = math_ops.log1p(-0.5 * safe_exp_neg_x)
        return array_ops.where_v2(x < 0.0, lower_solution, upper_solution)
Log Laplace distribution function. This function calculates `Log[L(x)]`, where `L(x)` is the cumulative distribution function of the Laplace distribution, i.e. ```L(x) := 0.5 * int_{-infty}^x e^{-|t|} dt``` For numerical accuracy, `L(x)` is computed in different ways depending on `x`, ``` x <= 0: Log[L(x)] = Log[0.5] + x, which is exact 0 < x: Log[L(x)] = Log[1 - 0.5 * e^{-x}], which is exact ``` Args: x: `Tensor` of type `float32`, `float64`. name: Python string. A name for the operation (default="log_ndtr"). Returns: `Tensor` with `dtype=x.dtype`. Raises: TypeError: if `x.dtype` is not handled.
github-repos
def set_vector_catch(self, flags):
    """Sets vector catch bits of the processor.

    The CPU will jump to a vector if the given vector catch is active, and
    will enter a debug state. This has the effect of halting the CPU as
    well, meaning the CPU must be explicitly restarted.

    Args:
        self (JLink): the ``JLink`` instance
        flags: the vector catch bits to write

    Returns:
        ``None``

    Raises:
        JLinkException: on error.
    """
    status = self._dll.JLINKARM_WriteVectorCatch(flags)
    # The DLL signals failure with a negative return code.
    if status < 0:
        raise errors.JLinkException(status)
    return None
Sets vector catch bits of the processor. The CPU will jump to a vector if the given vector catch is active, and will enter a debug state. This has the effect of halting the CPU as well, meaning the CPU must be explicitly restarted. Args: self (JLink): the ``JLink`` instance Returns: ``None`` Raises: JLinkException: on error.
juraj-google-style
def cancel_merge_when_pipeline_succeeds(self, **kwargs):
    """Cancel "merge when the pipeline succeeds" for this merge request.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabMROnBuildSuccessError: If the server could not handle the
            request
    """
    url = '%s/%s/cancel_merge_when_pipeline_succeeds' % (
        self.manager.path, self.get_id())
    response = self.manager.gitlab.http_put(url, **kwargs)
    # Refresh local attributes with whatever the server returned.
    self._update_attrs(response)
Cancel merge when the pipeline succeeds. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabMROnBuildSuccessError: If the server could not handle the request
juraj-google-style
def recv(self, socket_, encoding=None):
    """Receive a msgpack-encoded message from the given socket.

    The wire format is an 8-byte ASCII length prefix followed by that many
    bytes of msgpack payload. Blocks until a full message was received.

    Args:
        socket_: The python socket object to use.
        encoding (str): The encoding to use for unpacking messages from the
            socket.

    Returns:
        The decoded message (a dict) if its 'status' field is 'ok'.

    Raises:
        TensorForceError: on a closed listener, short read, server error,
            missing 'status' field, or an empty data stream.
    """
    unpacker = msgpack.Unpacker(encoding=encoding)
    # First 8 bytes carry the total payload length as ASCII digits.
    response = socket_.recv(8)
    if (response == b''):
        raise TensorForceError(('No data received by socket.recv in call to method `recv` ' + '(listener possibly closed)!'))
    orig_len = int(response)
    received_len = 0
    # Read the payload in chunks of at most max_msg_len until complete,
    # feeding each chunk to the streaming unpacker.
    while True:
        data = socket_.recv(min((orig_len - received_len), self.max_msg_len))
        if (not data):
            raise TensorForceError('No data of len {} received by socket.recv in call to method `recv`!'.format((orig_len - received_len)))
        data_len = len(data)
        received_len += data_len
        unpacker.feed(data)
        if (received_len == orig_len):
            break
    # Keys may come back as str or bytes depending on the encoding, so both
    # variants are checked.
    for message in unpacker:
        sts = message.get('status', message.get(b'status'))
        if sts:
            if ((sts == 'ok') or (sts == b'ok')):
                return message
            else:
                raise TensorForceError('RemoteEnvironment server error: {}'.format(message.get('message', 'not specified')))
        else:
            raise TensorForceError("Message without field 'status' received!")
    raise TensorForceError('No message encoded in data stream (data stream had len={})'.format(orig_len))
Receives a message as msgpack-numpy encoded byte-string from the given socket object. Blocks until something was received. Args: socket_: The python socket object to use. encoding (str): The encoding to use for unpacking messages from the socket. Returns: The decoded (as dict) message received.
codesearchnet
def _ConvertDictToCollectionsCounter(cls, json_dict): collections_counter = collections.Counter() for (key, value) in iter(json_dict.items()): if (key == '__type__'): continue collections_counter[key] = value return collections_counter
Converts a JSON dict into a collections.Counter. The dictionary of the JSON serialized objects consists of: { '__type__': 'collections.Counter' ... } Here '__type__' indicates the object base type. In this case this should be 'collections.Counter'. The rest of the elements of the dictionary make up the preprocessing object properties. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: collections.Counter: counter.
codesearchnet
def get_config_dict(self, services, hostname=None):
    """JSON dict description of a protorpc.remote.Service in API format.

    Args:
        services: Either a single protorpc.remote.Service or a list of them
            that implements an api/version.
        hostname: string, Hostname of the API, to override the value set on
            the current service. Defaults to None.

    Returns:
        dict, The API descriptor document as a JSON dict.
    """
    # Accept a single service for convenience; normalize to a list before
    # type-checking.
    if (not isinstance(services, (tuple, list))):
        services = [services]
    endpoints_util.check_list_type(services, remote._ServiceClass, 'services', allow_none=False)
    return self.__api_descriptor(services, hostname=hostname)
JSON dict description of a protorpc.remote.Service in API format. Args: services: Either a single protorpc.remote.Service or a list of them that implements an api/version. hostname: string, Hostname of the API, to override the value set on the current service. Defaults to None. Returns: dict, The API descriptor document as a JSON dict.
codesearchnet
def from_sites(cls, sites, charge=None, validate_proximity=False, to_unit_cell=False):
    """Convenience constructor to make a Structure from a list of sites.

    Args:
        sites: Sequence of PeriodicSites. Sites must have the same lattice.
        charge: Overall charge of the structure. Defaults to None.
        validate_proximity (bool): Whether to check if there are sites that
            are less than 0.01 Ang apart. Defaults to False.
        to_unit_cell (bool): Whether to translate sites into the unit cell.

    Returns:
        (Structure) Note that missing site properties are set as None.

    Raises:
        ValueError: if `sites` is empty or the sites do not all share the
            same lattice.
    """
    if (len(sites) < 1):
        raise ValueError(('You need at least one site to construct a %s' % cls))
    prop_keys = []
    props = {}
    lattice = None
    for (i, site) in enumerate(sites):
        # All sites must share a single lattice; take it from the first one.
        if (not lattice):
            lattice = site.lattice
        elif (site.lattice != lattice):
            raise ValueError('Sites must belong to the same lattice')
        # Collect per-site properties into aligned lists; a property missing
        # on some sites is padded with None at those positions.
        for (k, v) in site.properties.items():
            if (k not in prop_keys):
                prop_keys.append(k)
                props[k] = ([None] * len(sites))
            props[k][i] = v
    for (k, v) in props.items():
        if any(((vv is None) for vv in v)):
            warnings.warn(('Not all sites have property %s. Missing values are set to None.' % k))
    return cls(lattice, [site.species for site in sites], [site.frac_coords for site in sites], charge=charge, site_properties=props, validate_proximity=validate_proximity, to_unit_cell=to_unit_cell)
Convenience constructor to make a Structure from a list of sites. Args: sites: Sequence of PeriodicSites. Sites must have the same lattice. charge: Overall charge of the structure. Defaults to None. validate_proximity (bool): Whether to check if there are sites that are less than 0.01 Ang apart. Defaults to False. to_unit_cell (bool): Whether to translate sites into the unit cell. Returns: (Structure) Note that missing properties are set as None.
codesearchnet
def matching_selectors(self, partial_selector):
    """Retrieves all selectors matching `partial_selector`.

    For instance, if "one.a.b" and "two.a.b" are stored, both
    `matching_selectors('b')` and `matching_selectors('a.b')` return them.
    If `partial_selector` exactly matches an existing complete selector,
    only that complete selector is returned.

    Args:
        partial_selector: The partial selector to find matches for.

    Returns:
        A list of selectors matching `partial_selector`.
    """
    # An exact match on a complete selector short-circuits the tree search.
    if partial_selector in self._selector_map:
        return [partial_selector]

    # The tree is keyed by selector components in reverse order, so walk it
    # from the last component of the query backwards.
    node = self._selector_tree
    for part in reversed(partial_selector.split('.')):
        if part not in node:
            return []
        node = node[part]

    # Depth-first search below the matched node, collecting every complete
    # selector stored under the terminal key.
    matches = []
    pending = [node]
    while pending:
        current = pending.pop()
        selector = current.get(_TERMINAL_KEY)
        pending.extend(
            child for key, child in current.items() if key != _TERMINAL_KEY)
        if selector:
            matches.append(selector)
    return matches
Retrieves all selectors matching `partial_selector`. For instance, if "one.a.b" and "two.a.b" are stored in a `SelectorMap`, both `matching_selectors('b')` and `matching_selectors('a.b')` will return them. In the event that `partial_selector` exactly matches an existing complete selector, only that complete selector is returned. For instance, if "a.b.c.d" and "c.d" are stored, `matching_selectors('c.d')` will return only `['c.d']`, while `matching_selectors('d')` will return both. Args: partial_selector: The partial selector to find matches for. Returns: A list of selectors matching `partial_selector`.
codesearchnet
def diag_part(self, name='diag_part'):
    """Efficiently get the [batch] diagonal part of this operator.

    If this operator has shape `[B1,...,Bb, M, N]`, this returns a `Tensor`
    `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where
    `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`.

    Args:
        name: A name for this `Op`.

    Returns:
        diag_part: A `Tensor` of same `dtype` as self.
    """
    # Delegate to the subclass implementation inside this operator's
    # name scope.
    with self._name_scope(name):
        return self._diag_part()
Efficiently get the [batch] diagonal part of this operator. If this operator has shape `[B1,...,Bb, M, N]`, this returns a `Tensor` `diagonal`, of shape `[B1,...,Bb, min(M, N)]`, where `diagonal[b1,...,bb, i] = self.to_dense()[b1,...,bb, i, i]`. ``` my_operator = LinearOperatorDiag([1., 2.]) # Efficiently get the diagonal my_operator.diag_part() ==> [1., 2.] # Equivalent, but inefficient method tf.linalg.diag_part(my_operator.to_dense()) ==> [1., 2.] ``` Args: name: A name for this `Op`. Returns: diag_part: A `Tensor` of same `dtype` as self.
github-repos
def end(self):
    """The end of the logical line.

    Returns:
        A tuple of the ending line number and the column just past the last
        token's text.
    """
    last_token = self.last
    # The end column is the last token's start column plus its text length.
    return (last_token.lineno, last_token.column + len(last_token.value))
The end of the logical line. Returns: A tuple of the ending line number and column.
github-repos