code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def SetCodepage(self, codepage):
    """Sets the codepage.

    Args:
      codepage (str): codepage, e.g. "cp1252" or "utf-8".

    Raises:
      ValueError: if the codepage is not supported.
    """
    try:
        # Probe the codecs registry only; getencoder raises LookupError
        # for an unknown codepage and has no side effects.
        codecs.getencoder(codepage)
    except LookupError as exception:
        raise ValueError(
            'Unsupported codepage: {0:s}'.format(codepage)) from exception
    # Assign only after validation so the attribute never holds an
    # unsupported value.
    self._codepage = codepage
Sets the codepage. Args: codepage (str): codepage. Raises: ValueError: if the codepage is not supported.
juraj-google-style
def parse_page(raw_page):
    """Create a dictionary with title, id, and list of revisions.

    The dictionary contains:
        "title": a string
        "id": an integer
        "revisions": a list of strings

    Args:
        raw_page: a string

    Returns:
        a dictionary, or None in the case of an error.
    """
    page = {'title': get_title(raw_page), 'id': get_id(raw_page)}
    # A colon in the title indicates a namespaced page (presumably e.g.
    # "Talk:..."); such pages are skipped — TODO confirm with callers.
    if ':' in page['title']:
        return None
    page['revisions'] = get_revisions(raw_page)
    return page
Create a dictionary with title, id, and list of revisions. The dictionary contains: "title": a string "id": an integer "revisions": a list of strings Args: raw_page: a string Returns: a dictionary, or None in the case of an error.
codesearchnet
def on_created(self, event):
    """Handles a filesystem "create" event on a watched path.

    Args:
        event: Event to process.
    """
    path = event.src_path
    self._logger.debug('Detected create event on watched path: %s', path)
    self._process_event(event)
Function called every time a new file is created. Args: event: Event to process.
juraj-google-style
def getlibversion():
    """Get the library version info.

    Args:
        no argument

    Returns:
        4-element tuple: major version number (int), minor version
        number (int), complete library version number (int), and
        additional information (string).

    C library equivalent : Hgetlibversion
    """
    result = _C.Hgetlibversion()
    status = result[0]
    _checkErr('getlibversion', status, "cannot get lib version")
    # Drop the status code; return (major, minor, release, info).
    return result[1:]
Get the library version info. Args: no argument Returns: 4-element tuple with the following components: -major version number (int) -minor version number (int) -complete library version number (int) -additional information (string) C library equivalent : Hgetlibversion
juraj-google-style
# Builds one batch of images (and targets) for the indices in `index_array`.
# Per sample: load image from self.filepaths[j] -> array (data_format aware)
# -> optional random transform + standardize via self.image_data_generator
# -> batch_x[i]. Optionally writes augmented images to self.save_to_dir for
# debugging. batch_y depends on self.class_mode: 'input' copies batch_x;
# 'binary'/'sparse' take self.classes; 'categorical' one-hot encodes;
# 'multi_output'/'raw' index into self.labels; any other mode returns
# batch_x alone. Appends self.sample_weight[index_array] when set.
# NOTE(review): left byte-identical — transform/standardize ordering matters.
def _get_batches_of_transformed_samples(self, index_array): batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=self.dtype) filepaths = self.filepaths for i, j in enumerate(index_array): img = image_utils.load_img(filepaths[j], color_mode=self.color_mode, target_size=self.target_size, interpolation=self.interpolation, keep_aspect_ratio=self.keep_aspect_ratio) x = image_utils.img_to_array(img, data_format=self.data_format) if hasattr(img, 'close'): img.close() if self.image_data_generator: params = self.image_data_generator.get_random_transform(x.shape) x = self.image_data_generator.apply_transform(x, params) x = self.image_data_generator.standardize(x) batch_x[i] = x if self.save_to_dir: for i, j in enumerate(index_array): img = image_utils.array_to_img(batch_x[i], self.data_format, scale=True) fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix, index=j, hash=np.random.randint(10000000.0), format=self.save_format) img.save(os.path.join(self.save_to_dir, fname)) if self.class_mode == 'input': batch_y = batch_x.copy() elif self.class_mode in {'binary', 'sparse'}: batch_y = np.empty(len(batch_x), dtype=self.dtype) for i, n_observation in enumerate(index_array): batch_y[i] = self.classes[n_observation] elif self.class_mode == 'categorical': batch_y = np.zeros((len(batch_x), len(self.class_indices)), dtype=self.dtype) for i, n_observation in enumerate(index_array): batch_y[i, self.classes[n_observation]] = 1.0 elif self.class_mode == 'multi_output': batch_y = [output[index_array] for output in self.labels] elif self.class_mode == 'raw': batch_y = self.labels[index_array] else: return batch_x if self.sample_weight is None: return (batch_x, batch_y) else: return (batch_x, batch_y, self.sample_weight[index_array])
Gets a batch of transformed samples. Args: index_array: Array of sample indices to include in batch. Returns: A batch of transformed samples.
github-repos
def logaddexp(x1, x2):
    """Logarithm of the sum of exponentiations of the inputs.

    Calculates `log(exp(x1) + exp(x2))`.

    Args:
        x1: Input tensor.
        x2: Input tensor.

    Returns:
        Output tensor, element-wise logarithm of the sum of
        exponentiations of the inputs.
    """
    # Eager path: dispatch straight to the backend implementation.
    if not any_symbolic_tensors((x1, x2)):
        return backend.numpy.logaddexp(x1, x2)
    return Logaddexp().symbolic_call(x1, x2)
Logarithm of the sum of exponentiations of the inputs. Calculates `log(exp(x1) + exp(x2))`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logarithm of the sum of exponentiations of the inputs.
github-repos
def __setattr__(self, __key: Hashable, __value: Any) -> None:
    """Support item assignment via dot notation.

    Args:
        __key: Key to set value for.
        __value: Value to set key to.

    Raises:
        AttributeError: if item assignment fails for any reason.
    """
    try:
        self[__key] = __value
    except Exception as err:
        # Attribute protocol callers expect AttributeError; translate
        # whatever the mapping raised, keeping the original exception
        # chained as the cause for easier debugging.
        raise AttributeError(str(err)) from err
Support item assignment via dot notation. Args: __key: Key to set value for __value: Value to set key to
juraj-google-style
def start_queue_runners(self, sess, queue_runners=None):
    """Start threads for `QueueRunners`.

    Queue runners collected in the graph key `QUEUE_RUNNERS` are already
    started automatically with the supervisor's session, so this is only
    needed for non-collected queue runners.

    Args:
      sess: A `Session`.
      queue_runners: A list of `QueueRunners`. If not specified, the list
        gathered in the graph under `GraphKeys.QUEUE_RUNNERS` is used.

    Returns:
      The list of threads started for the `QueueRunners`.

    Raises:
      RuntimeError: If called with eager execution enabled.
    """
    if context.executing_eagerly():
        raise RuntimeError('Queues are not compatible with eager execution.')
    runners = queue_runners
    if runners is None:
        runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
    started = []
    for runner in runners:
        started += runner.create_threads(
            sess, coord=self._coord, daemon=True, start=True)
    return started
Start threads for `QueueRunners`. Note that the queue runners collected in the graph key `QUEUE_RUNNERS` are already started automatically when you create a session with the supervisor, so unless you have non-collected queue runners to start you do not need to call this explicitly. Args: sess: A `Session`. queue_runners: A list of `QueueRunners`. If not specified, we'll use the list of queue runners gathered in the graph under the key `GraphKeys.QUEUE_RUNNERS`. Returns: The list of threads started for the `QueueRunners`. Raises: RuntimeError: If called with eager execution enabled. @compatibility(eager) Queues are not compatible with eager execution. To ingest data when eager execution is enabled, use the `tf.data` API. @end_compatibility
github-repos
def __init__(self, api_key):
    """Initialize a new HelpScout client.

    Args:
        api_key (str): The API key to use for this session.
    """
    session = Session()
    # Help Scout basic auth uses the API key as the username; the
    # password is ignored, hence the placeholder value.
    session.auth = HTTPBasicAuth(api_key, 'NoPassBecauseKey!')
    self.session = session
    self._load_apis()
Initialize a new HelpScout client. Args: api_key (str): The API key to use for this session.
juraj-google-style
def _strip_leading_zeros(coeffs, threshold=_COEFFICIENT_THRESHOLD):
    r"""Strip numerically-zero highest-order coefficients from a polynomial.

    .. note::
        This assumes the polynomial defined by ``coeffs`` has been
        normalized (via :func:`.normalize_polynomial`).

    Args:
        coeffs (numpy.ndarray): ``d + 1``-array of coefficients in
            monomial / power basis.
        threshold (Optional[float]): The point below which a coefficient
            is considered numerically zero.

    Returns:
        numpy.ndarray: The same coefficients without unnecessary zeros.
    """
    result = coeffs
    # Drop trailing entries (highest-degree terms) while they are below
    # the threshold in absolute value.
    while np.abs(result[-1]) < threshold:
        result = result[:-1]
    return result
r"""Strip leading zero coefficients from a polynomial. .. note:: This assumes the polynomial :math:`f` defined by ``coeffs`` has been normalized (via :func:`.normalize_polynomial`). Args: coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial / power basis. threshold (Optional[float]): The point :math:`\tau` below which a coefficient will be considered to be numerically zero. Returns: numpy.ndarray: The same coefficients without any unnecessary zero terms.
codesearchnet
def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-08, use_locking=False, name='Adadelta'):
    """Construct a new Adadelta optimizer.

    Args:
      learning_rate: A `Tensor` or a floating point value. The learning
        rate. To match the exact form in the original paper use 1.0.
      rho: A `Tensor` or a floating point value. The decay rate.
      epsilon: A `Tensor` or a floating point value. A constant epsilon
        used to better conditioning the grad update.
      use_locking: If `True` use locks for update operations.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "Adadelta".
    """
    super(AdadeltaOptimizer, self).__init__(use_locking, name)
    # Raw hyperparameter values as passed by the caller.
    self._lr, self._rho, self._epsilon = learning_rate, rho, epsilon
    # Tensor-valued copies; presumably populated later by _prepare() —
    # not visible here, confirm in the enclosing class.
    self._lr_t = self._rho_t = self._epsilon_t = None
Construct a new Adadelta optimizer. Args: learning_rate: A `Tensor` or a floating point value. The learning rate. To match the exact form in the original paper use 1.0. rho: A `Tensor` or a floating point value. The decay rate. epsilon: A `Tensor` or a floating point value. A constant epsilon used to better conditioning the grad update. use_locking: If `True` use locks for update operations. name: Optional name prefix for the operations created when applying gradients. Defaults to "Adadelta".
github-repos
# Auto-generated gRPC stub constructor for google.pubsub.v1.Publisher.
# Binds one unary-unary callable per RPC (CreateTopic, UpdateTopic, Publish,
# GetTopic, ListTopics, ListTopicSubscriptions, ListTopicSnapshots,
# DeleteTopic), pairing each fully-qualified method path with its protobuf
# request serializer and response deserializer.
# Args: channel: A grpc.Channel.
# NOTE(review): generated code — left byte-identical.
def __init__(self, channel): self.CreateTopic = channel.unary_unary( "/google.pubsub.v1.Publisher/CreateTopic", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString, ) self.UpdateTopic = channel.unary_unary( "/google.pubsub.v1.Publisher/UpdateTopic", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.UpdateTopicRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString, ) self.Publish = channel.unary_unary( "/google.pubsub.v1.Publisher/Publish", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.PublishRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.PublishResponse.FromString, ) self.GetTopic = channel.unary_unary( "/google.pubsub.v1.Publisher/GetTopic", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.GetTopicRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.Topic.FromString, ) self.ListTopics = channel.unary_unary( "/google.pubsub.v1.Publisher/ListTopics", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicsResponse.FromString, ) self.ListTopicSubscriptions = channel.unary_unary( "/google.pubsub.v1.Publisher/ListTopicSubscriptions", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSubscriptionsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSubscriptionsResponse.FromString, ) self.ListTopicSnapshots = channel.unary_unary( "/google.pubsub.v1.Publisher/ListTopicSnapshots", 
request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSnapshotsRequest.SerializeToString, response_deserializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.ListTopicSnapshotsResponse.FromString, ) self.DeleteTopic = channel.unary_unary( "/google.pubsub.v1.Publisher/DeleteTopic", request_serializer=google_dot_cloud_dot_pubsub__v1_dot_proto_dot_pubsub__pb2.DeleteTopicRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def load_recipe(self, recipe):
    """Populates the internal module pool with modules declared in a recipe.

    Args:
        recipe: Dict, recipe declaring modules to load.
    """
    self.recipe = recipe
    for declaration in recipe['modules']:
        name = declaration['name']
        # config.get_module returns the module class; instantiate it
        # with this object as its parent environment.
        module_class = self.config.get_module(name)
        self._module_pool[name] = module_class(self)
Populates the internal module pool with modules declared in a recipe. Args: recipe: Dict, recipe declaring modules to load.
codesearchnet
# Applies a list of functions across `axis` of the frame (Modin query
# compiler). Wraps the list-apply in pandas.DataFrame so the result stays
# 2-D, maps it across the full axis, then relabels the result axis with the
# function names (each function's __name__ unless it was passed as a
# string). Returns a new query compiler via self.__constructor__.
# NOTE(review): left byte-identical — the axis/label pairing is subtle.
def _list_like_func(self, func, axis, *args, **kwargs): func_prepared = self._prepare_method((lambda df: pandas.DataFrame(df.apply(func, axis, *args, **kwargs)))) new_data = self._map_across_full_axis(axis, func_prepared) new_index = ([(f if isinstance(f, string_types) else f.__name__) for f in func] if (axis == 0) else self.index) new_columns = ([(f if isinstance(f, string_types) else f.__name__) for f in func] if (axis == 1) else self.columns) return self.__constructor__(new_data, new_index, new_columns)
Apply list-like function across given axis. Args: func: The function to apply. axis: Target axis to apply the function along. Returns: A new PandasQueryCompiler.
codesearchnet
def weighting_function(max_num_bins: int, up: torch.Tensor, reg_scale: int) -> torch.Tensor:
    """Generates the non-uniform Weighting Function W(n) for bounding box regression.

    Args:
        max_num_bins (int): Max number of the discrete bins.
        up (Tensor): Controls upper bounds of the sequence, where maximum
            offset is +/- up * H / W.
        reg_scale (float): Controls the curvature of the Weighting Function.
            Larger values result in flatter weights near the central axis
            W(max_num_bins/2)=0 and steeper weights at both ends.

    Returns:
        Tensor: Sequence of Weighting Function.
    """
    upper_bound1 = abs(up[0]) * abs(reg_scale)
    upper_bound2 = abs(up[0]) * abs(reg_scale) * 2
    # Geometric step chosen so the max_num_bins - 2 interior bins span the
    # first upper bound symmetrically around zero.
    step = (upper_bound1 + 1) ** (2 / (max_num_bins - 2))
    # NOTE(review): both range() calls were truncated in the extracted
    # source; reconstructed so the sequence is symmetric with
    # W(max_num_bins / 2) = 0, matching the documented contract.
    left_values = [(-(step ** i) + 1).reshape(1) for i in range(max_num_bins // 2 - 1, 0, -1)]
    right_values = [(step ** i - 1).reshape(1) for i in range(1, max_num_bins // 2)]
    values = (
        [(-upper_bound2).reshape(1)]
        + left_values
        + [torch.zeros_like(up[0][None])]
        + right_values
        + [upper_bound2.reshape(1)]
    )
    return torch.cat(values, 0)
Generates the non-uniform Weighting Function W(n) for bounding box regression. Args: max_num_bins (int): Max number of the discrete bins. up (Tensor): Controls upper bounds of the sequence, where maximum offset is ±up * H / W. reg_scale (float): Controls the curvature of the Weighting Function. Larger values result in flatter weights near the central axis W(max_num_bins/2)=0 and steeper weights at both ends. Returns: Tensor: Sequence of Weighting Function.
github-repos
def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:
    """Registers one or more classes with a YAtiML loader.

    Once a class has been registered, it can be recognized and
    constructed when reading a YAML text.

    Args:
        loader_cls: The loader to register the classes with.
        classes: The class(es) to register, a plain Python class or a
            list of them.
    """
    if not isinstance(classes, list):
        classes = [classes]
    for cls in classes:
        tag = '!{}'.format(cls.__name__)
        # Pick the constructor variant matching the class kind.
        if issubclass(cls, enum.Enum):
            constructor = EnumConstructor(cls)
        elif issubclass(cls, (str, UserString)):
            constructor = UserStringConstructor(cls)
        else:
            constructor = Constructor(cls)
        loader_cls.add_constructor(tag, constructor)
        if not hasattr(loader_cls, '_registered_classes'):
            loader_cls._registered_classes = dict()
        loader_cls._registered_classes[tag] = cls
Registers one or more classes with a YAtiML loader. Once a class has been registered, it can be recognized and \ constructed when reading a YAML text. Args: loader_cls: The loader to register the classes with. classes: The class(es) to register, a plain Python class or a \ list of them.
juraj-google-style
def seconds(value: Union[int, float]) -> Duration:
    """Converts input value from seconds to a `Duration` in seconds.

    Since a `Duration` is equivalent to a `float` value in seconds, this
    does nothing other than casting the input to `float`. It may be used
    to make code more explicit.

    Args:
        value: Number of seconds.

    Returns:
        Same number of seconds, as a float.
    """
    # Duration is a float alias, so a plain cast is sufficient.
    return float(value)
Converts input value from seconds to a `Duration` in seconds. Since the `Duration` object is equivalent to a `float` value in seconds, this method does nothing else than casting the input to `float`. It may be used in order to make the code more explicit. Explicit time units: ```python >>> duration = tp.duration.seconds(3) >>> duration 3.0 >>> # Usage in a window operation >>> a = tp.event_set( ... timestamps=[1, 2, 6], ... features={"f1": [1, 5, -5]}, ... ) >>> a.moving_sum(window_length=duration) indexes: ... timestamps: [1. 2. 6.] 'f1': [ 1 6 -5] ... ``` Args: value: Number of seconds. Returns: Same number of seconds.
github-repos
def deprecate_moved_module(deprecated_name, new_module, deletion_version):
    """Logs a warning when a module that has been moved is used.

    Intended to be assigned to the old module's `__getattr__`.

    Args:
      deprecated_name: Name of old module.
      new_module: Module to replace the old module.
      deletion_version: Version in which the old module will be removed.

    Returns:
      A function that logs a warning and returns the symbol from the new
      module.
    """
    def getter(attr_name):
        # Warn at most once per deprecated module, and only while
        # deprecation warnings are globally enabled.
        if _PRINT_DEPRECATION_WARNINGS and getter not in _PRINTED_WARNING:
            _PRINTED_WARNING[getter] = True
            _log_deprecation('Please fix your imports. Module %s has been moved to %s. The old module will be deleted in version %s.', deprecated_name, new_module.__name__, deletion_version)
        return getattr(new_module, attr_name)
    return getter
Logs a warning when a module that has been moved to a new location is used. Copy the following code into the old module: ``` import deprecation import new_module __getattr__ = deprecation.deprecate_moved_module( __name__, new_module, "2.9") # adjust version number. ``` Args: deprecated_name: Name of old module. new_module: Module to replace the old module. deletion_version: Version of TensorFlow in which the old module will be removed. Returns: A function that logs a warning and returns the symbol from the new module. Set this function as the module's `__getattr__`.
github-repos
def pretty_print_fhir_to_json_string(fhir_proto: message.Message, *, indent_size: int=2) -> str:
    """Returns a FHIR JSON representation with spaces and newlines.

    Args:
        fhir_proto: The proto to serialize into a "pretty" JSON string.
        indent_size: Size of space indentation for lexical scoping.
            Defaults to 2.

    Returns:
        A FHIR JSON string representation with spaces and newlines.
    """
    pretty = _json_printer.JsonPrinter.pretty_printer(
        _PRIMITIVE_HANDLER, indent_size=indent_size)
    return pretty.print(fhir_proto)
Returns a FHIR JSON representation with spaces and newlines. Args: fhir_proto: The proto to serialize into a "pretty" JSON string. indent_size: An integer denoting the size of space indentation for lexical scoping. Defaults to 2. Returns: A FHIR JSON string representation with spaces and newlines.
github-repos
# Creates a handler whose co-routine yields once with the handler's
# positional args forwarded to `delegate` as a Transition. The first bare
# `yield` primes the co-routine (its received data is ignored); the second
# yield sends the Transition into the delegate and yields its reply.
# NOTE(review): left byte-identical — the prime/send ordering is part of
# the co-routine protocol the caller depends on.
def _create_delegate_handler(delegate): @coroutine def handler(*args): (yield) (yield delegate.send(Transition(args, delegate))) return handler
Creates a handler function that creates a co-routine that can yield once with the given positional arguments to the delegate as a transition. Args: delegate (Coroutine): The co-routine to delegate to. Returns: A :class:`callable` handler that returns a co-routine that ignores the data it receives and sends with the arguments given to the handler as a :class:`Transition`.
codesearchnet
def maybe_get_static_value(x, dtype=None):
    """Helper which tries to return a static value.

    Given `x`, extract its value statically, optionally casting to a
    specific dtype. If this is not possible, None is returned.

    Args:
      x: `Tensor` for which to extract a value statically.
      dtype: Optional dtype to cast to.

    Returns:
      Statically inferred value if possible, otherwise None.
    """
    if x is None:
        return x
    try:
        static_x = tf.get_static_value(x)
    except TypeError:
        # Not a tensor-like object; fall back to the value itself.
        static_x = x
    if static_x is None or dtype is None:
        return static_x
    return np.array(static_x, dtype)
Helper which tries to return a static value. Given `x`, extract its value statically, optionally casting to a specific dtype. If this is not possible, None is returned. Args: x: `Tensor` for which to extract a value statically. dtype: Optional dtype to cast to. Returns: Statically inferred value if possible, otherwise None.
juraj-google-style
def __init__(self, columns: list[str], min_value: float=0.0, max_value: float=1.0, name: Optional[str]=None):
    """Applies a scaling transformation on the given columns.

    The transformation scales input values to the range
    [min_value, max_value].

    Args:
        columns: A list of column names to apply the transformation on.
        min_value: The minimum value of the output range.
        max_value: The maximum value of the output range.
        name: A name for the operation (optional).

    Raises:
        ValueError: if max_value is not greater than min_value.
    """
    # Fail fast: reject an empty or inverted range before any state is
    # initialized, so a half-constructed object is never observed.
    if max_value <= min_value:
        raise ValueError('max_value must be greater than min_value')
    super().__init__(columns)
    self.min_value = min_value
    self.max_value = max_value
    self.name = name
This function applies a scaling transformation on the given columns of incoming data. The transformation scales the input values to the range [min_value, max_value]. Args: columns: A list of column names to apply the transformation on. min_value: The minimum value of the output range. max_value: The maximum value of the output range. name: A name for the operation (optional).
github-repos
# Applies a freshly-made wallet transaction to the in-memory coin set.
# - Every tx input must reference a known, confirmed, unspent coin;
#   otherwise returns False. Matching coins are flagged Spent and have
#   their Confirmed bit cleared.
# - Each tx output destined for this wallet becomes a new Unconfirmed coin
#   (additionally flagged WatchOnly for watch-only addresses).
# - For ClaimTransactions, each claimed coin is flagged Claimed and
#   un-Confirmed.
# Results are surfaced to subclasses via OnSaveTransaction(tx, added,
# changed, deleted). Returns True on success.
# NOTE(review): left byte-identical — the CoinState bit-mask order is
# load-bearing.
def SaveTransaction(self, tx): coins = self.GetCoins() changed = [] added = [] deleted = [] found_coin = False for input in tx.inputs: coin = None for coinref in coins: test_coin = coinref.Reference if (test_coin == input): coin = coinref if (coin is None): return False if ((coin.State & CoinState.Spent) > 0): return False elif ((coin.State & CoinState.Confirmed) == 0): return False coin.State |= CoinState.Spent coin.State &= (~ CoinState.Confirmed) changed.append(coin) for (index, output) in enumerate(tx.outputs): state = self.CheckAddressState(output.ScriptHash) key = CoinReference(tx.Hash, index) if ((state & AddressState.InWallet) > 0): newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Unconfirmed) self._coins[key] = newcoin if ((state & AddressState.WatchOnly) > 0): newcoin.State |= CoinState.WatchOnly added.append(newcoin) if isinstance(tx, ClaimTransaction): for claim in tx.Claims: claim_coin = self._coins[claim] claim_coin.State |= CoinState.Claimed claim_coin.State &= (~ CoinState.Confirmed) changed.append(claim_coin) self.OnSaveTransaction(tx, added, changed, deleted) return True
This method is used after a transaction has been made by this wallet. It updates the states of the coins in the wallet to reflect the new balance, but the coins remain in a ``CoinState.UNCONFIRMED`` state until the transaction has been processed by the network. The results of these updates can be used by overriding the ``OnSaveTransaction`` method, and, for example, persisting the results to a database. Args: tx (Transaction): The transaction that has been made by this wallet. Returns: bool: True if successfully processed, otherwise False if input is not in the coin list, already spent or not confirmed.
codesearchnet
def __init__(self, metadata=0, metadata_mask=0):
    """Create InstructionWriteMetadata with the optional parameters below.

    Args:
        metadata (int): Metadata value to write.
        metadata_mask (int): Metadata write bitmask.
    """
    super().__init__(InstructionType.OFPIT_WRITE_METADATA)
    self.metadata, self.metadata_mask = metadata, metadata_mask
Create InstructionWriteMetadata with the optional parameters below. Args: metadata (int): Metadata value to write. metadata_mask (int): Metadata write bitmask.
juraj-google-style
def run_profilers(run_object, prof_config, verbose=False):
    """Runs profilers on run_object.

    Args:
        run_object: An object (string or tuple) for profiling.
        prof_config: A string with profilers configuration.
        verbose: True if info about running profilers should be shown.

    Returns:
        An ordered dictionary with collected stats.

    Raises:
        AmbiguousConfigurationError: when prof_config is ambiguous.
        BadOptionError: when unknown options are present in configuration.
    """
    # Duplicate option letters make the configuration ambiguous.
    if len(prof_config) > len(set(prof_config)):
        raise AmbiguousConfigurationError(
            'Profiler configuration %s is ambiguous' % prof_config)
    available = {option for option, _ in _PROFILERS}
    for option in prof_config:
        if option not in available:
            raise BadOptionError('Unknown option: %s' % option)
    run_stats = OrderedDict()
    # Run the requested profilers in _PROFILERS order.
    for option, profiler_cls in _PROFILERS:
        if option not in prof_config:
            continue
        curr_profiler = profiler_cls(run_object)
        if verbose:
            print('Running %s...' % curr_profiler.__class__.__name__)
        run_stats[option] = curr_profiler.run()
    return run_stats
Runs profilers on run_object. Args: run_object: An object (string or tuple) for profiling. prof_config: A string with profilers configuration. verbose: True if info about running profilers should be shown. Returns: An ordered dictionary with collected stats. Raises: AmbiguousConfigurationError: when prof_config is ambiguous. BadOptionError: when unknown options are present in configuration.
codesearchnet
def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
    """Create a mask from the two sequences passed for sequence-pair tasks.

    XLMProphetNet does not make use of token type ids, therefore a list
    of zeros is returned.

    Args:
        token_ids_0 (`List[int]`): List of IDs.
        token_ids_1 (`List[int]`, *optional*): Optional second list of
            IDs for sequence pairs.

    Returns:
        `List[int]`: List of zeros.
    """
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        # Single sequence: tokens + one separator.
        total = len(token_ids_0) + len(sep)
    else:
        # Pair: tokens_0 + sep + sep + tokens_1 + sep.
        total = len(token_ids_0) + 3 * len(sep) + len(token_ids_1)
    return [0] * total
Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros.
github-repos
def broadcast_structure(to_structure, from_structure):
    """Broadcasts `from_structure` to `to_structure`.

    If `from_structure` is a singleton, it is tiled to match the
    structure of `to_structure` (elements are shared, not copied).
    Useful for downstream `zip` or `tf.nest.map_structure`.

    Args:
      to_structure: A structure.
      from_structure: A structure.

    Returns:
      new_from_structure: Same structure as `to_structure`.
    """
    flat_from = tf.nest.flatten(from_structure)
    if len(flat_from) != 1:
        # Not a singleton: return unchanged.
        return from_structure
    only_element = flat_from[0]
    return tf.nest.map_structure(lambda _: only_element, to_structure)
Broadcasts `from_structure` to `to_structure`. This is useful for downstream usage of `zip` or `tf.nest.map_structure`. If `from_structure` is a singleton, it is tiled to match the structure of `to_structure`. Note that the elements in `from_structure` are not copied if this tiling occurs. Args: to_structure: A structure. from_structure: A structure. Returns: new_from_structure: Same structure as `to_structure`. #### Example: ```python a_structure = ['a', 'b', 'c'] b_structure = broadcast_structure(a_structure, 'd') # -> ['d', 'd', 'd'] c_structure = tf.nest.map_structure( lambda a, b: a + b, a_structure, b_structure) # -> ['ad', 'bd', 'cd'] ```
codesearchnet
def __init__(self, item_cls, data):
    """Initialization of the list.

    Args:
        item_cls (str): Object class matching the list items.
        data (str or dict): A dictionary or raw JSON string that is
            returned by a request.
    """
    super(ResourceList, self).__init__()
    if data is not None:
        # Accept either a raw JSON string or an already-parsed dict.
        # NOTE: an exact `type` check is intentional here; dict
        # subclasses would otherwise be re-parsed.
        if type(data) is not dict:
            data = json.loads(data)
        paging = data['list_info']
        raw_items = data.get(self.items_keys[item_cls.__name__])
        for raw_item in (raw_items or []):
            self.append(item_cls(raw_item))
        self.page = paging['page']
        self.num_pages = paging['num_pages']
        self.num_results = paging['num_results']
        self.page_size = paging['page_size']
Initialization of the list Args: item_cls (str): Object class matching the list items data (str or dict): A dictionary or raw JSON string that is returned by a request.
juraj-google-style
def __init__(self, resolver_context, file_object=None):
    """Initializes a file-like object.

    Args:
        resolver_context (Context): resolver context.
        file_object (Optional[FileIO]): file-like object.

    Raises:
        ValueError: when file_object is set.
    """
    # This class manages its own file objects; a caller-supplied one is
    # rejected (truthiness check mirrors the base API convention).
    if file_object:
        raise ValueError('File object value set.')
    super(RawFile, self).__init__(resolver_context)
    self._file_objects = []
Initializes a file-like object. Args: resolver_context (Context): resolver context. file_object (Optional[FileIO]): file-like object. Raises: ValueError: when file_object is set.
juraj-google-style
def write_bit(self, registeraddress, value, functioncode=5):
    """Write one bit to the slave.

    Args:
        registeraddress (int): The slave register address
            (use decimal numbers, not hex).
        value (int): 0 or 1.
        functioncode (int): Modbus function code. Can be 5 or 15.

    Returns:
        None

    Raises:
        ValueError, TypeError, IOError
    """
    # Validate arguments first (check order is part of the error
    # contract), then issue the generic Modbus command.
    _checkFunctioncode(functioncode, [5, 15])
    _checkInt(value, minvalue=0, maxvalue=1, description='input value')
    self._genericCommand(functioncode, registeraddress, value)
Write one bit to the slave. Args: * registeraddress (int): The slave register address (use decimal numbers, not hex). * value (int): 0 or 1 * functioncode (int): Modbus function code. Can be 5 or 15. Returns: None Raises: ValueError, TypeError, IOError
juraj-google-style
def logical_or(x1, x2):
    """Computes the element-wise logical OR of the given input tensors.

    Zeros are treated as `False` and non-zeros are treated as `True`.

    Args:
        x1: Input tensor.
        x2: Input tensor.

    Returns:
        Output tensor, element-wise logical OR of the inputs.
    """
    # Eager path: dispatch straight to the backend implementation.
    if not any_symbolic_tensors((x1, x2)):
        return backend.numpy.logical_or(x1, x2)
    return LogicalOr().symbolic_call(x1, x2)
Computes the element-wise logical OR of the given input tensors. Zeros are treated as `False` and non-zeros are treated as `True`. Args: x1: Input tensor. x2: Input tensor. Returns: Output tensor, element-wise logical OR of the inputs.
github-repos
def _call_api(self, verb, url, **request_kwargs):
    """Perform a github API call.

    Args:
        verb (str): Can be "post", "put", or "get".
        url (str): The base URL with a leading slash for Github API (v3).
        **request_kwargs: Extra keyword arguments forwarded to requests;
            a 'headers' entry is merged with the auth header.
    """
    # NOTE(review): the base-URL string literal was truncated in the
    # extracted source; reconstructed from the docstring ("base URL with
    # a leading slash for Github API (v3)").
    api = 'https://api.github.com{}'.format(url)
    auth_headers = {'Authorization': 'token {}'.format(self.api_token)}
    # Caller-supplied headers take precedence over the auth header.
    headers = {**auth_headers, **request_kwargs.pop('headers', {})}
    return getattr(requests, verb)(api, headers=headers, **request_kwargs)
Perform a github API call Args: verb (str): Can be "post", "put", or "get" url (str): The base URL with a leading slash for Github API (v3) auth (str or HTTPBasicAuth): A Github API token or a HTTPBasicAuth object
juraj-google-style
# Builds the SQL compiler for a PostgreSQL upsert (INSERT .. ON CONFLICT).
# Validates that every row dict has the same number of fields as the first
# row (raising SuspiciousOperation otherwise), materializes model
# instances, then configures a PostgresInsertQuery with the conflict
# action/target/index predicate already set on this queryset.
# Returns the PostgresInsertCompiler bound to self.db.
def _build_insert_compiler(self, rows: List[Dict]): objs = [] field_count = len(rows[0]) for index, row in enumerate(rows): if field_count != len(row): raise SuspiciousOperation(( 'In bulk upserts, you cannot have rows with different field ' 'configurations. Row {0} has a different field config than ' 'the first row.' ).format(index)) objs.append(self.model(**row)) self._for_write = True insert_fields, update_fields = self._get_upsert_fields(rows[0]) query = PostgresInsertQuery(self.model) query.conflict_action = self.conflict_action query.conflict_target = self.conflict_target query.index_predicate = self.index_predicate query.values(objs, insert_fields, update_fields) connection = django.db.connections[self.db] compiler = PostgresInsertCompiler(query, connection, self.db) return compiler
Builds the SQL compiler for a insert query. Arguments: rows: A list of dictionaries, where each entry describes a record to insert. Returns: The SQL compiler for the insert.
juraj-google-style
def to_yaml(obj):
    """Returns a YAML representation of a UAVCAN structure.

    Handles a message, request, or response, a DSDL entity (array or
    primitive), or a UAVCAN transfer, with a human-readable header for
    transfers.

    Args:
        obj: Object to convert.

    Returns:
        Unicode string containing YAML representation of the object.

    Raises:
        ValueError: if a transfer-bearing object has no recognized payload.
    """
    if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'):
        if hasattr(obj, 'message'):
            payload = obj.message
            header = 'Message'
        elif hasattr(obj, 'request'):
            payload = obj.request
            header = 'Request'
        elif hasattr(obj, 'response'):
            payload = obj.response
            header = 'Response'
        else:
            raise ValueError('Cannot generate YAML representation for %r' % type(obj))
        # NOTE(review): the prefix format string was truncated in the
        # extracted source; reconstructed to match the five values
        # interpolated below (header, source, destination, timestamps).
        prefix = '### %s from %s to %s  ts_mono=%.6f  ts_real=%.6f\n' % (
            header,
            obj.transfer.source_node_id or 'Anon',
            obj.transfer.dest_node_id or 'All',
            obj.transfer.ts_monotonic,
            obj.transfer.ts_real)
        return prefix + _to_yaml_impl(payload)
    else:
        return _to_yaml_impl(obj)
This function returns correct YAML representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit. Args: obj: Object to convert. Returns: Unicode string containing YAML representation of the object.
juraj-google-style
# Decorator adding tf.SparseTensor / tf.IndexedSlices support to a
# non-zero-preserving element-wise unary op. The wrapped op runs on the
# stored values only; positions absent from the sparse input are filled
# with `default_value`, which must equal func(0) for correctness.
# SparseTensor inputs are densified via sparse_to_dense; IndexedSlices
# inputs fill a dense tensor with the default and scatter the transformed
# rows back at x.indices. Dense inputs pass straight through to func.
def densifying_unary(default_value): def wrap_densifying_unary(func): @functools.wraps(func) def sparse_wrapper(x, *args, **kwargs): if isinstance(x, tf.SparseTensor): sparse_output = sparse_with_values(x, func(x.values, *args, **kwargs)) return sparse_to_dense(sparse_output, tf.cast(default_value, sparse_output.values.dtype)) elif isinstance(x, tf.IndexedSlices): sparse_output_values = func(x.values, *args, **kwargs) output = tf.fill(x.dense_shape, tf.cast(default_value, sparse_output_values.dtype)) return tf.tensor_scatter_nd_update(output, tf.expand_dims(x.indices, 1), sparse_output_values) return func(x, *args, **kwargs) return sparse_wrapper return wrap_densifying_unary
Decorator to add support for `tf.SparseTensor` and `tf.IndexedSlices` to a non-zero-preserving element-wise unary operator. There are requirements on the operator for this decorator to work correctly: - The operator must be element-wise - The operator must be unary (one input tensor and one output tensor) - The operator must return a tensor of the same shape. Additional arguments to the function (besides the input tensor) are supported. The returned result is a dense tensor and contains `default_value` outside of the indices of the input tensor. Args: default_value: The value to use outside of indices. It must be the value that the operator returns for zero values. Returns: Wrapped function that supports `tf.SparseTensor` and `tf.IndexedSlices`.
github-repos
def query(self, query):
    """Returns an iterable of objects matching criteria expressed in `query`.

    De-serializes values on the way out, using a lazy generator so the
    cost of de-serialization is only paid for results actually iterated.

    Args:
        query: Query object describing the objects to return.

    Returns:
        iterable cursor with all objects matching criteria.
    """
    cursor = self.child_datastore.query(query)
    # Wrap the underlying iterable so each value is de-serialized lazily
    # as the caller consumes it, rather than all at once.
    cursor._iterable = deserialized_gen(self.serializer, cursor._iterable)
    return cursor
Returns an iterable of objects matching criteria expressed in `query` De-serializes values on the way out, using a :ref:`deserialized_gen` to avoid incurring the cost of de-serializing all data at once, or ever, if iteration over results does not finish (subject to order generator constraint). Args: query: Query object describing the objects to return. Raturns: iterable cursor with all objects matching criteria
juraj-google-style
def update(self, webhookId, name=None, targetUrl=None, **request_parameters):
    """Update a webhook, by ID.

    Args:
        webhookId(basestring): The webhook ID.
        name(basestring): A user-friendly name for this webhook.
        targetUrl(basestring): The URL that receives POST requests for
            each event.
        **request_parameters: Additional request parameters (provides
            support for parameters that may be added in the future).

    Returns:
        Webhook: A Webhook object with the updated webhook details.

    Raises:
        TypeError: If the parameter types are incorrect.
        ApiError: If the Webex Teams cloud returns an error.
    """
    check_type(webhookId, basestring, may_be_none=False)
    check_type(name, basestring)
    check_type(targetUrl, basestring)
    payload = dict_from_items_with_values(
        request_parameters, name=name, targetUrl=targetUrl)
    json_data = self._session.put(API_ENDPOINT + '/' + webhookId, json=payload)
    return self._object_factory(OBJECT_TYPE, json_data)
Update a webhook, by ID. Args: webhookId(basestring): The webhook ID. name(basestring): A user-friendly name for this webhook. targetUrl(basestring): The URL that receives POST requests for each event. **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: Webhook: A Webhook object with the updated Webex Teams webhook details. Raises: TypeError: If the parameter types are incorrect. ApiError: If the Webex Teams cloud returns an error.
codesearchnet
# Beam PTransform that splits a PCollection into n partitions using a
# PartitionFn. ApplyPartitionFnFn routes each element to the tagged output
# named by partitionfn.partition_for(element, n, ...), raising ValueError
# on an out-of-range index. make_fn wraps plain callables in
# CallableWrapperPartitionFn. expand() reads n from the first positional
# argument and fans out via ParDo(...).with_outputs('0' .. str(n-1)).
class Partition(PTransformWithSideInputs): class ApplyPartitionFnFn(DoFn): def process(self, element, partitionfn, n, *args, **kwargs): partition = partitionfn.partition_for(element, n, *args, **kwargs) if not 0 <= partition < n: raise ValueError('PartitionFn specified out-of-bounds partition index: %d not in [0, %d)' % (partition, n)) yield pvalue.TaggedOutput(str(partition), element) def make_fn(self, fn, has_side_inputs): return fn if isinstance(fn, PartitionFn) else CallableWrapperPartitionFn(fn) def expand(self, pcoll): n = int(self.args[0]) args, kwargs = util.insert_values_in_args(self.args, self.kwargs, self.side_inputs) return pcoll | ParDo(self.ApplyPartitionFnFn(), self.fn, *args, **kwargs).with_outputs(*[str(t) for t in range(n)])
Split a PCollection into several partitions. Uses the specified PartitionFn to separate an input PCollection into the specified number of sub-PCollections. When apply()d, a Partition() PTransform requires the following: Args: partitionfn: a PartitionFn, or a callable with the signature described in CallableWrapperPartitionFn. n: number of output partitions. The result of this PTransform is a simple list of the output PCollections representing each of n partitions, in order.
github-repos
def design_stat_cooling(self, value='Cooling'):
    """Corresponds to IDD Field `design_stat_cooling`.

    Args:
        value (str): value for IDD Field `design_stat_cooling`.
            Accepted values are: "Cooling" (default). If `value` is None
            it will not be checked against the specification and is
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = str(value)
        except ValueError:
            raise ValueError('value {} need to be of type str for field `design_stat_cooling`'.format(value))
        if ',' in value:
            raise ValueError('value should not contain a comma for field `design_stat_cooling`')
        accepted = {'Cooling'}
        if value not in accepted:
            raise ValueError('value {} is not an accepted value for field `design_stat_cooling`'.format(value))
    self._design_stat_cooling = value
Corresponds to IDD Field `design_stat_cooling` Args: value (str): value for IDD Field `design_stat_cooling` Accepted values are: - Cooling Default value: Cooling if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def __init__(self, path):
    """Initialize the plist representation.

    Args:
        path (str): The absolute path of the plist.
    """
    self.me = Path(path)
    # Root plist node: a <dict> with an initially empty value list.
    self.tag = 'dict'
    self.value = []
Initialize the plist representation. Args: path (str): The absolute path of the plist.
juraj-google-style
def from_json(cls, key, scopes, subject=None):
    """Alternate constructor intended for using JSON format of private key.

    Args:
        key (dict): Parsed JSON with service account credentials.
        scopes (Union[str, collections.Iterable[str]]): List of
            permissions that the application requests.
        subject (str): The email address of the user for which the
            application is requesting delegated access.

    Returns:
        ServiceAccount
    """
    credentials_type = key['type']
    if credentials_type != 'service_account':
        raise ValueError('key: expected type service_account (got %s)' % credentials_type)
    email = key['client_email']
    private_key = OpenSSL.crypto.load_privatekey(
        OpenSSL.crypto.FILETYPE_PEM, key['private_key'])
    return cls(key=private_key, email=email, scopes=scopes, subject=subject)
Alternate constructor intended for using JSON format of private key. Args: key (dict) - Parsed JSON with service account credentials. scopes (Union[str, collections.Iterable[str]]) - List of permissions that the application requests. subject (str) - The email address of the user for which the application is requesting delegated access. Returns: ServiceAccount
codesearchnet
def str2fl(x):
    """Recurses through lists and converts lists of strings to floats.

    Empty strings become the string 'null'; thousands separators
    (commas) are stripped before conversion; non-numeric strings are
    kept as-is.

    Args:
        x: list of strings, or list of lists of strings.

    Returns:
        The converted list, or False when the first element is neither
        a string nor a list.
    """
    def _to_float(item):
        # Deals with odd string imports; converts to float when possible.
        if item == '':
            return 'null'
        if ',' in item:
            item = item.replace(',', '')
        try:
            return float(item)
        except (ValueError, TypeError):
            # Narrowed from a bare `except:` — float() on a cleaned
            # string/odd value can only raise these two.
            return item

    if isinstance(x[0], str):
        return [_to_float(item) for item in x]
    if isinstance(x[0], list):
        return [str2fl(sub) for sub in x]
    return False
Recurses through lists and converts lists of strings to floats. Args: x: string or list of strings.
codesearchnet
def searchFor(page, text, hit_max = 16, quads = False):
    """Search for a string on a page.

    Args:
        page: The document page to search (must not be orphaned).
        text: string to be searched for
        hit_max: maximum hits
        quads: return quads instead of rectangles

    Returns:
        A list of rectangles or quads, each containing one occurrence.
    """
    CheckParent(page)  # raises if the page was orphaned from its document
    display_list = page.getDisplayList()
    text_page = display_list.getTextPage()
    hits = text_page.search(text, hit_max = hit_max, quads = quads)
    # Drop the intermediate objects promptly, mirroring the original cleanup.
    display_list = None
    text_page = None
    return hits
Search for a string on a page. Args: text: string to be searched for hit_max: maximum hits quads: return quads instead of rectangles Returns: a list of rectangles or quads, each containing one occurrence.
juraj-google-style
def PureDotProductAttention(dropout=0.0, mode='train'):
    """Pure single-headed self-attention (no Dense transforms on input).

    Args:
        dropout: float: dropout rate
        mode: str: 'train' or 'eval'

    Returns:
        (init_fun, apply_fun) pair implementing the layer.
    """
    def init_fun(_, input_shapes):
        # Output keeps the query's leading dims but the value's depth.
        q_shape, _, v_shape, _ = input_shapes
        return q_shape[:-1] + (v_shape[-1],), ()

    def apply_fun(params, inputs, **kwargs):
        del params  # the layer is parameter-free
        q, k, v, mask = inputs
        return DotProductAttention(q, k, v, mask, dropout=dropout,
                                   mode=mode, rng=kwargs.get('rng', None))

    return init_fun, apply_fun
Pure single-headed self-attention. Args: dropout: float: dropout rate mode: str: 'train' or 'eval' Returns: Pure single-headed attention layer. (No Dense transforms on input.)
juraj-google-style
def add_region_location(self, region, locations=None, use_live=True):
    """Add all countries in a region to the dataset.

    If a 3 digit UNStats M49 region code is not provided, the value is
    parsed as a region name. Countries that are already added are ignored.

    Args:
        region (str): M49 region, intermediate region or subregion to add.
        locations (Optional[List[str]]): Valid locations list. Defaults to
            the list downloaded from HDX.
        use_live (bool): Try to use latest country data from the web
            rather than the file in the package. Defaults to True.

    Returns:
        bool: True if all countries in the region were added, False if
        any were already present.
    """
    countries = Country.get_countries_in_region(region, exception=HDXError,
                                                use_live=use_live)
    return self.add_country_locations(countries, locations=locations)
Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a region name. If any country is already added, it is ignored. Args: region (str): M49 region, intermediate region or subregion to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries in region added or False if any already present.
codesearchnet
def __init__(self, step):
    """Initialize the step from its pipeline yaml representation.

    Args:
        step: a string or a dict. This is the actual step as it exists
            in the pipeline yaml - a plain string for a simple step, or
            a dict for a complex step with decorators.
    """
    logger.debug("starting")
    # Decorator defaults; a complex (dict) step may override these below.
    self.description = None
    self.foreach_items = None
    self.in_parameters = None
    self.retry_decorator = None
    self.run_me = True
    self.skip_me = False
    self.swallow_me = False
    self.name = None
    self.while_decorator = None
    if isinstance(step, dict):
        # Complex step: 'name' is mandatory, everything else optional.
        self.name = step['name']
        logger.debug(f"{self.name} is complex.")
        self.in_parameters = step.get('in', None)
        self.description = step.get('description', None)
        if self.description:
            logger.info(f"{self.name}: {self.description}")
        self.foreach_items = step.get('foreach', None)
        retry_definition = step.get('retry', None)
        if retry_definition:
            self.retry_decorator = RetryDecorator(retry_definition)
        self.run_me = step.get('run', True)
        self.skip_me = step.get('skip', False)
        self.swallow_me = step.get('swallow', False)
        while_definition = step.get('while', None)
        if while_definition:
            self.while_decorator = WhileDecorator(while_definition)
    else:
        # Simple step: the yaml entry is just the module name.
        logger.debug(f"{step} is a simple string.")
        self.name = step
    self.module = pypyr.moduleloader.get_module(self.name)
    try:
        self.run_step_function = getattr(self.module, 'run_step')
    except AttributeError:
        # Module loaded but doesn't expose the required entry point.
        logger.error(f"The step {self.name} in module {self.module} "
                     "doesn't have a run_step(context) function.")
        raise
    logger.debug("done")
Initialize the class. No duh, huh?. You can happily expect the initializer to initialize all member attributes. Args: step: a string or a dict. This is the actual step as it exists in the pipeline yaml - which is to say it can just be a string for a simple step, or a dict for a complex step.
juraj-google-style
def orphan_entry(self, rval: RawObject) -> 'ArrayEntry':
    """Return an isolated entry of the receiver.

    Args:
        rval: Raw object to be used for the returned entry.
    """
    entry_value = self.entry_from_raw(rval)
    # Index 0 with empty sibling lists and no parent: the entry is detached.
    return ArrayEntry(0, EmptyList(), EmptyList(), entry_value, None, self,
                      entry_value.timestamp)
Return an isolated entry of the receiver. Args: rval: Raw object to be used for the returned entry.
codesearchnet
def lookup_rest_method(self, orig_request):
    """Look up the rest method for the currently-pending request.

    Args:
        orig_request: An ApiRequest, the original request from the user.
            Its ``method_name`` attribute is set as a side effect.

    Returns:
        A tuple of (method descriptor, parameters), or (None, None) if no
        method was found for the current request.
    """
    resolved = self.config_manager.lookup_rest_method(
        orig_request.path, orig_request.request_uri,
        orig_request.http_method)
    name, descriptor, params = resolved
    # Record the resolved name on the request for later dispatch/logging.
    orig_request.method_name = name
    return descriptor, params
Looks up and returns rest method for the currently-pending request. Args: orig_request: An ApiRequest, the original request from the user. Returns: A tuple of (method descriptor, parameters), or (None, None) if no method was found for the current request.
juraj-google-style
def addSources(self, *sources):
    """Add more ASN.1 MIB source repositories.

    MibCompiler.compile will invoke each configured source object, in
    order of addition, asking each to fetch the MIB module by name.

    Args:
        sources: reader object(s)

    Returns:
        Reference to itself (can be used for call chaining).
    """
    self._sources += list(sources)
    if debug.logger & debug.flagCompiler:
        debug.logger('current MIB source(s): %s' % ', '.join(
            str(src) for src in self._sources))
    return self
Add more ASN.1 MIB source repositories. MibCompiler.compile will invoke each of configured source objects in order of their addition asking each to fetch MIB module specified by name. Args: sources: reader object(s) Returns: reference to itself (can be used for call chaining)
juraj-google-style
def _get_snpeff_transcript(self, transcript_info):
    """Create a transcript based on a snpeff annotation.

    Args:
        transcript_info (dict): A dict with snpeff info.

    Returns:
        puzzle.models.Transcript: The created transcript.
    """
    # Map snpeff annotation keys onto the Transcript constructor kwargs.
    fields = {
        'hgnc_symbol': transcript_info.get('Gene_Name'),
        'transcript_id': transcript_info.get('Feature'),
        'ensembl_id': transcript_info.get('Gene_ID'),
        'biotype': transcript_info.get('Transcript_BioType'),
        'consequence': transcript_info.get('Annotation'),
        'exon': transcript_info.get('Rank'),
        'HGVSc': transcript_info.get('HGVS.c'),
        'HGVSp': transcript_info.get('HGVS.p'),
    }
    return Transcript(**fields)
Create a transcript based on the snpeff annotation Args: transcript_info (dict): A dict with snpeff info Returns: transcript (puzzle.models.Transcript): A Transcripts
codesearchnet
def _CopyFileObjectToTemporaryFile(self, file_object, temporary_file):
    """Copy the contents of a file-like object to a temporary file.

    Args:
        file_object (dfvfs.FileIO): file-like object to read from.
        temporary_file (file): temporary file to write to.
    """
    # Rewind first so the whole content is copied, then stream in chunks
    # to bound memory use.
    file_object.seek(0, os.SEEK_SET)
    while True:
        chunk = file_object.read(self._READ_BUFFER_SIZE)
        if not chunk:
            break
        temporary_file.write(chunk)
Copies the contents of the file-like object to a temporary file. Args: file_object (dfvfs.FileIO): file-like object. temporary_file (file): temporary file.
codesearchnet
def _generate_api_config_with_root(self, request):
    """Generate an API config rooted at the request's hostname.

    Uses the backend object and the ApiConfigGenerator to create an API
    config specific to the hostname of the incoming request. This allows
    flexible API configs for non-standard environments, e.g. localhost.

    Args:
        request: An ApiRequest, the transformed request sent to the
            Discovery API.

    Returns:
        The generated API config dict, or None when the requested
        api/version pair is unknown to the backend.
    """
    actual_root = self._get_actual_root(request)
    generator = api_config.ApiConfigGenerator()
    lookup_key = (request.body_json['api'], request.body_json['version'])
    service_factories = self._backend.api_name_version_map.get(lookup_key)
    if not service_factories:
        return None
    service_classes = [factory.service_class
                       for factory in service_factories]
    config_dict = generator.get_config_dict(service_classes,
                                            hostname=actual_root)
    # Cache each generated config under (name, version, root) so later
    # lookups against this hostname hit the saved copy.
    for config in config_dict.get('items', []):
        key_with_root = (config.get('name', ''),
                         config.get('version', ''), actual_root)
        self._config_manager.save_config(key_with_root, config)
    return config_dict
Generate an API config with a specific root hostname. This uses the backend object and the ApiConfigGenerator to create an API config specific to the hostname of the incoming request. This allows for flexible API configs for non-standard environments, such as localhost. Args: request: An ApiRequest, the transformed request sent to the Discovery API. Returns: A string representation of the generated API config.
codesearchnet
def set_back(self, x: int, y: int, r: int, g: int, b: int) -> None:
    """Set the background color of one cell.

    Args:
        x (int): X position to change.
        y (int): Y position to change.
        r (int): Red background color, from 0 to 255.
        g (int): Green background color, from 0 to 255.
        b (int): Blue background color, from 0 to 255.
    """
    # Cells are stored row-major in flat per-channel arrays.
    index = y * self.width + x
    self.back_r[index] = r
    self.back_g[index] = g
    self.back_b[index] = b
Set the background color of one cell. Args: x (int): X position to change. y (int): Y position to change. r (int): Red background color, from 0 to 255. g (int): Green background color, from 0 to 255. b (int): Blue background color, from 0 to 255.
juraj-google-style
def tanh(x):
    """Element-wise tanh.

    Args:
        x: A tensor or variable.

    Returns:
        A tensor.
    """
    return nn.tanh(x)
Element-wise tanh. Args: x: A tensor or variable. Returns: A tensor.
github-repos
def _slice_single_param(param, param_ndims_to_matrix_ndims, slices, batch_shape):
  """Slices into the batch shape of a single parameter.

  Args:
    param: The original parameter to slice; either a `Tensor` or an object
      with batch shape (LinearOperator).
    param_ndims_to_matrix_ndims: `int` number of right-most dimensions used
      for inferring matrix shape of the `LinearOperator`. For non-Tensor
      parameters, this is the number of this param's batch dimensions used
      by the matrix shape of the parent object.
    slices: iterable of slices received by `__getitem__`.
    batch_shape: The parameterized object's batch shape `Tensor`.

  Returns:
    new_param: Instance of the same type as `param`, batch-sliced according
      to `slices`.
  """
  # Broadcast the parameter against the full batch shape so the caller's
  # slices line up with the object's batch dimensions.
  param = _broadcast_parameter_with_batch_shape(param, param_ndims_to_matrix_ndims, array_ops.ones_like(batch_shape))
  if hasattr(param, 'batch_shape_tensor'):
    param_batch_shape = param.batch_shape_tensor()
  else:
    param_batch_shape = array_ops.shape(param)
  # Strip the trailing param_ndims_to_matrix_ndims dims: those belong to
  # the matrix part of the parameter, not its batch part.
  param_batch_rank = array_ops.size(param_batch_shape)
  param_batch_shape = param_batch_shape[:param_batch_rank - param_ndims_to_matrix_ndims]
  if tensor_util.constant_value(array_ops.size(batch_shape)) != 0 and tensor_util.constant_value(array_ops.size(param_batch_shape)) == 0:
    # Scalar-batch parameter under a non-scalar batch shape: slicing the
    # batch is a no-op for this parameter.
    return param
  param_slices = _sanitize_slices(slices, intended_shape=batch_shape, deficient_shape=param_batch_shape)
  # Extend the user's slices to also cover the matrix dims in full.
  if param_ndims_to_matrix_ndims > 0:
    if Ellipsis not in [slc for slc in slices if not tensor_util.is_tensor(slc)]:
      param_slices.append(Ellipsis)
    param_slices += [slice(None)] * param_ndims_to_matrix_ndims
  return param.__getitem__(tuple(param_slices))
Slices into the batch shape of a single parameter. Args: param: The original parameter to slice; either a `Tensor` or an object with batch shape (LinearOperator). param_ndims_to_matrix_ndims: `int` number of right-most dimensions used for inferring matrix shape of the `LinearOperator`. For non-Tensor parameters, this is the number of this param's batch dimensions used by the matrix shape of the parent object. slices: iterable of slices received by `__getitem__`. batch_shape: The parameterized object's batch shape `Tensor`. Returns: new_param: Instance of the same type as `param`, batch-sliced according to `slices`.
github-repos
def _find_op(graph: ops.Graph, op_name: Optional[str]) -> Optional[ops.Operation]:
  """Finds the operation with `op_name` in `graph`.

  Args:
    graph: The graph to find from.
    op_name: Name of the node. An empty string or None yields None.

  Returns:
    The operation that corresponds to `op_name`, or None iff `op_name`
    is an empty string or None.

  Raises:
    ValueError: `op_name` is malformed.
  """
  if op_name:
    found_op = graph.get_operation_by_name(op_name)
    logging.debug('Op found in the graph: %s', op_name)
    return found_op
  return None
Finds the operation with `op_name`. Args: graph: The graph to find from. op_name: Name of the node. Returns: The operation that corresponds to `op_name`. Returns None iff op_name is an empty string or None. Raises: ValueError: `op_name` is malformed.
github-repos
def delete_url(self, url, token=''):
    """Issue a DELETE request to a url with a user token.

    Args:
        url (str): The url to send the delete to.
        token (str): The authentication token; falls back to the
            instance's user token when empty.

    Returns:
        obj: Delete request (response) object.
    """
    if token == '':
        token = self._user_token
    headers = {'Authorization': 'Token {}'.format(token)}
    # NOTE(review): verify=False disables TLS certificate checking; kept
    # for behavioral parity, but worth revisiting.
    return requests.delete(url, headers=headers, verify=False)
Returns a delete request object taking in a url and user token. Arguments: url (str): The url to send the delete request to token (str): The authentication token Returns: obj: Delete request object
codesearchnet
def is_mod_class(mod, cls):
    """Check if a class in a module was declared in that module.

    Args:
        mod: the module
        cls: the class

    Returns:
        bool: True when `cls` is a class defined in `mod`.
    """
    if not inspect.isclass(cls):
        return False
    return inspect.getmodule(cls) == mod
Checks if a class in a module was declared in that module. Args: mod: the module cls: the class
juraj-google-style
def extraterrestrial_horizontal_radiation(self, value=9999.0):
    """Set IDD Field `extraterrestrial_horizontal_radiation`.

    Args:
        value (float): Field value in Wh/m2; must be >= 0.0. The missing
            value is 9999.0. ``None`` is stored unchecked and treated as
            a missing value.

    Raises:
        ValueError: If `value` cannot be coerced to float or is negative.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `extraterrestrial_horizontal_radiation`'.format(value))
        if value < 0.0:
            raise ValueError(
                'value need to be greater or equal 0.0 '
                'for field `extraterrestrial_horizontal_radiation`')
    self._extraterrestrial_horizontal_radiation = value
Corresponds to IDD Field `extraterrestrial_horizontal_radiation` Args: value (float): value for IDD Field `extraterrestrial_horizontal_radiation` Unit: Wh/m2 value >= 0.0 Missing value: 9999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _init_journal(self, permissive=True):
    """Add the initialization lines to the journal.

    By default adds the JrnObj variable and a timestamp to the journal
    contents.

    Args:
        permissive (bool): if True most errors in journal will not cause
            Revit to stop journal execution. Some still do.
    """
    # Millisecond-precision timestamp (microseconds trimmed to ms).
    stamp = datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")[:-3]
    self._add_entry(templates.INIT.format(time_stamp=stamp))
    if permissive:
        self._add_entry(templates.INIT_DEBUG)
Add the initialization lines to the journal. By default adds JrnObj variable and timestamp to the journal contents. Args: permissive (bool): if True most errors in journal will not cause Revit to stop journal execution. Some still do.
juraj-google-style
def _kl_dirichlet_dirichlet(d1, d2, name=None):
  """Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet.

  Args:
    d1: instance of a Dirichlet distribution object.
    d2: instance of a Dirichlet distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_dirichlet_dirichlet".

  Returns:
    Batchwise KL(d1 || d2)
  """
  with tf.name_scope(name or "kl_dirichlet_dirichlet"):
    # KL(d1||d2) = sum_i (a1_i - a2_i) * (digamma(a1_i) - digamma(sum_j a1_j))
    #              - log B(a1) + log B(a2)
    digamma_sum_d1 = tf.math.digamma(
        tf.reduce_sum(input_tensor=d1.concentration, axis=-1, keepdims=True))
    digamma_diff = tf.math.digamma(d1.concentration) - digamma_sum_d1
    concentration_diff = d1.concentration - d2.concentration

    return (
        tf.reduce_sum(input_tensor=concentration_diff * digamma_diff, axis=-1)
        - tf.math.lbeta(d1.concentration)
        + tf.math.lbeta(d2.concentration))
Batchwise KL divergence KL(d1 || d2) with d1 and d2 Dirichlet. Args: d1: instance of a Dirichlet distribution object. d2: instance of a Dirichlet distribution object. name: (optional) Name to use for created operations. default is "kl_dirichlet_dirichlet". Returns: Batchwise KL(d1 || d2)
juraj-google-style
def get_analysis_types(adapter, total_cases, institute_id=None, slice_query=None):
    """Return information about analysis types.

    Groups cases based on the analysis type of their individuals.

    Args:
        adapter(adapter.MongoAdapter)
        total_cases(int): Total number of cases.
        institute_id(str): Optional institute to restrict to.
        slice_query(str): Query to filter cases to obtain statistics for.

    Returns:
        analysis_types: array of dicts with name (analysis_type, str) and
        count (int) keys.
    """
    subquery = {}
    if institute_id and slice_query:
        subquery = adapter.cases(owner=institute_id, name_query=slice_query,
                                 yield_query=True)
    elif institute_id:
        subquery = adapter.cases(owner=institute_id, yield_query=True)
    elif slice_query:
        subquery = adapter.cases(name_query=slice_query, yield_query=True)

    query = {'$match': subquery}

    pipeline = []
    if query:
        pipeline.append(query)
    # One document per individual, then count per analysis type.
    pipeline.append({'$unwind': '$individuals'})
    pipeline.append({'$group': {
        '_id': '$individuals.analysis_type', 'count': {'$sum': 1}}})
    analysis_query = adapter.case_collection.aggregate(pipeline)
    return [{'name': group['_id'], 'count': group['count']}
            for group in analysis_query]
Return information about analysis types. Group cases based on analysis type for the individuals. Args: adapter(adapter.MongoAdapter) total_cases(int): Total number of cases institute_id(str) slice_query(str): Query to filter cases to obtain statistics for. Returns: analysis_types array of hashes with name: analysis_type(str), count: count(int)
juraj-google-style
def forward_log_det_jacobian(self, x, event_ndims, name='forward_log_det_jacobian'):
    """Returns the forward_log_det_jacobian.

    Args:
      x: `Tensor`. The input to the "forward" Jacobian determinant
        evaluation.
      event_ndims: Number of dimensions in the probabilistic events being
        transformed. Must be greater than or equal to
        `self.forward_min_event_ndims`. The result is summed over the
        final dimensions to produce a scalar Jacobian determinant for
        each event.
      name: The name to give this op.

    Returns:
      `Tensor`, if this bijector is injective. If not injective this is
      not implemented.

    Raises:
      TypeError: if `self.dtype` is specified and `y.dtype` is not
        `self.dtype`.
      NotImplementedError: if the required underlying methods are not
        implemented, or this is a non-injective bijector.
    """
    return self._call_forward_log_det_jacobian(x, event_ndims, name)
Returns both the forward_log_det_jacobian. Args: x: `Tensor`. The input to the "forward" Jacobian determinant evaluation. event_ndims: Number of dimensions in the probabilistic events being transformed. Must be greater than or equal to `self.forward_min_event_ndims`. The result is summed over the final dimensions to produce a scalar Jacobian determinant for each event, i.e. it has shape `x.shape.ndims - event_ndims` dimensions. name: The name to give this op. Returns: `Tensor`, if this bijector is injective. If not injective this is not implemented. Raises: TypeError: if `self.dtype` is specified and `y.dtype` is not `self.dtype`. NotImplementedError: if neither `_forward_log_det_jacobian` nor {`_inverse`, `_inverse_log_det_jacobian`} are implemented, or this is a non-injective bijector.
github-repos
def get_table_map_prompt() -> t.Tuple:
    """Generate a prompt describing every table in the dataset.

    Returns:
        tuple: A tuple containing the prompt string and the table map
        dictionary.
    """
    table_map = get_table_map()
    descriptions = [
        f"Table name is {name}.\n It's located at {info['uri']} "
        f"and containing following columns: {', '.join(info['columns'])}"
        for name, info in table_map.items()
    ]
    return '\n'.join(descriptions), table_map
Generate a prompt containing information about each table in the dataset. Returns: tuple: A tuple containing the prompt string and the table map dictionary.
github-repos
def transfer(self, data):
    """Shift out `data` over SPI and return the data shifted in.

    Args:
        data (bytes, bytearray, list): a byte array or list of 8-bit
            integers to shift out.

    Returns:
        bytes, bytearray, list: data shifted in, matching the input type.

    Raises:
        SPIError: if an I/O or OS error occurs.
        TypeError: if `data` type is invalid.
        ValueError: if data is not valid bytes.
    """
    if (not isinstance(data, (bytes, bytearray, list))):
        raise TypeError('Invalid data type, should be bytes, bytearray, or list.')
    # Pack into a C-compatible unsigned-byte buffer for the ioctl;
    # OverflowError here means a value doesn't fit in a byte.
    try:
        buf = array.array('B', data)
    except OverflowError:
        raise ValueError('Invalid data bytes.')
    (buf_addr, buf_len) = buf.buffer_info()
    # Full-duplex transfer: tx and rx share the same buffer, so after
    # the ioctl `buf` holds the received bytes.
    spi_xfer = _CSpiIocTransfer()
    spi_xfer.tx_buf = buf_addr
    spi_xfer.rx_buf = buf_addr
    spi_xfer.len = buf_len
    try:
        fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer)
    except OSError as e:
        raise SPIError(e.errno, ('SPI transfer: ' + e.strerror))
    # Return the received data in the same container type as the input.
    if isinstance(data, bytes):
        return bytes(bytearray(buf))
    elif isinstance(data, bytearray):
        return bytearray(buf)
    elif isinstance(data, list):
        return buf.tolist()
Shift out `data` and return shifted in data. Args: data (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out. Returns: bytes, bytearray, list: data shifted in. Raises: SPIError: if an I/O or OS error occurs. TypeError: if `data` type is invalid. ValueError: if data is not valid bytes.
codesearchnet
def save_data(X, y, path):
    """Save data as a CSV, LibSVM or HDF5 file based on the extension.

    Args:
        X (numpy or scipy sparse matrix): Data matrix.
        y (numpy array): Target vector. If None, an all-zero vector is
            saved instead.
        path (str): Path to the CSV, LibSVM or HDF5 file to save data.
    """
    savers = {'.csv': save_csv,
              '.sps': save_libsvm,
              '.h5': save_hdf5}
    saver = savers[os.path.splitext(path)[1]]
    if y is None:
        y = np.zeros((X.shape[0], ))
    saver(X, y, path)
Save data as a CSV, LibSVM or HDF5 file based on the file extension. Args: X (numpy or scipy sparse matrix): Data matrix y (numpy array): Target vector. If None, all zero vector will be saved. path (str): Path to the CSV, LibSVM or HDF5 file to save data.
juraj-google-style
def node_op_type(self, node_name, device_name=None):
    """Get the op type of the given node.

    Args:
        node_name: (`str`) name of the node.
        device_name: (`str`) name of the device. Optional if there is
            only one device or the node exists on only one device.

    Returns:
        (`str`) op type of the node.

    Raises:
        LookupError: If node op types have not been loaded from partition
            graphs yet.
    """
    if not self._debug_graphs:
        raise LookupError('Node op types are not loaded from partition graphs yet.')
    resolved_device = self._infer_device_name(device_name, node_name)
    return self._debug_graphs[resolved_device].node_op_types[node_name]
Get the op type of given node. Args: node_name: (`str`) name of the node. device_name: (`str`) name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: (`str`) op type of the node. Raises: LookupError: If node op types have not been loaded from partition graphs yet.
github-repos
def make_basket_put_payoff(strike_price, dtype=None, name=None):
    """Produce a payoff callable for a simple basket put option.

    Args:
        strike_price: A `Tensor` of `dtype` consistent with `samples` and
            shape `[num_samples, num_strikes]`.
        dtype: Optional `dtype` (`tf.float32` or `tf.float64`) for the
            strike and the payoff callable's input. Default `None` uses
            the dtype inferred by TensorFlow.
        name: Python `str` name prefixed to ops created by the callable.
            Default `None` maps to 'put_valuer'.

    Returns:
        A callable from a `Tensor` of shape
        `[num_samples, num_exercise_times, dim]` and a scalar current
        time to a payoff `Tensor` of shape `[num_samples, num_strikes]`.
    """
    strikes = tf.convert_to_tensor(strike_price, dtype=dtype,
                                   name='strike_price')
    # Bind strike/dtype/name now; sampling arguments are supplied later.
    return functools.partial(_put_valuer, strike_price=strikes,
                             dtype=dtype, name=name)
Produces a callable from samples to payoff of a simple basket put option. Args: strike_price: A `Tensor` of `dtype` consistent with `samples` and shape `[num_samples, num_strikes]`. dtype: Optional `dtype`. Either `tf.float32` or `tf.float64`. The `dtype` If supplied, represents the `dtype` for the 'strike_price' as well as for the input argument of the output payoff callable. Default value: `None`, which means that the `dtype` inferred by TensorFlow is used. name: Python `str` name prefixed to Ops created by the callable created by this function. Default value: `None` which is mapped to the default name 'put_valuer' Returns: A callable from `Tensor` of shape `[num_samples, num_exercise_times, dim]` and a scalar `Tensor` representing current time to a `Tensor` of shape `[num_samples, num_strikes]`.
github-repos
def apply(self, predictions: Iterable[AnomalyPrediction]) -> AnomalyPrediction:
    """Applies the label aggregation function to a list of predictions.

    Args:
        predictions (Iterable[AnomalyPrediction]): A collection of
            `AnomalyPrediction` objects to be aggregated.

    Returns:
        AnomalyPrediction: A single `AnomalyPrediction` with the
        aggregated label: non-missing, non-error labels are combined with
        `self._agg`; if all labels are error labels (`None`) the result
        is `None`; a mix of missing and error labels yields the missing
        label.
    """
    result_dict: dict[str, Any] = {}
    # Mixins contribute the model id and (optionally) source predictions.
    _AggModelIdMixin.add_model_id(self, result_dict)
    _SourcePredictionMixin.add_source_predictions(self, result_dict, predictions)
    # Keep only labels that are neither errors (None) nor missing.
    labels = [prediction.label for prediction in predictions if prediction.label is not None and prediction.label != self._missing_label]
    if len(labels) > 0:
        result_dict['label'] = self._agg(labels)
    elif all(map(lambda x: x.label is None, predictions)):
        # Every source prediction errored out.
        result_dict['label'] = None
    else:
        # Mixture of missing and error labels.
        result_dict['label'] = self._missing_label
    return AnomalyPrediction(**result_dict)
Applies the label aggregation function to a list of predictions. Args: predictions (Iterable[AnomalyPrediction]): A collection of `AnomalyPrediction` objects to be aggregated. Returns: AnomalyPrediction: A single `AnomalyPrediction` object with the aggregated label. The aggregated label is determined as follows: - If there are any non-missing and non-error labels, the `agg_func` is applied to aggregate them. - If all labels are error labels (`None`), the aggregated label is also `None`. - If there are a mix of missing and error labels, the aggregated label is the `missing_label`.
github-repos
def readSchedules(self, tableset):
    """Serial call to read a schedule tariffs buffer.

    Args:
        tableset (int): :class:`~ekmmeters.ReadSchedules` buffer to
            return.

    Returns:
        bool: True on completion and ACK.
    """
    self.setContext("readSchedules")
    try:
        # Build the read request: the table number is hex-encoded into a
        # fixed frame, then a CRC16 over the payload is appended.
        req_table = binascii.hexlify(str(tableset).zfill(1))
        req_str = "01523102303037" + req_table + "282903"
        self.request(False)
        req_crc = self.calc_crc16(req_str[2:].decode("hex"))
        req_str += req_crc
        self.m_serial_port.write(req_str.decode("hex"))
        raw_ret = self.m_serial_port.getResponse(self.getContext())
        self.serialPostEnd()
        # Response CRC excludes the leading byte and the trailing CRC.
        return_crc = self.calc_crc16(raw_ret[1:-2])
        if tableset == ReadSchedules.Schedules_1_To_4:
            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_1_to_4)
            self.convertData(unpacked_read, self.m_schd_1_to_4, self.m_kwh_precision)
            if str(return_crc) == str(self.m_schd_1_to_4["crc16"][MeterData.StringValue]):
                ekm_log("Schedules 1 to 4 CRC success (06 return")
                self.setContext("")
                return True
        elif tableset == ReadSchedules.Schedules_5_To_6:
            unpacked_read = self.unpackStruct(raw_ret, self.m_schd_5_to_6)
            self.convertData(unpacked_read, self.m_schd_5_to_6, self.m_kwh_precision)
            if str(return_crc) == str(self.m_schd_5_to_6["crc16"][MeterData.StringValue]):
                ekm_log("Schedules 5 to 8 CRC success (06 return)")
                self.setContext("")
                return True
    except:
        # Best-effort: log the traceback and fall through to failure.
        ekm_log(traceback.format_exc(sys.exc_info()))
    self.setContext("")
    return False
Serial call to read schedule tariffs buffer Args: tableset (int): :class:`~ekmmeters.ReadSchedules` buffer to return. Returns: bool: True on completion and ACK.
juraj-google-style
def should_fire(self, time_domain, timestamp, window, context):
    """Whether this trigger should cause the window to fire.

    Args:
        time_domain: WATERMARK for event-time timers and REAL_TIME for
            processing-time timers.
        timestamp: for time_domain WATERMARK, it represents the
            watermark: (a lower bound on) the watermark of the system;
            for time_domain REAL_TIME, it represents the timestamp of
            the processing-time timer.
        window: the window whose trigger is being considered.
        context: a context (e.g. a TriggerContext instance) for managing
            state and setting timers.

    Returns:
        whether this trigger should cause a firing.
    """
    # Abstract hook: concrete trigger implementations override this.
    pass
Whether this trigger should cause the window to fire. Args: time_domain: WATERMARK for event-time timers and REAL_TIME for processing-time timers. timestamp: for time_domain WATERMARK, it represents the watermark: (a lower bound on) the watermark of the system and for time_domain REAL_TIME, it represents the trigger: timestamp of the processing-time timer. window: the window whose trigger is being considered context: a context (e.g. a TriggerContext instance) for managing state and setting timers Returns: whether this trigger should cause a firing
github-repos
def _validate_path(self, settings, name, value):
    """Validate that a path from settings exists on disk.

    Args:
        settings (dict): Current settings.
        name (str): Setting name.
        value (str): Path to validate.

    Raises:
        boussole.exceptions.SettingsInvalidError: If path does not exist.

    Returns:
        str: Validated path.
    """
    if os.path.exists(value):
        return value
    raise SettingsInvalidError(
        "Path from setting '{name}' does not exists: {value}".format(
            name=name, value=value))
Validate path exists Args: settings (dict): Current settings. name (str): Setting name. value (str): Path to validate. Raises: boussole.exceptions.SettingsInvalidError: If path does not exists. Returns: str: Validated path.
codesearchnet
def _match_elements(dom, matches):
    """Find elements in `dom` matching the patterns in `matches`.

    Args:
        dom (obj): HTMLElement DOM tree.
        matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``.

    Returns:
        dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``.

    Raises:
        UserWarning: When a pattern is not found, or matches multiple
            elements.
    """
    out = {}
    for key, content in matches.items():
        pattern = content['data'].strip()
        # Multi-line patterns are matched token-wise; single-line ones
        # are matched as stripped strings.
        if '\n' in pattern:
            pattern = pattern.split()
            transformer = lambda x: x.strip().split()
        else:
            transformer = lambda x: x.strip()
        matching_elements = _locate_element(dom, pattern, transformer=transformer)
        not_found_msg = content.get('notfoundmsg', '').replace('$name', key)
        if not not_found_msg.strip():
            not_found_msg = "Can't locate variable '%s' with content '%s'!" % (key, pattern)
        content['notfoundmsg'] = not_found_msg
        tagname = content.get('tagname', '').strip().lower()
        if tagname:
            # BUGFIX: materialize the filtered result so the emptiness and
            # len() checks below work under Python 3 (where filter()
            # returns an iterator, not a list).
            matching_elements = [
                el for el in matching_elements
                if el.getTagName().strip().lower() == tagname
            ]
        if not matching_elements:
            raise UserWarning(not_found_msg)
        if len(matching_elements) > 1:
            raise UserWarning(
                ("Ambigious content '%s'!" % content)
                + 'Content was found in multiple elements!')
        out[key] = matching_elements[0]
    return out
Find location of elements matching patterns specified in `matches`. Args: dom (obj): HTMLElement DOM tree. matches (dict): Structure: ``{"var": {"data": "match", ..}, ..}``. Returns: dict: Structure: ``{"var": {"data": HTMLElement_obj, ..}, ..}``
codesearchnet
def get_url(access_token, endpoint=ams_rest_endpoint, flag=True):
    """Get Media Services Final Endpoint URL.

    Args:
        access_token (str): A valid Azure authentication token.
        endpoint (str): Azure Media Services Initial Endpoint.
        flag (bool): flag passed through to the underlying helper.

    Returns:
        HTTP response. JSON body.
    """
    return do_ams_get_url(endpoint, access_token, flag)
Get Media Services Final Endpoint URL. Args: access_token (str): A valid Azure authentication token. endpoint (str): Azure Media Services Initial Endpoint. flag (bol): flag. Returns: HTTP response. JSON body.
juraj-google-style
def reload_config(self, dockercfg_path=None):
    """Force a reload of the auth configuration.

    Args:
        dockercfg_path (str): Use a custom path for the Docker config
            file (default ``$HOME/.docker/config.json`` if present,
            otherwise ``$HOME/.dockercfg``).

    Returns:
        None
    """
    self._auth_configs = auth.load_config(dockercfg_path, credstore_env=self.credstore_env)
Force a reload of the auth configuration Args: dockercfg_path (str): Use a custom path for the Docker config file (default ``$HOME/.docker/config.json`` if present, otherwise``$HOME/.dockercfg``) Returns: None
codesearchnet
def set(self, response: 'requests.Response') -> None:
    """Add a response to the cache, keyed by its URL.

    Args:
        response: response from ESI.

    Returns:
        None
    """
    expires = self._get_expiration(response.headers)
    self.data[response.url] = SavedEndpoint(response.json(), expires)
Adds a response to the cache. Args: response: response from ESI Returns: None
juraj-google-style
def Where(self, field):
    """Create a WHERE builder using a provided field.

    Args:
        field: the field to be added as an argument in the WHERE clause.

    Returns:
        The created WHERE builder.
    """
    builder = _WhereBuilder(self, field)
    # Track the builder so the final query can assemble all WHERE clauses.
    self.where_builders.append(builder)
    return builder
Creates a WHERE builder using a provided field. Args: field: the field to be added as an argument in the WHERE clause. Returns: The created WHERE builder.
codesearchnet
def Allowance(self, wallet, owner_addr, requestor_addr):
    """Query how many tokens `requestor_addr` may transfer from `owner_addr`.

    Args:
        wallet (neo.Wallets.Wallet): a wallet instance.
        owner_addr (str): public address of the account to transfer the
            given amount from.
        requestor_addr (str): public address of the account that requests
            the transfer.

    Returns:
        tuple: (InvocationTransaction, int fee, list of neo VM
        evaluation stack results)
    """
    invoke_args = [
        self.ScriptHash.ToString(),
        'allowance',
        [PromptUtils.parse_param(owner_addr, wallet),
         PromptUtils.parse_param(requestor_addr, wallet)],
    ]
    tx, fee, results, num_ops, engine_success = TestInvokeContract(
        wallet, invoke_args, None, True)
    return tx, fee, results
Return the amount of tokens that the `requestor_addr` account can transfer from the `owner_addr` account. Args: wallet (neo.Wallets.Wallet): a wallet instance. owner_addr (str): public address of the account to transfer the given amount from. requestor_addr (str): public address of the account that requests the transfer. Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results.
codesearchnet
def get_variant_dict(variant_line, header_line=None):
    """Parse a variant line and map its fields onto the header columns.

    Args:
        variant_line (str): A vcf variant line.
        header_line (list): A list with the header columns; defaults to
            the 8 mandatory vcf fields.

    Returns:
        dict: Column name mapped to field value.

    Raises:
        SyntaxError: When the variant line has fewer fields than the
            header line.
    """
    if not header_line:
        logger.debug("No header line, use only first 8 mandatory fields")
        header_line = ['CHROM','POS','ID','REF','ALT','QUAL','FILTER','INFO']
    logger.debug("Building variant dict from variant line {0} and header"
                 " line {1}".format(variant_line, '\t'.join(header_line)))
    fields = variant_line.rstrip().split('\t')
    if len(fields) < len(header_line):
        logger.info('\t'.join(header_line))
        logger.info('\t'.join(fields))
        raise SyntaxError("Length of variant line differs from length of"
                          " header line")
    return dict(zip(header_line, fields))
Parse a variant line Split a variant line and map the fields on the header columns Args: variant_line (str): A vcf variant line header_line (list): A list with the header columns Returns: variant_dict (dict): A variant dictionary
juraj-google-style
def get_course_grade(self, course_id, username):
    """Retrieve a user's grade record for the given course.

    Args:
        course_id (str): The string value of the course's unique
            identifier.
        username (str): The username identifying the user for which to
            retrieve the grade.

    Returns:
        dict: containing ``username``, ``course_key``, ``passed``,
        ``percent`` and ``letter_grade``.

    Raises:
        HttpNotFoundError: if no grade found for the given user+course.
    """
    for record in self.client.courses(course_id).get(username=username):
        if record.get('username') == username:
            return record
    raise HttpNotFoundError(
        'No grade record found for course={}, username={}'.format(
            course_id, username))
Retrieve the grade for the given username for the given course_id. Args: * ``course_id`` (str): The string value of the course's unique identifier * ``username`` (str): The username ID identifying the user for which to retrieve the grade. Raises: HttpNotFoundError if no grade found for the given user+course. Returns: a dict containing: * ``username``: A string representation of a user's username passed in the request. * ``course_key``: A string representation of a Course ID. * ``passed``: Boolean representing whether the course has been passed according the course's grading policy. * ``percent``: A float representing the overall grade for the course * ``letter_grade``: A letter grade as defined in grading_policy (e.g. 'A' 'B' 'C' for 6.002x) or None
codesearchnet
def channels_rename(self, *, channel: str, name: str, **kwargs) -> SlackResponse:
    """Renames a channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        name (str): The new channel name. e.g. 'newchannel'
    """
    # channels.rename requires a user (xoxp) token, not a bot token.
    self._validate_xoxp_token()
    kwargs.update({"channel": channel, "name": name})
    return self.api_call("channels.rename", json=kwargs)
Renames a channel. Args: channel (str): The channel id. e.g. 'C1234567890' name (str): The new channel name. e.g. 'newchannel'
juraj-google-style
def ping(self, destination, length=20):
    """Send one ICMPv6 echo request to a unicast destination address.

    Args:
        destination: the unicast destination address of the ICMPv6 echo
            request.
        length: the size of the ICMPv6 echo request payload.
    """
    print '%s call ping' % self.port
    print 'destination: %s' %destination
    try:
        cmd = 'ping %s -c 1 -s %s -I %s' % (destination, str(length), WPAN_INTERFACE)
        if self._is_net:
            # Remote (ssh) execution path.
            ssh_stdin, ssh_stdout, ssh_stderr = self.handle.exec_command(cmd)
        else:
            # Local serial execution path.
            self._sendline(cmd)
            self._expect(cmd)
        # Give the ping time to complete before the caller proceeds.
        time.sleep(1)
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('ping() Error: ' + str(e))
send ICMPv6 echo request with a given length to a unicast destination address Args: destination: the unicast destination address of ICMPv6 echo request length: the size of ICMPv6 echo request payload
juraj-google-style
def ndcg(truth, recommend, k=None):
    """Normalized Discounted Cumulative Gain (NDCG).

    Args:
        truth (numpy 1d array): Set of truth samples.
        recommend (numpy 1d array): Ordered set of recommended samples.
        k (int): Top-k items in `recommend` will be recommended.

    Returns:
        float: NDCG.
    """
    if k is None:
        k = len(recommend)

    def idcg(n_possible_truth):
        # Ideal DCG: all relevant items ranked at the top positions.
        total = 0.
        for rank in range(n_possible_truth):
            total += 1. / np.log2(rank + 2)
        return total

    dcg = 0.
    for rank, item in enumerate(recommend[:k]):
        if item in truth:
            dcg += 1. / np.log2(rank + 2)

    best_possible = idcg(np.min([truth.size, k]))
    if best_possible == 0.:
        return 0.
    return dcg / best_possible
Normalized Discounted Cumulative Gain (NDCG). Args: truth (numpy 1d array): Set of truth samples. recommend (numpy 1d array): Ordered set of recommended samples. k (int): Top-k items in `recommend` will be recommended. Returns: float: NDCG.
juraj-google-style
def get_upstream_artifacts_full_paths_per_task_id(context):
    """List the downloaded upstream artifacts, sorted by task_id.

    Args:
        context (scriptworker.context.Context): the scriptworker context.

    Returns:
        dict, dict: the first maps task_id to the paths of existing
        upstream artifacts; the second maps task_id to the optional
        artifacts that couldn't be downloaded.

    Raises:
        scriptworker.exceptions.ScriptWorkerTaskException: when a
            non-optional artifact doesn't exist.
    """
    upstream_artifacts = context.task['payload']['upstreamArtifacts']
    task_ids_and_relative_paths = [(artifact_definition['taskId'], artifact_definition['paths']) for artifact_definition in upstream_artifacts]
    optional_artifacts_per_task_id = get_optional_artifacts_per_task_id(upstream_artifacts)
    upstream_artifacts_full_paths_per_task_id = {}
    failed_paths_per_task_id = {}
    for (task_id, paths) in task_ids_and_relative_paths:
        for path in paths:
            try:
                path_to_add = get_and_check_single_upstream_artifact_full_path(context, task_id, path)
                add_enumerable_item_to_dict(dict_=upstream_artifacts_full_paths_per_task_id, key=task_id, item=path_to_add)
            except ScriptWorkerTaskException:
                # Missing optional artifacts are recorded, not fatal;
                # missing mandatory artifacts re-raise.
                if (path in optional_artifacts_per_task_id.get(task_id, [])):
                    log.warning('Optional artifact "{}" of task "{}" not found'.format(path, task_id))
                    add_enumerable_item_to_dict(dict_=failed_paths_per_task_id, key=task_id, item=path)
                else:
                    raise
    return (upstream_artifacts_full_paths_per_task_id, failed_paths_per_task_id)
List the downloaded upstream artifacts. Args: context (scriptworker.context.Context): the scriptworker context. Returns: dict, dict: lists of the paths to upstream artifacts, sorted by task_id. First dict represents the existing upstream artifacts. The second one maps the optional artifacts that couldn't be downloaded Raises: scriptworker.exceptions.ScriptWorkerTaskException: when an artifact doesn't exist.
codesearchnet
def Snapshot(self, request, global_params=None):
    """Snapshot the state of a streaming job.

    Args:
        request: (DataflowProjectsJobsSnapshotRequest) input message
        global_params: (StandardQueryParameters, default: None) global
            arguments

    Returns:
        (Snapshot) The response message.
    """
    # Generated apitools service-method boilerplate.
    config = self.GetMethodConfig('Snapshot')
    return self._RunMethod(config, request, global_params=global_params)
Snapshot the state of a streaming job. Args: request: (DataflowProjectsJobsSnapshotRequest) input message global_params: (StandardQueryParameters, default: None) global arguments Returns: (Snapshot) The response message.
github-repos
async def on_message(message):
    """The on_message event handler for this module.

    Args:
        message (discord.Message): Input message.
    """
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content

    data = datatools.get_data()

    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        # Module is disabled for this server.
        return

    # Never reply to the bot's own messages.
    if server is not None and author != channel.server.me:
        normal_replies = data["discord"]["servers"][server.id][_data.modulename]["normal"]
        tts_replies = data["discord"]["servers"][server.id][_data.modulename]["tts"]
        # Trigger matching ignores case and spaces in the message.
        for r in normal_replies.keys():
            if r in content.lower().replace(' ', ''):
                await client.send_typing(channel)
                await client.send_message(channel, normal_replies[r])
        for r in tts_replies.keys():
            if r in content.lower().replace(' ', ''):
                await client.send_typing(channel)
                # NOTE(review): tts replies are sent without tts=True —
                # confirm this is intended.
                await client.send_message(channel, tts_replies[r])
The on_message event handler for this module Args: message (discord.Message): Input message
juraj-google-style
def period_length_in_days(self, period_tensor):
    """Computes the number of days in each period.

    Args:
        period_tensor: A PeriodTensor object broadcastable to the shape
            of "self".

    Returns:
        An int32 tensor with numbers of days each period takes.
    """
    # Day count = ordinal of (date + period) minus ordinal of date.
    return (self + period_tensor).ordinal() - self._ordinals
Computes the number of days in each period. Args: period_tensor: A PeriodTensor object broadcastable to the shape of "self". Returns: An int32 tensor with numbers of days each period takes. #### Example ```python dates = tff.datetime.dates_from_tuples([(2020, 2, 25), (2020, 3, 2)]) dates.period_length_in_days(month()) # [29, 31] periods = tff.datetime.months([1, 2]) dates.period_length_in_days(periods) # [29, 61] ```
github-repos
def assert_visible(self, selector, testid=None, **kwargs):
    """Assert that the element is visible in the dom

    Args:
        selector (str): the selector used to find the element
        testid (str): the test_id or a str

    Kwargs:
        wait_until_visible (bool)
        highlight (bool)

    Returns:
        bool: True if the assertion succeeds; False otherwise.
    """
    self.info_log(
        "Assert visible selector(%s) testid(%s)" % (selector, testid)
    )

    # Resolve effective options: explicit kwargs win over global config.
    highlight = kwargs.get(
        'highlight',
        BROME_CONFIG['highlight']['highlight_on_assertion_success']
    )
    self.debug_log("effective highlight: %s" % highlight)

    wait_until_visible = kwargs.get(
        'wait_until_visible',
        BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']
    )
    self.debug_log("effective wait_until_visible: %s" % wait_until_visible)

    if wait_until_visible:
        # Best effort only; the visibility check below decides the outcome.
        self.wait_until_visible(selector, raise_exception=False)

    element = self.find(
        selector,
        raise_exception=False,
        wait_until_visible=False,
        wait_until_present=False
    )
    succeeded = bool(element and element.is_displayed(raise_exception=False))

    if succeeded and highlight:
        element.highlight(
            style=BROME_CONFIG['highlight']['style_on_assertion_success']
        )
    if testid is not None:
        self.create_test_result(testid, succeeded)
    return succeeded
Assert that the element is visible in the dom

Args:
    selector (str): the selector used to find the element
    testid (str): the test_id or a str

Kwargs:
    wait_until_visible (bool)
    highlight (bool)

Returns:
    bool: True if the assertion succeeds; False otherwise.
juraj-google-style
def template_file(
    task: Task, template: str, path: str, jinja_filters: FiltersDict = None, **kwargs: Any
) -> Result:
    """Renders contents of a file with jinja2. All the host data is
    available in the template.

    Arguments:
        template: filename
        path: path to dir with templates
        jinja_filters: jinja filters to enable.
            Defaults to nornir.config.jinja2.filters
        **kwargs: additional data to pass to the template

    Returns:
        Result object with the following attributes set:

          * result (``string``): rendered string
    """
    # The original read `jinja_filters or {} or task...`; since an empty
    # dict is falsy, the `{}` operand was dead code — this is equivalent.
    jinja_filters = jinja_filters or task.nornir.config.jinja2.filters
    text = jinja_helper.render_from_file(
        template=template,
        path=path,
        host=task.host,
        jinja_filters=jinja_filters,
        **kwargs
    )
    return Result(host=task.host, result=text)
Renders contents of a file with jinja2. All the host data is available in the template

Arguments:
    template: filename
    path: path to dir with templates
    jinja_filters: jinja filters to enable. Defaults to nornir.config.jinja2.filters
    **kwargs: additional data to pass to the template

Returns:
    Result object with the following attributes set:

      * result (``string``): rendered string
juraj-google-style
def unpack(self, buff, offset=0):
    """Unpack a binary message into this object's attributes.

    Reads 8 bytes starting at *offset* and stores them in ``self._value``
    as a colon-separated string of two-digit hex values.

    Args:
        buff (bytes): Binary data package to be unpacked.
        offset (int): Where to begin unpacking.

    Raises:
        Exception: If there is a struct unpacking error.
    """
    hex_parts = []
    for position in range(offset, offset + 8):
        # One unsigned byte at a time, mirroring the wire layout.
        (byte_value,) = struct.unpack("!B", buff[position:position + 1])
        hex_parts.append("%.2x" % byte_value)
    self._value = ':'.join(hex_parts)
Unpack a binary message into this object's attributes. Unpack the binary value *buff* and update this object attributes based on the results. Args: buff (bytes): Binary data package to be unpacked. offset (int): Where to begin unpacking. Raises: Exception: If there is a struct unpacking error.
juraj-google-style
def do_check(func, files, status):
    """Generic do_check helper method.

    Runs ``func.parse`` over the contents of each file and records any
    failure message in ``status``.

    Args:
        func (function): Specific checker whose ``parse`` method is called
        files (list): list of files to run against
        status (list): list of pre-receive check failures to eventually
            print to the user

    Returns:
        The (mutated) status list of current pre-receive check failures.
        Might be an empty list.
    """
    for path in files:
        with open(path, 'r') as handle:
            result = func.parse(handle.read(), path)
        if result:
            status.append('{0}: {1}'.format(path, result))
    return status
Generic do_check helper method

Args:
    func (function): Specific function to call
    files (list): list of files to run against
    status (list): list of pre-receive check failures to eventually print
    to the user

Returns:
    status list of current pre-receive check failures. Might be an empty
    list.
codesearchnet
def add(self, key, minhash):
    """Add a unique key together with the MinHash (or weighted MinHash)
    of the set it references.

    Note:
        The key won't be searchable until the
        :func:`datasketch.MinHashLSHForest.index` method is called.

    Args:
        key (hashable): The unique identifier of the set.
        minhash (datasketch.MinHash): The MinHash of the set.

    Raises:
        ValueError: If the MinHash has too few permutations, or if the
            key was already added.
    """
    if len(minhash) < self.k * self.l:
        raise ValueError("The num_perm of MinHash out of range")
    if key in self.keys:
        raise ValueError("The given key has already been added")
    # One digest per (start, end) hash range of the MinHash values.
    digests = [self._H(minhash.hashvalues[start:end])
               for start, end in self.hashranges]
    self.keys[key] = digests
    for digest, hashtable in zip(digests, self.hashtables):
        hashtable[digest].append(key)
Add a unique key, together
with a MinHash (or weighted MinHash) of the set referenced by the key.

Note:
    The key won't be searchable until the
    :func:`datasketch.MinHashLSHForest.index` method is called.

Args:
    key (hashable): The unique identifier of the set.
    minhash (datasketch.MinHash): The MinHash of the set.
juraj-google-style
def _rollback(self): if (not self.in_progress): raise ValueError(_CANT_ROLLBACK) try: self._client._firestore_api.rollback(self._client._database_string, self._id, metadata=self._client._rpc_metadata) finally: self._clean_up()
Roll back the transaction. Raises: ValueError: If no transaction is in progress.
codesearchnet
def _message_received(self, msg):
    """Callback run when an XMPP Message is received.

    The incoming aioxmpp.Message is first converted into a
    spade.message.Message, then handed to the dispatcher, which delivers
    it to every behaviour that is waiting for it.

    Args:
        msg (aioxmpp.Message): the message just received.

    Returns:
        list(asyncio.Future): a list of futures of the append of the
            message at each matched behaviour.
    """
    spade_msg = Message.from_node(msg)
    return self.dispatch(spade_msg)
Callback run when an XMPP Message is received.

This callback delivers the message to every behaviour
that is waiting for it. First, the aioxmpp.Message is
converted to spade.message.Message.

Args:
    msg (aioxmpp.Message): the message just received.

Returns:
    list(asyncio.Future): a list of futures of the append of the message at each matched behaviour.
codesearchnet
def remove_attribute(self, attr):
    """Remove attribute from a workspace.

    Args:
        attr (str): attribute name
    """
    update = [fapi._attr_rem(attr)]
    response = fapi.update_workspace_attributes(
        self.namespace, self.name, update, self.api_url
    )
    # Drop the attribute from the local cache; missing keys are ignored.
    self.data['workspace']['attributes'].pop(attr, None)
    fapi._check_response_code(response, 200)
Remove attribute from a workspace. Args: attr (str): attribute name
codesearchnet
def get_upstream_fork_point(self):
    """Get the most recent ancestor of HEAD that occurs on an upstream branch.

    First looks at the current branch's tracking branch, if applicable.
    If that doesn't work, looks at every other branch to find the most
    recent ancestor of HEAD that occurs on a tracking branch.

    Returns:
        git.Commit object or None
    """
    possible_relatives = []
    try:
        if not self.repo:
            return None
        try:
            active_branch = self.repo.active_branch
        except (TypeError, ValueError):
            # A detached HEAD has no active branch.
            logger.debug('git is in a detached head state')
            return None
        else:
            tracking_branch = active_branch.tracking_branch()
            if tracking_branch:
                possible_relatives.append(tracking_branch.commit)

        if not possible_relatives:
            # Fall back to every local branch that tracks a remote.
            for branch in self.repo.branches:
                tracking_branch = branch.tracking_branch()
                if tracking_branch is not None:
                    possible_relatives.append(tracking_branch.commit)

        head = self.repo.head
        most_recent_ancestor = None
        for possible_relative in possible_relatives:
            # merge_base yields the common ancestor(s) of HEAD and the
            # candidate; keep whichever ancestor is most recent.
            for ancestor in self.repo.merge_base(head, possible_relative):
                if most_recent_ancestor is None:
                    most_recent_ancestor = ancestor
                elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                    most_recent_ancestor = ancestor
        return most_recent_ancestor
    except exc.GitCommandError as e:
        logger.debug('git remote upstream fork point could not be found')
        # BUG FIX: Python 3 exceptions have no `.message` attribute, so the
        # original `logger.debug(e.message)` raised AttributeError here and
        # masked the intended debug output.
        logger.debug(str(e))
        return None
Get the most recent ancestor of HEAD that occurs on an upstream branch. First looks at the current branch's tracking branch, if applicable. If that doesn't work, looks at every other branch to find the most recent ancestor of HEAD that occurs on a tracking branch. Returns: git.Commit object or None
codesearchnet
def _convert_ddb_list_to_list(conversion_list): ret_list = [] for v in conversion_list: for v1 in v: ret_list.append(v[v1]) return ret_list
Given a dynamodb list, it will return a python list without the dynamodb datatypes Args: conversion_list (dict): a dynamodb list which includes the datatypes Returns: list: Returns a sanitized list without the dynamodb datatypes
codesearchnet
def fit(self, X):
    """Fit a kernel density estimate to a list of values.

    When the data is constant, degenerate-distribution methods replace
    the KDE; otherwise a gaussian_kde model is fitted and stored as a
    class attribute.

    Args:
        X: 1-d `np.ndarray` or `pd.Series` or `list` of datapoints to be
            estimated from.
    """
    self.constant_value = self._get_constant_value(X)

    if self.constant_value is not None:
        # Degenerate case: every observation shares one value.
        self._replace_constant_methods()
    else:
        self.model = scipy.stats.gaussian_kde(X)

    self.fitted = True
Fit Kernel density estimation to a list of values.

Args:
    X: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be estimated from.

This function will fit a gaussian_kde model to a list of datapoints
and store it as a class attribute.
juraj-google-style
def ch_start_time(self, *channels: List[Channel]) -> int:
    """Return the earliest start time in this collection.

    Args:
        *channels: Channels over which to obtain start_time.

    Returns:
        The smallest interval begin across the given channels, or 0 when
        none of them has any interval recorded.
    """
    # Only channels actually present in the table contribute intervals.
    tracked = (self._table[chan] for chan in channels if chan in self._table)
    intervals = [interval for group in tracked for interval in group]
    if not intervals:
        return 0
    return min(interval.begin for interval in intervals)
Return earliest start time in this collection. Args: *channels: Channels over which to obtain start_time.
codesearchnet
def get_first_content(el_list, alt=None, strip=True):
    """Return content of the first element in `el_list` or `alt`.

    Also returns `alt` if the content string of the first element is
    blank.

    Args:
        el_list (list): List of HTMLElement objects.
        alt (default None): Value returned when the list or content is blank.
        strip (bool, default True): Call .strip() on the content.

    Returns:
        str or alt: String representation of the content of the first
            element, or `alt` if not found.
    """
    if not el_list:
        return alt

    text = el_list[0].getContent()
    if strip:
        text = text.strip()

    return text if text else alt
Return content of the first element in `el_list` or `alt`. Also return `alt`
if the content string of the first element is blank.

Args:
    el_list (list): List of HTMLElement objects.
    alt (default None): Value returned when the list or content is blank.
    strip (bool, default True): Call .strip() on the content.

Returns:
    str or alt: String representation of the content of the first element \
                or `alt` if not found.
codesearchnet