code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def add_dynamic_element(self, name, description):
    """Append a dynamic namespace element and return the namespace.

    A dynamic element carries a wildcard value ('*'); the concrete value
    is filled in when the metric is collected.

    Args:
        name (str): name of the namespace element.
        description (str): description of the namespace element.

    Returns:
        The namespace instance itself, enabling call chaining.
    """
    self._pb.add(Name=name, Description=description, Value='*')
    return self
Adds a dynamic namespace element to the end of the Namespace. A dynamic namespace element is defined by an element that contains non-static data relative to the metric being collected. For instance, when collecting metrics for a given virtual machine the namespace element that contains the virtual-machine-id would be dynamic. This is modeled by a NamespaceElement when its `name` attribute contains the value 'virtual-machine-id'. In this example the `value` attribute would be set to the ID of the virtual machine when the metric is collected. Args: name (str): name of the namespace element description (str): description of the namespace element Returns: :py:class:`snap_plugin.v1.namespace.Namespace`
codesearchnet
def get_location(self, locations=None):
    """Return the dataset's locations.

    Args:
        locations (Optional[List[str]]): Valid locations list. Defaults to
            the list downloaded from HDX.

    Returns:
        List[str]: list of locations, or [] if there are none.
    """
    groups = self.data.get('groups')
    if not groups:
        return list()
    # Each group entry names a country; translate HDX codes to locations.
    return [
        Locations.get_location_from_HDX_code(
            group['name'], locations=locations, configuration=self.configuration)
        for group in groups
    ]
Return the dataset's location Args: locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. Returns: List[str]: list of locations or [] if there are none
juraj-google-style
def _validate_alias_command(alias_command):
    """Check if the alias command is valid.

    Args:
        alias_command: The command to validate.

    Raises:
        CLIError: if the alias command is empty.
    """
    if not alias_command:
        raise CLIError(EMPTY_ALIAS_ERROR)
    split_command = shlex.split(alias_command)
    boundary_index = len(split_command)
    # Find where the command name ends and the arguments begin: the first
    # token that does not start with a letter, or that lies beyond the
    # collision-check depth.
    for i, subcommand in enumerate(split_command):
        if not re.match('^[a-z]', subcommand.lower()) or i > COLLISION_CHECK_LEVEL_DEPTH:
            boundary_index = i
            break
    command_to_validate = ' '.join(split_command[:boundary_index]).lower()
    # If the command prefix matches any known reserved command, the alias
    # is accepted as-is; otherwise its positional arguments are validated.
    for command in azext_alias.cached_reserved_commands:
        if re.match(r'([a-z\-]*\s)*{}($|\s)'.format(command_to_validate), command):
            return
    _validate_positional_arguments(shlex.split(alias_command))
Check if the alias command is valid. Args: alias_command: The command to validate.
juraj-google-style
def function_cyl_co(script, r_func='r', theta_func='theta', z_func='z'):
    """Geometric function using cylindrical coordinates.

    Define functions in Z up cylindrical coordinates, with radius 'r',
    angle 'theta', and height 'z'. See "function" docs for additional
    usage info and accepted parameters.

    Args:
        script: the FilterScript (or filter list) to append the filter to.
        r_func (str): function to generate new coordinates for radius.
        theta_func (str): function to generate new coordinates for angle.
            0 degrees is on the +X axis.
        z_func (str): function to generate new coordinates for height.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # Cartesian expression for the radius, substituted into user functions.
    r = 'sqrt(x^2+y^2)'
    if (isinstance(script, FilterScript) and (script.ml_version >= '2016.12')):
        theta = 'atan2(y, x)'
    else:
        # Older MeshLab lacks atan2 in muparser; use the emulated version.
        theta = mp_func.mp_atan2('y', 'x')
    # Replace whole-word 'r' (regex word boundary) then 'theta' in each
    # user-supplied function; order matters since theta's expansion
    # contains no bare 'r' tokens only in the FilterScript branch.
    r_func = re.sub('\\br\\b', r, r_func).replace('theta', theta)
    theta_func = re.sub('\\br\\b', r, theta_func).replace('theta', theta)
    z_func = re.sub('\\br\\b', r, z_func).replace('theta', theta)
    # Convert back to Cartesian for the generic vertex-function filter.
    x_func = '(r)*cos(theta)'.replace('r', r_func).replace('theta', theta_func)
    y_func = '(r)*sin(theta)'.replace('r', r_func).replace('theta', theta_func)
    vert_function(script, x_func, y_func, z_func)
    return None
Geometric function using cylindrical coordinates. Define functions in Z up cylindrical coordinates, with radius 'r', angle 'theta', and height 'z' See "function" docs for additional usage info and accepted parameters. Args: r_func (str): function to generate new coordinates for radius theta_func (str): function to generate new coordinates for angle. 0 degrees is on the +X axis. z_func (str): function to generate new coordinates for height Layer stack: No impacts MeshLab versions: 2016.12 1.3.4BETA
codesearchnet
def get_choices(field):
    """Find choices of a field, whether it has choices or has a queryset.

    Args:
        field (BoundField): Django form boundfield.

    Returns:
        list: List of ``(value, label)`` choice tuples, with an empty
        choice prepended when the field allows one.
    """
    empty_label = getattr(field.field, "empty_label", False)
    needs_empty_value = False
    choices = []
    if hasattr(field.field, "_choices"):
        # Copy so the blank-choice handling below never mutates the
        # field's own choice list in place (the original aliased it and
        # `del choices[0]` corrupted the field for later renders).
        choices = list(field.field._choices)
    elif hasattr(field.field, "_queryset"):
        queryset = field.field._queryset
        field_name = getattr(field.field, "to_field_name") or "pk"
        choices += ((getattr(obj, field_name), str(obj)) for obj in queryset)
    if choices and (choices[0][1] == BLANK_CHOICE_DASH[0][1] or choices[0][0]):
        needs_empty_value = True
        if not choices[0][0]:
            del choices[0]
    if empty_label == BLANK_CHOICE_DASH[0][1]:
        empty_label = None
    if empty_label or not field.field.required:
        if needs_empty_value:
            choices.insert(0, ("", empty_label or BLANK_CHOICE_DASH[0][1]))
    return choices
Find choices of a field, whether it has choices or has a queryset. Args: field (BoundField): Django form boundfield Returns: list: List of choices
juraj-google-style
def __init__(self, cflags):
    """Parse the -std=c++xx flag out of the XML generator's command line.

    Args:
        cflags (str): cflags command line arguments passed to the XML
            generator.

    Raises:
        RuntimeError: if a -std= flag is present but not recognised.
    """
    super(cxx_standard, self).__init__()
    self._stdcxx = None
    self._is_implicit = False
    for flag, cplusplus_value in cxx_standard.__STD_CXX.items():
        if flag in cflags:
            self._stdcxx = flag
            self._cplusplus = cplusplus_value
    if not self._stdcxx:
        if '-std=' in cflags:
            raise RuntimeError('Unknown -std=c++xx flag used')
        # No explicit standard requested: fall back to C++03 implicitly.
        self._stdcxx = '-std=c++03'
        self._cplusplus = cxx_standard.__STD_CXX['-std=c++03']
        self._is_implicit = True
Class constructor that parses the XML generator's command line Args: cflags (str): cflags command line arguments passed to the XML generator
juraj-google-style
def create_graph_from_data(self, data):
    """Run the bnlearn algorithm on the data.

    Args:
        data (pandas.DataFrame): DataFrame containing the data.

    Returns:
        networkx.DiGraph: Solution given by the algorithm.
    """
    # Template placeholders consumed by the R bnlearn wrapper script.
    self.arguments.update({
        '{SCORE}': self.score,
        '{VERBOSE}': str(self.verbose).upper(),
        '{BETA}': str(self.beta),
        '{OPTIM}': str(self.optim).upper(),
        '{ALPHA}': str(self.alpha),
    })
    edges = self._run_bnlearn(data, verbose=self.verbose)
    graph = nx.DiGraph()
    graph.add_edges_from(edges)
    return graph
Run the algorithm on data. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the algorithm.
codesearchnet
def is_line_in_file(filename: str, line: str) -> bool:
    """Detects whether a line is present within a file.

    Args:
        filename: file to check.
        line: line to search for (as an exact match, without a trailing
            newline).

    Returns:
        bool: True if the line is present in the file.
    """
    assert "\n" not in line
    with open(filename, "r") as file:
        for fileline in file:
            # Iterating a file yields lines WITH their trailing newline,
            # while `line` is newline-free (asserted above); strip it so
            # the exact-match comparison can ever succeed.
            if fileline.rstrip("\n") == line:
                return True
    return False
Detects whether a line is present within a file. Args: filename: file to check line: line to search for (as an exact match)
juraj-google-style
def _flush(self, buffer):
    """Flush the write buffer of the stream to the object store.

    Args:
        buffer (memoryview): Buffer content.
    """
    container, obj = self._client_args
    # Translate raw client errors into this package's exception types.
    with _handle_client_exception():
        self._client.put_object(container, obj, buffer)
Flush the write buffers of the stream if applicable. Args: buffer (memoryview): Buffer content.
juraj-google-style
def transform(self, X):
    """Perform predictions blending using the trained weights.

    Args:
        X (array-like): Predictions of different models, one row per model.

    Returns:
        dict with blended predictions (key is 'y_pred').
    """
    n_models = np.shape(X)[0]
    n_weights = len(self._weights)
    assert n_models == n_weights, (
        'BlendingOptimizer: Number of models to blend its predictions and '
        'weights does not match: n_models={}, weights_len={}'.format(n_models, n_weights))
    # Weighted power mean: raise to _power, average with the learned
    # weights along the model axis, then invert the power.
    powered = np.power(X, self._power)
    averaged = np.average(powered, weights=self._weights, axis=0)
    blended_predictions = averaged ** (1.0 / self._power)
    return {'y_pred': blended_predictions}
Performs predictions blending using the trained weights. Args: X (array-like): Predictions of different models. Returns: dict with blended predictions (key is 'y_pred').
codesearchnet
class IdeficsVisionEncoder(nn.Module):
    """Transformer encoder consisting of `config.num_hidden_layers` self
    attention layers. Each layer is a [`IdeficsVisionEncoderLayer`].

    Args:
        config: IdeficsVisionConfig
    """

    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Off by default; toggled externally by gradient checkpointing APIs.
        self.gradient_checkpointing = False

    def forward(self, inputs_embeds, attention_mask: Optional[torch.Tensor]=None, causal_attention_mask: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> Union[Tuple, BaseModelOutput]:
        """Run the stack of encoder layers over ``inputs_embeds``.

        Args:
            inputs_embeds: embedded inputs fed to the first encoder layer.
            attention_mask: optional mask forwarded to every layer.
            causal_attention_mask: optional causal mask forwarded to every layer.
            output_attentions: whether to also return per-layer attentions;
                falls back to the config value when ``None``.
            output_hidden_states: whether to also return all hidden states;
                falls back to the config value when ``None``.
            return_dict: whether to return a ``BaseModelOutput`` instead of a
                plain tuple; falls back to the config value when ``None``.

        Returns:
            ``BaseModelOutput`` or a tuple of the non-``None`` values among
            (last hidden state, hidden states, attentions).
        """
        # `is not None` so an explicit False is not overridden by config.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the hidden state *entering* each layer.
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Recompute activations in the backward pass to save memory.
                layer_outputs = self._gradient_checkpointing_func(encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions)
            else:
                layer_outputs = encoder_layer(hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions)
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        if output_hidden_states:
            # Append the final hidden state produced by the last layer.
            encoder_states = encoder_states + (hidden_states,)
        if not return_dict:
            return tuple((v for v in [hidden_states, encoder_states, all_attentions] if v is not None))
        return BaseModelOutput(last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions)
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`IdeficsVisionEncoderLayer`]. Args: config: IdeficsVisionConfig
github-repos
def export(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    """Export the contents of the container's filesystem as a tar archive.

    Args:
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB.

    Returns:
        (str): The filesystem tar archive.

    Raises:
        :py:class:`docker.errors.APIError`: If the server returns an error.
    """
    # Delegate to the low-level API client with this container's id.
    return self.client.api.export(self.id, chunk_size)
Export the contents of the container's filesystem as a tar archive. Args: chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB Returns: (str): The filesystem tar archive Raises: :py:class:`docker.errors.APIError` If the server returns an error.
juraj-google-style
def union(self, *others: 'Substitution') -> 'Substitution':
    """Merge this substitution with the given ones into a new one.

    Does not modify any of the original substitutions.

    Args:
        others: The other substitutions to merge with this one.

    Returns:
        The new substitution with the other substitutions merged.

    Raises:
        ValueError: if a variable occurs in multiple substitutions but
            cannot be merged because the substitutions conflict.
    """
    merged = Substitution(self)
    for substitution in others:
        for name, replacement in substitution.items():
            # try_add_variable merges compatible replacements and raises
            # ValueError on conflicting ones.
            merged.try_add_variable(name, replacement)
    return merged
Try to merge the substitutions. If a variable occurs in multiple substitutions, try to merge the replacements. See :meth:`union_with_variable` to see how replacements are merged. Does not modify any of the original substitutions. Example: >>> subst1 = Substitution({'x': Multiset(['a', 'b']), 'z': a}) >>> subst2 = Substitution({'x': ('a', 'b'), 'y': ('c', )}) >>> print(subst1.union(subst2)) {x ↦ (a, b), y ↦ (c), z ↦ a} Args: others: The other substitutions to merge with this one. Returns: The new substitution with the other substitutions merged. Raises: ValueError: if a variable occurs in multiple substitutions but cannot be merged because the substitutions conflict.
codesearchnet
def put(self, entity):
    """Add a mutation putting the entity into the mutation buffer.

    When the buffer reaches its capacity, all pending mutations are
    committed and a fresh batch is started.

    Args:
        entity: entity which should be put into the datastore.
    """
    self._cur_batch.put(entity)
    self._num_mutations += 1
    if self._num_mutations >= MAX_MUTATIONS_IN_BATCH:
        self.commit()
        self.begin()
Adds mutation of the entity to the mutation buffer. If the mutation buffer reaches its capacity then this method commits all pending mutations from the buffer and empties it. Args: entity: entity which should be put into the datastore
codesearchnet
def count_divisors(n):
    """Count the number of distinct positive divisors of an integer n.

    Uses the prime factorization: if n = p1^e1 * ... * pk^ek, the divisor
    count is (e1 + 1) * ... * (ek + 1). The original body was corrupted
    (`remain = (remain exponent += 1`) and depended on an external
    prime_generator; this version is self-contained trial division.

    Args:
        n (int): strictly positive integer.

    Returns:
        int: The number of distinct divisors of n.

    Raises:
        TypeError: if n is not an integer.
        ValueError: if n is not strictly positive.
    """
    if not isinstance(n, int):
        raise TypeError('Expecting a strictly positive integer')
    if n <= 0:
        raise ValueError('Expecting a strictly positive integer')
    number_of_divisors = 1
    remain = n
    p = 2
    # Trial-divide by 2 and then odd candidates up to sqrt(remain).
    while p * p <= remain:
        if remain % p == 0:
            exponent = 0
            while remain % p == 0:
                remain //= p
                exponent += 1
            number_of_divisors *= exponent + 1
        p += 1 if p == 2 else 2
    if remain > 1:
        # Whatever remains is a single prime factor with exponent 1.
        number_of_divisors *= 2
    return number_of_divisors
Count the number of divisors of an integer n Args: n (int): strictly positive integer Returns: The number of distinct divisors of n Raises: TypeError: if n is not an integer ValueError: if n is negative
codesearchnet
def on_test_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `evaluate` methods.

    Also called at the end of a validation batch in the `fit` methods, if
    validation data is provided. Subclasses should override for any
    actions to run. Note that if the `steps_per_execution` argument to
    `compile` in `Model` is set to `N`, this method will only be called
    every `N` batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    # Intentionally a no-op in the base class.
Called at the end of a batch in `evaluate` methods. Also called at the end of a validation batch in the `fit` methods, if validation data is provided. Subclasses should override for any actions to run. Note that if the `steps_per_execution` argument to `compile` in `Model` is set to `N`, this method will only be called every `N` batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.
github-repos
def add(self, element):
    """Outputs an element to this bundle.

    Args:
        element: WindowedValue
    """
    # Bundles are append-only once committed.
    assert not self._committed
    if not self._stacked:
        self._elements.append(element)
        return
    # Stacked mode: consecutive elements sharing the same timestamp,
    # windows and pane info are collapsed into one
    # _Bundle._StackedWindowedValues holder to save memory.
    if self._elements and isinstance(self._elements[-1], (WindowedValue, _Bundle._StackedWindowedValues)) and (self._elements[-1].timestamp == element.timestamp) and (self._elements[-1].windows == element.windows) and (self._elements[-1].pane_info == element.pane_info):
        if isinstance(self._elements[-1], WindowedValue):
            # Promote the single trailing WindowedValue to a stack first.
            self._elements[-1] = _Bundle._StackedWindowedValues(self._elements[-1])
        self._elements[-1].add_value(element.value)
    else:
        self._elements.append(element)
Outputs an element to this bundle. Args: element: WindowedValue
github-repos
def to_proto(self, export_scope=None):
    """Converts a `Variable` to a `VariableDef` protocol buffer.

    Args:
        export_scope: Optional `string`. Name scope to remove.

    Returns:
        A `VariableDef` protocol buffer, or `None` if the `Variable` is not
        in the specified name scope.
    """
    if export_scope is None or self._variable.name.startswith(export_scope):
        var_def = variable_pb2.VariableDef()
        # Strip the export scope from every node name so the proto is
        # relocatable into another graph/scope on import.
        var_def.variable_name = ops.strip_name_scope(self._variable.name, export_scope)
        if self._initial_value is not None:
            var_def.initial_value_name = ops.strip_name_scope(self._initial_value.name, export_scope)
        var_def.trainable = self.trainable
        var_def.synchronization = self.synchronization.value
        var_def.aggregation = self.aggregation.value
        var_def.initializer_name = ops.strip_name_scope(self.initializer.name, export_scope)
        var_def.snapshot_name = ops.strip_name_scope(self._snapshot.name, export_scope)
        if self._save_slice_info:
            # Partitioned-variable slice metadata, if any.
            var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(export_scope=export_scope))
        return var_def
    else:
        return None
Converts a `Variable` to a `VariableDef` protocol buffer. Args: export_scope: Optional `string`. Name scope to remove. Returns: A `VariableDef` protocol buffer, or `None` if the `Variable` is not in the specified name scope.
github-repos
def filter_spent_outputs(self, outputs):
    """Remove outputs that have been spent.

    Args:
        outputs: list of TransactionLink.

    Returns:
        The subset of ``outputs`` that has not been spent.
    """
    links = [output.to_dict() for output in outputs]
    spending_txs = list(query.get_spending_transactions(self.connection, links))
    # Collect every link consumed by an input of a spending transaction.
    spent_links = {
        TransactionLink.from_dict(tx_input['fulfills'])
        for tx in spending_txs
        for tx_input in tx['inputs']
    }
    return [output for output in outputs if output not in spent_links]
Remove outputs that have been spent Args: outputs: list of TransactionLink
juraj-google-style
def list_devices(device_type=None):
    """Return all the available devices based on the device type.

    Note that this should return the global devices in a distributed
    setting.

    Args:
        device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Defaults to
            `gpu` or `tpu` if available when not provided; otherwise the
            `cpu` devices are returned.

    Returns:
        List of devices that are available for distributed computation,
        formatted as ``"<type>:<index>"``.
    """
    device_type = device_type.upper() if device_type else None
    tf_devices = tf.config.list_logical_devices(device_type=device_type)
    cpu_devices = []
    other_devices = []
    for device in tf_devices:
        if device.device_type.lower() == 'cpu':
            cpu_devices.append(device)
        else:
            other_devices.append(device)
    if device_type is None:
        # Prefer accelerators when present; fall back to CPUs.
        tf_devices = other_devices if len(other_devices) > 0 else cpu_devices
    # str.format instead of nesting single quotes inside a single-quoted
    # f-string, which is a SyntaxError before Python 3.12 (PEP 701).
    return [
        '{}:{}'.format(device.device_type.lower(), device.name.split(':')[-1])
        for device in tf_devices
    ]
Return all the available devices based on the device type. Note that this should return the global devices in a distributed setting. Args: device_type: string of `"cpu"`, `"gpu"` or `"tpu"`. Default to `gpu` or `tpu` if available when device_type is not provided. Otherwise will return the `cpu` devices. Return: List of devices that are available for distribute computation.
github-repos
def update_state(self, y_true, y_pred, sample_weight=None):
    """Accumulate confusion matrix statistics.

    Args:
        y_true: The ground truth values.
        y_pred: The predicted values.
        sample_weight: Optional weighting of each example. Defaults to
            `1`. Can be a tensor whose rank is either 0, or the same rank
            as `y_true`, and must be broadcastable to `y_true`.
    """
    # Map each confusion-matrix cell to its accumulator variable.
    confusion_matrix_variables = {
        metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
        metrics_utils.ConfusionMatrix.TRUE_NEGATIVES: self.true_negatives,
        metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
        metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
    }
    metrics_utils.update_confusion_matrix_variables(
        confusion_matrix_variables,
        y_true,
        y_pred,
        thresholds=self.thresholds,
        thresholds_distributed_evenly=self._thresholds_distributed_evenly,
        class_id=self.class_id,
        sample_weight=sample_weight)
Accumulates confusion matrix statistics. Args: y_true: The ground truth values. y_pred: The predicted values. sample_weight: Optional weighting of each example. Defaults to `1`. Can be a tensor whose rank is either 0, or the same rank as `y_true`, and must be broadcastable to `y_true`.
github-repos
def release_dates(self, **kwargs):
    """Get the release dates and certification for a specific movie id.

    Args:
        append_to_response: (optional) Comma separated, any movie method.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_id_path('release_dates')
    response = self._GET(endpoint, kwargs)
    # Mirror the response fields onto this object for attribute access.
    self._set_attrs_to_values(response)
    return response
Get the release dates and certification for a specific movie id. Args: append_to_response: (optional) Comma separated, any movie method. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def to_csv(evset: EventSet, path: str, sep: str=',', na_rep: Optional[str]=None, columns: Optional[List[str]]=None):
    """Save an [`EventSet`][temporian.EventSet] to a CSV file.

    Args:
        evset: EventSet to save.
        path: Path to the file.
        sep: Separator to use.
        na_rep: Representation to use for missing values.
        columns: Columns to save. If `None`, saves all columns.
    """
    # Round-trip through pandas, which owns the CSV serialization.
    frame = to_pandas(evset)
    frame.to_csv(path, index=False, sep=sep, na_rep=na_rep, columns=columns)
Saves an [`EventSet`][temporian.EventSet] to a CSV file. Example: ```python >>> output_path = str(tmp_dir / "output_data.csv") >>> evset = tp.event_set(timestamps=[1,], features={"f1": [0.1]}) >>> tp.to_csv(evset, output_path) ``` Args: evset: EventSet to save. path: Path to the file. sep: Separator to use. na_rep: Representation to use for missing values. columns: Columns to save. If `None`, saves all columns.
github-repos
def idle(self, stop_signals: tuple=(SIGINT, SIGTERM, SIGABRT)):
    """Block until one of the signals is received, then stop the Client.

    Blocks the program execution until one of the signals is received,
    then gently stops the Client by closing the underlying connection.

    Args:
        stop_signals (``tuple``, *optional*): Iterable containing signals
            the signal handler will listen to. Defaults to
            (SIGINT, SIGTERM, SIGABRT).
    """
    def signal_handler(*args):
        # Any registered signal ends the idle loop below.
        self.is_idle = False

    for stop_signal in stop_signals:
        signal(stop_signal, signal_handler)
    self.is_idle = True
    # Poll once a second until a handler clears the flag.
    while self.is_idle:
        time.sleep(1)
    self.stop()
Blocks the program execution until one of the signals are received, then gently stop the Client by closing the underlying connection. Args: stop_signals (``tuple``, *optional*): Iterable containing signals the signal handler will listen to. Defaults to (SIGINT, SIGTERM, SIGABRT).
codesearchnet
def check_config(config, path):
    """Validate the config against DEFAULT_CONFIG.

    Any unknown keys or wrong types will add error messages.

    Args:
        config (dict): the running config.
        path (str): the path to the config file, used in error messages.

    Returns:
        list: the error messages found when validating the config.
    """
    messages = []
    config_copy = get_frozen_copy(config)
    missing_keys = (set(DEFAULT_CONFIG.keys()) - set(config_copy.keys()))
    if missing_keys:
        messages.append('Missing config keys {}!'.format(missing_keys))
    for (key, value) in config_copy.items():
        if (key not in DEFAULT_CONFIG):
            messages.append('Unknown key {} in {}!'.format(key, path))
            continue
        if (value is None):
            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))
        else:
            value_type = type(value)
            # Defaults may vary per cot_product; resolve the expected type
            # from the product-specific default when one is declared.
            if (isinstance(DEFAULT_CONFIG[key], Mapping) and ('by-cot-product' in DEFAULT_CONFIG[key])):
                default_type = type(DEFAULT_CONFIG[key]['by-cot-product'][config['cot_product']])
            else:
                default_type = type(DEFAULT_CONFIG[key])
            if (value_type is not default_type):
                messages.append('{} {}: type {} is not {}!'.format(path, key, value_type, default_type))
        # '...' is the placeholder for values the operator must fill in.
        if (value in ('...', b'...')):
            messages.append(_VALUE_UNDEFINED_MESSAGE.format(path=path, key=key))
        # Taskcluster requires these identifiers to match its id pattern.
        if ((key in ('provisioner_id', 'worker_group', 'worker_type', 'worker_id')) and (not _is_id_valid(value))):
            messages.append('{} doesn\'t match "{}" (required by Taskcluster)'.format(key, _GENERIC_ID_REGEX.pattern))
    return messages
Validate the config against DEFAULT_CONFIG. Any unknown keys or wrong types will add error messages. Args: config (dict): the running config. path (str): the path to the config file, used in error messages. Returns: list: the error messages found when validating the config.
codesearchnet
def _fill_parameters(self):
    """Fill in the _parameters dict from the properties file.

    Resolves two kinds of placeholder values: SSM-style references
    (looked up in Parameter Store) and ASK markers (prompted for
    interactively with hidden input).

    Returns:
        bool: False when an SSM parameter lookup fails; True otherwise.
    """
    self._parameters = self._config.get('parameters', {})
    self._fill_defaults()
    for k in self._parameters.keys():
        try:
            if (self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']')):
                # Value looks like '<SSM prefix>:name]'; extract the bare
                # parameter name and substitute the stored value.
                parts = self._parameters[k].split(':')
                tmp = parts[1].replace(']', '')
                val = self._get_ssm_parameter(tmp)
                if val:
                    self._parameters[k] = val
                else:
                    logging.error('SSM parameter {} not found'.format(tmp))
                    return False
            elif (self._parameters[k] == self.ASK):
                # Prompt twice with hidden input until both entries match.
                val = None
                a1 = '__x___'
                a2 = '__y___'
                prompt1 = "Enter value for '{}': ".format(k)
                prompt2 = "Confirm value for '{}': ".format(k)
                while (a1 != a2):
                    a1 = getpass.getpass(prompt=prompt1)
                    a2 = getpass.getpass(prompt=prompt2)
                    if (a1 == a2):
                        val = a1
                    else:
                        print('values do not match, try again')
                self._parameters[k] = val
        except:
            # Deliberate best-effort: non-string values (e.g. ints) raise
            # on startswith and are simply left untouched.
            pass
    return True
Fill in the _parameters dict from the properties file. Args: None Returns: True Todo: Figure out what could go wrong and at least acknowledge the fact that Murphy was an optimist.
codesearchnet
def _MergeEventTag(self, storage_writer, attribute_container):
    """Merges an event tag with the last stored event tag.

    If there is an existing event tag the provided event tag is updated
    with the contents of the existing one. After which the event tag
    index is updated.

    Args:
        storage_writer (StorageWriter): storage writer.
        attribute_container (AttributeContainer): container.
    """
    if attribute_container.CONTAINER_TYPE != 'event_tag':
        return
    event_identifier = attribute_container.GetEventIdentifier()
    if not event_identifier:
        return
    existing_event_tag = self._event_tag_index.GetEventTagByIdentifier(
        storage_writer, event_identifier)
    if existing_event_tag:
        # Fold the previously stored comment and labels into the new tag.
        attribute_container.AddComment(existing_event_tag.comment)
        attribute_container.AddLabels(existing_event_tag.labels)
    self._event_tag_index.SetEventTag(attribute_container)
Merges an event tag with the last stored event tag. If there is an existing event the provided event tag is updated with the contents of the existing one. After which the event tag index is updated. Args: storage_writer (StorageWriter): storage writer. attribute_container (AttributeContainer): container.
juraj-google-style
def wrap_layer_functions(layer, serialization_cache):
    """Returns dict of wrapped layer call function and losses in tf.functions.

    Args:
        layer: Keras Layer object.
        serialization_cache: Dictionary shared between all objects during
            serialization.

    Returns:
        A dictionary containing all keras tf.functions to serialize. See
        LayerAttributes and ModelAttributes for the list of all attributes.
    """
    if isinstance(layer, keras_load.RevivedLayer) and (not isinstance(layer, sequential_lib.Sequential)):
        # Revived (previously deserialized) layers already carry their
        # saved functions; reuse them instead of re-tracing.
        return {fn_name: getattr(layer.keras_api, fn_name, None) for fn_name in serialized_attributes.LayerAttributes.all_functions}
    # Temporarily swap child layer functions and clear accumulated losses
    # so tracing records only this layer's behavior; restored at the end.
    original_fns = _replace_child_layer_functions(layer, serialization_cache)
    original_losses = _reset_layer_losses(layer)
    call_collection = LayerCallCollection(layer)
    call_fn_with_losses = call_collection.add_function(_wrap_call_and_conditional_losses(layer), '{}_layer_call_and_return_conditional_losses'.format(layer.name), match_layer_training_arg=True)
    call_fn = call_collection.add_function(_extract_outputs_from_fn(layer, call_fn_with_losses), '{}_layer_call_fn'.format(layer.name), match_layer_training_arg=False)
    fns = {'call_and_return_conditional_losses': call_fn_with_losses, '__call__': call_fn}
    if layer._activity_regularizer is not None:
        fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
        fns['call_and_return_all_conditional_losses'] = call_collection.add_function(_append_activity_regularizer_loss(layer, call_fn_with_losses, fns['activity_regularizer_fn']), '{}_layer_call_and_return_all_conditional_losses'.format(layer.name), match_layer_training_arg=False)
    else:
        fns['activity_regularizer_fn'] = None
        fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
    with tracing_scope():
        call_collection.trace_with_input_signature()
        with base_layer_utils.call_context().enter(layer, inputs=None, build_graph=True, training=None, saving=True):
            for fn in fns.values():
                if fn is not None and fn.input_signature is not None:
                    if isinstance(fn, LayerCall):
                        fn = fn.wrapped_call
                    # Force a concrete trace for functions that declare a
                    # fixed input signature.
                    fn.get_concrete_function()
    # Undo the temporary swaps made above.
    _restore_child_layer_functions(original_fns)
    _restore_layer_losses(original_losses)
    return fns
Returns dict of wrapped layer call function and losses in tf.functions. Args: layer: Keras Layer object. serialization_cache: Dictionary shared between all objects during serialization. Returns: A dictionary containing all keras tf.functions to serialize. See LayerAttributes and ModelAttributes for the list of all attributes.
github-repos
def __init__(self, entries: Iterable[Tuple[int, TItem]] = (), *, drop_duplicate_entries: bool=False):
    """Initializes a new priority queue.

    Args:
        entries: Initial contents of the priority queue.
        drop_duplicate_entries: If set, the priority queue will ignore
            operations that enqueue a (priority, item) pair that is
            already in the priority queue. Note that duplicates of an
            item may still be enqueued, as long as they have different
            priorities.
    """
    self._buckets = []
    self._offset = 0
    self._len = 0
    # Track seen (priority, item) pairs only when deduplication is on.
    self._drop_set = set() if drop_duplicate_entries else None
    for priority, item in entries:
        self.enqueue(priority, item)
Initializes a new priority queue. Args: entries: Initial contents of the priority queue. drop_duplicate_entries: If set, the priority queue will ignore operations that enqueue a (priority, item) pair that is already in the priority queue. Note that duplicates of an item may still be enqueued, as long as they have different priorities.
juraj-google-style
def _project_dict(self, **kwargs: Dict[(str, Any)]) -> Dict[(str, Hist)]:
    """Driver function for projecting and storing a dictionary of observables.

    Args:
        kwargs (dict): Additional named args to be passed to
            projection_name(...) and output_key_name(...).

    Returns:
        The projected histograms. The projected histograms are also stored
        in ``output_observable``.
    """
    # Deep copies so per-observable mutation of these arg dicts cannot
    # leak back into the caller's kwargs.
    get_hist_args = copy.deepcopy(kwargs)
    projection_name_args = copy.deepcopy(kwargs)
    for (key, input_observable) in self.observable_to_project_from.items():
        # projection_name_args is intentionally rebound each iteration
        # with whatever _project_observable returns.
        (output_hist, projection_name, projection_name_args) = self._project_observable(input_key=key, input_observable=input_observable, get_hist_args=get_hist_args, projection_name_args=projection_name_args, **kwargs)
        # NOTE(review): output_hist_args aliases projection_name_args, so
        # the update below also mutates it — appears intentional; confirm.
        output_hist_args = projection_name_args
        output_hist_args.update({'output_hist': output_hist, 'projection_name': projection_name})
        output_key_name = self.output_key_name(**output_hist_args)
        self.output_observable[output_key_name] = self.output_hist(**output_hist_args)
    return self.output_observable
Driver function for projecting and storing a dictionary of observables. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histograms. The projected histograms are also stored in ``output_observable``.
codesearchnet
def get_subscribed_services_names(cls):
    """Get a list of the names of all subscribed music services.

    Returns:
        list: A list of strings.
    """
    accounts_for_service = Account.get_accounts_for_service
    service_data = cls._get_music_services_data().values()
    # A service counts as subscribed when at least one account exists
    # for its service type.
    return [
        service['Name']
        for service in service_data
        if len(accounts_for_service(service['ServiceType'])) > 0
    ]
Get a list of the names of all subscribed music services. Returns: list: A list of strings.
codesearchnet
def sync_l(self, option: str='all') -> None:
    """List what would be synced, but don't copy.

    Args:
        option: one of 'system', 'vendor', 'oem', 'data', 'all'.

    Raises:
        ValueError: if ``option`` is not one of the accepted values.
    """
    valid_options = ('system', 'vendor', 'oem', 'data', 'all')
    if option not in valid_options:
        raise ValueError('There is no option named: {!r}.'.format(option))
    self._execute('-s', self.device_sn, 'sync', '-l', option)
List but don't copy. Args: option: 'system', 'vendor', 'oem', 'data', 'all'
codesearchnet
def _SigSegvHandler(self, signal_number, stack_frame):
    """Signal handler for the SIGSEGV signal.

    Args:
        signal_number (int): numeric representation of the signal.
        stack_frame (frame): current stack frame or None.
    """
    self._OnCriticalError()
    # Restore the original handler and re-raise the signal so default
    # crash semantics still apply after our cleanup.
    if self._original_sigsegv_handler is not None:
        signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)
        os.kill(self._pid, signal.SIGSEGV)
Signal handler for the SIGSEGV signal. Args: signal_number (int): numeric representation of the signal. stack_frame (frame): current stack frame or None.
juraj-google-style
def from_maildir(self, codes: str) -> FrozenSet[Flag]: flags = set() for code in codes: if (code == ','): break to_sys = self._to_sys.get(code) if (to_sys is not None): flags.add(to_sys) else: to_kwd = self._to_kwd.get(code) if (to_kwd is not None): flags.add(to_kwd) return frozenset(flags)
Return the set of IMAP flags that correspond to the letter codes. Args: codes: The letter codes to map.
codesearchnet
def load_api_folder(api_folder_path):
    """Load api definitions from an api folder.

    Each loaded file may hold either a list of single-key
    ``{"api": {...}}`` items, keyed in the result by the api id taken from
    'id', 'def' or 'name', or a plain mapping, keyed by the file path.

    Args:
        api_folder_path (str): api files folder.

    Returns:
        dict: api definition mapping.

    Raises:
        exceptions.ParamsError: on an invalid or duplicated api definition.
    """
    api_definition_mapping = {}
    api_items_mapping = load_folder_content(api_folder_path)
    for (api_file_path, api_items) in api_items_mapping.items():
        if isinstance(api_items, list):
            for api_item in api_items:
                (key, api_dict) = api_item.popitem()
                # The api id may be declared under any of these three keys.
                api_id = (api_dict.get('id') or api_dict.get('def') or api_dict.get('name'))
                if ((key != 'api') or (not api_id)):
                    raise exceptions.ParamsError('Invalid API defined in {}'.format(api_file_path))
                if (api_id in api_definition_mapping):
                    raise exceptions.ParamsError('Duplicated API ({}) defined in {}'.format(api_id, api_file_path))
                else:
                    api_definition_mapping[api_id] = api_dict
        elif isinstance(api_items, dict):
            if (api_file_path in api_definition_mapping):
                raise exceptions.ParamsError('Duplicated API defined: {}'.format(api_file_path))
            else:
                api_definition_mapping[api_file_path] = api_items
    return api_definition_mapping
load api definitions from api folder. Args: api_folder_path (str): api files folder. api file should be in the following format: [ { "api": { "def": "api_login", "request": {}, "validate": [] } }, { "api": { "def": "api_logout", "request": {}, "validate": [] } } ] Returns: dict: api definition mapping. { "api_login": { "function_meta": {"func_name": "api_login", "args": [], "kwargs": {}} "request": {} }, "api_logout": { "function_meta": {"func_name": "api_logout", "args": [], "kwargs": {}} "request": {} } }
codesearchnet
def prepare_xml_read(data, objectify=False):
    """Prepare various input types for XML parsing.

    Args:
        data (iter): Data to read — a file-like object, a list of strings,
            or a file path.
        objectify (bool): Parse using lxml's objectify data binding.

    Returns:
        etree.ElementTree: Tree suitable for parsing.

    Raises:
        TypeError: Invalid value for data.
    """
    mod = _objectify if objectify else etree
    if hasattr(data, 'readlines'):
        data = mod.parse(data).getroot()
    elif isinstance(data, list):
        data = mod.fromstring(''.join(data))
    elif isinstance(data, basestring):
        # Close the file handle deterministically instead of leaking it
        # to the garbage collector.
        with open(data) as source:
            data = mod.parse(source).getroot()
    else:
        raise TypeError('Unable to handle data of type %r' % type(data))
    return data
Prepare various input types for XML parsing. Args: data (iter): Data to read objectify (bool): Parse using lxml's objectify data binding Returns: etree.ElementTree: Tree suitable for parsing Raises: TypeError: Invalid value for data
juraj-google-style
def _cancel_grpc(operations_stub, operation_name):
    """Cancel an operation using a gRPC client.

    Args:
        operations_stub (google.longrunning.operations_pb2.OperationsStub):
            The gRPC operations stub.
        operation_name (str): The name of the operation.
    """
    cancel_request = operations_pb2.CancelOperationRequest(name=operation_name)
    operations_stub.CancelOperation(cancel_request)
Cancel an operation using a gRPC client. Args: operations_stub (google.longrunning.operations_pb2.OperationsStub): The gRPC operations stub. operation_name (str): The name of the operation.
codesearchnet
def depth_april_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_april_average_ground_temperature`.

    Args:
        value (float): value for the IDD field, unit: C. If `value` is
            None it will not be checked against the specification and is
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float for field `depth_april_average_ground_temperature`'.format(value))
    self._depth_april_average_ground_temperature = value
Corresponds to IDD Field `depth_april_average_ground_temperature` Args: value (float): value for IDD Field `depth_april_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def call(self, input_values: tf.Tensor, attention_mask: tf.Tensor | None=None, token_type_ids: tf.Tensor | None=None, position_ids: tf.Tensor | None=None, head_mask: tf.Tensor | None=None, inputs_embeds: tf.Tensor | None=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, training: bool=False) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
    """Forward pass delegating to the underlying Hubert encoder.

    Args:
        input_values: raw speech input values.
        attention_mask, token_type_ids, position_ids, head_mask,
            inputs_embeds: optional tensors forwarded unchanged.
        output_attentions: whether to return attentions; falls back to
            the config value when None.
        output_hidden_states: whether to return all hidden states; falls
            back to the config value when None.
        return_dict: whether to return a model-output object instead of a
            tuple; falls back to the config value when None.
        training: whether the call runs in training mode.

    Returns:
        TFBaseModelOutput or tuple as produced by the encoder.
    """
    # Use `is not None` so an explicitly passed False is respected rather
    # than being overridden by the config defaults (the original truthiness
    # checks conflated False with "unset").
    output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    return_dict = return_dict if return_dict is not None else self.config.return_dict
    outputs = self.hubert(input_values=input_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
    return outputs
Returns: Example: ```python >>> from transformers import AutoProcessor, TFHubertModel >>> from datasets import load_dataset >>> import soundfile as sf >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft") >>> model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft") >>> def map_to_array(batch): ... speech, _ = sf.read(batch["file"]) ... batch["speech"] = speech ... return batch >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.map(map_to_array) >>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1 >>> hidden_states = model(input_values).last_hidden_state ```
github-repos
def base_type(self, value):
    """The base_type property setter.

    Stores the value; when it equals the default and a value is already
    stored, the stored entry is removed instead.

    Args:
        value (string): the property value.
    """
    is_default = value == self._defaults['baseType']
    if is_default and 'baseType' in self._values:
        del self._values['baseType']
    else:
        self._values['baseType'] = value
The base_type property. Args: value (string). the property value.
juraj-google-style
def create_app(config=None, config_obj=None):
    """Flask app factory function.

    Args:
        config (Optional[path]): path to a Python module config file.
        config_obj (Optional[class]): Python config object.

    Returns:
        The configured Flask application.
    """
    app = Flask(__name__)
    # Order matters: configuration first, then blueprints, then extensions.
    configure_app(app, config=config, config_obj=config_obj)
    register_blueprints(app)
    bind_extensions(app)
    return app
Flask app factory function. Args: config (Optional[path]): path to a Python module config file config_obj (Optional[class]): Python config object
juraj-google-style
def __init__(self, return_type, cl_function_name, parameter_list, cl_code_file, var_replace_dict=None, **kwargs):
    """Create a CL function for a library function.

    These functions are not meant to be optimized, but can be used as
    helper functions in models.

    Args:
        return_type (str): the return type of the CL function.
        cl_function_name (str): The name of the CL function.
        parameter_list: the parameters of the CL function.
        cl_code_file (str): The location of the code file.
        var_replace_dict (dict): In the cl_code file these replacements
            will be made (using the % format function of Python).
    """
    self._var_replace_dict = var_replace_dict
    with open(os.path.abspath(cl_code_file), 'r') as code_file:
        code = code_file.read()
    if var_replace_dict is not None:
        # Apply %-style template substitution to the raw CL code.
        code = code % var_replace_dict
    super().__init__(return_type, cl_function_name, parameter_list, code, **kwargs)
    self._code = code
Create a CL function for a library function. These functions are not meant to be optimized, but can be used a helper functions in models. Args: cl_function_name (str): The name of the CL function cl_code_file (str): The location of the code file var_replace_dict (dict): In the cl_code file these replacements will be made (using the % format function of Python)
juraj-google-style
def mock(self, url=None, **kw):
    """Create and register a new HTTP mock in the current engine.

    Arguments:
        url (str): request URL to mock.
        activate (bool): force mock engine activation.
            Defaults to ``False``.
        **kw (mixed): variadic keyword arguments for ``Mock`` constructor.

    Returns:
        pook.Mock: new mock instance.
    """
    # 'activate' is an engine option, not a Mock option; only consume it
    # when it is truthy (a falsy value is passed through to Mock).
    if kw.get('activate'):
        kw.pop('activate')
        self.activate()
    new_mock = Mock(url=url, **kw)
    new_mock._engine = self
    self.add_mock(new_mock)
    return new_mock
Creates and registers a new HTTP mock in the current engine. Arguments: url (str): request URL to mock. activate (bool): force mock engine activation. Defaults to ``False``. **kw (mixed): variadic keyword arguments for ``Mock`` constructor. Returns: pook.Mock: new mock instance.
codesearchnet
def _process_tensor_event(self, event, thresholds):
    """Convert a TensorEvent into a dict describing PR-curve data.

    Args:
        event: the TensorEvent to convert.
        thresholds: array of floats in [0, 1], inclusive and ascending.

    Returns:
        A JSON-able dictionary of PR curve data for one step.
    """
    values = tensor_util.make_ndarray(event.tensor_proto)
    return self._make_pr_entry(event.step, event.wall_time, values, thresholds)
Converts a TensorEvent into a dict that encapsulates information on it. Args: event: The TensorEvent to convert. thresholds: An array of floats that ranges from 0 to 1 (in that direction and inclusive of 0 and 1). Returns: A JSON-able dictionary of PR curve data for 1 step.
juraj-google-style
def get_learning_rate(self, iter):
    """Learning rate with step-wise exponential decay.

    Each entry in ``self.iter_steps`` that has been reached multiplies the
    base rate by ``self.gamma`` once.

    Args:
        iter (int): current iteration (starting with 0).

    Returns:
        float: the decayed learning rate.
    """
    passed_steps = sum(1 for step in self.iter_steps if iter >= step)
    return self.init_lr * self.gamma ** passed_steps
Get learning rate with exponential decay based on current iteration. Args: iter (int): Current iteration (starting with 0). Returns: float: Learning rate
juraj-google-style
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
    """Runs the CTC loss algorithm on each batch element.

    Args:
        y_true: tensor `(samples, max_string_length)` containing the truth
            labels.
        y_pred: tensor `(samples, time_steps, num_categories)` containing
            the prediction (softmax output).
        input_length: tensor `(samples, 1)` with the sequence length of
            each batch item in `y_pred`.
        label_length: tensor `(samples, 1)` with the sequence length of
            each batch item in `y_true`.

    Returns:
        Tensor of shape `(samples, 1)` containing the CTC loss of each
        element.
    """
    # Squeeze the trailing singleton dim and cast lengths to int32, as
    # required by the underlying ctc_loss op.
    label_length = math_ops.cast(array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
    input_length = math_ops.cast(array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
    # Dense labels are converted to the sparse representation ctc_loss expects.
    sparse_labels = math_ops.cast(ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
    # ctc_loss wants time-major log-probabilities; epsilon() guards log(0).
    y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
    # expand_dims restores the trailing singleton dim -> (samples, 1).
    return array_ops.expand_dims(ctc.ctc_loss(inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
Runs CTC loss algorithm on each batch element. Args: y_true: tensor `(samples, max_string_length)` containing the truth labels. y_pred: tensor `(samples, time_steps, num_categories)` containing the prediction, or output of the softmax. input_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_pred`. label_length: tensor `(samples, 1)` containing the sequence length for each batch item in `y_true`. Returns: Tensor with shape (samples,1) containing the CTC loss of each element.
github-repos
def measure_power(self, hz, duration, tag, offset=30):
    """Measure power consumption of the attached device.

    USB is switched to 'auto' and the DUT disconnect is handled while
    sampling; the first ``offset`` seconds of samples are discarded to let
    the device settle. Total time taken is roughly ``duration + offset``.

    Args:
        hz: Number of samples to take per second.
        duration: Number of seconds to take samples for.
        tag: A string name for the collected data group.
        offset: Seconds of initial data to discard. Defaults to 30.

    Returns:
        A MonsoonData object with the measured power data.

    Raises:
        MonsoonError: if no data was collected.
    """
    num = (duration * hz)
    oset = (offset * hz)
    data = None
    self.usb('auto')
    time.sleep(1)
    with self.dut.handle_usb_disconnect():
        time.sleep(1)
        try:
            data = self.take_samples(hz, num, sample_offset=oset)
            if (not data):
                raise MonsoonError(('No data was collected in measurement %s.' % tag))
            data.tag = tag
            self.dut.log.info('Measurement summary: %s', repr(data))
            return data
        finally:
            # Always stop collection and restore USB so the DUT comes
            # back, even when sampling raised.
            self.mon.StopDataCollection()
            self.log.info('Finished taking samples, reconnecting to dut.')
            self.usb('on')
            self.dut.adb.wait_for_device(timeout=DEFAULT_TIMEOUT_USB_ON)
            # Give the device time to stabilize after reconnecting.
            time.sleep(10)
            self.dut.log.info('Dut reconnected.')
Measure power consumption of the attached device. Because it takes some time for the device to calm down after the usb connection is cut, an offset is set for each measurement. The default is 30s. The total time taken to measure will be (duration + offset). Args: hz: Number of samples to take per second. duration: Number of seconds to take samples for in each step. offset: The number of seconds of initial data to discard. tag: A string that's the name of the collected data group. Returns: A MonsoonData object with the measured power data.
codesearchnet
def _Completion(self, match):
    r"""Replaces double square brackets with variable-length completion.

    Completion cannot be mixed with regexp matching or '\' characters,
    i.e. '[[(\n)]]' would become '(\(n)?)?.'

    Args:
        match: A regex Match() object whose text is of the form '[[word]]'.

    Returns:
        String of the format '(a(b(c(d)?)?)?)?'.
    """
    # The original body contained a stray bare `r` statement (a leftover
    # raw-docstring prefix) that raised NameError at call time; removed.
    word = str(match.group())[2:-2]
    # Nest each successive character: every prefix of `word` matches.
    return '(' + ('(').join(word) + ')?' * len(word)
r"""Replaces double square brackets with variable length completion. Completion cannot be mixed with regexp matching or '\' characters i.e. '[[(\n)]] would become (\(n)?)?.' Args: match: A regex Match() object. Returns: String of the format '(a(b(c(d)?)?)?)?'.
juraj-google-style
def _evolve_subsystem(self, state, qargs):
    """Evolve a quantum state by this operator on selected subsystems.

    Args:
        state (QuantumState): the input statevector or density matrix.
        qargs (list): subsystem positions to apply the operator on.

    Returns:
        QuantumState: the output quantum state as a square matrix of the
        original size.

    Raises:
        QiskitError: if the operator's input dimensions do not match the
        specified subsystem dimensions.
    """
    mat = np.reshape(self.data, self._shape)
    state_size = len(state)
    state_dims = self._automatic_dims(None, state_size)
    # Every targeted subsystem must be a qubit (dimension 2).
    if self.input_dims() != len(qargs) * (2, ):
        raise QiskitError(
            "Channel input dimensions are not compatible with state subsystem dimensions."
        )
    # View the square matrix as a tensor with one axis per subsystem for
    # both the row and column sides.
    tensor = np.reshape(state, 2 * state_dims)
    num_inidices = len(state_dims)
    # Row-side then column-side tensor indices of the targeted qubits;
    # axis order is reversed relative to qubit index.
    indices = [num_inidices - 1 - qubit for qubit in qargs
               ] + [2 * num_inidices - 1 - qubit for qubit in qargs]
    tensor = self._einsum_matmul(tensor, mat, indices)
    return np.reshape(tensor, [state_size, state_size])
Evolve a quantum state by the operator. Args: state (QuantumState): The input statevector or density matrix. qargs (list): a list of QuantumState subsystem positions to apply the operator on. Returns: QuantumState: the output quantum state. Raises: QiskitError: if the operator dimension does not match the specified QuantumState subsystem dimensions.
juraj-google-style
def enable_save_as_bf16(variables: List[tf_variables.Variable]):
    """Allow float32 DVariables to be checkpointed/restored as bfloat16.

    Only DTensor ``DVariable`` instances are affected; plain variables and
    tensors in ``variables`` are left untouched.

    Args:
        variables: A list of tf.Variable to enable bfloat16 save/restore on.
    """
    for variable in variables:
        if not isinstance(variable, d_variable.DVariable):
            continue
        variable.save_as_bf16 = True
Allows float32 DVariables to be checkpointed and restored as bfloat16. The method only affects the DVariable part inside the model and leaves non-DTensor Variables/Tensors untouched. Args: variables: A list of tf.Variable to be enabled with bfloat16 save/restore. Only has effect on DTensor Variables as they go through d_variables with DTensor Specific logis.
github-repos
def from_json(cls, data):
    """Create a Sky Condition from a dictionary.

    Dispatches to the clear-sky condition classes when ``solar_model``
    names one; otherwise fills in optional keys and builds this class.

    Args:
        data: dict with keys "solar_model" (string), "month" (int),
            "day_of_month" (int) and optionally
            "daylight_savings_indicator" ("Yes" or "No").
    """
    for key in ('solar_model', 'month', 'day_of_month'):
        assert key in data, 'Required key "{}" is missing!'.format(key)
    model = data['solar_model']
    if model == 'ASHRAEClearSky':
        return OriginalClearSkyCondition.from_json(data)
    if model == 'ASHRAETau':
        return RevisedClearSkyCondition.from_json(data)
    data.setdefault('daylight_savings_indicator', 'No')
    # NOTE: 'beam_shced' is the historical (misspelled) key used by this
    # data format; it must be preserved as-is for compatibility.
    for key in ('beam_shced', 'diff_sched'):
        data.setdefault(key, '')
    return cls(data['month'], data['day_of_month'], data['clearness'],
               data['daylight_savings_indicator'], data['beam_shced'],
               data['diff_sched'])
Create a Sky Condition from a dictionary. Args: data = { "solar_model": string, "month": int, "day_of_month": int, "daylight_savings_indicator": string // "Yes" or "No"}
juraj-google-style
def WriteUInt160(self, value):
    """Write a UInt160 type to the stream.

    Args:
        value (UInt160): the value to serialize into this stream.

    Raises:
        Exception: when ``value`` is not a UInt160 instance.
    """
    # isinstance (instead of an exact type() comparison) also accepts
    # UInt160 subclasses, which serialize identically.
    if not isinstance(value, UInt160):
        raise Exception("value must be UInt160 instance")
    value.Serialize(self)
Write a UInt160 type to the stream. Args: value (UInt160): Raises: Exception: when `value` is not of neocore.UInt160 type.
juraj-google-style
def _truncate_float(matchobj, format_str='0.2g'):
    """Truncate a matched float to a shorter representation.

    Args:
        matchobj (re.Match): match containing the original float text.
        format_str (str): format specifier applied to the parsed float.

    Returns:
        str: the truncated float, or '' for an empty match.
    """
    text = matchobj.group(0)
    return format(float(text), format_str) if text else ''
Truncate long floats Args: matchobj (re.Match): contains original float format_str (str): format specifier Returns: str: returns truncated float
juraj-google-style
def block(self, cutoffs=None, values=None, n_bins=0, right=False, function=None):
    """Block a log based on a number of bins, or on cutoffs.

    Args:
        cutoffs (array): bin edges; defaults to the mean when neither
            cutoffs nor n_bins is given.
        values (array): the values to map each bin to. Defaults to
            [0, 1, 2, ...].
        n_bins (int): number of equal-width bins spanning the data range.
        right (bool): passed to np.digitize (interval closedness).
        function (function): optional transform applied per block.

    Returns:
        Curve.
    """
    params = self.__dict__.copy()
    # With explicit values, derive the cutoffs from them (first value is
    # the base bin).
    if (values is not None) and (cutoffs is None):
        cutoffs = values[1:]
    if (cutoffs is None) and (n_bins == 0):
        cutoffs = np.mean(self)
    if (n_bins != 0) and (cutoffs is None):
        mi, ma = np.amin(self), np.amax(self)
        cutoffs = np.linspace(mi, ma, n_bins+1)
        cutoffs = cutoffs[:-1]
    try:
        data = np.digitize(self, cutoffs, right)
    except ValueError:
        # Scalar cutoff: digitize needs a sequence.
        data = np.digitize(self, [cutoffs], right)
    if (function is None) and (values is None):
        return Curve(data, params=params)
    data = data.astype(float)
    f = function or utils.null
    # Walk each constant run (top..base) and either transform it or map
    # it to the requested value.
    tops, vals = utils.find_edges(data)
    if values is None:
        for top, base in zip(tops[:-1], tops[1:]):
            data[top:base] = f(np.copy(self[top:base]))
        data[base:] = f(np.copy(self[base:]))
    else:
        for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]):
            data[top:base] = values[int(val)]
        data[base:] = values[int(vals[-1])]
    return Curve(data, params=params)
Block a log based on number of bins, or on cutoffs. Args: cutoffs (array) values (array): the values to map to. Defaults to [0, 1, 2,...] n_bins (int) right (bool) function (function): transform the log if you want. Returns: Curve.
juraj-google-style
def parse(lines, root=None):
    """Parse ls output lines into Directory entries keyed by path.

    Args:
        lines (list): lines generated by ls.
        root (str): directory name used for stanzas that have no heading.

    Returns:
        dict: maps each directory path to a Directory of its entries.
    """
    doc = {}
    entries = []
    name = None
    total = None
    for line in lines:
        line = line.strip()
        if not line:
            continue
        # A "/path:" heading starts a new stanza; flush the previous one.
        if line and line[0] == "/" and line[-1] == ":":
            if name is None:
                name = line[:-1]
                if entries:
                    # Entries seen before any heading belong to `root`.
                    d = Directory(name, total or len(entries), entries)
                    doc[root] = d
                    total = None
                    entries = []
            else:
                d = Directory(name, total or len(entries), entries)
                doc[name or root] = d
                total = None
                entries = []
                name = line[:-1]
            continue
        if line.startswith("total"):
            total = int(line.split(None, 1)[1])
            continue
        entries.append(line)
    # Flush the final stanza.
    name = name or root
    doc[name] = Directory(name, total or len(entries), entries)
    return doc
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza.
juraj-google-style
def which_with_envpath(executable: str, env: Dict[str, str]) -> str:
    """Perform :func:`shutil.which` using the PATH from ``env``.

    Reason: ``subprocess.run([executable, ...], env=env)`` searches the
    *parent's* PATH for the executable, not the child's — so the lookup
    must be done manually against the target environment.

    Args:
        executable: executable to find.
        env: environment to fetch the PATH variable from.

    Returns:
        The resolved path, or None if not found.
    """
    oldpath = os.environ.get("PATH", "")
    # Default to "" so a PATH-less env doesn't assign None (TypeError);
    # try/finally guarantees the parent PATH is restored even on error.
    os.environ["PATH"] = env.get("PATH", "")
    try:
        return shutil.which(executable)
    finally:
        os.environ["PATH"] = oldpath
Performs a :func:`shutil.which` command using the PATH from the specified environment. Reason: when you use ``run([executable, ...], env)`` and therefore ``subprocess.run([executable, ...], env=env)``, the PATH that's searched for ``executable`` is the parent's, not the new child's -- so you have to find the executable manually. Args: executable: executable to find env: environment to fetch the PATH variable from
juraj-google-style
def get_metric_parsers(metric_packages=tuple(), include_defaults=True):
    """Collect all metric parser classes.

    Args:
        metric_packages: iterable of packages containing metrics; each must
            be importable as ``import a.b.c``. Defaults to none.
        include_defaults: whether to include the built-in metric parsers.

    Returns:
        set: the discovered metric parser classes.
    """
    parsers = set()
    packages = list(metric_packages)
    if include_defaults:
        import git_code_debt.metrics
        packages.insert(0, git_code_debt.metrics)
    for package in packages:
        parsers.update(discover(package, is_metric_cls))
    return parsers
Gets all of the metric parsers. Args: metric_packages - Defaults to no extra packages. An iterable of metric-containing packages. A metric inherits DiffParserBase and does not have __metric__ = False. A metric package must be imported using import a.b.c. include_defaults - Whether to include the generic metric parsers.
juraj-google-style
def __init__(self, cbFun, cbCtx=None):
    """Create a *CallbackReader* bound to a user callback.

    Args:
        cbFun (callable): user callable accepting a *MIB name* and the
            *cbCtx* object.

    Keyword Args:
        cbCtx (object): user object used to pass state between user-scope
            code and the *cbFun* callable scope.
    """
    self._cbCtx = cbCtx
    self._cbFun = cbFun
Create an instance of *CallbackReader* bound to specific URL. Args: cbFun (callable): user callable accepting *MIB name* and *cbCtx* objects Keyword Args: cbCtx (object): user object that can be used to communicate state information between user-scope code and the *cbFun* callable scope
juraj-google-style
def add_edge(self, a, b):
    """Add an undirected edge between vertices ``a`` and ``b``.

    If either vertex does not exist yet, it is created.

    Args:
        a (hash): one vertex of the edge.
        b (hash): the other vertex of the edge.
    """
    # setdefault creates the adjacency set on first sight of a vertex and
    # never replaces an existing (possibly empty) set, unlike the original
    # truthiness check on the stored value.
    self.adjacency_lists.setdefault(a, set()).add(b)
    self.adjacency_lists.setdefault(b, set()).add(a)
Used to add edges to the graph. 'a' and 'b' are vertexes and if 'a' or 'b' doesn't exisit then the vertex is created Args: a (hash): is one vertex of the edge b (hash): is another vertext of the edge
juraj-google-style
def minute(self, value=None):
    """Corresponds to IDD Field `minute`.

    Args:
        value (int): value for IDD Field `minute`; must satisfy
            0 <= value <= 60. If None, the value is treated as missing and
            stored without validation.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        self._minute = value
        return
    try:
        value = int(value)
    except ValueError:
        raise ValueError('value {} need to be of type int '
                         'for field `minute`'.format(value))
    if value < 0:
        raise ValueError('value need to be greater or equal 0 '
                         'for field `minute`')
    if value > 60:
        raise ValueError('value need to be smaller 60 '
                         'for field `minute`')
    self._minute = value
Corresponds to IDD Field `minute` Args: value (int): value for IDD Field `minute` value >= 0 value <= 60 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def on_test_batch_end(self, batch, logs=None):
    """Calls the `on_test_batch_end` methods of the registered callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    if not self._should_call_test_batch_hooks:
        return
    self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
Calls the `on_test_batch_end` methods of its callbacks. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.
github-repos
def ListAssets(logdir, plugin_name):
    """List all assets available for a given plugin in a logdir.

    Args:
        logdir: A directory created by a TensorFlow summary.FileWriter.
        plugin_name: String name of the plugin to list assets for.

    Returns:
        A list of asset name strings. If the plugin subdirectory does not
        exist (logdir missing, or plugin never registered), an empty list.
    """
    plugin_dir = PluginDirectory(logdir, plugin_name)
    try:
        listing = tf.io.gfile.listdir(plugin_dir)
    except tf.errors.NotFoundError:
        return []
    return [entry.rstrip('/') for entry in listing]
List all the assets that are available for given plugin in a logdir. Args: logdir: A directory that was created by a TensorFlow summary.FileWriter. plugin_name: A string name of a plugin to list assets for. Returns: A string list of available plugin assets. If the plugin subdirectory does not exist (either because the logdir doesn't exist, or because the plugin didn't register) an empty list is returned.
juraj-google-style
def _average_precision(self, rec, prec):
    """Compute the 11-point interpolated average precision.

    Overrides the default AP computation with the special 11-point metric:
    precision is sampled at recall thresholds 0.0, 0.1, ..., 1.0 and
    averaged.

    Args:
        rec (numpy.array): cumulated recall.
        prec (numpy.array): cumulated precision.

    Returns:
        float: the average precision.
    """
    ap = 0.0
    for threshold in np.arange(0.0, 1.1, 0.1):
        reachable = rec >= threshold
        p = np.max(prec[reachable]) if np.sum(reachable) != 0 else 0
        ap += p / 11.0
    return ap
calculate average precision, override the default one, special 11-point metric Params: ---------- rec : numpy.array cumulated recall prec : numpy.array cumulated precision Returns: ---------- ap as float
codesearchnet
def inspect_repo(self, repo_name):
    """Return info about a specific repo.

    Args:
        repo_name: Name of the repo to inspect.

    Returns:
        The InspectRepo RPC response.
    """
    request = proto.InspectRepoRequest(repo=proto.Repo(name=repo_name))
    return self.stub.InspectRepo(request, metadata=self.metadata)
Returns info about a specific Repo. Params: * repo_name: Name of the repo.
juraj-google-style
def step(self, actions):
    """Make a step in all environments; preprocess and record frames.

    Args:
        actions: Batch of actions.

    Returns:
        (obs, rewards, dones) - batches of observations, clipped rewards
        and done flags respectively.

    Raises:
        ValueError: when the data for the current epoch has already been
        loaded from disk.
    """
    if (self._store_rollouts and self._rollouts_by_epoch_and_split[self.current_epoch]):
        raise ValueError('Data for current epoch has already been loaded from disk.')
    (obs, unclipped_rewards, dones) = self._step(actions)
    obs = self._preprocess_observations(obs)
    # Rewards are clipped to the declared range and rounded.
    (min_reward, max_reward) = self.reward_range
    rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
    if self._store_rollouts:
        unclipped_rewards = unclipped_rewards.astype(np.float64)
        encoded_obs = self._encode_observations(obs)
        # The previous frame is completed with its action and appended to
        # each rollout; the new frames wait for their action next step.
        for (rollout, frame, action) in zip(self._current_batch_rollouts, self._current_batch_frames, actions):
            rollout.append(frame._replace(action=action))
        self._current_batch_frames = [Frame(*orud, action=None) for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)]
    return (obs, rewards, dones)
Makes a step in all environments. Does any preprocessing and records frames. Args: actions: Batch of actions. Returns: (obs, rewards, dones) - batches of observations, rewards and done flags respectively. Raises: ValueError: when the data for current epoch has already been loaded.
codesearchnet
def __parse(self, function_meta):
    """Initialize this lazy function instance from its metadata.

    Args:
        function_meta (dict): function meta including name, args and
            kwargs.

    Raises:
        exceptions.ParamsError: if the built-in P()/ENV() helpers are
        called with anything other than exactly one positional argument.
    """
    self._func = get_mapping_function(
        function_meta["func_name"],
        self.functions_mapping
    )
    self.func_name = self._func.__name__
    # Arguments may themselves contain lazy expressions/variables.
    self._args = prepare_lazy_data(
        function_meta.get("args", []),
        self.functions_mapping,
        self.check_variables_set
    )
    self._kwargs = prepare_lazy_data(
        function_meta.get("kwargs", {}),
        self.functions_mapping,
        self.check_variables_set
    )
    # The built-in helpers accept exactly one positional argument each.
    if self.func_name == "load_csv_file":
        if len(self._args) != 1 or self._kwargs:
            raise exceptions.ParamsError("P() should only pass in one argument!")
        self._args = [self._args[0]]
    elif self.func_name == "get_os_environ":
        if len(self._args) != 1 or self._kwargs:
            raise exceptions.ParamsError("ENV() should only pass in one argument!")
        self._args = [self._args[0]]
init func as lazy functon instance Args: function_meta (dict): function meta including name, args and kwargs
juraj-google-style
def show(self, xlim=None, ylim=None, units="thz"):
    """Show the plot using matplotlib.

    Args:
        xlim: x-axis limits; None for automatic determination.
        ylim: y-axis limits.
        units: frequency units; one of thz, ev, mev, ha, cm-1, cm^-1.
    """
    figure = self.get_plot(xlim, ylim, units=units)
    figure.show()
Show the plot using matplotlib. Args: xlim: Specifies the x-axis limits. Set to None for automatic determination. ylim: Specifies the y-axis limits. units: units for the frequencies. Accepted values thz, ev, mev, ha, cm-1, cm^-1.
juraj-google-style
def add_parents(self, parents):
    """Add new parent nodes, filtering out duplicates.

    Duplicates are filtered both against the existing parent list and
    within ``parents`` itself; the original comprehension only checked the
    pre-existing list, so repeated entries inside ``parents`` were added
    more than once.

    Args:
        parents (list): OmniTree nodes to add as parents.
    """
    for parent in parents:
        if parent not in self._parents:
            self._parents.append(parent)
Adds new parent nodes after filtering for duplicates Args: parents (list): list of OmniTree nodes to add as parents
juraj-google-style
def convert_result(r):
    """Wait for and convert any AsyncResults; convert any Ref to a Remote.

    Args:
        r: an ordinary object, ipyparallel.AsyncResult, a Ref, or a
            Sequence (excluding strings) of such values.

    Returns:
        Either an ordinary object or a Remote instance (lists are
        converted element-wise, recursively).
    """
    # Strings are Sequences too but must be passed through whole.
    if (isinstance(r, collections.Sequence) and
            not isinstance(r, string_types)):
        rs = []
        for subresult in r:
            rs.append(convert_result(subresult))
        return rs
    if isinstance(r, ipyparallel.AsyncResult):
        # Blocks until the asynchronous result is available.
        r = r.r
    if isinstance(r, Ref):
        # Wrap the reference in its registered Remote proxy class.
        RemoteClass = distob.engine.proxy_types[r.type]
        r = RemoteClass(r)
    return r
Waits for and converts any AsyncResults. Converts any Ref into a Remote. Args: r: can be an ordinary object, ipyparallel.AsyncResult, a Ref, or a Sequence of objects, AsyncResults and Refs. Returns: either an ordinary object or a Remote instance
juraj-google-style
def global_horizontal_illuminance(self, value=999999.0):
    """Corresponds to IDD Field `global_horizontal_illuminance`.

    Will be missing if >= 999900.

    Args:
        value (float): value for the field, in lux; must be >= 0.0.
            Missing value: 999999.0. If None, the value is treated as
            missing and stored without validation.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        self._global_horizontal_illuminance = value
        return
    try:
        value = float(value)
    except ValueError:
        raise ValueError(
            'value {} need to be of type float '
            'for field `global_horizontal_illuminance`'.format(value))
    if value < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `global_horizontal_illuminance`')
    self._global_horizontal_illuminance = value
Corresponds to IDD Field `global_horizontal_illuminance` will be missing if >= 999900 Args: value (float): value for IDD Field `global_horizontal_illuminance` Unit: lux value >= 0.0 Missing value: 999999.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
juraj-google-style
def get_config_bool_option(parser: ConfigParser, section: str, option: str,
                           default: bool = None) -> bool:
    """Retrieve a boolean option from a config parser.

    Args:
        parser: instance of :class:`ConfigParser`.
        section: section name within the config file.
        option: option (variable) name within that section.
        default: value to return if the option is absent.

    Returns:
        The boolean value of the option (or ``default`` if absent).

    Raises:
        ValueError: if the section is absent.
    """
    if parser.has_section(section):
        return parser.getboolean(section, option, fallback=default)
    raise ValueError("config missing section: " + section)
Retrieves a boolean value from a parser. Args: parser: instance of :class:`ConfigParser` section: section name within config file option: option (variable) name within that section default: value to return if option is absent Returns: boolean value Raises: ValueError: if the section is absent
juraj-google-style
def dark(app):
    """Apply the dark theme to a Qt application instance.

    Args:
        app (QApplication): QApplication instance.
    """
    _apply_base_theme(app)
    palette = QPalette()
    # (role, rgb) pairs for the normal color group.
    normal_roles = [
        (QPalette.WindowText, (180, 180, 180)),
        (QPalette.Button, (53, 53, 53)),
        (QPalette.Light, (180, 180, 180)),
        (QPalette.Midlight, (90, 90, 90)),
        (QPalette.Dark, (35, 35, 35)),
        (QPalette.Text, (180, 180, 180)),
        (QPalette.BrightText, (180, 180, 180)),
        (QPalette.ButtonText, (180, 180, 180)),
        (QPalette.Base, (42, 42, 42)),
        (QPalette.Window, (53, 53, 53)),
        (QPalette.Shadow, (20, 20, 20)),
        (QPalette.Highlight, (42, 130, 218)),
        (QPalette.HighlightedText, (180, 180, 180)),
        (QPalette.Link, (56, 252, 196)),
        (QPalette.AlternateBase, (66, 66, 66)),
        (QPalette.ToolTipBase, (53, 53, 53)),
        (QPalette.ToolTipText, (180, 180, 180)),
    ]
    for role, rgb in normal_roles:
        palette.setColor(role, QColor(*rgb))
    # (role, rgb) pairs for the disabled color group.
    disabled_roles = [
        (QPalette.WindowText, (127, 127, 127)),
        (QPalette.Text, (127, 127, 127)),
        (QPalette.ButtonText, (127, 127, 127)),
        (QPalette.Highlight, (80, 80, 80)),
        (QPalette.HighlightedText, (127, 127, 127)),
    ]
    for role, rgb in disabled_roles:
        palette.setColor(QPalette.Disabled, role, QColor(*rgb))
    app.setPalette(palette)
Apply Dark Theme to the Qt application instance. Args: app (QApplication): QApplication instance.
codesearchnet
def _begin_operation_action(self, action):
    """Begin an attempted operation on a connection.

    If the connection is not idle, the action's callback is invoked with
    a failure; otherwise the connection is moved to InProgress.

    Args:
        action (ConnectionAction): the action describing what we are
            operating on.
    """
    conn_key = action.data['id']
    callback = action.data['callback']
    if self._get_connection_state(conn_key) != self.Idle:
        callback(conn_key, self.id, False,
                 'Cannot start operation, connection is not idle')
        return
    data = self._get_connection(conn_key)
    data['state'] = self.InProgress
    data['microstate'] = action.data['operation_name']
    data['action'] = action
Begin an attempted operation. Args: action (ConnectionAction): the action object describing what we are operating on
codesearchnet
def add(self, origin):
    """Add a new element to fsdb.

    Args:
        origin: either the path of a file or a readable/seekable object
            (file object, stream, StringIO...).

    Returns:
        String representing the digest of the file.
    """
    digest = self._calc_digest(origin)
    if self.exists(digest):
        # Content-addressed store: identical content needs no transfer.
        self.logger.debug('Added File: [{0}] ( Already exists. Skipping transfer)'.format(digest))
        return digest
    dest_path = self.get_file_path(digest)
    self._makedirs(os.path.dirname(dest_path))
    self._copy_content(origin, dest_path)
    self.logger.debug('Added file: "{0}" [{1}]'.format(digest, dest_path))
    return digest
Add new element to fsdb. Args: origin -- could be the path of a file or a readable/seekable object ( fileobject, stream, stringIO...) Returns: String rapresenting the digest of the file
codesearchnet
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None):
    """Convert segmentation model outputs into semantic segmentation maps.

    Args:
        outputs: raw model outputs providing ``logits`` of shape
            (batch, queries, classes + 1) and ``pred_masks`` of shape
            (batch, queries, height, width).
        target_sizes (`List[Tuple[int, int]]`, *optional*): target
            (height, width) per image; when unset, predictions are not
            resized.

    Returns:
        `List[torch.Tensor]`: one (height, width) map of class ids per
        batch item.
    """
    class_queries_logits = outputs.logits
    masks_queries_logits = outputs.pred_masks
    # Drop the trailing "no object" class before combining.
    class_probs = class_queries_logits.softmax(dim=-1)[..., :-1]
    mask_probs = masks_queries_logits.sigmoid()
    # Weight each query's mask by its class probabilities.
    segmentation = torch.einsum('bqc, bqhw -> bchw', class_probs, mask_probs)
    batch_size = class_queries_logits.shape[0]
    if target_sizes is None:
        labels = segmentation.argmax(dim=1)
        return [labels[i] for i in range(labels.shape[0])]
    if batch_size != len(target_sizes):
        raise ValueError('Make sure that you pass in as many target sizes as the batch dimension of the logits')
    semantic_maps = []
    for idx in range(batch_size):
        resized = nn.functional.interpolate(segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
        semantic_maps.append(resized[0].argmax(dim=0))
    return semantic_maps
Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
github-repos
def start_server(port):
    """Start a profiler grpc server that listens on the given port.

    The profiler server exits when the process finishes; there is no stop
    call. The service is defined in
    tensorflow/core/profiler/profiler_service.proto.

    Args:
        port: port the profiler server listens to.

    Example usage::

        tf.profiler.experimental.server.start(6009)
        # do your training here.
    """
    _pywrap_profiler.start_server(port)
Start a profiler grpc server that listens to given port. The profiler server will exit when the process finishes. The service is defined in tensorflow/core/profiler/profiler_service.proto. Args: port: port profiler server listens to. Example usage: ```python tf.profiler.experimental.server.start(6009) # do your training here.
github-repos
def detect_intent_knowledge(project_id, session_id, language_code, knowledge_base_id, texts):
    """Detect intent for each query, consulting a Knowledge Connector.

    Args:
        project_id: GCP project linked with the agent to query.
        session_id: session id; reusing the same id between requests
            continues the conversation.
        language_code: language of the queries.
        knowledge_base_id: the Knowledge base id to query against.
        texts: a list of text queries to send.
    """
    import dialogflow_v2beta1 as dialogflow
    session_client = dialogflow.SessionsClient()
    session_path = session_client.session_path(project_id, session_id)
    print('Session path: {}\n'.format(session_path))
    for text in texts:
        text_input = dialogflow.types.TextInput(text=text, language_code=language_code)
        query_input = dialogflow.types.QueryInput(text=text_input)
        # Point the query at the given knowledge base via query params.
        knowledge_base_path = dialogflow.knowledge_bases_client.KnowledgeBasesClient.knowledge_base_path(project_id, knowledge_base_id)
        query_params = dialogflow.types.QueryParameters(knowledge_base_names=[knowledge_base_path])
        response = session_client.detect_intent(session=session_path, query_input=query_input, query_params=query_params)
        print(('=' * 20))
        print('Query text: {}'.format(response.query_result.query_text))
        print('Detected intent: {} (confidence: {})\n'.format(response.query_result.intent.display_name, response.query_result.intent_detection_confidence))
        print('Fulfillment text: {}\n'.format(response.query_result.fulfillment_text))
        print('Knowledge results:')
        knowledge_answers = response.query_result.knowledge_answers
        for answers in knowledge_answers.answers:
            print(' - Answer: {}'.format(answers.answer))
            print(' - Confidence: {}'.format(answers.match_confidence))
Returns the result of detect intent with querying Knowledge Connector. Args: project_id: The GCP project linked with the agent you are going to query. session_id: Id of the session, using the same `session_id` between requests allows continuation of the conversation. language_code: Language of the queries. knowledge_base_id: The Knowledge base's id to query against. texts: A list of text queries to send.
codesearchnet
def VisitFunction(self, f):
    """Merge the signatures of a function.

    Signatures are grouped by arguments; each group collapses to one
    signature whose return values / exceptions are joined with "or".

    Args:
        f: A pytd.Function instance.

    Returns:
        Function with simplified / combined signatures.
    """
    merged = []
    for stripped_signature, ret_exc in self._GroupByArguments(f.signatures).items():
        joined_return = pytd_utils.JoinTypes(ret_exc.return_types)
        merged.append(stripped_signature.Replace(
            return_type=joined_return,
            exceptions=tuple(ret_exc.exceptions)))
    return f.Replace(signatures=tuple(merged))
Merge signatures of a function. This groups signatures by arguments and then for each group creates a single signature that joins the return values / exceptions using "or". Arguments: f: A pytd.Function instance Returns: Function with simplified / combined signatures.
github-repos
def _ResizeNearestNeighborGrad(op: ops.Operation, grad):
    """The derivatives for nearest neighbor resizing.

    Args:
        op: The ResizeNearestNeighbor op.
        grad: The tensor representing the gradient w.r.t. the output.

    Returns:
        The gradients w.r.t. the input and the (non-differentiable) size
        argument, i.e. ``[grads, None]``.
    """
    image = op.inputs[0]
    # Use the static spatial shape when known, otherwise fall back to the
    # dynamic shape tensor.
    if image.get_shape()[1:3].is_fully_defined():
        image_shape = image.get_shape()[1:3]
    else:
        image_shape = array_ops.shape(image)[1:3]
    # The grad op must mirror the forward op's alignment attributes.
    grads = gen_image_ops.resize_nearest_neighbor_grad(grad, image_shape, align_corners=op.get_attr('align_corners'), half_pixel_centers=op.get_attr('half_pixel_centers'))
    return [grads, None]
The derivatives for nearest neighbor resizing. Args: op: The ResizeNearestNeighbor op. grad: The tensor representing the gradient w.r.t. the output. Returns: The gradients w.r.t. the input and the output.
github-repos
def save(self, sess, save_path, timestep=None):
    """Save this component's managed variables.

    Args:
        sess: The session for which to save the managed variables.
        save_path: The path to save data to.
        timestep: Optional timestep appended to the file name.

    Returns:
        Checkpoint path where the model was saved.

    Raises:
        TensorForceError: if register_saver_ops was not called first.
    """
    if self._saver is None:
        raise TensorForceError('register_saver_ops should be called before save')
    return self._saver.save(
        sess=sess,
        save_path=save_path,
        global_step=timestep,
        write_meta_graph=False,
        write_state=True,
    )
Saves this component's managed variables. Args: sess: The session for which to save the managed variables. save_path: The path to save data to. timestep: Optional, the timestep to append to the file name. Returns: Checkpoint path where the model was saved.
codesearchnet
def snake_case_to_headless_camel_case(snake_string):
    """Convert snake_case to headlessCamelCase.

    Args:
        snake_string: The string to be converted.

    Returns:
        The input string converted to headlessCamelCase.
    """
    head, *tail = snake_string.split('_')
    return head + ''.join(part.capitalize() for part in tail)
Convert snake_case to headlessCamelCase. Args: snake_string: The string to be converted. Returns: The input string converted to headlessCamelCase.
codesearchnet
def encrypt_encoded(self, encoding, r_value):
    """Paillier-encrypt an encoded value.

    Args:
        encoding: The EncodedNumber instance.
        r_value (int): obfuscator for the ciphertext; when None, a random
            obfuscation is applied after encryption.

    Returns:
        EncryptedNumber: An encryption of the encoded value.
    """
    obfuscator = r_value if r_value else 1
    ciphertext = self.raw_encrypt(encoding.encoding, r_value=obfuscator)
    encrypted = EncryptedNumber(self, ciphertext, encoding.exponent)
    if r_value is None:
        # No caller-supplied obfuscator: randomize the ciphertext.
        encrypted.obfuscate()
    return encrypted
Paillier encrypt an encoded value. Args: encoding: The EncodedNumber instance. r_value (int): obfuscator for the ciphertext; by default (i.e. if *r_value* is None), a random value is used. Returns: EncryptedNumber: An encryption of *value*.
juraj-google-style
def _make_sent_vector(self, sent: List, bucket_length: int = None) -> np.ndarray:
    """Transform a sentence into the Numpy array used as network input.

    Args:
        sent: input sentence (a list of words).
        bucket_length: the width of the bucket; defaults to the sentence
            length.

    Returns:
        A 2d int32 array of shape (bucket_length, MAX_WORD_LENGTH + 2);
        answer[i][j] is the symbol index of the j-th position of the i-th
        word, framed by BEGIN/END markers and padded with PAD.
    """
    bucket_length = (bucket_length or len(sent))
    answer = np.zeros(shape=(bucket_length, (MAX_WORD_LENGTH + 2)), dtype=np.int32)
    for (i, word) in enumerate(sent):
        answer[(i, 0)] = self.tags.tok2idx('BEGIN')
        # Keep only the last MAX_WORD_LENGTH characters of long words.
        m = min(len(word), MAX_WORD_LENGTH)
        for (j, x) in enumerate(word[(- m):]):
            answer[(i, (j + 1))] = self.symbols.tok2idx(x)
        answer[(i, (m + 1))] = self.tags.tok2idx('END')
        answer[(i, (m + 2):)] = self.tags.tok2idx('PAD')
    return answer
Transforms a sentence to Numpy array, which will be the network input. Args: sent: input sentence bucket_length: the width of the bucket Returns: A 3d array, answer[i][j][k] contains the index of k-th letter in j-th word of i-th input sentence.
codesearchnet
def _GenerateNonImplementedMethod(self, method):
    """Generate a placeholder implementation for a service method.

    Args:
        method: Descriptor of the service method to generate a stub for.

    Returns:
        A method suitable for attaching to the service class; it forwards
        to self._NonImplementedMethod with the method's name.
    """
    def _not_implemented(inst, rpc_controller, request, callback):
        return self._NonImplementedMethod(method.name, rpc_controller, callback)
    return _not_implemented
Generates and returns a method that can be set for a service methods. Args: method: Descriptor of the service method for which a method is to be generated. Returns: A method that can be added to the service class.
codesearchnet
def update(self, rec=None, drop=None, tables=None, install=None, materialize=None, indexes=None, joins=0, views=0):
    """Merge another record's (or explicit) fields into this record.

    Args:
        rec (FIMRecord): optional record whose fields are merged in first.
        drop/tables/install/materialize/indexes: collections merged into
            the corresponding fields.
        joins (int), views (int): counters added to this record's totals.
    """
    if not drop:
        drop = []
    if not tables:
        tables = set()
    if not install:
        install = set()
    if not materialize:
        materialize = set()
    if not indexes:
        indexes = set()
    if rec:
        # NOTE(review): rec.views is not forwarded here, so a source
        # record's view count appears to be dropped — confirm whether
        # that is intentional.
        self.update(
            drop=rec.drop,
            tables=rec.tables,
            install=rec.install,
            materialize=rec.materialize,
            indexes=rec.indexes,
            joins=rec.joins
        )
    self.drop += drop
    self.tables |= set(tables)
    self.install |= set(install)
    self.materialize |= set(materialize)
    self.indexes |= set(indexes)
    self.joins += joins
    self.views += views
    # Any joins or views force pending installs to be materialized.
    if self.joins > 0 or self.views > 0:
        self.materialize |= self.install
        self.install = set()
Updates current record. Args: rec (FIMRecord):
juraj-google-style
def collapse_addresses(addresses):
    """Collapse a list of IP objects.

    Example:
        collapse_addresses([IPv4Network('192.0.2.0/25'),
                            IPv4Network('192.0.2.128/25')])
        -> [IPv4Network('192.0.2.0/24')]

    Args:
        addresses: An iterator of IPv4Network or IPv6Network objects.

    Returns:
        An iterator of the collapsed IPv(4|6)Network objects.

    Raises:
        TypeError: If passed a list of mixed version objects.
    """
    addrs = []
    ips = []
    nets = []
    # Split the input into single addresses and true networks, rejecting
    # any mix of IPv4 and IPv6 along the way.
    for ip in addresses:
        if isinstance(ip, _BaseAddress):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, ips[-1]))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            # A /32 (or /128) network is treated as a single address.
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, ips[-1]))
            try:
                ips.append(ip.ip)
            except AttributeError:
                ips.append(ip.network_address)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    ip, nets[-1]))
            nets.append(ip)
    # Summarize contiguous address ranges into networks, then collapse
    # them together with the explicit networks.
    ips = sorted(set(ips))
    if ips:
        for first, last in _find_address_range(ips):
            addrs.extend(summarize_address_range(first, last))
    return _collapse_addresses_internal(addrs + nets)
Collapse a list of IP objects. Example: collapse_addresses([IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/25')]) -> [IPv4Network('192.0.2.0/24')] Args: addresses: An iterator of IPv4Network or IPv6Network objects. Returns: An iterator of the collapsed IPv(4|6)Network objects. Raises: TypeError: If passed a list of mixed version objects.
juraj-google-style
def unenroll_user_from_course(self, username, course_id):
    """Call the enrollment API to unenroll the user from a course.

    Args:
        username (str): the user's username on the OpenEdx platform.
        course_id (str): the course's unique identifier.

    Returns:
        bool: whether the unenrollment succeeded (False when there is no
        active enrollment to begin with).
    """
    enrollment = self.get_course_enrollment(username, course_id)
    if not (enrollment and enrollment['is_active']):
        return False
    response = self.client.enrollment.post({
        'user': username,
        'course_details': {'course_id': course_id},
        'is_active': False,
        'mode': enrollment['mode']
    })
    return not response['is_active']
Call the enrollment API to unenroll the user in the course specified by course_id. Args: username (str): The username by which the user goes on the OpenEdx platform course_id (str): The string value of the course's unique identifier Returns: bool: Whether the unenrollment succeeded
juraj-google-style
def format_and_is_storage(path):
    """Check whether a path is storage and normalize it.

    Opened file-like objects are reported as storage directly; path-like
    inputs are decoded and normalized to forward slashes.

    Args:
        path: a path-like object or a file-like object.

    Returns:
        tuple: (str or file-like object, bool) — the updated path and
        True when it is storage.
    """
    if hasattr(path, 'read'):
        return path, True
    normalized = fsdecode(path).replace('\\', '/')
    return normalized, is_storage(normalized)
Checks if path is storage and format it. If path is an opened file-like object, returns is storage as True. Args: path (path-like object or file-like object): Returns: tuple: str or file-like object (Updated path), bool (True if is storage).
juraj-google-style
def _Dhcpcd(self, interfaces, logger):
    """Use dhcpcd to activate the given interfaces.

    Any running dhcpcd instance on an interface is stopped (-x) before
    the interface is (re)activated.

    Args:
        interfaces: list of string, the output device names to enable.
        logger: logger object, used to write to SysLog and serial port.
    """
    for interface in interfaces:
        try:
            subprocess.check_call(['/sbin/dhcpcd', '-x', interface])
        except subprocess.CalledProcessError:
            logger.info('Dhcpcd not yet running for interface %s.', interface)
        try:
            subprocess.check_call(['/sbin/dhcpcd', interface])
        except subprocess.CalledProcessError:
            logger.warning('Could not activate interface %s.', interface)
Use dhcpcd to activate the interfaces. Args: interfaces: list of string, the output device names to enable. logger: logger object, used to write to SysLog and serial port.
codesearchnet
def setValues(self, values):
    """Set the values of a DataFrame from a dictionary.

    Each dictionary key supplies the index columns of a row and the
    associated value supplies the remaining data columns.

    Args:
        values: Dictionary with the values to set.
    """
    num_cols = self.getNumCols()
    num_indices = self.getNumIndices()
    num_data_cols = num_cols - num_indices
    for raw_key, raw_value in values.items():
        index_part = Utils.convToList(raw_key)
        assert len(index_part) == num_indices
        data_part = Utils.convToList(raw_value)
        assert len(data_part) == num_data_cols
        self.addRow(index_part + data_part)
Set the values of a DataFrame from a dictionary. Args: values: Dictionary with the values to set.
codesearchnet
def find_newline(self, size=-1):
    """Search for a newline char in the buffer starting from the current offset.

    Args:
        size: number of bytes to search. -1 means all.

    Returns:
        Offset of the newline char in the buffer; -1 if it doesn't exist.
    """
    start = self._offset
    if size >= 0:
        # Bounded search window of exactly `size` bytes.
        return self._buffer.find('\n', start, start + size)
    return self._buffer.find('\n', start)
Search for newline char in buffer starting from current offset. Args: size: number of bytes to search. -1 means all. Returns: offset of newline char in buffer. -1 if doesn't exist.
codesearchnet
def parse_received(received):
    """Parse a single received header. Return a dictionary of values by clause.

    Each precompiled pattern in RECEIVED_COMPILED_LIST must match the header
    at most once; its single named group contributes one clause/value pair.

    Arguments:
        received {str} -- single received header

    Raises:
        MailParserReceivedParsingError -- Raised when a received header
            cannot be parsed (a pattern matches more than once, or no
            pattern matches at all)

    Returns:
        dict -- values by clause
    """
    values_by_clause = {}
    for pattern in RECEIVED_COMPILED_LIST:
        matches = [match for match in pattern.finditer(received)]
        if len(matches) == 0:
            log.debug(('No matches found for %s in %s' % (pattern.pattern, received)))
            continue
        elif len(matches) > 1:
            msg = ('More than one match found for %s in %s' % (pattern.pattern, received))
            log.error(msg)
            raise MailParserReceivedParsingError(msg)
        log.debug(('Found one match for %s in %s' % (pattern.pattern, received)))
        match = matches[0].groupdict()
        # Each pattern is expected to define exactly one named group; take its
        # key/value pair. next(iter(...)) works identically on Python 2 and 3,
        # replacing the previous six.PY2/six.PY3 branches.
        key, value = next(iter(match.items()))
        values_by_clause[key] = value
    if len(values_by_clause) == 0:
        msg = ('Unable to match any clauses in %s' % received)
        log.error(msg)
        raise MailParserReceivedParsingError(msg)
    return values_by_clause
Parse a single received header. Return a dictionary of values by clause. Arguments: received {str} -- single received header Raises: MailParserReceivedParsingError -- Raised when a received header cannot be parsed Returns: dict -- values by clause
codesearchnet
def validate_definition(self, definition_name, dict_to_test, definition=None):
    """Validate the given dict against a specification definition.

    Args:
        definition_name: name of the definition inside the specification.
        dict_to_test: dict to test.
        definition: optional definition dict used instead of looking up
            definition_name in the specification.

    Returns:
        True if the given dict matches the definition, False otherwise.
    """
    known_definitions = self.specification['definitions']
    if definition is None and definition_name not in known_definitions.keys():
        return False
    spec_def = definition or known_definitions[definition_name]
    # Every key listed under 'required' must be present in the tested dict.
    if 'required' in spec_def:
        if not all(req in dict_to_test.keys() for req in spec_def.get('required', {})):
            return False
    properties_dict = spec_def.get('properties', {})
    for key, value in dict_to_test.items():
        if value is None:
            # None values are accepted without a type check.
            continue
        if key not in properties_dict:
            return False
        if not self._validate_type(properties_dict[key], value):
            return False
    return True
Validate the given dict according to the given definition. Args: definition_name: name of the definition. dict_to_test: dict to test. definition: optional definition dict to validate against instead of looking up definition_name in the specification. Returns: True if the given dict matches the definition, False otherwise.
juraj-google-style
def set_hyperparameters(self, hyperparameters):
    """Set new hyperparameters.

    Only the specified hyperparameters are modified; any other
    hyperparameter keeps the value that had been previously given.
    If necessary, a new instance of the primitive is created.

    Args:
        hyperparameters (dict): Dictionary containing as keys the name of
            the hyperparameters and as values the values to be used.
    """
    # Merge instead of replace so untouched hyperparameters survive.
    self._hyperparameters.update(hyperparameters)
    if not self._class:
        return
    LOGGER.debug('Creating a new primitive instance for %s', self.name)
    self.instance = self.primitive(**self._hyperparameters)
Set new hyperparameters. Only the specified hyperparameters are modified, so any other hyperparameter keeps the value that had been previously given. If necessary, a new instance of the primitive is created. Args: hyperparameters (dict): Dictionary containing as keys the name of the hyperparameters and as values the values to be used.
codesearchnet
def zip_ll_row(params, data_row):
    """Return the negative log-likelihood of a row given ZIP data.

    Args:
        params (list): [lambda, zero-inflation probability]
        data_row (array): 1d array of counts

    Returns:
        Negative log-likelihood (float).
    """
    lam, zero_prob = params[0], params[1]
    zero_mask = (data_row == 0)
    # ZIP mixture: point mass at zero plus the Poisson component.
    likelihood = zero_mask * zero_prob + (1 - zero_prob) * poisson.pmf(data_row, lam)
    # eps (module-level constant) guards against log(0).
    return -np.log(likelihood + eps).sum()
Returns the negative log-likelihood of a row given ZIP data. Args: params (list): [lambda zero-inf] data_row (array): 1d array Returns: negative log-likelihood
juraj-google-style
def load(self, response):
    """Record the response and bump the redirect counter when redirected.

    Args:
        response (:class:`.http.request.Response`): The response from a
            previous request.
    """
    self._response = response
    # A raw next location means this response redirects us again.
    redirected = self.next_location(raw=True)
    if redirected:
        self._num_redirects = self._num_redirects + 1
Load the response and increment the counter. Args: response (:class:`.http.request.Response`): The response from a previous request.
juraj-google-style
def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1):
    """1D convolution with separable filters.

    Implemented by expanding the input to 4D and delegating to
    ``tf.nn.separable_conv2d``, then squeezing the inserted spatial dim.

    Args:
        x: input tensor
        depthwise_kernel: convolution kernel for the depthwise convolution.
        pointwise_kernel: kernel for the 1x1 convolution.
        strides: stride integer (or length-1 iterable).
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: integer dilation rate.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `data_format` is neither `channels_last` or
            `channels_first`.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    # Normalize scalar arguments to 1-tuples for the 1D spatial dimension.
    if isinstance(strides, int):
        strides = (strides,)
    if isinstance(dilation_rate, int):
        dilation_rate = (dilation_rate,)
    # Converts x to the TF-native layout; tf_data_format is 'NWC' or 'NCW'.
    x, tf_data_format = _preprocess_conv1d_input(x, data_format)
    padding = _preprocess_padding(padding)
    if not isinstance(strides, tuple):
        strides = tuple(strides)
    # Build the 4D strides and pick where to insert the dummy spatial axis,
    # depending on the layout the backend ended up with.
    if tf_data_format == 'NWC':
        spatial_start_dim = 1
        strides = (1,) + strides * 2 + (1,)
    else:
        spatial_start_dim = 2
        strides = (1, 1) + strides * 2
    # Lift input and kernels to 4D so separable_conv2d can be reused.
    x = array_ops.expand_dims(x, spatial_start_dim)
    depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
    pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
    dilation_rate = (1,) + dilation_rate
    x = nn.separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format)
    # Drop the dummy spatial axis that was inserted above.
    x = array_ops.squeeze(x, [spatial_start_dim])
    # If the caller wanted channels_first but TF computed in NWC, transpose back.
    if data_format == 'channels_first' and tf_data_format == 'NWC':
        x = array_ops.transpose(x, (0, 2, 1))
    return x
1D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: stride integer. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: integer dilation rate. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`.
github-repos
def get_rng(obj=None):
    """Get a good RNG seeded with time, pid and the object.

    Args:
        obj: some object to use to generate random seed.

    Returns:
        np.random.RandomState: the RNG.
    """
    if _RNG_SEED is not None:
        # A globally fixed seed overrides the derived one (reproducibility).
        seed = _RNG_SEED
    else:
        timestamp = int(datetime.now().strftime("%Y%m%d%H%M%S%f"))
        # Fold object identity, pid and time into a 32-bit-ish seed.
        seed = (id(obj) + os.getpid() + timestamp) % 4294967295
    return np.random.RandomState(seed)
Get a good RNG seeded with time, pid and the object. Args: obj: some object to use to generate random seed. Returns: np.random.RandomState: the RNG.
juraj-google-style
def add_headers(vcf_obj, nr_cases=None, sv=False):
    """Add loqus specific information to a VCF header.

    Args:
        vcf_obj(cyvcf2.VCF): VCF whose header is extended in place.
        nr_cases(int): Number of cases in the database; when given, a
            ``##NrCases`` meta line is appended to the header.
        sv(bool): If True, skip the SNV-only Hom/Hem counters.
    """
    vcf_obj.add_info_to_header(
        {
            'ID': "Obs",
            'Number': '1',
            'Type': 'Integer',
            'Description': "The number of observations for the variant"}
    )
    if not sv:
        vcf_obj.add_info_to_header(
            {
                'ID': "Hom",
                'Number': '1',
                'Type': 'Integer',
                'Description': "The number of observed homozygotes"}
        )
        vcf_obj.add_info_to_header(
            {
                'ID': "Hem",
                'Number': '1',
                'Type': 'Integer',
                'Description': "The number of observed hemizygotes"}
        )
    if nr_cases:
        # Fix: the original contained an unterminated string literal here.
        # NOTE(review): header key assumed to be ##NrCases — confirm upstream.
        case_header = "##NrCases={}".format(nr_cases)
        vcf_obj.add_to_header(case_header)
    return
Add loqus specific information to a VCF header. Args: vcf_obj(cyvcf2.VCF): VCF whose header is extended in place nr_cases(int): number of cases, appended as a NrCases header line when given sv(bool): if True, skip the SNV-specific Hom/Hem header entries
juraj-google-style
def unload(self):
    """Unload the library's DLL if it has been loaded.

    This additionally cleans up the temporary DLL file that was created
    when the library was loaded.

    Args:
        self (Library): the ``Library`` instance

    Returns:
        ``True`` if the DLL was unloaded, otherwise ``False``.
    """
    unloaded = False
    if self._lib is not None:
        if self._winlib is not None:
            # Windows: free both native handles explicitly through kernel32.
            free_library = ctypes.windll.kernel32.FreeLibrary
            free_library.argtypes = (ctypes.c_void_p,)
            free_library(self._lib._handle)
            free_library(self._winlib._handle)
            self._winlib = None
        else:
            del self._lib
        self._lib = None
        unloaded = True
    if self._temp is not None:
        # Remove the temporary DLL file written during load().
        os.remove(self._temp.name)
        self._temp = None
    return unloaded
Unloads the library's DLL if it has been loaded. This additionally cleans up the temporary DLL file that was created when the library was loaded. Args: self (Library): the ``Library`` instance Returns: ``True`` if the DLL was unloaded, otherwise ``False``.
codesearchnet