code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def clone(self, uuid):
    """Clone the task instance with the given UUID.

    Args:
        uuid (str): UUID of the task instance to clone.

    Returns:
        A task instance model representing the newly created clone.
    """
    url = self._client.base_api_url + self.clone_url.format(id=uuid)
    resp = self._client.session.post(url)
    # A successful clone is signalled by HTTP 201 Created.
    self.validate_request_success(
        response_text=resp.text,
        request_url=url,
        status_code=resp.status_code,
        expected_status_code=HTTP_201_CREATED,
    )
    return self.response_data_to_model_instance(resp.json())
Clone the task instance with given UUID. Args: uuid (str): The UUID of the task instance to clone. Returns: :class:`saltant.models.base_task_instance.BaseTaskInstance`: A task instance model instance representing the task instance created due to the clone.
codesearchnet
def close_children_tasks(self, parent_task_name):
    """Close all child tasks opened after ``parent_task_name``.

    Removes entries from ``self.tasks`` in reverse insertion order until
    the parent task itself is reached; the parent entry is kept.

    Args:
        parent_task_name (str): Name of the parent task.

    Returns:
        None
    """
    if parent_task_name not in self.tasks:
        return
    while self.tasks:
        # Peek at the most recently inserted task name.
        # BUG FIX: the original used `reversed(self.tasks.keys()).next()`,
        # which is Python 2 iterator syntax and raises AttributeError on
        # Python 3; `next(reversed(...))` is the Python 3 equivalent.
        newest_task = next(reversed(self.tasks))
        if newest_task == parent_task_name:
            break
        del self.tasks[newest_task]
Closes all the children tasks that were open Args: parent_task_name (str): Name of the parent task Returns: None
juraj-google-style
def get_config_path(module_id: str = None, ext: str = 'yaml') -> Path:
    """Get the path for a configuration file.

    The containing directory is created if it does not exist; the config
    file itself is never created here.

    Args:
        module_id (str): Module ID. When omitted, the profile-level
            directory is used instead.
        ext (str): Extension name of the config file. Defaults to "yaml".

    Returns:
        Path: path to ``config.<ext>`` inside the resolved directory.
    """
    if module_id:
        base_dir = get_data_path(module_id)
    else:
        base_dir = get_base_path() / 'profiles' / coordinator.profile
    if not base_dir.exists():
        base_dir.mkdir(parents=True)
    return base_dir / "config.{}".format(ext)
Get path for configuration file. Defaulted to ``~/.ehforwarderbot/profiles/profile_name/channel_id/config.yaml``. This method creates the queried path if not existing. The config file will not be created, however. Args: module_id (str): Module ID. ext (Optional[Str]): Extension name of the config file. Defaulted to ``"yaml"``. Returns: The path to the configuration file.
juraj-google-style
def tsqr(a):
    """Perform a QR decomposition of a tall-skinny distributed matrix.

    Uses a binary reduction tree of per-block QR factorizations, then
    reconstructs Q by walking each leaf's path up the tree.

    Args:
        a: A distributed matrix (DistArray) with blocks only along the
            first axis (a.num_blocks[1] == 1).

    Returns:
        A tuple (q, r) where q is a DistArray and r is a numpy array.

    Raises:
        Exception: if `a` is not 2-D or has more than one block column.
    """
    if len(a.shape) != 2:
        raise Exception('tsqr requires len(a.shape) == 2, but a.shape is {}'.format(a.shape))
    if a.num_blocks[1] != 1:
        raise Exception('tsqr requires a.num_blocks[1] == 1, but a.num_blocks is {}'.format(a.num_blocks))
    num_blocks = a.num_blocks[0]
    # K is the number of levels in the reduction tree.
    K = int(np.ceil(np.log2(num_blocks))) + 1
    q_tree = np.empty((num_blocks, K), dtype=object)
    current_rs = []
    for i in range(num_blocks):
        block = a.objectids[i, 0]
        q, r = ra.linalg.qr.remote(block)
        q_tree[i, 0] = q
        current_rs.append(r)
    # Pairwise-reduce the R factors up the tree.
    for j in range(1, K):
        new_rs = []
        for i in range(int(np.ceil(1.0 * len(current_rs) / 2))):
            stacked_rs = ra.vstack.remote(*current_rs[2 * i:2 * i + 2])
            q, r = ra.linalg.qr.remote(stacked_rs)
            q_tree[i, j] = q
            new_rs.append(r)
        current_rs = new_rs
    assert len(current_rs) == 1, 'len(current_rs) = ' + str(len(current_rs))
    if a.shape[0] >= a.shape[1]:
        q_shape = a.shape
    else:
        q_shape = [a.shape[0], a.shape[0]]
    q_num_blocks = core.DistArray.compute_num_blocks(q_shape)
    q_objectids = np.empty(q_num_blocks, dtype=object)
    q_result = core.DistArray(q_shape, q_objectids)
    # Reconstruct each Q block by multiplying down its path in the tree.
    for i in range(num_blocks):
        q_block_current = q_tree[i, 0]
        ith_index = i
        for j in range(1, K):
            if np.mod(ith_index, 2) == 0:
                lower = [0, 0]
                upper = [a.shape[1], core.BLOCK_SIZE]
            else:
                lower = [a.shape[1], 0]
                upper = [2 * a.shape[1], core.BLOCK_SIZE]
            # BUG FIX: the index must move up one level per iteration; the
            # original had a bare no-op `ith_index` statement here, so every
            # tree level was read with the unchanged leaf index.
            ith_index //= 2
            q_block_current = ra.dot.remote(
                q_block_current,
                ra.subarray.remote(q_tree[ith_index, j], lower, upper))
        q_result.objectids[i] = q_block_current
    r = current_rs[0]
    return q_result, ray.get(r)
Perform a QR decomposition of a tall-skinny matrix. Args: a: A distributed matrix with shape MxN (suppose K = min(M, N)). Returns: A tuple of q (a DistArray) and r (a numpy array) satisfying the following. - If q_full = ray.get(DistArray, q).assemble(), then q_full.shape == (M, K). - np.allclose(np.dot(q_full.T, q_full), np.eye(K)) == True. - If r_val = ray.get(np.ndarray, r), then r_val.shape == (K, N). - np.allclose(r, np.triu(r)) == True.
codesearchnet
def __init__(self, band_type=None, rate=None, burst_size=None):
    """Create a MeterBandHeader with the optional parameters below.

    Args:
        band_type (MeterBandType): One of OFPMBT_*.
        rate (int): Rate for this band.
        burst_size (int): Size of bursts.
    """
    super().__init__()
    # Store the band parameters, then refresh the encoded length.
    self.band_type = band_type
    self.rate = rate
    self.burst_size = burst_size
    self.update_length()
Create a MeterBandHeader with the optional parameters below. Args: band_type (MeterBandType): One of OFPMBT_*. rate (int): Rate for this band. burst_size (int): Size of bursts.
juraj-google-style
def step(self, action):
    """Forward an action to the wrapped environment.

    Args:
        action: Action to apply to the environment.

    Returns:
        Tuple of converted observation, converted reward, done flag, and
        info object from the wrapped environment.
    """
    observ, reward, done, info = self._env.step(action)
    return (
        self._convert_observ(observ),
        self._convert_reward(reward),
        done,
        info,
    )
Forward action to the wrapped environment. Args: action: Action to apply to the environment. Raises: ValueError: Invalid action. Returns: Converted observation, converted reward, done flag, and info object.
codesearchnet
def add_outputs(self, **kwargs):
    """Add workflow outputs, deriving each output's type automatically.

    The type is looked up in the steps library via ``step_output_types``.

    Args:
        kwargs (dict): ``name=source_name`` pairs where ``name`` is the
            workflow output name (e.g. ``txt_files``) and the source name
            is the producing step plus its output name
            (e.g. ``saf-to-txt/out_files``).
    """
    self._closed()
    for name, source_name in kwargs.items():
        self.wf_outputs[name] = {
            'outputSource': source_name,
            'type': self.step_output_types[source_name],
        }
Add workflow outputs. The output type is added automatically, based on the steps in the steps library. Args: kwargs (dict): A dict containing ``name=source name`` pairs. ``name`` is the name of the workflow output (e.g., ``txt_files``) and source name is the name of the step that produced this output plus the output name (e.g., ``saf-to-txt/out_files``).
codesearchnet
def build_circle_dict(self, center_lat, center_lng, radius,
                      stroke_color='#FF0000', stroke_opacity=0.8,
                      stroke_weight=2, fill_color='#FF0000',
                      fill_opacity=0.3):
    """Build a dict with the javascript Circle class parameters.

    Provides default drawing values when only the circle center and radius
    are given, while allowing each parameter to be set individually.

    NOTE(review): the stored copy of this function was truncated at the
    first '#' character, losing the keyword parameters' defaults; the
    defaults above are reconstructed and should be confirmed against the
    upstream source.

    Args:
        center_lat (float): The circle center latitude.
        center_lng (float): The circle center longitude.
        radius (float): The circle radius, in meters.
        stroke_color (str): Border color, hexadecimal notation.
        stroke_opacity (float): Border opacity; 0 means transparent.
        stroke_weight (int): Stroke girth in pixels.
        fill_color (str): Fill color, hexadecimal notation.
        fill_opacity (float): Fill opacity.

    Returns:
        dict: the Circle parameters.
    """
    return {
        'stroke_color': stroke_color,
        'stroke_opacity': stroke_opacity,
        'stroke_weight': stroke_weight,
        'fill_color': fill_color,
        'fill_opacity': fill_opacity,
        'center': {'lat': center_lat, 'lng': center_lng},
        'radius': radius,
    }
Set a dictionary with the javascript class Circle parameters This function sets a default drawing configuration if the user just passes the circle center and radius, but also allows each parameter to be set individually if the user wishes so. Args: center_lat (float): The circle center latitude center_lng (float): The circle center longitude radius (float): The circle radius, in meters stroke_color (str): Sets the color of the circle border using hexadecimal color notation stroke_opacity (float): Sets the opacity of the circle border in percentage. If stroke_opacity = 0, the border is transparent stroke_weight (int): Sets the stroke girth in pixels. fill_color (str): Sets the color of the circle fill using hexadecimal color notation fill_opacity (float): Sets the opacity of the circle fill
codesearchnet
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the Digest object and decode its parts.

    Args:
        istream (Stream): data stream containing the encoded object,
            usually a BytearrayStream.
        kmip_version (KMIPVersion): KMIP version used for decoding.
            Optional, defaults to KMIP 1.0.
    """
    super(Digest, self).read(istream, kmip_version=kmip_version)
    tstream = BytearrayStream(istream.read(self.length))
    # Decode the three constituent parts in their encoded order.
    for part in (self.hashing_algorithm, self.digest_value, self.key_format_type):
        part.read(tstream, kmip_version=kmip_version)
    self.is_oversized(tstream)
    self.validate()
Read the data encoding the Digest object and decode it into its constituent parts. Args: istream (Stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
codesearchnet
def set_schema_location(self, ns_uri, schema_location, replace=False):
    """Set the schema location of the given namespace.

    If ``replace`` is True any existing schema location is replaced.
    Setting None erases the location (not considered replacement).
    Otherwise, a different existing value raises.

    Args:
        ns_uri (str): The namespace whose schema location is to be set.
        schema_location (str): The schema location URI to set, or None.
        replace (bool): Whether to replace an existing schema location.

    Raises:
        NamespaceNotFoundError: If the namespace isn't in this set.
        ConflictingSchemaLocationError: If replace is False,
            schema_location is not None, and a different location exists.
    """
    ni = self.__lookup_uri(ns_uri)
    if ni.schema_location == schema_location:
        # Already set to this exact value; nothing to do.
        return
    if replace or ni.schema_location is None or schema_location is None:
        # None always "erases" regardless of the replace flag.
        ni.schema_location = schema_location
        return
    raise ConflictingSchemaLocationError(ns_uri, ni.schema_location, schema_location)
Sets the schema location of the given namespace. If ``replace`` is ``True``, then any existing schema location is replaced. Otherwise, if the schema location is already set to a different value, an exception is raised. If the schema location is set to None, it is effectively erased from this set (this is not considered "replacement".) Args: ns_uri (str): The namespace whose schema location is to be set schema_location (str): The schema location URI to set, or None replace (bool): Whether to replace any existing schema location Raises: NamespaceNotFoundError: If the given namespace isn't in this set. ConflictingSchemaLocationError: If replace is False, schema_location is not None, and the namespace already has a different schema location in this set.
codesearchnet
def is_valid(self, value):
    """Validate ``value`` prior to assignment, based on this type.

    Args:
        value (object): The value object for validation.

    Returns:
        True if value validation succeeds else False.
    """
    if self.is_array and isinstance(value, (list, set, tuple)):
        # Array types accept any collection whose members all validate.
        return all(self._valid(item) for item in value)
    return self._valid(value)
Validate value before actual instance setting based on type. Args: value (object): The value object for validation. Returns: True if value validation succeeds else False.
juraj-google-style
def CompileReport(self, mediator):
    """Compiles an analysis report listing file paths and hashes.

    Args:
        mediator (AnalysisMediator): mediates interactions between analysis
            plugins and other components, such as storage and dfvfs.

    Returns:
        AnalysisReport: report.
    """
    lines_of_text = ['Listing file paths and hashes']
    # Sort by the comparable form of the path specification. FIX: the
    # lambda parameter was previously named `tuple`, shadowing the builtin.
    for pathspec, hashes in sorted(
            self._paths_with_hashes.items(),
            key=lambda path_hashes: path_hashes[0].comparable):
        path_string = self._GeneratePathString(mediator, pathspec, hashes)
        lines_of_text.append(path_string)
    lines_of_text.append('')
    report_text = '\n'.join(lines_of_text)
    return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: report.
juraj-google-style
def get_summed_cohp_by_label_and_orbital_list(self, label_list, orbital_list, divisor=1):
    """Return a COHP object holding the summed COHP divided by ``divisor``.

    Args:
        label_list: labels of the COHPs to include in the sum.
        orbital_list: orbitals for the COHPs to include (same order as
            ``label_list``).
        divisor: float/int the summed COHP and ICOHP are divided by.

    Returns:
        Cohp: a COHP object with the summed (and divided) COHP.
    """
    first = self.get_orbital_resolved_cohp(label_list[0], orbital_list[0])
    summed_cohp = first.cohp.copy()
    summed_icohp = first.icohp.copy()
    # Accumulate the remaining label/orbital pairs channel by channel.
    for idx, label in enumerate(label_list[1:], 1):
        current = self.get_orbital_resolved_cohp(label, orbital_list[idx])
        summed_cohp[Spin.up] = np.sum(
            [summed_cohp[Spin.up], current.cohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_cohp:
            summed_cohp[Spin.down] = np.sum(
                [summed_cohp[Spin.down], current.cohp.copy()[Spin.down]], axis=0)
        summed_icohp[Spin.up] = np.sum(
            [summed_icohp[Spin.up], current.icohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_icohp:
            summed_icohp[Spin.down] = np.sum(
                [summed_icohp[Spin.down], current.icohp.copy()[Spin.down]], axis=0)
    divided_cohp = {Spin.up: np.divide(summed_cohp[Spin.up], divisor)}
    divided_icohp = {Spin.up: np.divide(summed_icohp[Spin.up], divisor)}
    if Spin.down in summed_cohp:
        divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
        divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)
    return Cohp(efermi=first.efermi, energies=first.energies,
                cohp=divided_cohp, are_coops=first.are_coops,
                icohp=divided_icohp)
Returns a COHP object that includes a summed COHP divided by divisor Args: label_list: list of labels for the COHP that should be included in the summed cohp orbital_list: list of orbitals for the COHPs that should be included in the summed cohp (same order as label_list) divisor: float/int, the summed cohp will be divided by this divisor Returns: Returns a COHP object including a summed COHP
juraj-google-style
def _extract_response_chunks(self, all_responses, response_chunks, api_name):
    """Extract responses from response chunks and cache them.

    Used for requests containing multiple concatenated resources; the
    extracted responses are merged into ``all_responses``.

    Args:
        all_responses: dict of already-gathered responses, keyed by
            resource.
        response_chunks: a list of response chunks (each a response or a
            list of responses).
        api_name: a string name of the API, used as the cache namespace.
    """
    for chunk in response_chunks:
        # Normalize a single response to a one-element list.
        responses = chunk if isinstance(chunk, list) else [chunk]
        for response in responses:
            if not response:
                continue
            if self._cache:
                self._cache.cache_value(api_name, response['resource'], response)
            all_responses[response['resource']] = response
Extracts and caches the responses from the response chunks in case of the responses for the requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in the all_responses parameter. Args: all_responses: a list containing already cached responses. response_chunks: a list with response chunks. api_name: a string name of the API.
codesearchnet
def _lift_unlifted_variables(graph, variable_holder):
    """Finds resource variables and lifts them into the outer context.

    When a GraphDef is imported inside wrap_function, no Python graph
    building code runs, so VarHandleOps create variable resources without
    corresponding Python objects. This searches the graph's collections
    for such variables and lifts them out as regular variable objects,
    registering them with the FuncGraph as captures.

    Args:
        graph: The FuncGraph to lift variables from.
        variable_holder: A VariableHolder to record the lifted variables in.
    """
    with graph.as_default():
        global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        local_vars = ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
        existing_captures = {id(c) for c in graph.internal_captures}
        lifted_variables = {}

        def _should_lift_variable(v):
            # Only lift graph-mode resource variables built inside a
            # function whose handles are not already captured.
            return (v._in_graph_mode and
                    v.graph.building_function) and isinstance(
                        v, resource_variable_ops.BaseResourceVariable) and (
                            id(v.handle) not in existing_captures)

        for old_variable in global_vars:
            if _should_lift_variable(old_variable):
                new_variable = _lift_single_variable(
                    old_variable, graph, variable_holder)
                lifted_variables[id(old_variable)] = new_variable
                existing_captures.add(id(old_variable.handle))
        for old_variable in local_vars:
            if _should_lift_variable(old_variable):
                new_variable = _lift_single_variable(
                    old_variable, graph, variable_holder)
                lifted_variables[id(old_variable)] = new_variable
                existing_captures.add(id(old_variable.handle))
                if new_variable._in_graph_mode:
                    # Local variables should not land in the outer graph's
                    # global collection; move the lifted copy over.
                    outer_graph = new_variable.graph
                    global_collection = outer_graph.get_collection_ref(
                        ops.GraphKeys.GLOBAL_VARIABLES)
                    global_collection.remove(new_variable)
                    outer_graph.add_to_collection(
                        ops.GraphKeys.LOCAL_VARIABLES, new_variable)
        # Swap lifted variables into both collections in place.
        for collection_name in [ops.GraphKeys.GLOBAL_VARIABLES,
                                ops.GraphKeys.LOCAL_VARIABLES]:
            mutable_collection = ops.get_collection_ref(collection_name)
            for index, current in enumerate(mutable_collection):
                mutable_collection[index] = lifted_variables.get(
                    id(current), current)
                if not resource_variable_ops.is_resource_variable(
                        mutable_collection[index]):
                    logging.log_first_n(
                        logging.WARN,
                        'Unable to create a python object for variable {} because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().'.format(
                            mutable_collection[index]), 5)
Finds resource variables and lifts them into the outer context. When we import a GraphDef inside a wrap_function, no Python graph building code runs. This means we get VarHandleOps which create variable resources, but no corresponding Python objects. Leaving them like this works but gives the user no way to interact with or modify the variables outside the graph. This method searches for variables and lifts them out as regular variable objects when possible, indicating to the FuncGraph that they are captures. Args: graph: The FuncGraph to lift variables from. variable_holder: A VariableHolder to record the lifted variables in.
github-repos
def list(cls, session, endpoint_override=None, data=None):
    """Return records in a mailbox.

    Args:
        session (requests.sessions.Session): Authenticated session.
        endpoint_override (str, optional): Use this endpoint instead of
            the class default.
        data (dict, optional): Data to provide as request parameters.

    Returns:
        RequestPaginator(output_type=helpscout.BaseModel): Results
        iterator.
    """
    cls._check_implements('list')
    endpoint = endpoint_override or '/%s.json' % cls.__endpoint__
    return cls(endpoint, data=data, session=session)
Return records in a mailbox. Args: session (requests.sessions.Session): Authenticated session. endpoint_override (str, optional): Override the default endpoint using this. data (dict, optional): Data to provide as request parameters. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator.
codesearchnet
def van(first_enc, first_frame, current_enc, gt_image, reuse=False, scope_prefix='', hparams=None):
    """Implements a VAN.

    Args:
        first_enc: The first encoding.
        first_frame: The first ground truth frame.
        current_enc: The encoding of the frame to generate.
        gt_image: The ground truth image, only used for regularization.
        reuse: To reuse in variable scope or not.
        scope_prefix: The prefix before the scope name.
        hparams: The python hparams.

    Returns:
        The generated image, the regularization loss, and the higher-level
        encoding of the current frame.
    """
    with tf.variable_scope(scope_prefix + 'van', reuse=reuse):
        output_shape = first_frame.get_shape().as_list()
        output_shape[0] = -1
        first_depth = 64
        enc_first, _ = van_enc_2d(first_enc, first_depth)
        feat_first_frame, image_enc_history = van_image_enc_2d(
            first_frame, first_depth, hparams=hparams)
        enc_current, van_higher_level = van_enc_2d(
            current_enc, first_depth, reuse=True)
        feat_gt_image, _ = van_image_enc_2d(
            gt_image, first_depth, True, hparams=hparams)
        # Analogy: transform the current encoding the way the first frame
        # relates to its encoding.
        analogy_t = analogy_computation_2d(
            enc_first, feat_first_frame, enc_current, first_depth)
        enc_img = enc_current + analogy_t
        img = van_dec_2d(
            enc_img, image_enc_history, output_shape, first_depth,
            hparams=hparams)
        batch_size = tf.to_float(tf.shape(first_enc)[0])
        r_loss = tf.nn.l2_loss(feat_gt_image - enc_current - analogy_t) / batch_size
        return img, r_loss, van_higher_level
Implements a VAN. Args: first_enc: The first encoding. first_frame: The first ground truth frame. current_enc: The encoding of the frame to generate. gt_image: The ground truth image, only used for regularization. reuse: To reuse in variable scope or not. scope_prefix: The prefix before the scope name. hparams: The python hparams. Returns: The generated image.
juraj-google-style
def _einsum_equation(input_shapes, output_shape):
    """Turn shapes into an einsum equation, e.g. "ij,jk->ik".

    Each distinct dimension is assigned a letter starting from 'a'.

    Args:
        input_shapes: a list of Shapes.
        output_shape: a Shape.

    Returns:
        a string.
    """
    pieces = []
    next_letter = ord('a')
    dim_to_letter = {}
    for shape_num, shape in enumerate(input_shapes + [output_shape]):
        if shape_num == len(input_shapes):
            pieces.append('->')
        elif shape_num > 0:
            pieces.append(',')
        for d in shape.dims:
            if d not in dim_to_letter:
                dim_to_letter[d] = chr(next_letter)
                next_letter += 1
            pieces.append(dim_to_letter[d])
    return ''.join(pieces)
Turn shapes into an einsum equation. e.g. "ij,jk->ik" Args: input_shapes: a list of Shapes output_shape: a Shape Returns: a string
codesearchnet
def parse(cls, args):
    """Parse command line arguments into command-creation parameters.

    Args:
        `args`: sequence of arguments.

    Returns:
        Dictionary that can be used in the create method, or None if the
        option parser exited.

    Raises:
        ParseError: when the arguments are not correct.
    """
    try:
        options, args = cls.optparser.parse_args(args)
        if options.mode not in ("1", "2"):
            raise ParseError("mode must be either '1' or '2'",
                             cls.optparser.format_help())
        if options.dbtap_id is None or options.db_table is None:
            raise ParseError("dbtap_id and db_table are required",
                             cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit:
        # --help and friends: not an error, but nothing to create.
        return None
    params = vars(options)
    params["command_type"] = "DbImportCommand"
    return params
Parse command line arguments to construct a dictionary of command parameters that can be used to create a command Args: `args`: sequence of arguments Returns: Dictionary that can be used in create method Raises: ParseError: when the arguments are not correct
juraj-google-style
def bazel_command(self, subcommand: str='test', extra_options: Tuple[str, ...]=()) -> List[str]:
    """Returns a bazel command line for this build.

    Args:
        subcommand: The subcommand to give to bazel. `test` by default.
        extra_options: Extra options. For now just used to pass in
            `--nobuild`.

    Returns:
        List of command line arguments.
    """
    options = _dict_to_cli_options(self.options)
    configs = [f'--config={config}' for config in self.configs]
    # COMPAT FIX: join the filters outside the f-strings — reusing the
    # same quote character inside an f-string replacement field is only
    # valid on Python 3.12+ (PEP 701) and is a SyntaxError before that.
    joined_build_filters = ','.join(self.build_tag_filters)
    joined_test_filters = ','.join(self.test_tag_filters)
    build_tag_filters = f'--build_tag_filters={joined_build_filters}'
    test_tag_filters = f'--test_tag_filters={joined_test_filters}'
    action_env = [f'--action_env={k}={v}' for k, v in self.action_env.items()]
    test_env = [f'--test_env={k}={v}' for k, v in self.test_env.items()]
    repo_env = [f'--repo_env={k}={v}' for k, v in self.repo_env.items()]
    override_repository = [f'--override_repository={k}={v}'
                           for k, v in self.override_repository.items()]
    tag_filters = [build_tag_filters, test_tag_filters]
    all_options = (tag_filters + configs + action_env + test_env + repo_env +
                   override_repository + options + list(extra_options))
    return ['bazel', subcommand, *all_options, '--', *self.target_patterns]
Returns a bazel test command for this build. Args: subcommand: The subcommand to give to bazel. `test` by default. extra_options: Extra options. For now just used to pass in `--nobuild`. Returns: List of command line arguments
github-repos
def _get_kind_name(item):
    """Returns the kind name in CollectionDef for a data item.

    Args:
        item: A data item.

    Returns:
        The string representation of the kind in CollectionDef
        ('bytes_list', 'int64_list', 'float_list', 'any_list' or
        'node_list').
    """
    if isinstance(item, (str, bytes)):
        return 'bytes_list'
    if isinstance(item, int):
        return 'int64_list'
    if isinstance(item, float):
        return 'float_list'
    if isinstance(item, Any):
        return 'any_list'
    return 'node_list'
Returns the kind name in CollectionDef. Args: item: A data item. Returns: The string representation of the kind in CollectionDef.
github-repos
def data_period_end_day(self, value=None):
    """Corresponds to IDD Field `data_period_end_day`.

    Args:
        value (str): value for IDD Field `data_period_end_day`.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        # Missing value: stored as-is, no validation performed.
        self._data_period_end_day = None
        return
    try:
        value = str(value)
    except ValueError:
        raise ValueError('value {} need to be of type str for field `data_period_end_day`'.format(value))
    if ',' in value:
        raise ValueError('value should not contain a comma for field `data_period_end_day`')
    self._data_period_end_day = value
Corresponds to IDD Field `data_period_end_day` Args: value (str): value for IDD Field `data_period_end_day` if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def _get_encoding(dom, default="utf-8"):
    """Look for the charset in a ``<meta http-equiv="Content-Type">`` tag.

    Args:
        dom (obj): pyDHTMLParser dom of HTML elements.
        default (str, default "utf-8"): What to use if encoding is not
            found in `dom`.

    Returns:
        str/default: Given encoding (lowercased) or `default` parameter
        if not found.
    """
    meta_tags = dom.find("meta", {"http-equiv": "Content-Type"})
    if not meta_tags:
        return default
    content = meta_tags[0].params.get("content", None)
    if not content:
        return default
    # e.g. "text/html; charset=UTF-8" -> "utf-8"
    return content.lower().split("=")[-1]
Try to look for meta tag in given `dom`. Args: dom (obj): pyDHTMLParser dom of HTML elements. default (default "utf-8"): What to use if encoding is not found in `dom`. Returns: str/default: Given encoding or `default` parameter if not found.
juraj-google-style
def _with_num_row_partitions(self, num_row_partitions):
    """Creates an identical shape with the given num_row_partitions.

    The shape must be statically refactorable to this value: the rank
    must be known and ``num_row_partitions`` must be a nonnegative int
    less than the rank. If the value already matches, ``self`` is
    returned.

    Args:
        num_row_partitions: the target num_row_partitions (must be a
            nonnegative int).

    Returns:
        a shape with a (possibly) different num_row_partitions.

    Raises:
        ValueError: if the rank is unknown, the argument is not a
            nonnegative int, or there is a dimension that is nonuniform.
    """
    rank = self.rank
    if rank is None:
        raise ValueError('Rank must be known to adjust num_row_partitions')
    if not isinstance(num_row_partitions, int):
        raise ValueError('num_row_partitions must be an int')
    if num_row_partitions < 0:
        raise ValueError('num_row_partitions must be nonnegative')
    if num_row_partitions == self.num_row_partitions:
        return self
    if num_row_partitions >= rank:
        raise ValueError('num_row_partitions must be less than rank')
    if num_row_partitions > self.num_row_partitions:
        # Growing: peel leading uniform inner dimensions off into new
        # uniform RowPartitions.
        extra_partitions = num_row_partitions - self.num_row_partitions
        new_inner_rank = self.rank - num_row_partitions
        nvals = self._inner_shape_dim(0)
        new_partitions = []
        for i in range(extra_partitions):
            nrows = nvals
            row_length = self._inner_shape_dim(i + 1)
            nvals = nrows * row_length
            new_partitions.append(
                RowPartition.from_uniform_row_length(
                    row_length, nrows=nrows, dtype=self.dtype))
        return DynamicRaggedShape(
            list(self.row_partitions) + new_partitions,
            self._alt_inner_shape(new_inner_rank))
    # Shrinking: drop trailing row partitions and fold them back into
    # the inner shape.
    assert num_row_partitions < self.num_row_partitions
    return DynamicRaggedShape(
        self.row_partitions[:num_row_partitions],
        self._alt_inner_shape(self.rank - num_row_partitions))
Creates an identical shape with the given num_row_partitions. Note that the shape must be statically refactorable to this rank. In particular: * rank must be known. * num_row_partitions must be a nonnegative int. * num_row_partitions must be less than the rank of the shape * num_row_partitions must be greater or equal to the index of any ragged dimension. Note that if the num_row_partitions is the same, self is returned. Args: num_row_partitions: the target num_row_partitions (must be a nonnegative int). Returns: a shape with a (possibly) different num_row_partitions. Raises: ValueError: if the rank is unknown, the argument is not a nonnegative int, or there is a dimension that is nonuniform.
github-repos
def RegisterDefinition(self, data_type_definition):
    """Registers a data type definition.

    Definitions are identified by their lower-cased name.

    Args:
        data_type_definition (DataTypeDefinition): data type definition.

    Raises:
        KeyError: if the definition name or one of its aliases is
            already registered.
    """
    name_lower = data_type_definition.name.lower()
    # Validate everything before mutating any registry state.
    if name_lower in self._definitions:
        raise KeyError('Definition already set for name: {0:s}.'.format(
            data_type_definition.name))
    if data_type_definition.name in self._aliases:
        raise KeyError('Alias already set for name: {0:s}.'.format(
            data_type_definition.name))
    for alias in data_type_definition.aliases:
        if alias in self._aliases:
            raise KeyError('Alias already set for name: {0:s}.'.format(alias))
    self._definitions[name_lower] = data_type_definition
    for alias in data_type_definition.aliases:
        self._aliases[alias] = name_lower
    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
        self._format_definitions.append(name_lower)
Registers a data type definition. The data type definitions are identified based on their lower case name. Args: data_type_definition (DataTypeDefinition): data type definitions. Raises: KeyError: if data type definition is already set for the corresponding name.
codesearchnet
def pluralize(singular):
    """Convert singular word to its plural form.

    Args:
        singular: A word in its singular form.

    Returns:
        The word in its plural form; uncountable words are returned
        unchanged, and None is returned when no rule matches.
    """
    if singular in UNCOUNTABLES:
        return singular
    for irregular_singular, irregular_plural in IRREGULAR:
        if irregular_singular == singular:
            return irregular_plural
    for pattern, replacement in PLURALIZE_PATTERNS:
        if re.search(pattern, singular):
            return re.sub(pattern, replacement, singular)
Convert singular word to its plural form. Args: singular: A word in its singular form. Returns: The word in its plural form.
juraj-google-style
def create_services(self, compose_str: str) -> list:
    """Create new docker services from a compose-file string.

    Args:
        compose_str (str): Docker compose 'file' string.

    Returns:
        list: short IDs of the services that were created.

    Raises:
        RuntimeError: if this node is not a swarm manager.
    """
    if not self._manager:
        raise RuntimeError('Services can only be run on swarm manager nodes')
    services_ids = []
    try:
        # SECURITY FIX: yaml.load without an explicit Loader can
        # instantiate arbitrary Python objects; the compose string is
        # external input, so use safe_load.
        service_config = yaml.safe_load(compose_str)
        service_list = copy.deepcopy(service_config)
        service_config.pop('version')
        service_config.pop('services')
        for service_name in service_list['services']:
            service_exist = self._client.services.list(
                filters={'name': service_name})
            if not service_exist:
                service_config['name'] = service_name
                service_spec = self._parse_services(
                    service_config, service_name, service_list)
                created_service = self._client.services.create(**service_spec)
                service_id = created_service.short_id
                LOG.debug('Service created: %s', service_id)
                services_ids.append(service_id)
            else:
                LOG.debug('Services already exists')
    except yaml.YAMLError as exc:
        print(exc)
    return services_ids
Create new docker services. Args: compose_str (string): Docker compose 'file' string Return: service_names, list
codesearchnet
def text_filepaths_for_task(self, tmp_dir, task_id):
    """List input filepaths for a particular training or dev shard.

    Files are assigned round-robin; task ids below num_train_shards are
    train shards, the rest are dev shards.

    Args:
        tmp_dir: a string.
        task_id: an integer less than num_train_shards + num_dev_shards.

    Returns:
        a list of filepaths assigned to this shard.
    """
    assert 0 <= task_id < self.num_train_shards + self.num_dev_shards
    if task_id < self.num_train_shards:
        filepaths = self.train_text_filepaths(tmp_dir)
        return [f for i, f in enumerate(filepaths)
                if i % self.num_train_shards == task_id]
    # Dev shards are indexed after the train shards.
    dev_id = task_id - self.num_train_shards
    filepaths = self.dev_text_filepaths(tmp_dir)
    return [f for i, f in enumerate(filepaths)
            if i % self.num_dev_shards == dev_id]
List of input filepaths for a particular training or dev shard. Args: tmp_dir: a string task_id: an integer less than self.num_train_shards + self.num_dev_shards Returns: a list of filepaths assigned to this shard
juraj-google-style
def disqualified(self, num, natural=True, **kwargs):
    """Search for disqualified officers by officer ID.

    Searches natural disqualifications by default; pass natural=False
    to search corporate disqualifications.

    Args:
        num (str): ID to search on.
        natural (Optional[bool]): Natural or corporate search.
        kwargs (dict): additional keywords passed into
            requests.session.get *params* keyword.

    Returns:
        The HTTP response object.
    """
    kind = 'natural' if natural else 'corporate'
    url = self._BASE_URI + 'disqualified-officers/{}/{}'.format(kind, num)
    response = self.session.get(url, params=kwargs)
    self.handle_http_error(response)
    return response
Search for disqualified officers by officer ID. Searches for natural disqualifications by default. Specify natural=False to search for corporate disqualifications. Args: num (str): Company number to search on. natural (Optional[bool]): Natural or corporate search kwargs (dict): additional keywords passed into requests.session.get *params* keyword.
juraj-google-style
def global_idx_to_numeric_idx(self, axis, indices):
    """Convert global (label-based) indices into numeric positions.

    Note: this function involves making copies of the index in memory.

    Args:
        axis: Axis to extract indices from ('row', 'col' or 'columns').
        indices: Indices to convert to numerical.

    Returns:
        An Index object of numeric positions.
    """
    assert axis in ["row", "col", "columns"]
    labels = self.index if axis == "row" else self.columns
    # Map label -> position via a positional Series, then select.
    positions = pandas.Series(np.arange(len(labels)), index=labels)
    return pandas.Index(positions.loc[indices].values)
Note: this function involves making copies of the index in memory. Args: axis: Axis to extract indices. indices: Indices to convert to numerical. Returns: An Index object.
juraj-google-style
def _get_deprecated_positional_arguments(names_to_ok_vals, arg_spec):
    """Builds a dictionary from deprecated arguments to their spec.

    The returned dict is keyed by argument name; each value is a
    DeprecatedArgSpec holding the argument's zero-based position in the
    signature and the values for which warnings are suppressed. Names not
    present in the signature are omitted.

    Args:
        names_to_ok_vals: dict from string arg_name to a list of values,
            possibly empty, which should not elicit a warning.
        arg_spec: Output from tf_inspect.getfullargspec on the called
            function.

    Returns:
        Dictionary from arg_name to DeprecatedArgSpec.
    """
    signature_args = arg_spec.args + arg_spec.kwonlyargs
    positions = {name: pos for pos, name in enumerate(signature_args)}
    result = {}
    for arg_name, spec in names_to_ok_vals.items():
        if arg_name in positions:
            result[arg_name] = DeprecatedArgSpec(
                positions[arg_name], spec.has_ok_value, spec.ok_value)
    return result
Builds a dictionary from deprecated arguments to their spec. Returned dict is keyed by argument name. Each value is a DeprecatedArgSpec with the following fields: position: The zero-based argument position of the argument within the signature. None if the argument isn't found in the signature. ok_values: Values of this argument for which warning will be suppressed. Args: names_to_ok_vals: dict from string arg_name to a list of values, possibly empty, which should not elicit a warning. arg_spec: Output from tf_inspect.getfullargspec on the called function. Returns: Dictionary from arg_name to DeprecatedArgSpec.
github-repos
def ExpectingFunctionArgs(clean_lines, linenum):
    """Checks whether function type arguments are expected here.

    Args:
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.

    Returns:
        Truthy if the line at 'linenum' is inside something that expects
        arguments of function types (MOCK_METHOD macros or a trailing
        std::function on the previous line).
    """
    line = clean_lines.elided[linenum]
    direct = Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line)
    if direct:
        return direct
    if linenum < 2:
        return False
    prev_line = clean_lines.elided[linenum - 1]
    return (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', prev_line)
            or Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                     clean_lines.elided[linenum - 2])
            or Search(r'\bstd::m?function\s*\<\s*$', prev_line))
Checks whether where function type arguments are expected. Args: clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. Returns: True if the line at 'linenum' is inside something that expects arguments of function types.
juraj-google-style
def parse_variables_mapping(variables_mapping, ignore=False):
    """Evaluate each prepared variable and function in variables_mapping.

    Variables may reference each other, so parsing repeats in passes until
    every entry is resolved. A variable referencing itself raises (or is
    kept as-is when ``ignore`` is set).

    Args:
        variables_mapping (dict): mapping of name to lazy value, e.g.
            {"varA": LazyString(123$varB), "a": 1}.
        ignore (bool): If set True, VariableNotFound will be ignored.
            This is used when initializing tests.

    Returns:
        dict: parsed variables_mapping containing no variables or
        functions.

    Raises:
        exceptions.VariableNotFound: when variables cannot be resolved
            within the pass budget, or on a self-reference (unless
            ignored).
    """
    run_times = 0
    parsed = {}
    while len(parsed) != len(variables_mapping):
        for var_name in variables_mapping:
            run_times += 1
            if run_times > len(variables_mapping) * 4:
                # Pass budget exhausted: the remainder cannot be resolved.
                not_found = {
                    key: variables_mapping[key]
                    for key in variables_mapping
                    if key not in parsed
                }
                raise exceptions.VariableNotFound(not_found)
            if var_name in parsed:
                continue
            value = variables_mapping[var_name]
            referenced = extract_variables(value)
            if var_name in referenced:
                # A self-referencing variable can never resolve.
                if ignore:
                    parsed[var_name] = value
                    continue
                raise exceptions.VariableNotFound(var_name)
            if referenced and any(name not in parsed for name in referenced):
                # Dependencies not resolved yet; retry on a later pass.
                continue
            parsed[var_name] = parse_lazy_data(value, parsed)
    return parsed
eval each prepared variable and function in variables_mapping. Args: variables_mapping (dict): { "varA": LazyString(123$varB), "varB": LazyString(456$varC), "varC": LazyString(${sum_two($a, $b)}), "a": 1, "b": 2, "c": {"key": LazyString($b)}, "d": [LazyString($a), 3] } ignore (bool): If set True, VariableNotFound will be ignored. This is used when initializing tests. Returns: dict: parsed variables_mapping should not contain any variable or function. { "varA": "1234563", "varB": "4563", "varC": "3", "a": 1, "b": 2, "c": {"key": 2}, "d": [1, 3] }
codesearchnet
def read_config_info(ini_file):
    """Read the INI file into a nested dict.

    Args:
        ini_file - path to the file.

    Returns:
        A dictionary {section: {option: value}} of stuff from the INI
        file.

    Exits:
        1 - if problems are encountered.
    """
    try:
        config = RawConfigParser()
        # Keep option names case-sensitive (the default lower-cases them).
        config.optionxform = lambda option: option
        config.read(ini_file)
        return {
            section: {
                option: config.get(section, option)
                for option in config.options(section)
            }
            for section in config.sections()
        }
    except Exception as wtf:
        logging.error('Exception caught in read_config_info(): {}'.format(wtf))
        traceback.print_exc(file=sys.stdout)
        return sys.exit(1)
Read the INI file Args: ini_file - path to the file Returns: A dictionary of stuff from the INI file Exits: 1 - if problems are encountered
codesearchnet
def events(self):
    """Gets the Events API client, creating it lazily on first access.

    Returns:
        Events: the events client.
    """
    if not self.__events:
        # Instantiate once and cache for subsequent accesses.
        self.__events = Events(self.__connection)
    return self.__events
Gets the Events API client. Returns: Events:
codesearchnet
def __call__(self, fn):
    """Overload the Mock instance as a callable so it can be used as a
    decorator.

    Arguments:
        fn (function): function to decorate. A Response yields its mock;
            a Mock is returned unchanged.

    Returns:
        function or pook.Mock
    """
    if isinstance(fn, Response):
        return fn.mock
    if isinstance(fn, Mock):
        return fn
    if not isfunction(fn) and not ismethod(fn):
        raise TypeError('first argument must be a method or function')
    # The mock is only registered for the duration of each wrapped call.
    self._engine.remove_mock(self)

    @functools.wraps(fn)
    def wrapper(*args, **kw):
        self._engine.add_mock(self)
        engine_active = self._engine.active
        if not engine_active:
            self._engine.activate()
        try:
            return fn(*args, **kw)
        finally:
            self._engine.remove_mock(self)
            if not engine_active:
                self._engine.disable()

    return wrapper
Overload Mock instance as callable object in order to be used as decorator definition syntax. Arguments: fn (function): function to decorate. Returns: function or pook.Mock
juraj-google-style
def as_fn(self, *binding_order):
    """Creates a function by binding the arguments in the given order.

    Args:
        *binding_order: The unbound variables. This must include all
            values.

    Returns:
        A function that takes the arguments of binding_order.

    Raises:
        ValueError: If the bindings are missing values or include
            unknown values.
    """
    if len(binding_order) != len(self.unbound_vars):
        raise ValueError('All vars must be specified.')
    unknown = [arg for arg in binding_order if arg not in self.unbound_vars]
    if unknown:
        raise ValueError('Unknown binding: %s' % unknown[0])

    def func(*args, **kwargs):
        """Constructs a template."""
        if len(binding_order) != len(args):
            raise ValueError('Missing values, expects: %s' % binding_order)
        values = dict(zip(binding_order, args))
        values.update(kwargs)
        return self.construct(**values)

    func.__doc__ = _gen_ipython_string(func, binding_order, [], func.__doc__)
    return func
Creates a function by binding the arguments in the given order. Args: *binding_order: The unbound variables. This must include all values. Returns: A function that takes the arguments of binding_order. Raises: ValueError: If the bindings are missing values or include unknown values.
codesearchnet
def noisy_moments(self, moments: 'Iterable[cirq.Moment]',
                  system_qubits: Sequence['cirq.Qid']
                  ) -> Sequence['cirq.OP_TREE']:
    """Adds possibly stateful noise to a series of moments.

    Args:
        moments: The moments to add noise to.
        system_qubits: A list of all qubits in the system.

    Returns:
        A sequence of OP_TREEs, with the k'th tree corresponding to the
        noisy operations for the k'th moment.
    """
    # Prefer the whole-moment hook when the subclass overrides it.
    if not hasattr(self.noisy_moment, '_not_overridden'):
        return [self.noisy_moment(moment, system_qubits)
                for moment in moments]
    # Otherwise fall back to the per-operation hook.
    if not hasattr(self.noisy_operation, '_not_overridden'):
        return [[self.noisy_operation(op) for op in moment]
                for moment in moments]
    assert False, 'Should be unreachable.'
Adds possibly stateful noise to a series of moments. Args: moments: The moments to add noise to. system_qubits: A list of all qubits in the system. Returns: A sequence of OP_TREEs, with the k'th tree corresponding to the noisy operations for the k'th moment.
juraj-google-style
def parseString(inString, silence=False):
    """Parse an XML string, create the object tree, and export it.

    Args:
        inString: A string holding the XML fragment. It should not start
            with an XML declaration containing an encoding.
        silence: A boolean. If False, the parsed object is exported to
            stdout.

    Returns:
        The root object in the tree.
    """
    parser = None
    rootNode = parsexmlstring_(inString, parser)
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        # Unknown root element: fall back to the PcGts document root.
        rootTag = 'PcGts'
        rootClass = PcGts
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    if not silence:
        sys.stdout.write('<?xml version="1.0" ?>\n')
        # NOTE(review): the namespace URI string literal was truncated in
        # the original source; restored to the PAGE content schema URI --
        # confirm the schema version against the generating bindings.
        rootObj.export(
            sys.stdout, 0, name_=rootTag,
            namespacedef_='xmlns:pc="http://schema.primaresearch.org/PAGE/gts/pagecontent/2019-07-15"')
    return rootObj
Parse a string, create the object tree, and export it. Arguments: - inString -- A string. This XML fragment should not start with an XML declaration containing an encoding. - silence -- A boolean. If False, export the object. Returns -- The root object in the tree.
juraj-google-style
def strace_clear_all(self):
    """Clears all STRACE events.

    Args:
        self (JLink): the ``JLink`` instance.

    Returns:
        ``None``

    Raises:
        JLinkException: on error.
    """
    # The command payload is unused for TRACE_EVENT_CLR_ALL; pass zero.
    data = 0
    res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.TRACE_EVENT_CLR_ALL, data)
    # Negative return values indicate a DLL-level failure.
    if res < 0:
        raise errors.JLinkException('Failed to clear all STRACE events.')
    return None
Clears all STRACE events. Args: self (JLink): the ``JLink`` instance. Returns: ``None`` Raises: JLinkException: on error.
juraj-google-style
def _properties_model_to_dict(properties):
    """Convert a properties model instance to a plain dict.

    Nested model objects (values whose ``__module__`` contains
    ``'models'``) are converted recursively. ``None`` values and values
    that convert to an empty dict are dropped from the output.

    Args:
        properties: Properties model.

    Returns:
        dict: Converted model.
    """
    converted = {}
    for name in properties.__dict__:
        item = getattr(properties, name)
        # Recurse into nested model objects.
        if hasattr(item, '__module__') and 'models' in item.__module__:
            item = _properties_model_to_dict(item)
        # Skip empty results: None, or a dict that converted to nothing.
        is_empty = item is None or (isinstance(item, dict) and not item)
        if not is_empty:
            converted[name] = item
    return converted
Convert properties model to dict. Args: properties: Properties model. Returns: dict: Converted model.
codesearchnet
def optimize(onnx_model_path: Path) -> Path:
    """Let onnxruntime apply graph optimizations and save the result.

    Loading the model with ``optimized_model_filepath`` set makes
    onnxruntime run all possible graph transformations and write the
    optimized model description to that path.

    Args:
        onnx_model_path: filepath where the model binary description is
            stored.

    Returns:
        Path where the optimized model binary description has been saved.
    """
    from onnxruntime import InferenceSession, SessionOptions
    opt_model_path = generate_identified_filename(onnx_model_path, '-optimized')
    sess_option = SessionOptions()
    # Creating the session with this option set triggers the optimized
    # model export as a side effect; the session itself is discarded.
    sess_option.optimized_model_filepath = opt_model_path.as_posix()
    _ = InferenceSession(onnx_model_path.as_posix(), sess_option)
    print(f'Optimized model has been written at {opt_model_path}: ✔')
    print('/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\')
    return opt_model_path
Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the optimizations possible Args: onnx_model_path: filepath where the model binary description is stored Returns: Path where the optimized model binary description has been saved
github-repos
class Conv1D(nn.Module):
    """1D-convolutional layer as defined by Radford et al. for OpenAI GPT
    (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        self.nx = nx
        # Note the transposed (nx, nf) weight layout relative to nn.Linear.
        self.weight = nn.Parameter(torch.empty(nx, nf))
        self.bias = nn.Parameter(torch.zeros(nf))
        nn.init.normal_(self.weight, std=0.02)

    def __repr__(self) -> str:
        return 'Conv1D(nf={nf}, nx={nx})'.format(**self.__dict__)

    def forward(self, x):
        # Flatten all leading dims, apply the affine map, then restore them.
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        out = torch.addmm(self.bias, flat, self.weight)
        return out.view(out_shape)
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2). Basically works like a linear layer but the weights are transposed. Args: nf (`int`): The number of output features. nx (`int`): The number of input features.
github-repos
def _get_or_create_global_step_read(graph=None):
    """Gets or creates the global step read tensor in the graph.

    Args:
        graph: The graph in which to create the global step read tensor.
            If missing, the default graph is used.

    Returns:
        Global step read tensor if there is a global_step_tensor,
        else None.
    """
    graph = graph or ops.get_default_graph()
    # Reuse an existing read tensor when one was already registered.
    global_step_read_tensor = _get_global_step_read(graph)
    if global_step_read_tensor is not None:
        return global_step_read_tensor
    global_step_tensor = get_global_step(graph)
    if global_step_tensor is None:
        return None
    with graph.as_default() as g, g.name_scope(None):
        with g.name_scope(global_step_tensor.op.name + '/'):
            if isinstance(global_step_tensor, variables.Variable):
                # Guard against reading an uninitialized variable by falling
                # back to its initial value.
                global_step_value = cond.cond(variable_v1.is_variable_initialized(global_step_tensor), global_step_tensor.read_value, lambda: global_step_tensor.initial_value)
            else:
                global_step_value = global_step_tensor
            # The `+ 0` creates a new tensor so the collection holds a
            # read of the value rather than the variable itself.
            global_step_read_tensor = global_step_value + 0
            ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)
    return _get_global_step_read(graph)
Gets or creates global step read tensor in graph. Args: graph: The graph in which to create the global step read tensor. If missing, use default graph. Returns: Global step read tensor if there is global_step_tensor else return None.
github-repos
def rho_rec(self, g2):
    """Reciprocal-space model charge value for an input squared
    reciprocal vector.

    Args:
        g2: Square of the reciprocal vector.

    Returns:
        Charge density at the reciprocal vector magnitude.
    """
    # Split the two model terms for readability; the sum is unchanged.
    exp_term = self.expnorm / np.sqrt(1 + self.gamma2 * g2)
    gauss_term = (1 - self.expnorm) * np.exp(-0.25 * self.beta2 * g2)
    return exp_term + gauss_term
Reciprocal space model charge value for input squared reciprocal vector. Args: g2: Square of reciprocal vector Returns: Charge density at the reciprocal vector magnitude
juraj-google-style
def _ensure_unicode(text):
    """Ensure the text passed in is returned as a unicode string.

    Args:
        text (str | bytes): possibly encoded text.

    Returns:
        str: ``text`` decoded with the filesystem encoding (undecodable
        bytes replaced) when it was ``bytes``, otherwise ``text``
        unchanged.
    """
    # `six.binary_type` is simply `bytes` on Python 3; the py2 compat shim
    # (and the third-party dependency) is unnecessary.
    if isinstance(text, bytes):
        return text.decode(sys.getfilesystemencoding(), 'replace')
    return text
Ensures the text passed in becomes unicode Args: text (str|unicode) Returns: unicode
juraj-google-style
def read_accpro20(infile):
    """Read ACCpro20 output (.acc20) and return the parsed records.

    The file alternates a FASTA-style header line (``>ID ...``) with a
    line of space-separated accessibility numbers; the spaces between
    the numbers are significant and preserved by the split.

    Args:
        infile: Path to the .acc20 file.

    Returns:
        dict: Dictionary of accessibilities with sequence IDs as keys.
    """
    with open(infile) as handle:
        lines = handle.read().splitlines()
    parsed = {}
    # Even-indexed lines are headers; the following line holds the values.
    for idx in range(0, len(lines), 2):
        seq_id = lines[idx].split(' ')[0][1:]
        parsed[seq_id] = [int(n) for n in lines[idx + 1].split(' ')]
    return parsed
Read the accpro20 output (.acc20) and return the parsed FASTA records. Keeps the spaces between the accessibility numbers. Args: infile: Path to .acc20 file Returns: dict: Dictionary of accessibilities with keys as the ID
juraj-google-style
def FoldByteStream(self, mapped_value, **kwargs):
    """Folds the data type into a byte stream.

    Args:
        mapped_value (object): mapped value.

    Returns:
        bytes: byte stream.

    Raises:
        errors.MappingError: if the string cannot be encoded into the
            byte stream.
    """
    try:
        # Encode using the encoding declared on the data type definition.
        byte_stream = mapped_value.encode(self._data_type_definition.encoding)
    except Exception as exception:
        error_string = (
            'Unable to write: {0:s} to byte stream with error: {1!s}').format(
                self._data_type_definition.name, exception)
        raise errors.MappingError(error_string)
    return super(StringMap, self).FoldByteStream(byte_stream, **kwargs)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
juraj-google-style
def invoke(self):
    """Invoke the interpreter.

    Be sure to set the input sizes, allocate tensors and fill values
    before calling this. Note that this function releases the GIL so
    heavy computation can be done in the background while the Python
    interpreter continues. No other function on this object should be
    called while the invoke() call has not finished.

    Raises:
        ValueError: when the underlying interpreter fails.
    """
    # Raises if tensors are not allocated / the interpreter is unsafe to run.
    self._ensure_safe()
    self._interpreter.Invoke()
Invoke the interpreter. Be sure to set the input sizes, allocate tensors and fill values before calling this. Also, note that this function releases the GIL so heavy computation can be done in the background while the Python interpreter continues. No other function on this object should be called while the invoke() call has not finished. Raises: ValueError: When the underlying interpreter fails raise ValueError.
github-repos
def backward_transfer_pair(backward_channel: NettingChannelState, payer_transfer: LockedTransferSignedState, pseudo_random_generator: random.Random, block_number: BlockNumber) -> Tuple[(Optional[MediationPairState], List[Event])]:
    """Sends a transfer backwards, allowing the previous hop to try a new
    route.

    When all the routes available for this node failed, send a transfer
    backwards with the same amount and secrethash, allowing the previous
    hop to do a retry.

    Args:
        backward_channel: The original channel which sent the mediated
            transfer to this node.
        payer_transfer: The *latest* payer transfer which is backing the
            mediation.
        block_number: The current block number.

    Returns:
        The mediator pair and the corresponding refund event.
    """
    transfer_pair = None
    events: List[Event] = list()
    lock = payer_transfer.lock
    lock_timeout = BlockTimeout((lock.expiration - block_number))
    # Only refund when the backward channel can carry the locked amount for
    # the remaining timeout; otherwise return (None, []) unchanged.
    if is_channel_usable(backward_channel, lock.amount, lock_timeout):
        message_identifier = message_identifier_from_prng(pseudo_random_generator)
        refund_transfer = channel.send_refundtransfer(channel_state=backward_channel, initiator=payer_transfer.initiator, target=payer_transfer.target, amount=get_lock_amount_after_fees(lock, backward_channel), message_identifier=message_identifier, payment_identifier=payer_transfer.payment_identifier, expiration=lock.expiration, secrethash=lock.secrethash)
        transfer_pair = MediationPairState(payer_transfer, backward_channel.partner_state.address, refund_transfer.transfer)
        events.append(refund_transfer)
    return (transfer_pair, events)
Sends a transfer backwards, allowing the previous hop to try a new route. When all the routes available for this node failed, send a transfer backwards with the same amount and secrethash, allowing the previous hop to do a retry. Args: backward_channel: The original channel which sent the mediated transfer to this node. payer_transfer: The *latest* payer transfer which is backing the mediation. block_number: The current block number. Returns: The mediator pair and the correspoding refund event.
codesearchnet
def run_tag_from_session_and_metric(session_name, metric_name):
    """Returns a (run, tag) tuple storing the evaluations of the specified
    metric.

    Args:
        session_name: str.
        metric_name: MetricName protobuffer.

    Returns:
        (run, tag) tuple.
    """
    assert isinstance(session_name, six.string_types)
    assert isinstance(metric_name, api_pb2.MetricName)
    # The run directory is the session joined with the metric's group.
    run = os.path.normpath(os.path.join(session_name, metric_name.group))
    tag = metric_name.tag
    return run, tag
Returns a (run,tag) tuple storing the evaluations of the specified metric. Args: session_name: str. metric_name: MetricName protobuffer. Returns: (run, tag) tuple.
juraj-google-style
def GetRootKey(self):
    """Retrieves the root key.

    Returns:
        WinRegistryKey: Windows Registry root key or None if not
        available.
    """
    regf_key = self._regf_file.get_root_key()
    if regf_key:
        # Wrap the low-level key, anchored at the configured path prefix.
        return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix)
    return None
Retrieves the root key. Returns: WinRegistryKey: Windows Registry root key or None if not available.
codesearchnet
def get_att_mats(translate_model):
    """Gets the tensors representing the attentions from a built model.

    The attention weights are stored in a dict on the Transformer object
    while building the graph.

    Args:
        translate_model: Transformer object to fetch the attention weights
            from.

    Returns:
        Tuple of attention matrix lists, one entry per hidden layer:
        (enc_atts, dec_atts, encdec_atts) holding encoder self-attention,
        decoder self-attention and encoder-decoder attention weights
        respectively.
    """
    enc_atts = []
    dec_atts = []
    encdec_atts = []
    prefix = 'transformer/body/'
    postfix_self_attention = '/multihead_attention/dot_product_attention'
    # Relative self-attention records its weights under a different suffix.
    if (translate_model.hparams.self_attention_type == 'dot_product_relative'):
        postfix_self_attention = '/multihead_attention/dot_product_attention_relative'
    postfix_encdec = '/multihead_attention/dot_product_attention'
    for i in range(translate_model.hparams.num_hidden_layers):
        enc_att = translate_model.attention_weights[('%sencoder/layer_%i/self_attention%s' % (prefix, i, postfix_self_attention))]
        dec_att = translate_model.attention_weights[('%sdecoder/layer_%i/self_attention%s' % (prefix, i, postfix_self_attention))]
        encdec_att = translate_model.attention_weights[('%sdecoder/layer_%i/encdec_attention%s' % (prefix, i, postfix_encdec))]
        enc_atts.append(enc_att)
        dec_atts.append(dec_att)
        encdec_atts.append(encdec_att)
    return (enc_atts, dec_atts, encdec_atts)
Get's the tensors representing the attentions from a build model. The attentions are stored in a dict on the Transformer object while building the graph. Args: translate_model: Transformer object to fetch the attention weights from. Returns: Tuple of attention matrices; ( enc_atts: Encoder self attention weights. A list of `num_layers` numpy arrays of size (batch_size, num_heads, inp_len, inp_len) dec_atts: Decoder self attetnion weights. A list of `num_layers` numpy arrays of size (batch_size, num_heads, out_len, out_len) encdec_atts: Encoder-Decoder attention weights. A list of `num_layers` numpy arrays of size (batch_size, num_heads, out_len, inp_len) )
codesearchnet
def _ParseContainerTable(self, parser_mediator, table, container_name):
    """Parses a Container_# table.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        table (pyesedb.table): table.
        container_name (str): container name, which indicates the table
            type.

    Raises:
        ValueError: if the table value is missing.
    """
    if table is None:
        raise ValueError('Missing table value.')
    for record_index, esedb_record in enumerate(table.records):
        # Honor user abort requests between records.
        if parser_mediator.abort:
            break
        # Only the Content container uses the special value mappings.
        if container_name == 'Content':
            value_mappings = self._CONTAINER_TABLE_VALUE_MAPPINGS
        else:
            value_mappings = None
        try:
            record_values = self._GetRecordValues(
                parser_mediator, table.name, esedb_record,
                value_mappings=value_mappings)
        except UnicodeDecodeError:
            parser_mediator.ProduceExtractionWarning((
                'Unable to retrieve record values from record: {0:d} '
                'in table: {1:s}').format(record_index, table.name))
            continue
        if (container_name in self._SUPPORTED_CONTAINER_NAMES or
            container_name.startswith('MSHist')):
            access_count = record_values.get('AccessCount', None)
            cached_filename = record_values.get('Filename', None)
            cached_file_size = record_values.get('FileSize', None)
            cache_identifier = record_values.get('CacheId', None)
            container_identifier = record_values.get('ContainerId', None)
            entry_identifier = record_values.get('EntryId', None)
            file_extension = record_values.get('FileExtension', None)
            redirect_url = record_values.get('RedirectUrl', None)
            sync_count = record_values.get('SyncCount', None)
            url = record_values.get('Url', '')
            # Discard URL values that start with a control character.
            # NOTE(review): an empty Url value would raise IndexError on
            # url[0] here -- confirm the column is always non-empty.
            if ord(url[0]) < 0x20 or ord(url[0]) == 0x7f:
                url = None
            request_headers = record_values.get('RequestHeaders', None)
            # Headers are only kept when stored as a Unicode string.
            if not isinstance(request_headers, py2to3.UNICODE_TYPE):
                request_headers = None
            response_headers = record_values.get('ResponseHeaders', None)
            if not isinstance(response_headers, py2to3.UNICODE_TYPE):
                response_headers = None
            event_data = MsieWebCacheContainerEventData()
            event_data.access_count = access_count
            event_data.cached_filename = cached_filename
            event_data.cached_file_size = cached_file_size
            event_data.cache_identifier = cache_identifier
            event_data.container_identifier = container_identifier
            event_data.entry_identifier = entry_identifier
            event_data.file_extension = file_extension
            event_data.redirect_url = redirect_url
            event_data.request_headers = request_headers
            event_data.response_headers = response_headers
            event_data.sync_count = sync_count
            event_data.url = url
            # Produce one event per available timestamp column.
            timestamp = record_values.get('SyncTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, 'Synchronization time')
                parser_mediator.ProduceEventWithEventData(event, event_data)
            timestamp = record_values.get('CreationTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_CREATION)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            timestamp = record_values.get('ExpiryTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            timestamp = record_values.get('ModifiedTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_MODIFICATION)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            timestamp = record_values.get('AccessedTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, definitions.TIME_DESCRIPTION_LAST_ACCESS)
                parser_mediator.ProduceEventWithEventData(event, event_data)
            timestamp = record_values.get('PostCheckTime', None)
            if timestamp:
                date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
                event = time_events.DateTimeValuesEvent(
                    date_time, 'Post check time')
                parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a Container_# table. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. table (pyesedb.table): table. container_name (str): container name, which indicates the table type. Raises: ValueError: if the table value is missing.
juraj-google-style
def ParseStatusRow(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a status row from the database.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    event_data = TwitterAndroidStatusEventData()
    event_data.query = query
    event_data.identifier = self._GetRowValue(query_hash, row, '_id')
    event_data.author_identifier = self._GetRowValue(
        query_hash, row, 'author_id')
    event_data.username = self._GetRowValue(query_hash, row, 'username')
    event_data.content = self._GetRowValue(query_hash, row, 'content')
    event_data.favorited = self._GetRowValue(query_hash, row, 'favorited')
    event_data.retweeted = self._GetRowValue(query_hash, row, 'retweeted')
    # The 'time' column is interpreted as a Java time (milliseconds)
    # timestamp of the status creation.
    timestamp = self._GetRowValue(query_hash, row, 'time')
    if timestamp:
        date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a status row from the database. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row resulting from query.
juraj-google-style
def merge_config(config: Mapping[(str, Any)], override_config: Mapping[(str, Any)]=None, override_config_fn: str=None) -> Mapping[(str, Any)]:
    """Override config with additional configuration in override_config or
    override_config_fn.

    Used in scripts to merge CLI options with the Config.

    Args:
        config: original configuration.
        override_config: new configuration to override/extend the current
            config.
        override_config_fn: filename of a YAML file holding the new
            configuration; takes precedence over ``override_config``.
    """
    if override_config_fn:
        with open(override_config_fn, 'r') as f:
            override_config = yaml.load(f, Loader=yaml.SafeLoader)
    if (not override_config):
        log.info('Missing override_config')
    # NOTE(review): when override_config is falsy this still calls
    # rec_merge(config, None) -- confirm rec_merge tolerates None.
    return functools.reduce(rec_merge, (config, override_config))
Override config with additional configuration in override_config or override_config_fn Used in script to merge CLI options with Config Args: config: original configuration override_config: new configuration to override/extend current config override_config_fn: new configuration filename as YAML file
codesearchnet
def _cast_to_known_type(name):
    """Canonicalizes a string representing a type if possible.

    Example: _cast_to_known_type("str.") == "str"

    Args:
        name: A string representing a type, or None.

    Returns:
        A canonicalized version of the type string (trailing dots
        stripped), or None if ``name`` is None.
    """
    return None if name is None else name.rstrip('.')
Canonicalizes a string representing a type if possible. # TODO(dbieber): Support additional canonicalization, such as string/str, and # boolean/bool. Example: _cast_to_known_type("str.") == "str" Args: name: A string representing a type, or None. Returns: A canonicalized version of the type string.
github-repos
def __init__(self, text_encoder_config=None, **kwargs):
    """BuilderConfig for MultiNLI.

    Args:
        text_encoder_config: `tfds.features.text.TextEncoderConfig`,
            configuration for the `tfds.features.text.TextEncoder` used
            for the features feature. Defaults to a fresh config.
        **kwargs: keyword arguments forwarded to super.
    """
    super(MultiNLIConfig, self).__init__(**kwargs)
    self.text_encoder_config = (
        text_encoder_config or tfds.features.text.TextEncoderConfig())
BuilderConfig for MultiNLI. Args: text_encoder_config: `tfds.features.text.TextEncoderConfig`, configuration for the `tfds.features.text.TextEncoder` used for the features feature. **kwargs: keyword arguments forwarded to super.
juraj-google-style
def __init__(self, value=None):
    """Construct an ExtensionType object.

    Args:
        value (Types): A number representing a Types enumeration value,
            indicating the type of the extended Object. Optional,
            defaults to None.
    """
    # The KMIP tag is fixed; only the enumeration value varies.
    super(ExtensionType, self).__init__(value, Tags.EXTENSION_TYPE)
Construct an ExtensionType object. Args: value (Types): A number representing a Types enumeration value, indicating the type of the extended Object. Optional, defaults to None.
juraj-google-style
def add_data(self, data):
    """Add data to the currently in progress entry.

    Args:
        data (bytes): The data that we want to add.

    Returns:
        int: An error code (``Error.NO_ERROR`` on success).
    """
    # Reject writes that would overflow the remaining buffer space.
    if ((self.data_size - self.data_index) < len(data)):
        return Error.DESTINATION_BUFFER_TOO_SMALL
    # NOTE(review): when no entry is in progress the data is silently
    # dropped yet NO_ERROR is returned -- confirm this is intended.
    if (self.in_progress is not None):
        self.in_progress.data += data
    return Error.NO_ERROR
Add data to the currently in progress entry. Args: data (bytes): The data that we want to add. Returns: int: An error code
codesearchnet
def get_xnp(self, x: Array, *, strict: bool = True):
    """Returns the numpy module associated with the given array.

    Args:
        x: Either a jax, tf, torch or numpy array.
        strict: If `False`, default to `np` when the backend cannot be
            inferred, to support array-likes (list, tuple, ...).

    Returns:
        The numpy-compatible module for ``x``.

    Raises:
        TypeError: if the backing module cannot be inferred.
    """
    # Probe each backend in priority order and return its numpy facade.
    if self.is_jax(x):
        return self.jnp
    if self.is_tf(x):
        return self.tnp
    if self.is_np(x):
        return np
    if self.is_torch(x):
        return self.torch
    # Non-strict mode accepts generic array-likes and defaults to numpy.
    if not strict and isinstance(x, _ARRAY_LIKE_TYPES):
        return np
    raise TypeError(f'Cannot infer the numpy module from array: {type(x).__name__}')
Returns the numpy module associated with the given array. Args: x: Either tf, jax or numpy array. strict: If `False`, default to `np.array` if the array can't be infered ( to support array-like: list, tuple,...) Returns: The numpy module.
github-repos
async def attach_file(self, file_path: str, description: str = None) -> Attachment:
    """Add a file as an attachment.

    Warning:
        unstable API.

    Args:
        file_path: path to the file you want to add.
        description: optional description for your attachment.

    Returns:
        Attachment: the created attachment.

    Raises:
        APIException
    """
    # Read the file as raw bytes; the upload helper handles the rest.
    with open(file_path, 'rb') as f:
        return await self._attach(f.read(), description)
add a file as an attachment |methcoro| Warning: |unstable| Args: file_path: path to the file you want to add description: *optional* description for your attachment Returns: Attachment: Raises: ValueError: file_path must not be None APIException
juraj-google-style
def future(self, request, timeout=None, metadata=None, credentials=None):
    """Asynchronously invokes the underlying RPC.

    Args:
        request: The request value for the RPC.
        timeout: An optional duration of time in seconds to allow for the
            RPC.
        metadata: Optional metadata to be transmitted to the service-side
            of the RPC.
        credentials: An optional CallCredentials for the RPC.

    Returns:
        An object that is both a Call for the RPC and a Future. On RPC
        completion the result value is the response message; on non-OK
        status the exception value is an RpcError.
    """
    # Wrap the gRPC future so completion is dispatched on this loop/executor.
    return _utils.wrap_future_call(self._inner.future(request, timeout, metadata, credentials), self._loop, self._executor)
Asynchronously invokes the underlying RPC. Args: request: The request value for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. Returns: An object that is both a Call for the RPC and a Future. In the event of RPC completion, the return Call-Future's result value will be the response message of the RPC. Should the event terminate with non-OK status, the returned Call-Future's exception value will be an RpcError.
codesearchnet
def is60(msg):
    """Check if a message is likely to be BDS code 6,0.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Each field's status bit must be consistent with its value bits.
    if wrongstatus(d, 1, 2, 12):
        return False
    if wrongstatus(d, 13, 14, 23):
        return False
    if wrongstatus(d, 24, 25, 34):
        return False
    if wrongstatus(d, 35, 36, 45):
        return False
    if wrongstatus(d, 46, 47, 56):
        return False
    # Decoded values must fall within physically plausible ranges.
    ias = ias60(msg)
    if ias is not None and ias > 500:
        return False
    mach = mach60(msg)
    if mach is not None and mach > 1:
        return False
    vr_baro = vr60baro(msg)
    if vr_baro is not None and abs(vr_baro) > 6000:
        return False
    vr_ins = vr60ins(msg)
    if vr_ins is not None and abs(vr_ins) > 6000:
        return False
    return True
Check if a message is likely to be BDS code 6,0 Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
juraj-google-style
def comment(self, text, comment_prefix='#'):
    """Creates a comment block.

    Args:
        text (str): content of the comment, without the prefix.
        comment_prefix (str): character indicating the start of a comment.

    Returns:
        self for chaining.
    """
    # NOTE(review): the default prefix value was truncated in the original
    # source; restored to '#' per the documented contract -- confirm.
    comment = Comment(self._container)
    if not text.startswith(comment_prefix):
        text = "{} {}".format(comment_prefix, text)
    if not text.endswith('\n'):
        text = "{}{}".format(text, '\n')
    comment.add_line(text)
    # Insert at the current cursor position and advance it.
    self._container.structure.insert(self._idx, comment)
    self._idx += 1
    return self
Creates a comment block Args: text (str): content of comment without # comment_prefix (str): character indicating start of comment Returns: self for chaining
juraj-google-style
class TFConvNextLayer(keras.layers.Layer):
    """ConvNeXt block; corresponds to the `Block` class in the original
    implementation.

    There are two equivalent implementations:
    (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv], all in
        (N, C, H, W);
    (2) [DwConv, permute to (N, H, W, C), LayerNorm (channels_last),
        Linear, GELU, Linear], then permute back.
    The authors used (2) as they find it slightly faster in PyTorch. Since
    the inputs here already follow NHWC ordering, the operations can be
    applied straight away without any permutation.

    Args:
        config ([`ConvNextConfig`]): Model configuration class.
        dim (`int`): Number of input channels.
        drop_path (`float`): Stochastic depth rate. Default: 0.0.
    """

    def __init__(self, config, dim, drop_path=0.0, **kwargs):
        super().__init__(**kwargs)
        self.dim = dim
        self.config = config
        # Depthwise 7x7 convolution (groups == channels).
        self.dwconv = keras.layers.Conv2D(filters=dim, kernel_size=7, padding='same', groups=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='dwconv')
        self.layernorm = keras.layers.LayerNormalization(epsilon=1e-06, name='layernorm')
        # Pointwise (1x1) convolutions implemented as Dense layers on NHWC.
        self.pwconv1 = keras.layers.Dense(units=4 * dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv1')
        self.act = get_tf_activation(config.hidden_act)
        self.pwconv2 = keras.layers.Dense(units=dim, kernel_initializer=get_initializer(config.initializer_range), bias_initializer='zeros', name='pwconv2')
        # Identity activation stands in for drop path when the rate is 0.
        self.drop_path = TFConvNextDropPath(drop_path, name='drop_path') if drop_path > 0.0 else keras.layers.Activation('linear', name='drop_path')

    def build(self, input_shape: tf.TensorShape=None):
        # Per-channel layer scale parameter, only when enabled in config.
        self.layer_scale_parameter = self.add_weight(shape=(self.dim,), initializer=keras.initializers.Constant(value=self.config.layer_scale_init_value), trainable=True, name='layer_scale_parameter') if self.config.layer_scale_init_value > 0 else None
        if self.built:
            return
        self.built = True
        # Build each sublayer under its own name scope with explicit shapes.
        if getattr(self, 'dwconv', None) is not None:
            with tf.name_scope(self.dwconv.name):
                self.dwconv.build([None, None, None, self.dim])
        if getattr(self, 'layernorm', None) is not None:
            with tf.name_scope(self.layernorm.name):
                self.layernorm.build([None, None, None, self.dim])
        if getattr(self, 'pwconv1', None) is not None:
            with tf.name_scope(self.pwconv1.name):
                self.pwconv1.build([None, None, self.dim])
        if getattr(self, 'pwconv2', None) is not None:
            with tf.name_scope(self.pwconv2.name):
                self.pwconv2.build([None, None, 4 * self.dim])
        if getattr(self, 'drop_path', None) is not None:
            with tf.name_scope(self.drop_path.name):
                self.drop_path.build(None)

    def call(self, hidden_states, training=False):
        input = hidden_states
        x = self.dwconv(hidden_states)
        x = self.layernorm(x)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.layer_scale_parameter is not None:
            x = self.layer_scale_parameter * x
        # Residual connection with stochastic depth on the branch.
        x = input + self.drop_path(x, training=training)
        return x
This corresponds to the `Block` class in the original implementation. There are two equivalent implementations: [DwConv, LayerNorm (channels_first), Conv, GELU,1x1 Conv]; all in (N, C, H, W) (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back The authors used (2) as they find it slightly faster in PyTorch. Since we already permuted the inputs to follow NHWC ordering, we can just apply the operations straight-away without the permutation. Args: config ([`ConvNextConfig`]): Model configuration class. dim (`int`): Number of input channels. drop_path (`float`): Stochastic depth rate. Default: 0.0.
github-repos
def __str__(self):
    """Returns the string representation of the duplicate counts.

    Items occurring more than once are accompanied by their count;
    otherwise the count is implied to be 1. For example, if the internal
    dict is {2: 1, 3: 4, 'abc': 1}, this returns the string
    "[2, 3 [4 copies], 'abc']".

    Returns:
        String, the counts of duplicate items.
    """
    entries = []

    def record(item, count):
        # Only annotate items that actually occur more than once.
        if count == 1:
            entries.append('{0!r}'.format(item))
        else:
            entries.append('{0!r} [{1} copies]'.format(item, count))

    with self._lock:
        for item, count in six.iteritems(self._d):
            record(item, count)
        for item, count in zip(self._unhashable_items, self._unhashable_counts):
            record(item, count)
    return '[{0}]'.format(', '.join(entries))
Returns the string representation of the duplicate counts. Items occurring more than once are accompanied by their count. Otherwise the count is implied to be 1. For example, if the internal dict is {2: 1, 3: 4, 'abc': 1}, this returns the string "[2, 3 [4 copies], 'abc']". Returns: String, the counts of duplicate items.
github-repos
def WriteEventMACBGroup(self, event_macb_group):
    """Writes an event MACB group to the output.

    Args:
        event_macb_group (list[EventObject]): event MACB group.
    """
    # The grouped events share their values; format them from the first.
    values = self._GetOutputValues(event_macb_group[0])
    descriptions = [event.timestamp_desc for event in event_macb_group]
    # Column 3 holds the combined MACB flags, column 6 the joined
    # timestamp descriptions.
    values[3] = self._output_mediator.GetMACBRepresentationFromDescriptions(
        descriptions)
    values[6] = '; '.join(descriptions)
    self._WriteOutputValues(values)
Writes an event MACB group to the output. Args: event_macb_group (list[EventObject]): event MACB group.
codesearchnet
def __content_type_matches(self, content_type, available_content_types):
    """Check if the given content type matches one of the available
    content types.

    A match is either an exact entry in the list or an available type
    occurring as a substring of ``content_type`` (e.g. with charset
    parameters appended).

    Args:
        content_type (str): The given content type, or None.
        available_content_types list(str): All the available content
            types.

    Returns:
        bool: True if a match was found, False otherwise.
    """
    if content_type is None:
        return False
    if content_type in available_content_types:
        return True
    # Fall back to substring matching, e.g. 'application/json; charset=utf-8'.
    return any(candidate in content_type
               for candidate in available_content_types)
Check if the given content type matches one of the available content types. Args: content_type (str): The given content type. available_content_types list(str): All the available content types. Returns: bool: True if a match was found, False otherwise.
codesearchnet
def search(self, search_phrase, limit=None):
    """Finds datasets by search phrase.

    Args:
        search_phrase (str or unicode): phrase to search for.
        limit (int, optional): how many results to return. None means
            without limit.
            NOTE(review): `limit` is not applied anywhere in this method
            -- confirm whether truncation is expected here.

    Returns:
        list of DatasetSearchResult instances.
    """
    # Dashes are normalized to underscores to match the index tokens.
    search_phrase = search_phrase.replace('-', '_')
    query, query_params = self._make_query_from_terms(search_phrase)
    self._parsed_query = (query, query_params)
    connection = self.backend.library.database.connection
    # Register the SQLite `rank` function referenced by the FTS query.
    connection.connection.create_function('rank', 1, _make_rank_func((1., .1, 0, 0)))
    logger.debug('Searching datasets using `{}` query.'.format(query))
    results = connection.execute(query, **query_params).fetchall()
    datasets = defaultdict(DatasetSearchResult)
    for result in results:
        vid, score = result
        datasets[vid] = DatasetSearchResult()
        datasets[vid].vid = vid
        datasets[vid].b_score = score
    logger.debug('Extending datasets with partitions.')
    # Fold partition hits into their parent dataset's score and set.
    for partition in self.backend.partition_index.search(search_phrase):
        datasets[partition.dataset_vid].p_score += partition.score
        datasets[partition.dataset_vid].partitions.add(partition)
    return list(datasets.values())
Finds datasets by search phrase. Args: search_phrase (str or unicode): limit (int, optional): how many results to return. None means without limit. Returns: list of DatasetSearchResult instances.
juraj-google-style
def set_input_embeddings(self, value):
    """Set model's input embeddings.

    Args:
        value (`tf.Variable`): The new weights mapping hidden states to
            vocabulary.
    """
    main_layer = getattr(self, self.base_model_prefix)
    if main_layer is None:
        raise NotImplementedError('The model does not implements the base_model_prefix attribute.')
    try:
        main_layer.set_input_embeddings(value)
    except AttributeError:
        # The embedding layers may not exist yet; build the model and retry.
        logger.info('Building the model')
        self.build_in_name_scope()
        main_layer.set_input_embeddings(value)
Set model's input embeddings Args: value (`tf.Variable`): The new weights mapping hidden states to vocabulary.
github-repos
def _pad_input(self, inputs):
    """Pad input in case the desired padding type requires it.

    VALID and SAME padding types are directly supported by tensorflow
    convolution ops, so they don't require us to pad the input ourselves,
    at least in cases where the same method is used for all dimensions.
    Other padding types (FULL, CAUSAL, REVERSE_CAUSAL) aren't directly
    supported but can be implemented by using VALID and padding the input
    appropriately ourselves. If different padding types are used for
    different dimensions, we use VALID but pad the input ourselves along
    any dimensions that require other padding types.

    Args:
        inputs: A Tensor of shape `data_format` and of type `tf.float16`,
            `tf.bfloat16` or `tf.float32`.

    Returns:
        inputs: The `inputs` argument with any required padding added.
    """
    # Nothing to do when the conv op itself already applies the padding.
    if all(((p == self._conv_op_padding) for p in self._padding)):
        return inputs
    assert (self._conv_op_padding == VALID)

    def pad_amount(kernel_size, rate, padding):
        'Pre- and post-padding required for a particular axis before conv op.'
        effective_kernel_size = int((((kernel_size - 1) * rate) + 1))
        if (padding == FULL):
            return [(effective_kernel_size - 1), (effective_kernel_size - 1)]
        if (padding == CAUSAL):
            return [(effective_kernel_size - 1), 0]
        if (padding == REVERSE_CAUSAL):
            return [0, (effective_kernel_size - 1)]
        if (padding == SAME):
            # NOTE(review): this return was truncated in the original
            # source; restored to the conventional SAME split (extra
            # element goes after) -- confirm against the library tests.
            return [((effective_kernel_size - 1) // 2),
                    (effective_kernel_size // 2)]
        return [0, 0]
    paddings = map(pad_amount, self._kernel_shape, self._rate, self._padding)
    # Batch and channel axes are never padded; their position depends on
    # the data format (NC... vs N...C).
    if self._data_format.startswith('NC'):
        paddings = ([[0, 0], [0, 0]] + list(paddings))
    else:
        paddings = (([[0, 0]] + list(paddings)) + [[0, 0]])
    return tf.pad(inputs, paddings)
Pad input in case the desired padding type requires it. VALID and SAME padding types are directly supported by tensorflow convolution ops, so don't require us to pad input ourselves, at least in cases where the same method is used for all dimensions. Other padding types (FULL, CAUSAL, REVERSE_CAUSAL) aren't directly supported by conv ops but can be implemented by using VALID and padding the input appropriately ourselves. If different padding types are used for different dimensions, we use VALID but pad the input ourselves along any dimensions that require other padding types. Args: inputs: A Tensor of shape `data_format` and of type `tf.float16`, `tf.bfloat16` or `tf.float32`. Returns: inputs: The `inputs` argument that has had any required padding added.
codesearchnet
def convert_new_publication_info_to_old(publication_infos):
    """Convert back a ``publication_info`` value from the new format to the
    old.

    Does the inverse transformation of
    :func:`convert_old_publication_info_to_new`, to be used whenever we are
    sending back records from Labs to Legacy.

    Args:
        publication_infos: a ``publication_info`` in the new format.

    Returns:
        list(dict): a ``publication_info`` in the old format.
    """
    def _needs_a_hidden_pubnote(journal_title, journal_volume):
        # True when this (journal, volume) pair must also emit a hidden
        # pubnote entry.
        return (
            journal_title in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE
            and journal_volume in _JOURNALS_THAT_NEED_A_HIDDEN_PUBNOTE[journal_title]
        )

    result = []
    for publication_info in publication_infos:
        _publication_info = copy.deepcopy(publication_info)
        journal_title = _publication_info.get('journal_title')
        # Renamed journals map straight back to their old title.
        try:
            journal_title = _JOURNALS_RENAMED_NEW_TO_OLD[journal_title]
            _publication_info['journal_title'] = journal_title
            result.append(_publication_info)
            continue
        except KeyError:
            pass
        journal_volume = _publication_info.get('journal_volume')
        year = _publication_info.get('year')
        # Some journals prepend the two-digit year to the volume.
        if (journal_title in _JOURNALS_WITH_YEAR_ADDED_TO_VOLUME and year
                and journal_volume
                and len(journal_volume) == 2):
            two_digit_year = str(year)[2:]
            _publication_info['journal_volume'] = ''.join([two_digit_year, journal_volume])
            result.append(_publication_info)
            continue
        if journal_title and journal_volume:
            match = _RE_TITLE_ENDS_WITH_A_LETTER.match(journal_title)
            if match and _needs_a_hidden_pubnote(journal_title, journal_volume):
                # Emit the visible pubnote with the letter moved to the
                # volume suffix, plus a hidden pubnote with it prefixed.
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = journal_volume + match.group('letter')
                result.append(_publication_info)
                _publication_info = copy.deepcopy(publication_info)
                _publication_info['hidden'] = True
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = match.group('letter') + journal_volume
            elif match and journal_title not in _JOURNALS_ALREADY_ENDING_WITH_A_LETTER:
                _publication_info['journal_title'] = match.group('title')
                _publication_info['journal_volume'] = match.group('letter') + journal_volume
        result.append(_publication_info)
    return result
Convert back a ``publication_info`` value from the new format to the old. Does the inverse transformation of :func:`convert_old_publication_info_to_new`, to be used whenever we are sending back records from Labs to Legacy. Args: publication_infos: a ``publication_info`` in the new format. Returns: list(dict): a ``publication_info`` in the old format.
juraj-google-style
def _ParseKeysFromFindSpecs(self, parser_mediator, win_registry, find_specs):
    """Parses the Windows Registry keys matched by the given find specs.

    Args:
        parser_mediator (ParserMediator): parser mediator.
        win_registry (dfwinreg.WinRegistryKey): root Windows Registry key.
        find_specs (dfwinreg.FindSpecs): keys to search for.
    """
    key_searcher = dfwinreg_registry_searcher.WinRegistrySearcher(win_registry)
    for key_path in key_searcher.Find(find_specs=find_specs):
        # Stop early when the mediator signals an abort.
        if parser_mediator.abort:
            break
        self._ParseKey(parser_mediator, key_searcher.GetKeyByPath(key_path))
Parses the Registry keys from FindSpecs. Args: parser_mediator (ParserMediator): parser mediator. win_registry (dfwinreg.WinRegistryKey): root Windows Registry key. find_specs (dfwinreg.FindSpecs): Keys to search for.
juraj-google-style
def init(deb1, deb2=False):
    """Set the module-level DEBUG and DEBUGALL switches.

    Lets other modules enable debug output so that their calls to dprint
    or dprintx produce output.

    Args:
        deb1 (bool): value to assign to DEBUG.
        deb2 (bool): optional value to assign to DEBUGALL; defaults to
            False.
    """
    global DEBUG, DEBUGALL
    DEBUGALL = deb2
    DEBUG = deb1
Initialize DEBUG and DEBUGALL.

Allows other modules to set DEBUG and DEBUGALL, so their calls to dprint
or dprintx generate output.

Args:
    deb1 (bool): value of DEBUG to set

    deb2 (bool): optional - value of DEBUGALL to set, defaults to
        False.
juraj-google-style
def _instance_transform(fqdn, o, *args, **kwargs):
    """Apply the instance method named by `fqdn` to object `o`.

    Thin wrapper around `_package_transform` with `start=0`.

    Args:
        fqdn (str): fully-qualified domain name of the object.
        o: object to apply the instance method to.
    """
    return _package_transform(o, fqdn, *args, start=0, **kwargs)
Applies an instance method with name `fqdn` to `o`. Args: fqdn (str): fully-qualified domain name of the object. o: object to apply instance method to.
juraj-google-style
def _forward_log_det_jacobian(self, x): raise NotImplementedError('forward_log_det_jacobian not implemented.')
Subclass implementation of `forward_log_det_jacobian` public function. In particular, this method differs from the public function, in that it does not take `event_ndims`. Thus, this implements the minimal Jacobian determinant calculation (i.e. over `forward_min_event_ndims`). Args: x: `Tensor`. The input to the "forward_log_det_jacobian" evaluation. Returns: forward_log_det_jacobian: `Tensor`, if this bijector is injective. If not injective, returns the k-tuple containing jacobians for the unique `k` points `(x1, ..., xk)` such that `g(xi) = y`.
github-repos
def get_url_distribution(self, params=None):
    """Retrieves a live feed with the latest URLs submitted to VT.

    Args:
        params: optional dict of query-argument names and values.

    Returns:
        A dict with the VT report, keyed as produced by
        `_extract_response_chunks`.
    """
    query = params or {}
    names = list(query.keys())
    values = list(query.values())
    chunks = self._request_reports(names, values, 'url/distribution')
    responses = {}
    self._extract_response_chunks(responses, chunks, 'virustotal-url-distribution')
    return responses
Retrieves a live feed with the latest URLs submitted to VT.

Args:
    params: a dictionary with name and value for optional arguments
Returns:
    A dict with the VT report.
codesearchnet
def _generate_fix_length_rpc_response(response_length, template='{"id": 0, "result": "%s", "error": null, "callback": null}'): result_length = response_length - (len(template) - 2) if result_length < 0: raise ValueError(f'The response_length should be no smaller than template_length + 2. Got response_length {response_length}, template_length {len(template)}.') chars = string.ascii_letters + string.digits return template % ''.join((random.choice(chars) for _ in range(result_length)))
Generates an RPC response string with specified length. This function generates a random string and formats the template with the generated random string to get the response string. This function formats the template with printf style string formatting. Args: response_length: int, the length of the response string to generate. template: str, the template used for generating the response string. Returns: The generated response string. Raises: ValueError: if the specified length is too small to generate a response.
github-repos
def switch_opt(default, shortname, help_msg):
    """Define a switchable ConfOpt.

    Creates a boolean option that can be toggled on the CLI by prepending
    + or - to its name (+opt / -opt).

    Args:
        default (bool): the default value of the switch option.
        shortname (str): short name of the option; None disables it.
        help_msg (str): short description of the option.

    Returns:
        ConfOpt: a configuration option with the given properties.
    """
    return ConfOpt(bool(default), True, shortname,
                   {'action': internal.Switch}, True, help_msg, None)
Define a switchable ConfOpt.

This creates a boolean option. If you use it in your CLI, it can be
switched on and off by prepending + or - to its name: +opt / -opt.

Args:
    default (bool): the default value of the switch option.
    shortname (str): short name of the option, no shortname will be used if
        it is set to None.
    help_msg (str): short description of the option.

Returns:
    :class:`~loam.manager.ConfOpt`: a configuration option with the given
    properties.
codesearchnet
def _Aff4Size(aff4_obj):
    """Retrieves the total size in bytes of an AFF4 object.

    Args:
        aff4_obj: an AFF4 stream instance to retrieve the size for.

    Returns:
        int: number of bytes.

    Raises:
        TypeError: if `aff4_obj` is not an instance of an AFF4 stream.
    """
    if not isinstance(aff4_obj, aff4.AFF4Stream):
        raise TypeError(
            "Expected an instance of `%s` but received `%s`" % (
                aff4.AFF4Stream, type(aff4_obj)))
    return int(aff4_obj.Get(aff4_obj.Schema.SIZE))
Retrieves the total size in bytes of an AFF4 object. Args: aff4_obj: An AFF4 stream instance to retrieve size for. Returns: An integer representing number of bytes. Raises: TypeError: If `aff4_obj` is not an instance of AFF4 stream.
juraj-google-style
def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):
    """Plots overlapped spectra in wavelength intervals, one interval per PDF page.

    Note: overrides ``setup.fmt_xlabel``; leaves y-label and title blank.

    Args:
        ss: list of Spectrum objects.
        aint: wavelength interval covered by each plot/page.
        pdf_filename: name of the output PDF file.
        setup: PlotSpectrumSetup object.
    """
    import f311.explorer as ex
    # Global x/y extrema across all spectra; yspan drives the y margin below.
    xmin, xmax, ymin_, ymax, _, yspan = calc_max_min(ss)
    # setup.ymin, when given, overrides the computed minimum.
    ymin = ymin_ if setup.ymin is None else setup.ymin
    num_pages = int(math.ceil((xmax-xmin)/aint))
    a99.format_BLB()
    pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
    logger = a99.get_python_logger()
    for h in range(num_pages):
        fig = plt.figure()
        # Wavelength window for this page: [lambda0, lambda1].
        lambda0 = xmin+h*aint
        lambda1 = lambda0+aint
        logger.info("Printing page {0:d}/{1:d} ([{2:g}, {3:g}])".format(h+1, num_pages, lambda0, lambda1))
        # Overlay every spectrum, cut to the current window.
        for i, s in enumerate(ss):
            s_cut = ex.cut_spectrum(s, lambda0, lambda1)
            ax = plt.gca()
            ax.plot(s_cut.x, s_cut.y, label=s.title)
        if setup.flag_xlabel and setup.fmt_xlabel:
            plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))
        # Pad axes by a fraction _T of the respective spans.
        xspan = lambda1-lambda0
        ax.set_xlim([lambda0 - xspan * _T, lambda1 + xspan * _T])
        ax.set_ylim([ymin - yspan * _T, ymax + yspan * _T])
        if setup.flag_legend:
            leg = plt.legend(loc=0)
            a99.format_legend(leg)
        plt.tight_layout()
        pdf.savefig(fig)
        plt.close()
    pdf.close()
    logger.info("File {0!s} successfully created.".format(pdf_filename))
Plots spectra, overlapped, in small wavelength intervals into a PDF file,
one interval per page of the PDF file.

Args:
    ss: list of Spectrum objects
    aint: wavelength interval for each plot
    pdf_filename: name of output file
    setup: PlotSpectrumSetup object

**Note** overrides setup.fmt_xlabel; leaves y-label and title blank
juraj-google-style
def fcoe_networks(self):
    """Gets the FcoeNetworks API client, creating it lazily on first use.

    Returns:
        FcoeNetworks: the cached client instance.
    """
    if self.__fcoe_networks:
        return self.__fcoe_networks
    self.__fcoe_networks = FcoeNetworks(self.__connection)
    return self.__fcoe_networks
Gets the FcoeNetworks API client. Returns: FcoeNetworks:
codesearchnet
def _should_record_summaries_internal(default_state):
    """Returns a boolean Tensor telling whether summaries should be recorded.

    The result is the logical AND of: a writer being set, the (possibly
    callable) `is_recording` condition, and the distribution-strategy
    recording condition. When `is_recording` is None, `default_state` is
    used in its place.

    Args:
        default_state: bool, the summary behavior used when a writer is set
            but `is_recording` resolves to None.
    """
    # No writer configured: never record.
    if _summary_state.writer is None:
        return constant_op.constant(False)
    if not callable(_summary_state.is_recording):
        # Non-callable condition: if it is statically known to be False,
        # short-circuit without building the logical_and graph below.
        static_cond = tensor_util.constant_value(_summary_state.is_recording)
        if static_cond is not None and (not static_cond):
            return constant_op.constant(False)
    # Conditions may be callables (resolved lazily) or plain values.
    resolve = lambda x: x() if callable(x) else x
    cond_distributed = resolve(_summary_state.is_recording_distribution_strategy)
    cond = resolve(_summary_state.is_recording)
    if cond is None:
        cond = default_state
    return math_ops.logical_and(cond_distributed, cond)
Returns boolean Tensor if summaries should/shouldn't be recorded. Now the summary condition is decided by logical "and" of below conditions: First, summary writer must be set. Given this constraint is met, ctx.summary_recording and ctx.summary_recording_distribution_strategy. The former one is usually set by user, and the latter one is controlled by DistributionStrategy (tf.distribute.ReplicaContext). Args: default_state: can be True or False. The default summary behavior when summary writer is set and the user does not specify ctx.summary_recording and ctx.summary_recording_distribution_strategy is True.
github-repos
def parse(self, filepath, content):
    """Parse settings content as JSON.

    Args:
        filepath (str): path of the settings file, used in error messages.
        content (str): settings content read from the file.

    Returns:
        dict: parsed setting elements.

    Raises:
        SettingsBackendError: if `content` is not valid JSON.
    """
    try:
        return json.loads(content)
    except ValueError:
        raise SettingsBackendError(
            'No JSON object could be decoded from file: {}'.format(filepath))
Parse opened settings content using JSON parser. Args: filepath (str): Settings object, depends from backend content (str): Settings content from opened file, depends from backend. Raises: boussole.exceptions.SettingsBackendError: If parser can not decode a valid JSON object. Returns: dict: Dictionnary containing parsed setting elements.
codesearchnet
def random_indexes(max_index, subset_size=None, seed=None, rng=None):
    """Return random unrepeated indices in [0, max_index).

    Args:
        max_index: exclusive upper bound of the index range.
        subset_size: optional number of indices to keep (default: all).
        seed: optional seed used when `rng` is not given.
        rng (RandomState): optional random number generator.

    Returns:
        ndarray: shuffled (and possibly truncated) index array.
    """
    generator = ensure_rng(seed if rng is None else rng)
    indexes = np.arange(0, max_index)
    generator.shuffle(indexes)
    if subset_size is None:
        return indexes
    return indexes[:min(subset_size, max_index)]
Random unrepeated indices.

Args:
    max_index (?):
    subset_size (None): (default = None)
    seed (None): (default = None)
    rng (RandomState): random number generator (default = None)

Returns:
    ?: subst

CommandLine:
    python -m utool.util_numpy --exec-random_indexes

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_numpy import *  # NOQA
    >>> max_index = 10
    >>> subset_size = None
    >>> seed = None
    >>> rng = np.random.RandomState(0)
    >>> subst = random_indexes(max_index, subset_size, seed, rng)
    >>> result = ('subst = %s' % (str(subst),))
    >>> print(result)
codesearchnet
def build_rectangle_dict(self, north, west, south, east,
                         stroke_color='#FF0000', stroke_opacity=0.8,
                         stroke_weight=2, fill_color='#FF0000',
                         fill_opacity=0.3):
    """Build a dictionary mirroring the javascript Rectangle class options.

    A default drawing configuration is used when only the rectangle bounds
    are given, but each styling parameter can also be set individually.

    NOTE(review): the extracted source had a truncated signature (the
    default hex-color literals starting with '#' were lost); the defaults
    here follow the Google Maps API examples — confirm against upstream.

    Args:
        north (float): the north latitude bound.
        west (float): the west longitude bound.
        south (float): the south latitude bound.
        east (float): the east longitude bound.
        stroke_color (str): border color, hexadecimal notation.
        stroke_opacity (float): border opacity; 0 means transparent.
        stroke_weight (int): border width in pixels.
        fill_color (str): fill color, hexadecimal notation.
        fill_opacity (float): fill opacity.

    Returns:
        dict: Rectangle options with a nested 'bounds' mapping.
    """
    rectangle = {'stroke_color': stroke_color,
                 'stroke_opacity': stroke_opacity,
                 'stroke_weight': stroke_weight,
                 'fill_color': fill_color,
                 'fill_opacity': fill_opacity,
                 'bounds': {'north': north,
                            'west': west,
                            'south': south,
                            'east': east}}
    return rectangle
Set a dictionary with the javascript class Rectangle parameters This function sets a default drawing configuration if the user just pass the rectangle bounds, but also allows to set each parameter individually if the user wish so. Args: north (float): The north latitude bound west (float): The west longitude bound south (float): The south latitude bound east (float): The east longitude bound stroke_color (str): Sets the color of the rectangle border using hexadecimal color notation stroke_opacity (float): Sets the opacity of the rectangle border in percentage. If stroke_opacity = 0, the border is transparent stroke_weight (int): Sets the stroke girth in pixels. fill_color (str): Sets the color of the rectangle fill using hexadecimal color notation fill_opacity (float): Sets the opacity of the rectangle fill
codesearchnet
def CheckFile(self, path):
    """Validates the data type definitions in a file.

    Args:
        path (str): path of the definition file.

    Returns:
        bool: True if the file contains valid definitions.
    """
    print('Checking: {0:s}'.format(path))
    definitions_registry = registry.DataTypeDefinitionsRegistry()
    definitions_reader = reader.YAMLDataTypeDefinitionsFileReader()
    try:
        definitions_reader.ReadFile(definitions_registry, path)
        return True
    except KeyError as exception:
        logging.warning((
            'Unable to register data type definition in file: {0:s} with '
            'error: {1:s}').format(path, exception))
    except errors.FormatError as exception:
        logging.warning(
            'Unable to validate file: {0:s} with error: {1:s}'.format(
                path, exception))
    return False
Validates the definition in a file. Args: path (str): path of the definition file. Returns: bool: True if the file contains valid definitions.
juraj-google-style
def CreatePriceTableRow(header, description, final_url, price_in_micros,
                        currency_code, price_unit, final_mobile_url=None):
    """Helper building a single row of a price table.

    Args:
        header: str, the header text of this row.
        description: str, description of this row in the price table.
        final_url: str, the final URL after all cross-domain redirects.
        price_in_micros: int, the price in micros of the given currency.
        currency_code: str, the currency code being used.
        price_unit: str enum, the price unit for this row.
        final_mobile_url: str, the final mobile URL after all cross-domain
            redirects.

    Returns:
        A dict with the contents of the generated price table row.
    """
    row = {
        'xsi_type': 'PriceTableRow',
        'header': header,
        'description': description,
        'finalUrls': {'urls': [final_url]},
        'price': {
            'money': {'microAmount': price_in_micros},
            'currencyCode': currency_code,
        },
        'priceUnit': price_unit,
    }
    # Mobile URL is only attached when a truthy value was provided.
    if final_mobile_url:
        row['finalMobileUrls'] = {'urls': [final_mobile_url]}
    return row
Helper function to generate a single row of a price table. Args: header: A str containing the header text of this row. description: A str description of this row in the price table. final_url: A str containing the final URL after all cross domain redirects. price_in_micros: An int indicating the price of the given currency in micros. currency_code: A str indicating the currency code being used. price_unit: A str enum indicating the price unit for this row. final_mobile_url: A str containing the final mobile URL after all cross domain redirects. Returns: A dictionary containing the contents of the generated price table row.
codesearchnet
def is_method_call(func, types=(), methods=()):
    """Determines if a BoundMethod node represents a method call.

    Args:
        func (astroid.BoundMethod): the BoundMethod AST node to check.
        types (Optional[String]): optional caller type names to restrict
            the check to.
        methods (Optional[String]): optional method names to restrict the
            check to.

    Returns:
        bool: True if the node is a method call matching the given type
        and method names, False otherwise.
    """
    if not isinstance(func, astroid.BoundMethod):
        return False
    if not isinstance(func.bound, astroid.Instance):
        return False
    # Empty filter sequences mean "accept any".
    if types and func.bound.name not in types:
        return False
    if methods and func.name not in methods:
        return False
    return True
Determines if a BoundMethod node represents a method call. Args: func (astroid.BoundMethod): The BoundMethod AST node to check. types (Optional[String]): Optional sequence of caller type names to restrict check. methods (Optional[String]): Optional sequence of method names to restrict check. Returns: bool: true if the node represents a method call for the given type and method names, False otherwise.
juraj-google-style
def next_in_buffer(self, target_buffer: collections.deque[_T]) -> _T:
    """Returns the next item of the sub-stream backed by `target_buffer`.

    If `target_buffer` already holds items, pops and returns the leftmost.
    Otherwise drains `self._input_stream`, routing each item via
    `self._select_buffer`, until an item destined for `target_buffer`
    appears; items for other buffers are appended to them along the way.

    Args:
        target_buffer: the queue backing the sub-stream whose next element
            should be returned.

    Returns:
        The next element of the target sub-stream.

    Raises:
        TypeError: if `self._select_buffer` returns something other than a
            deque, a tuple of deques, or None.
        StopIteration: if both `target_buffer` and the input stream are
            exhausted.
    """
    if bool(target_buffer):
        return target_buffer.popleft()
    for item in self._input_stream:
        which_buffer = self._select_buffer(item)
        # None means the item belongs to no sub-stream: drop it.
        if which_buffer is None:
            continue
        # Identity check: the item is for our own buffer, return directly
        # instead of enqueueing it.
        if which_buffer is target_buffer:
            return item
        if isinstance(which_buffer, collections.deque):
            which_buffer.append(item)
            continue
        if isinstance(which_buffer, tuple):
            # Item fans out to several buffers; deliver to each, and if one
            # of them is ours, return the item (without enqueueing it here).
            return_item: bool = False
            for buffer in which_buffer:
                if buffer is target_buffer:
                    return_item = True
                else:
                    buffer.append(item)
            if return_item:
                return item
            continue
        # Unexpected selector return type: build a readable description of
        # the accepted types for the error message.
        T = TypeVar('T', bound=_T)
        ExpectedTypes = Union[collections.deque[T], tuple[collections.deque[T], ...], None]
        raise TypeError(f'`{self._select_buffer}` returned a value of type `{type(which_buffer).__name__}`; expected one of `{ExpectedTypes}`.')
    raise StopIteration()
Returns the next item in the sub-stream corresponding to `target_buffer`. Args: target_buffer: The queue backing the sub-stream whose next element should be returned. Returns: If `target_buffer` is nonempty, the next element of `target_buffer`. Otherwise, the next element of `self._input_stream` that would have been added to `target_buffer`. Raises: StopIteration: If `target_buffer` and `self._input_stream` are both empty.
github-repos
def get_inner_template(self, language, template_type, indentation, key, val):
    """Returns the formatted inner template for the given language.

    An 'iterable' template is used when the value needs further unpacking
    (e.g. list, tuple); a 'singular' template when the value is scalar
    (e.g. string, int, float).

    Args:
        language: str, language of the template ('php', 'javascript',
            'ocaml').
        template_type: str, 'iterable' or 'singular'.
        indentation: current indentation string for this nesting level.
        key: the array key.
        val: the array value(s), already rendered.

    Returns:
        str, the formatted array fragment for the language.
    """
    by_type = {
        'iterable': {
            'php': ('%s%s => array \n%s( \n%s%s),\n',
                    (indentation, key, indentation, val, indentation)),
            'javascript': ('%s%s : {\n%s\n%s},\n',
                           (indentation, key, val, indentation)),
            'ocaml': ('%s[| (%s, (\n%s\n%s))|] ;;\n',
                      (indentation, key, val, indentation)),
        },
        'singular': {
            'php': ('%s%s => %s, \n', (indentation, key, val)),
            'javascript': ('%s%s: %s,\n', (indentation, key, val)),
            'ocaml': ('%s(%s, %s);\n', (indentation, key, val)),
        },
    }
    pattern, values = by_type[template_type][language]
    return pattern % values
Gets the requested template for the given language. Args: language: string, the language of the template to look for. template_type: string, 'iterable' or 'singular'. An iterable template is needed when the value is an iterable and needs more unpacking, e.g. list, tuple. A singular template is needed when unpacking is complete and the value is singular, e.g. string, int, float. indentation: int, the indentation level. key: multiple types, the array key. val: multiple types, the array values Returns: string, template formatting for arrays by language.
codesearchnet
def multisorted(items, *keys):
    """Sort by multiple attributes.

    Args:
        items: an iterable series to be sorted.
        *keys: key objects (with `.func` and `.reverse` attributes) which
            extract key values from the items. The first key is the most
            significant, the last the least. With no keys, items are sorted
            in ascending natural order.

    Returns:
        A list of items sorted according to the keys.
    """
    if not keys:
        keys = [asc()]
    result = items
    # Sorting is stable, so applying keys from least to most significant
    # yields the multi-key ordering.
    for key in reversed(keys):
        result = sorted(result, key=key.func, reverse=key.reverse)
    return result
Sort by multiple attributes. Args: items: An iterable series to be sorted. *keys: Key objects which extract key values from the items. The first key will be the most significant, and the last key the least significant. If no key functions are provided, the items will be sorted in ascending natural order. Returns: A list of items sorted according to keys.
codesearchnet
def move(self, fromaccount, toaccount, amount, minconf=1):
    """Send coins between accounts in the same wallet.

    If the receiving account does not exist it is automatically created
    (but not automatically assigned an address).

    Args:
        fromaccount (str): origin account.
        toaccount (str): destination account.
        amount (str or Decimal): amount to send (quantized to
            ``self.quantum``, banker's rounding).
        minconf (int): require this many confirmations on the origin
            balance (default=1).

    Returns:
        bool: True if the coins were moved successfully, False otherwise.
    """
    quantized = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
    return self.rpc.call('move', fromaccount, toaccount,
                         float(str(quantized)), minconf)
Send coins between accounts in the same wallet. If the receiving account does not exist, it is automatically created (but not automatically assigned an address). Args: fromaccount (str): origin account toaccount (str): destination account amount (str or Decimal): amount to send (8 decimal points) minconf (int): ensure the account has a valid balance using this many confirmations (default=1) Returns: bool: True if the coins are moved successfully, False otherwise
codesearchnet
def _serialization_helper(self, ray_forking):
    """Builds the state dict used to pickle or fork this actor handle.

    Args:
        ray_forking: True if this is being called because Ray is forking
            the actor handle, False if it is being called by pickling.

    Returns:
        A dictionary of the information needed to reconstruct the handle.
    """
    if ray_forking:
        # Forks get a deterministic handle ID derived from this handle's
        # ID and its fork count.
        actor_handle_id = compute_actor_handle_id(
            self._ray_actor_handle_id, self._ray_actor_forks)
    else:
        actor_handle_id = self._ray_actor_handle_id
    # Snapshot of every field needed to rebuild an equivalent handle.
    state = {
        "actor_id": self._ray_actor_id,
        "actor_handle_id": actor_handle_id,
        "module_name": self._ray_module_name,
        "class_name": self._ray_class_name,
        "actor_cursor": self._ray_actor_cursor,
        "actor_method_names": self._ray_actor_method_names,
        "method_signatures": self._ray_method_signatures,
        "method_num_return_vals": self._ray_method_num_return_vals,
        "actor_creation_dummy_object_id": self.
        _ray_actor_creation_dummy_object_id,
        "actor_method_cpus": self._ray_actor_method_cpus,
        "actor_driver_id": self._ray_actor_driver_id,
        "ray_forking": ray_forking
    }
    if ray_forking:
        # Forking consumes a fork slot; the fork reuses the computed ID.
        self._ray_actor_forks += 1
        new_actor_handle_id = actor_handle_id
    else:
        # Pickling gets a fresh random handle ID, tracked so the backend
        # learns about the new handle.
        new_actor_handle_id = ActorHandleID(_random_string())
    self._ray_new_actor_handles.append(new_actor_handle_id)
    return state
This is defined in order to make pickling work. Args: ray_forking: True if this is being called because Ray is forking the actor handle and false if it is being called by pickling. Returns: A dictionary of the information needed to reconstruct the object.
juraj-google-style
def _format_value(value):
    """Returns `value` formatted so `parse_value` round-trips it, or None.

    Guarantees that when a string is returned:
    ``parse_value(_format_value(value)) == value``.

    Args:
        value: the value to format.

    Returns:
        A string representation of `value` when it is literally
        representable, or None.
    """
    literal = repr(value)
    try:
        round_trips = parse_value(literal) == value
    except SyntaxError:
        return None
    return literal if round_trips else None
Returns `value` in a format parseable by `parse_value`, or `None`.

Simply put, this function ensures that when it returns a string value, the
following will hold:

    parse_value(_format_value(value)) == value

Args:
    value: The value to format.

Returns:
    A string representation of `value` when `value` is literally
    representable, or `None`.
codesearchnet
def _CleanupUnregisteredFlagFromModuleDicts(self, flag_obj):
    """Removes an unregistered flag from all module -> [flags] dictionaries.

    If `flag_obj` is still registered under either its long or short name
    it is left in place.

    Args:
        flag_obj: a flag object.
    """
    if self._FlagIsRegistered(flag_obj):
        return
    module_dicts = (self.FlagsByModuleDict(),
                    self.FlagsByModuleIdDict(),
                    self.KeyFlagsByModuleDict())
    for module_dict in module_dicts:
        for module_flags in six.itervalues(module_dict):
            # The flag may appear multiple times in one list.
            while flag_obj in module_flags:
                module_flags.remove(flag_obj)
Cleanup unregistered flags from all module -> [flags] dictionaries. If flag_obj is registered under either its long name or short name, it won't be removed from the dictionaries. Args: flag_obj: A flag object.
juraj-google-style
def metar_to_speech(metar: str) -> str:
    """Creates speakable text from a METAR, suitable for TTS.

    Args:
        metar: METAR string to use.

    Returns:
        Speakable METAR text ("Altimeter" is spelled out as "Q N H").
    """
    LOGGER.info('getting speech text from METAR: %s', metar)
    parsed_data, parsed_units = emiz.avwx.metar.parse_in(metar)
    speech_text = str(emiz.avwx.speech.metar(parsed_data, parsed_units))
    speech_text = speech_text.replace('Altimeter', 'Q N H')
    LOGGER.debug('resulting speech: %s', speech_text)
    return speech_text
Creates a speakable text from a METAR Args: metar: METAR string to use Returns: speakable METAR for TTS
juraj-google-style
def _parse_octet(self, octet_str): if not self._DECIMAL_DIGITS.issuperset(octet_str): raise ValueError octet_int = int(octet_str, 10) if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1): raise ValueError return octet_int
Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255].
juraj-google-style
def ashrae_clear_sky(altitudes, month, sky_clearness=1):
    """Calculate solar flux for the original ASHRAE Clear Sky model.

    Args:
        altitudes: list of solar altitudes in degrees.
        month: int (1-12), the month the altitudes belong to.
        sky_clearness: factor multiplied into the model output to account
            for locations with predominantly clear, dry skies (or,
            conversely, hazy/humid conditions). Typically 0.95-1.05,
            rarely above 1.2. Default 1.

    Returns:
        Tuple of two lists, one value per input altitude:
        dir_norm_rad: direct normal radiation in W/m2.
        dif_horiz_rad: diffuse horizontal radiation in W/m2.
    """
    apparent_irradiance = [1202, 1187, 1164, 1130, 1106, 1092,
                           1093, 1107, 1136, 1166, 1190, 1204]
    extinction_coeff = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185,
                        0.186, 0.182, 0.165, 0.152, 0.144, 0.141]
    a_coeff = apparent_irradiance[month - 1]
    b_coeff = extinction_coeff[month - 1]
    dir_norm_rad = []
    dif_horiz_rad = []
    for alt in altitudes:
        direct = 0
        diffuse = 0
        if alt > 0:
            try:
                sin_alt = math.sin(math.radians(alt))
                dir_norm = a_coeff / math.exp(b_coeff / sin_alt)
                diff_horiz = (0.17 * dir_norm) * sin_alt
                direct = dir_norm * sky_clearness
                diffuse = diff_horiz * sky_clearness
            except OverflowError:
                # exp() overflows for tiny sin(alt); treat as no radiation.
                direct = 0
                diffuse = 0
        dir_norm_rad.append(direct)
        dif_horiz_rad.append(diffuse)
    return (dir_norm_rad, dif_horiz_rad)
Calculate solar flux for an original ASHRAE Clear Sky

Args:
    altitudes: A list of solar altitudes in degrees
    month: An integer (1-12) indicating the month the altitudes belong to
    sky_clearness: A factor that will be multiplied by the output of
        the model. This is to help account for locations where clear,
        dry skies predominate (e.g., at high elevations) or,
        conversely, where hazy and humid conditions are frequent. See
        Threlkeld and Jordan (1958) for recommended values. Typical
        values range from 0.95 to 1.05 and are usually never more
        than 1.2. Default is set to 1.0.

Returns:
    dir_norm_rad: A list of direct normal radiation values for each
        of the connected altitudes in W/m2.
    dif_horiz_rad: A list of diffuse horizontal radiation values for each
        of the connected altitudes in W/m2.
codesearchnet