code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def _create_interval_filter(interval):
    """Return a predicate that checks whether a number lies in ``interval``.

    Args:
        interval: A tensorboard.hparams.Interval protobuf describing the
            (closed) interval.

    Returns:
        A function taking a number (a float or an object of a type in
        six.integer_types) that returns True iff the number belongs to
        the closed interval.
    """
    def filter_fn(value):
        is_number = isinstance(value, six.integer_types) or isinstance(value, float)
        if not is_number:
            raise error.HParamsError(
                'Cannot use an interval filter for a value of type: %s, Value: %s'
                % (type(value), value))
        # Chained comparison is equivalent to the two-sided 'and' test.
        return interval.min_value <= value <= interval.max_value

    return filter_fn
Returns a function that checks whether a number belongs to an interval. Args: interval: A tensorboard.hparams.Interval protobuf describing the interval. Returns: A function taking a number (a float or an object of a type in six.integer_types) that returns True if the number belongs to (the closed) 'interval'.
juraj-google-style
def poly_energies(samples_like, poly):
    """Calculate the energies of samples under a higher-order polynomial.

    Args:
        samples_like: A collection of raw samples (an extension of NumPy's
            array_like structure; see :func:`.as_samples`).
        poly (dict): Polynomial as a dict of form {term: bias, ...}, where
            `term` is a tuple of variables and `bias` the associated bias.
            Variable labeling must match that of the sample(s).

    Returns:
        list/:obj:`numpy.ndarray`: The energy of the sample(s).
    """
    # Deprecated shim: delegate straight to BinaryPolynomial.
    warnings.warn(
        'poly_energies is deprecated and will be removed in dimod 0.9.0.In the future, use BinaryPolynomial.energies',
        DeprecationWarning)
    return BinaryPolynomial(poly, 'SPIN').energies(samples_like)
Calculates energy of samples from a higher order polynomial. Args: sample (samples_like): A collection of raw samples. `samples_like` is an extension of NumPy's array_like structure. See :func:`.as_samples`. poly (dict): Polynomial as a dict of form {term: bias, ...}, where `term` is a tuple of variables and `bias` the associated bias. Variable labeling/indexing of terms in poly dict must match that of the sample(s). Returns: list/:obj:`numpy.ndarray`: The energy of the sample(s).
codesearchnet
def patch(self, payload, append_to_arrays=True):
    """Patch the current record and update ``self.attrs`` with the result.

    Args:
        payload (dict): Fields to patch; JSON-formatted prior to sending
            the request.
        append_to_arrays (bool): When True, list-valued fields are merged
            (deduplicated) with the instance's existing values instead of
            replaced outright.

    Returns:
        dict: The JSON-formatted response.

    Raises:
        ValueError: If ``payload`` is not a dict.
        requests.exceptions.HTTPError: If the status code is not ok.
    """
    if not isinstance(payload, dict):
        raise ValueError("The 'payload' parameter must be provided a dictionary object.")
    payload = self.__class__.set_id_in_fkeys(payload)
    if append_to_arrays:
        for key, val in payload.items():
            if type(val) is list:
                # Merge with existing values and deduplicate.
                val.extend(getattr(self, key))
                payload[key] = list(set(val))
    payload = self.check_boolean_fields(payload)
    payload = self.__class__.add_model_name_to_payload(payload)
    self.debug_logger.debug("PATCHING payload {}".format(json.dumps(payload, indent=4)))
    res = requests.patch(url=self.record_url, json=payload, headers=HEADERS, verify=False)
    self.write_response_html_to_file(res, "bob.html")
    res.raise_for_status()
    json_res = res.json()
    self.debug_logger.debug("Success")
    self.attrs = json_res
    return json_res
Patches current record and updates the current instance's 'attrs' attribute to reflect the new changes. Args: payload - hash. This will be JSON-formatted prior to sending the request. Returns: `dict`. The JSON formatted response. Raises: `requests.exceptions.HTTPError`: The status code is not ok.
juraj-google-style
def suggest_charges(self, tolerance=0.1):
    """Suggest new defect charges to compute.

    Suggestions are based on proximity of known transition levels to the
    VBM and CBM.

    Args:
        tolerance (float): tolerance with respect to the VBM and CBM used
            when deciding whether to continue computing new charges.

    Returns:
        dict: mapping of defect type to a list of suggested charges.
    """
    recommendations = {}
    for def_type in self.defect_types:
        # Candidate window: one charge beyond the known stable extremes.
        candidates = np.arange(
            np.min(self.stable_charges[def_type]) - 1,
            np.max(self.stable_charges[def_type]) + 2)
        candidates = [q for q in candidates
                      if q not in self.finished_charges[def_type]]
        tl_map = self.transition_level_map[def_type]
        if len(tl_map.keys()):
            # Trim charges made irrelevant by transitions near the band edges.
            low_tl = min(tl_map.keys())
            if low_tl < tolerance:
                upper = max(tl_map[low_tl])
                candidates = [q for q in candidates if q < upper]
            high_tl = max(tl_map.keys())
            if high_tl > (self.band_gap - tolerance):
                lower = min(tl_map[high_tl])
                candidates = [q for q in candidates if q > lower]
        else:
            candidates = [q for q in candidates
                          if q not in self.stable_charges[def_type]]
        recommendations[def_type] = candidates
    return recommendations
Suggest possible charges for defects to compute based on proximity of known transitions from entries to VBM and CBM Args: tolerance (float): tolerance with respect to the VBM and CBM to continue to compute new charges
juraj-google-style
def top_k(x, k, sorted=True):
    """Finds the top-k values and their indices in a tensor.

    Args:
        x: Input tensor.
        k: An integer, the number of top elements to retrieve.
        sorted: Whether to sort the output in descending order.
            Defaults to `True`.

    Returns:
        A tuple of two tensors: the top-k values and their indices in `x`.
    """
    # Eager path first; fall back to a symbolic op for symbolic inputs.
    if not any_symbolic_tensors((x,)):
        return backend.math.top_k(x, k, sorted)
    return TopK(k, sorted).symbolic_call(x)
Finds the top-k values and their indices in a tensor. Args: x: Input tensor. k: An integer representing the number of top elements to retrieve. sorted: A boolean indicating whether to sort the output in descending order. Defaults to `True`. Returns: A tuple containing two tensors. The first tensor contains the top-k values, and the second tensor contains the indices of the top-k values in the input tensor. Example: >>> x = keras.ops.convert_to_tensor([5, 2, 7, 1, 9, 3]) >>> values, indices = top_k(x, k=3) >>> print(values) array([9 7 5], shape=(3,), dtype=int32) >>> print(indices) array([4 2 0], shape=(3,), dtype=int32)
github-repos
def make_hex_texture(grid_size=2, resolution=1):
    """Make a texture consisting of a grid of hexagons.

    Args:
        grid_size (int): the number of hexagons along each dimension of
            the grid.
        resolution (int): the number of midpoints along the line of each
            hexagon.

    Returns:
        A texture (as produced by ``fit_texture``).
    """
    root3_half = np.sqrt(3) / 2
    half = 0.5
    gx, gy = np.meshgrid(np.arange(grid_size), np.arange(grid_size))
    # Offset every other row to produce the hexagonal packing.
    gx = (gx * np.sqrt(3) + (gy % 2) * root3_half).flatten()
    gy = gy.flatten() * 1.5
    samples = 4 * resolution
    # Interpolated outline offsets around each hexagon center.
    x_offsets = np.interp(np.arange(samples), np.arange(4) * resolution,
                          [root3_half, 0.0, -root3_half, -root3_half])
    y_offsets = np.interp(np.arange(samples), np.arange(4) * resolution,
                          [-half, -1.0, -half, half])
    x_t = np.tile(gx, (samples, 1)) + x_offsets.reshape((samples, 1))
    y_t = np.tile(gy, (samples, 1)) + y_offsets.reshape((samples, 1))
    # A trailing row of NaNs separates the outline of each hexagon.
    x_t = np.vstack([x_t, np.tile(np.nan, (1, gx.size))])
    y_t = np.vstack([y_t, np.tile(np.nan, (1, gy.size))])
    return fit_texture((x_t.flatten('F'), y_t.flatten('F')))
Makes a texture consisting on a grid of hexagons. Args: grid_size (int): the number of hexagons along each dimension of the grid resolution (int): the number of midpoints along the line of each hexagon Returns: A texture.
codesearchnet
def run_ppm_server(pdb_file, outfile, force_rerun=False): if ssbio.utils.force_rerun(outfile=outfile, flag=force_rerun): url = 'http: files = {'userfile': open(pdb_file, 'rb')} r = requests.post(url, files=files) info = r.text with open(outfile, 'w') as f: f.write(info) else: with open(outfile, 'r') as f: info = f.read() t = info.replace('\n', '') tt = t.replace('\r', '') ttt = tt.replace('\t', '') soup = BeautifulSoup(ttt, 'lxml') tables = soup.find_all('table', attrs={'class': 'data'}) info_dict = {} table_index = 0 for t in tables: data_index = 0 for data in t.find_all('tr', attrs={'class': 'row1'}): data_list = list(data.strings) if (table_index == 0): info_dict['Depth/Hydrophobic Thickness'] = data_list[0] info_dict['deltaG_transfer'] = data_list[2] info_dict['Tilt Angle'] = data_list[3] if ((table_index == 1) and (data_index == 0)): info_dict['Embedded_residues_Tilt'] = data_list[0] info_dict['Embedded_residues'] = data_list[1] if ((table_index == 1) and (data_index == 1)): info_dict['Transmembrane_secondary_structure_segments_Tilt'] = data_list[0] info_dict['Transmembrane_secondary_structure_segments'] = data_list[1] if (table_index == 2): info_dict['Output Messages'] = data_list[1] if (table_index == 3): baseurl = 'http: a = data.find('a', href=True) download_url = (baseurl + a['href'].replace('./', '')) info_dict['Output file download link'] = download_url data_index += 1 table_index += 1 return info_dict
Run the PPM server from OPM to predict transmembrane residues. Args: pdb_file (str): Path to PDB file outfile (str): Path to output HTML results file force_rerun (bool): Flag to rerun PPM if HTML results file already exists Returns: dict: Dictionary of information from the PPM run, including a link to download the membrane protein file
codesearchnet
def s_add(self, path, function, method=None, type_cast=None):
    """Register a simple path.

    Args:
        path (str): Path to be matched; ``<name>`` segments become named
            capture groups matching anything except ``/``.
        function (function): Function to associate with this path.
        method (str, optional): Usually one of GET, POST, PUT, DELETE,
            though anything that fits the situation may be used.
            Defaults to None.
        type_cast (dict, optional): Mapping between a param name and one
            of `int`, `float` or `bool`; the captured value will be cast
            to the given type. Defaults to None.
    """
    with self._lock:
        try:
            pattern = '^/{}'.format(path.lstrip('/'))
            pattern = '{}/$'.format(pattern.rstrip('/'))
            # '<name>' -> '(?P<name>[^/]*)' named capture group.
            pattern = pattern.replace('<', '(?P<').replace('>', '>[^/]*)')
            self.add(pattern, function, method, type_cast)
        except Exception:
            # Best effort: registration errors are deliberately swallowed.
            pass
Function for registering a simple path. Args: path (str): Path to be matched. function (function): Function to associate with this path. method (str, optional): Usually used to define one of GET, POST, PUT, DELETE. You may use whatever fits your situation though. Defaults to None. type_cast (dict, optional): Mapping between the param name and one of `int`, `float` or `bool`. The value reflected by the provided param name will than be casted to the given type. Defaults to None.
codesearchnet
def parse_GSM(filepath, entry_name=None): if isinstance(filepath, str): with utils.smart_open(filepath) as f: soft = [] has_table = False for line in f: if (('_table_begin' in line) or (not line.startswith(('^', '!', ' has_table = True soft.append(line.rstrip()) else: soft = [] has_table = False for line in filepath: if (('_table_begin' in line) or (not line.startswith(('^', '!', ' has_table = True soft.append(line.rstrip()) if (entry_name is None): sets = [i for i in soft if i.startswith('^')] if (len(sets) > 1): raise Exception('More than one entry in GPL') if (len(sets) == 0): raise NoEntriesException('No entries found. Check the if accession is correct!') entry_name = parse_entry_name(sets[0]) columns = parse_columns(soft) metadata = parse_metadata(soft) if has_table: table_data = parse_table_data(soft) else: table_data = DataFrame() gsm = GSM(name=entry_name, table=table_data, metadata=metadata, columns=columns) return gsm
Parse GSM entry from SOFT file. Args: filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GSM entry or list of lines representing GSM from GSE file. entry_name (:obj:`str`, optional): Name of the entry. By default it is inferred from the data. Returns: :obj:`GEOparse.GSM`: A GSM object.
codesearchnet
def _should_catch_error(self, error, errors=()): caught_errors = (errors or (self.session.driver.invalid_element_errors + (ElementNotFound,))) return isinstance(error, caught_errors)
Returns whether to catch the given error. Args: error (Exception): The error to consider. errors (Tuple[Type[Exception], ...], optional): The exception types that should be caught. Defaults to :class:`ElementNotFound` plus any driver-specific invalid element errors. Returns: bool: Whether to catch the given error.
codesearchnet
def has_no_narrow_neurite_section(neuron, neurite_filter, radius_threshold=0.05, considered_section_min_length=50):
    """Check that the neuron has no narrow sections.

    Arguments:
        neuron(Neuron): The neuron object to test.
        neurite_filter(callable): filter the neurites by this callable.
        radius_threshold(float): sections whose mean radius is below this
            are considered narrow.
        considered_section_min_length(float): sections with length below
            this are not taken into account.

    Returns:
        CheckResult with result; result.info contains the narrow section
        ids and their first point.
    """
    def _is_narrow(section):
        return section.points[:, COLS.R].mean() < radius_threshold

    long_enough = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter)
                   if sec.length > considered_section_min_length)
    bad_ids = [(sec.id, sec.points[1]) for sec in long_enough if _is_narrow(sec)]
    return CheckResult(len(bad_ids) == 0, bad_ids)
Check if the neuron has dendrites with narrow sections Arguments: neuron(Neuron): The neuron object to test neurite_filter(callable): filter the neurites by this callable radius_threshold(float): radii below this are considered narrow considered_section_min_length(float): sections with length below this are not taken into account Returns: CheckResult with result. result.info contains the narrow section ids and their first point
juraj-google-style
def get_gitlab_project(self):
    """Fetch the GitLab project for ``self.git_short``.

    Returns:
        The GitLab project object (also stored on ``self.project``).

    Raises:
        foremast.exceptions.GitLabApiError: GitLab responded with a bad
            status code / did not return the project.
    """
    self.server = gitlab.Gitlab(GIT_URL, private_token=GITLAB_TOKEN, api_version=4)
    found = self.server.projects.get(self.git_short)
    if not found:
        raise GitLabApiError('Could not get Project "{0}" from GitLab API.'.format(self.git_short))
    self.project = found
    return self.project
Get numerical GitLab Project ID. Returns: int: Project ID number. Raises: foremast.exceptions.GitLabApiError: GitLab responded with bad status code.
codesearchnet
def _create_sagemaker_model(self, *args): if self.algorithm_arn: if self._created_model_package_name is None: model_package_name = self._create_sagemaker_model_package() self.sagemaker_session.wait_for_model_package(model_package_name) self._created_model_package_name = model_package_name model_package_name = self._created_model_package_name else: model_package_name = self.model_package_arn container_def = { 'ModelPackageName': model_package_name, } if self.env != {}: container_def['Environment'] = self.env model_package_short_name = model_package_name.split('/')[-1] enable_network_isolation = self.enable_network_isolation() self.name = self.name or utils.name_from_base(model_package_short_name) self.sagemaker_session.create_model(self.name, self.role, container_def, vpc_config=self.vpc_config, enable_network_isolation=enable_network_isolation)
Create a SageMaker Model Entity Args: *args: Arguments coming from the caller. This class does not require any so they are ignored.
juraj-google-style
def removeColumns(self, columnNames):
    """Remove one or multiple columns from the model.

    This method is also a slot.

    Args:
        columnNames (list): columns which shall be removed from the model.
    """
    current_model = self.tableView.model()
    if current_model is not None:
        current_model.removeDataFrameColumns(columnNames)
    # Un-toggle the button regardless of whether a model was present.
    self.removeColumnButton.setChecked(False)
Removes one or multiple columns from the model. This method is also a slot. Args: columnNames (list): A list of columns, which shall be removed from the model.
juraj-google-style
def query_op_traceback(self, op_name):
    """Query the traceback of an op.

    Args:
        op_name: Name of the op to query.

    Returns:
        The traceback of the op, as a list of 3-tuples:
            (filename, lineno, function_name)

    Raises:
        ValueError: If the op cannot be found in the tracebacks received
            by the debug server so far.
    """
    for proto in self._graph_tracebacks:
        for entry in proto.log_entries:
            if entry.name != op_name:
                continue
            return self._code_def_to_traceback(entry.code_def, proto.id_to_string)
    raise ValueError("Op '%s' does not exist in the tracebacks received by the debug server." % op_name)
Query the traceback of an op. Args: op_name: Name of the op to query. Returns: The traceback of the op, as a list of 3-tuples: (filename, lineno, function_name) Raises: ValueError: If the op cannot be found in the tracebacks received by the server so far.
github-repos
def CopyToDateTimeString(self):
    """Copies the RFC2579 date-time to a date and time string.

    Returns:
        str: date and time value formatted as "YYYY-MM-DD hh:mm:ss.#",
            or None if the number of seconds is missing.
    """
    if self._number_of_seconds is None:
        return None
    return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:01d}'.format(
        self.year, self.month, self.day_of_month,
        self.hours, self.minutes, self.seconds, self.deciseconds)
Copies the RFC2579 date-time to a date and time string. Returns: str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#" or None if the number of seconds is missing.
codesearchnet
def directional_emd(direction, d1, d2):
    """Compute the EMD between two repertoires for a given direction.

    The full EMD computation is used for cause repertoires; a fast
    analytic solution is used for effect repertoires.

    Args:
        direction (Direction): |CAUSE| or |EFFECT|.
        d1 (np.ndarray): The first repertoire.
        d2 (np.ndarray): The second repertoire.

    Returns:
        float: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|.

    Raises:
        ValueError: If ``direction`` is invalid.
    """
    if direction == Direction.CAUSE:
        distance = hamming_emd(d1, d2)
    elif direction == Direction.EFFECT:
        distance = effect_emd(d1, d2)
    else:
        # validate.direction raises for invalid directions.
        validate.direction(direction)
    return round(distance, config.PRECISION)
Compute the EMD between two repertoires for a given direction. The full EMD computation is used for cause repertoires. A fast analytic solution is used for effect repertoires. Args: direction (Direction): |CAUSE| or |EFFECT|. d1 (np.ndarray): The first repertoire. d2 (np.ndarray): The second repertoire. Returns: float: The EMD between ``d1`` and ``d2``, rounded to |PRECISION|. Raises: ValueError: If ``direction`` is invalid.
codesearchnet
def psnr_and_ssim(output, target):
    """Compute the PSNR and SSIM.

    Args:
        output: 4-D Tensor, shape=(num_frames, height, width, num_channels)
        target: 4-D Tensor, shape=(num_frames, height, width, num_channels)

    Returns:
        psnr: 1-D Tensor, shape=(num_frames,)
        ssim: 1-D Tensor, shape=(num_frames,)
    """
    output_int = tf.cast(output, dtype=tf.int32)
    target_int = tf.cast(target, dtype=tf.int32)
    return (tf.image.psnr(output_int, target_int, max_val=255),
            tf.image.ssim(output_int, target_int, max_val=255))
Compute the PSNR and SSIM. Args: output: 4-D Tensor, shape=(num_frames, height, width, num_channels) target: 4-D Tensor, shape=(num_frames, height, width, num_channels) Returns: psnr: 1-D Tensor, shape=(num_frames,) ssim: 1-D Tensor, shape=(num_frames,)
juraj-google-style
def get_parameter_dict(self, include_frozen=False):
    """Get an ordered dictionary of the parameters.

    Args:
        include_frozen (Optional[bool]): Should the frozen parameters be
            included in the returned value? (default: ``False``)
    """
    names = self.get_parameter_names(include_frozen=include_frozen)
    values = self.get_parameter_vector(include_frozen=include_frozen)
    return OrderedDict(zip(names, values))
Get an ordered dictionary of the parameters Args: include_frozen (Optional[bool]): Should the frozen parameters be included in the returned value? (default: ``False``)
codesearchnet
def explicit_pass(msg, extras=None):
    """Explicitly pass a test.

    The test is marked as passed regardless of any other error that
    happened in the test body (e.g. errors recorded with `expects`).
    A test without an uncaught exception passes implicitly, so this
    should be used scarcely.

    Args:
        msg: A string explaining the details of the passed test.
        extras: An optional field for extra information to be included in
            the test result.

    Raises:
        signals.TestPass: Mark a test as passed.
    """
    raise signals.TestPass(msg, extras)
Explicitly pass a test. This will pass the test explicitly regardless of any other error happened in the test body. E.g. even if errors have been recorded with `expects`, the test will still be marked pass if this is called. A test without uncaught exception will pass implicitly so this should be used scarcely. Args: msg: A string explaining the details of the passed test. extras: An optional field for extra information to be included in test result. Raises: signals.TestPass: Mark a test as passed.
github-repos
def constant_to_value(self, pyval, subst=None, node=None):
    """Like constant_to_var, but convert to an abstract.BaseValue.

    Results are memoized on (constant, type) — not on name — so that e.g.
    "1.0" and "1" convert to different constants while repeated conversions
    of the same constant share one abstract value.

    Args:
        pyval: The constant to convert.
        subst: The current type parameters.
        node: The current CFG node (for instances).

    Returns:
        The converted constant (an instance of BaseValue).
    """
    node = node or self.ctx.root_node
    # Tuples are keyed by their element types so differently-typed tuples
    # do not share a cache entry.
    if pyval.__class__ is tuple:
        type_key = tuple((type(v) for v in pyval))
    else:
        type_key = type(pyval)
    key = ('constant', pyval, type_key)
    if key in self._convert_cache:
        if self._convert_cache[key] is None:
            # A None cache entry means conversion of this constant is still
            # in progress, i.e. we hit a circular reference.
            self._convert_cache[key] = self.unsolvable
            if not self.ctx.recursion_allowed:
                name = getattr(pyval, 'name', None) or pyval.__class__.__name__
                self.ctx.errorlog.recursion_error(self.ctx.vm.frames, name)
        return self._convert_cache[key]
    else:
        # Mark conversion as in-progress so recursion is detectable.
        self._convert_cache[key] = None
        need_node = [False]

        def get_node():
            # Records that the conversion depended on the CFG node.
            need_node[0] = True
            return node

        recursive = isinstance(pyval, pytd.LateType) and pyval.recursive
        if recursive:
            context = self.ctx.allow_recursive_convert()
        else:
            context = contextlib.nullcontext()
        with context:
            try:
                value = self._constant_to_value(pyval, subst, get_node)
            except NotImplementedError:
                # Drop the in-progress marker so a later attempt can retry.
                del self._convert_cache[key]
                raise
        if not need_node[0] or node is self.ctx.root_node:
            if recursive:
                annot = abstract.LateAnnotation(pyval.name, self.ctx.vm.frames, self.ctx)
                annot.set_type(value)
                value = annot
            # Only cache values that do not depend on a non-root CFG node.
            self._convert_cache[key] = value
        return value
Like constant_to_var, but convert to an abstract.BaseValue. This also memoizes the results. We don't memoize on name, as builtin types like str or list might be reinitialized under different names (e.g. "param 1"), but we want the canonical name and type. We *do* memoize on the type as well, to make sure that e.g. "1.0" and "1" get converted to different constants. Memoization is an optimization, but an important one - mapping constants like "None" to the same AbstractValue greatly simplifies the cfg structures we're building. Args: pyval: The constant to convert. subst: The current type parameters. node: The current CFG node. (For instances) Returns: The converted constant. (Instance of BaseValue)
github-repos
def get_atten(self, idx=0):
    """Return the current attenuation of the attenuator at ``idx``.

    Args:
        idx: Zero-based index identifying a particular attenuator within
            the instrument.

    Returns:
        float: The current attenuation value.

    Raises:
        Error: The underlying telnet connection to the instrument is not
            open.
        IndexError: ``idx`` is out of range.
    """
    if not self.is_open:
        raise attenuator.Error('Connection to attenuator at %s is not open!' % self._telnet_client.host)
    if idx < 0 or idx + 1 > self.path_count:
        raise IndexError('Attenuator index out of range!', self.path_count, idx)
    # Single-path instruments use the bare query; multi-path ones are
    # addressed per channel (channels are 1-based).
    if self.path_count == 1:
        telnet_cmd = ':ATT?'
    else:
        telnet_cmd = 'CHAN:%s:ATT?' % (idx + 1)
    return float(self._telnet_client.cmd(telnet_cmd))
This function returns the current attenuation from an attenuator at a given index in the instrument. Args: idx: This zero-based index is the identifier for a particular attenuator in an instrument. Raises: Error: The underlying telnet connection to the instrument is not open. Returns: A float that is the current attenuation value.
github-repos
def set_room_name(self, room_id, name, timestamp=None):
    """Perform PUT /rooms/$room_id/state/m.room.name.

    Args:
        room_id (str): The room ID.
        name (str): The new room name.
        timestamp (int): Set origin_server_ts (for application services
            only).
    """
    return self.send_state_event(room_id, "m.room.name", {"name": name},
                                 timestamp=timestamp)
Perform PUT /rooms/$room_id/state/m.room.name Args: room_id (str): The room ID name (str): The new room name timestamp (int): Set origin_server_ts (For application services only)
juraj-google-style
def get_local_config_filepath(config_filepath, force_local=False):
    """Find the local filepath for a config.

    Args:
        config_filepath (str): path to config (abspath > relpath).
        force_local (bool): force return of the ``_local.cfg`` version.

    Returns:
        str: Path to the local config, or the given one if the local file
            does not exist.
    """
    # '<name>_local.cfg' next to the given config file.
    base = path.basename(config_filepath).split('.')[0]
    local_path = path.join(path.split(config_filepath)[0], base + '_local.cfg')
    if force_local or path.isfile(local_path):
        return local_path
    return config_filepath
helper for finding local filepath for config Args: config_filepath (str): path to local config abspath > relpath force_local (bool): force return of _local.cfg version Returns: str: Path to local config, or global if path DNE
juraj-google-style
def split(self, bitindex):
    """Split a promise collection into two promises: the 'Rest' and a 'Tail'.

    Same operation as the one on TDOPromise, except this works with a
    collection of promises and splits the appropriate one.

    Returns:
        The 'Rest' (a TDOPromiseCollection containing the first chunk of
        the original collection; None if it would have length 0) and the
        'Tail' (the remainder from bitindex onwards).
    """
    if (bitindex < 0):
        raise ValueError('bitindex must be larger or equal to 0.')
    if (bitindex == 0):
        # Nothing before the split point: no 'Rest'.
        return (None, self)
    lastend = 0
    split_promise = False
    # Locate which promise (if any) the split point falls inside.
    for (splitindex, p) in enumerate(self._promises):
        if (bitindex in range(lastend, p._bitstart)):
            # Split point lies in the gap before promise p: clean split.
            split_promise = False
            break
        if (bitindex in range(p._bitstart, p._bitend)):
            if ((bitindex - p._bitstart) == 0):
                # Split point is exactly at the start of p: clean split.
                split_promise = False
            else:
                # Split point falls inside p: p itself must be split.
                split_promise = True
            break
        lastend = p._bitend
    else:
        # bitindex beyond all promises; callers never request this.
        raise Exception('Should be impossible')
    processed_left = TDOPromiseCollection(self._chain)
    processed_right = TDOPromiseCollection(self._chain)
    if split_promise:
        # Split the straddling promise and distribute the halves.
        (left, right) = p.split((bitindex - p._bitstart))
        for i in range(splitindex):
            processed_left.add(self._promises[i], 0)
        processed_left.add(left, 0)
        processed_right.add(right, 0)
        # Promises after the split are rebased by -bitindex.
        for tmpprim in self._promises[(splitindex + 1):]:
            processed_right.add(tmpprim, (- bitindex))
        return (processed_left, processed_right)
    else:
        # Clean split between promises: partition at splitindex.
        for i in range(splitindex):
            processed_left.add(self._promises[i], 0)
        for i in range(splitindex, len(self._promises)):
            processed_right.add(self._promises[i], (- bitindex))
        return (processed_left, processed_right)
Split a promise into two promises. A tail bit, and the 'rest'. Same operation as the one on TDOPromise, except this works with a collection of promises and splits the appropriate one. Returns: The 'Rest' and the 'Tail'. The 'Rest' is TDOPromiseCollection containing the first chunk of the original TDOPromiseCollection. The 'Tail' is a single bit sub promise for the final bit in the operation If the 'Rest' would have a length of 0, None is returned
codesearchnet
def add_scales_bar(img, bbox):
    """Add a 1 km scales bar to the map.

    Calculates the resolution at the current latitude and inserts the
    corresponding scales bar on the map.

    Args:
        img (Image): Image object to which the scales bar will be added
            (modified in place).
        bbox (TileBB): boundaries of the map.
    """
    tile = TileCoordinate(bbox.min.zoom, bbox.min.x, bbox.min.y)
    meters_per_pixel = tile.resolution()
    # Number of pixels corresponding to one kilometre.
    bar_px = int(1000 * (1 / meters_per_pixel))
    black = (0, 0, 0)
    start = (100, img.size[1] - 100)
    end = (start[0] + bar_px, start[1])
    left_whisker = [start[0], start[1] - 15, start[0], start[1] + 15]
    right_whisker = [end[0], end[1] - 15, end[0], end[1] + 15]
    draw = ImageDraw.Draw(img)
    draw.line([start, end], fill=black, width=5)
    draw.line(left_whisker, fill=black, width=2)
    draw.line(right_whisker, fill=black, width=2)
    draw.text((start[0] + 10, start[1] + 10), fill=black, text="1 km")
    del draw
Add a scales bar to the map. Calculates the resolution at the current latitude and inserts the corresponding scales bar on the map. Args: img (Image): Image object to which the scales bar will be added. bbox (TileBB): boundaries of the map
juraj-google-style
def _ParsePage(self, parser_mediator, file_offset, page_data): page_header_map = self._GetDataTypeMap('binarycookies_page_header') try: page_header = self._ReadStructureFromByteStream( page_data, file_offset, page_header_map) except (ValueError, errors.ParseError) as exception: raise errors.ParseError(( 'Unable to map page header data at offset: 0x{0:08x} with error: ' '{1!s}').format(file_offset, exception)) for record_offset in page_header.offsets: if parser_mediator.abort: break self._ParseRecord(parser_mediator, page_data, record_offset)
Parses a page. Args: parser_mediator (ParserMediator): parser mediator. file_offset (int): offset of the data relative from the start of the file-like object. page_data (bytes): page data. Raises: ParseError: when the page cannot be parsed.
juraj-google-style
def ReadArtifactDefinitionValues(self, artifact_definition_values):
    """Reads an artifact definition from a dictionary.

    Args:
        artifact_definition_values (dict[str, object]): artifact
            definition values.

    Returns:
        ArtifactDefinition: an artifact definition.

    Raises:
        FormatError: if the format of the artifact definition is not set
            or incorrect.
    """
    values = artifact_definition_values
    if not values:
        raise errors.FormatError('Missing artifact definition values.')
    undefined_keys = set(values) - definitions.TOP_LEVEL_KEYS
    if undefined_keys:
        raise errors.FormatError(
            'Undefined keys: {0:s}'.format(', '.join(undefined_keys)))
    name = values.get('name', None)
    if not name:
        raise errors.FormatError('Invalid artifact definition missing name.')
    description = values.get('doc', None)
    if not description:
        raise errors.FormatError(
            'Invalid artifact definition: {0:s} missing description.'.format(name))
    artifact_definition = artifact.ArtifactDefinition(name, description=description)
    # 'collectors' is a legacy key and no longer supported.
    if values.get('collectors', []):
        raise errors.FormatError(
            'Invalid artifact definition: {0:s} still uses collectors.'.format(name))
    urls = values.get('urls', [])
    if not isinstance(urls, list):
        raise errors.FormatError(
            'Invalid artifact definition: {0:s} urls is not a list.'.format(name))
    artifact_definition.conditions = values.get('conditions', [])
    artifact_definition.provides = values.get('provides', [])
    self._ReadLabels(values, artifact_definition, name)
    self._ReadSupportedOS(values, artifact_definition, name)
    artifact_definition.urls = urls
    self._ReadSources(values, artifact_definition, name)
    return artifact_definition
Reads an artifact definition from a dictionary. Args: artifact_definition_values (dict[str, object]): artifact definition values. Returns: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the artifact definition is not set or incorrect.
codesearchnet
def get_operation_mtf_dimension_names(self, operation_name):
    """The Mesh TensorFlow dimensions associated with an operation.

    Args:
        operation_name: a string, name of an operation in the graph.

    Returns:
        a set(string), the names of Mesh TensorFlow dimensions.
    """
    dimension_names = set()
    # Collect dimensions from both the input and output tensors.
    for tensor_names in (self.get_operation_input_names(operation_name),
                         self.get_operation_output_names(operation_name)):
        for tensor_name in tensor_names:
            dimension_names.update(
                self.get_tensor_mtf_dimension_names(tensor_name))
    return dimension_names
The Mesh TensorFlow dimensions associated with an operation. Args: operation_name: a string, name of an operation in the graph. Returns: a set(string), the names of Mesh TensorFlow dimensions.
juraj-google-style
def forward(self, grid, interpolate_pos_encoding: bool = False):
    """Embed processed video frames.

    Args:
        grid: Array of shape (batch_size, num_frames, height, width,
            num_channels) containing processed frames generated by the Tvp
            image preprocessor. Note, num_frames can be 1.
        interpolate_pos_encoding (bool, optional, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.

    Returns:
        embeddings: The embedding of grid with size
            (batch_size, height * width, num_channels).
    """
    batch_size, _, _, _, num_channels = grid.shape
    # Collapse the temporal dimension by averaging over frames.
    frame_mean = grid.mean(1)
    frame_mean = self.add_2d_positional_embeddings(
        frame_mean, interpolate_pos_encoding=interpolate_pos_encoding)
    visual_tokens = frame_mean.view(batch_size, -1, num_channels)
    # All visual tokens share token type 0.
    token_type_ids = torch.zeros(
        visual_tokens.shape[:-1], dtype=torch.long, device=visual_tokens.device)
    embeddings = visual_tokens + self.token_type_embeddings(token_type_ids)
    embeddings = self.layer_norm(embeddings)
    return self.dropout(embeddings)
Args: grid: Array of shape (batch_size, num_frames, height, width, num_channels). It contains processed frames extracted from videos, and is generated by Tvp image preprocessor. Note, num_frames can be 1 interpolate_pos_encoding: (bool, *optional*, defaults to `False`): Whether to interpolate the pre-trained position encodings. Returns: embeddings: The embedding of grid with size (batch_size, height*width, num_channels)
github-repos
def _event_size(event_shape, name=None):
    """Computes the number of elements in a tensor with shape `event_shape`.

    Args:
        event_shape: A tensor shape.
        name: The name to use for the tensor op to compute the number of
            elements (if such an op needs to be created).

    Returns:
        event_size: The number of elements in `event_shape`. Returns a
            numpy int when the number of elements can be computed
            immediately. Otherwise, returns a scalar tensor.
    """
    with tf.compat.v1.name_scope(name, 'event_size', [event_shape]):
        event_shape = tf.convert_to_tensor(
            value=event_shape, dtype=tf.int32, name='event_shape')
        static_shape = tf.get_static_value(event_shape)
        if static_shape is None:
            # Shape unknown at graph-construction time; compute at runtime.
            return tf.reduce_prod(input_tensor=event_shape)
        return np.prod(static_shape)
Computes the number of elements in a tensor with shape `event_shape`. Args: event_shape: A tensor shape. name: The name to use for the tensor op to compute the number of elements (if such an op needs to be created). Returns: event_size: The number of elements in `tensor_shape`. Returns a numpy int when the number of elements can be computed immediately. Otherwise, returns a scalar tensor.
codesearchnet
def _describe_bitmask( bits: int, table: Dict[Any, str], default: str = "0" ) -> str: result = [] for bit, name in table.items(): if bit & bits: result.append(name) if not result: return default return "|".join(result)
Returns a bitmask in human readable form. This is a private function, used internally. Args: bits (int): The bitmask to be represented. table (Dict[Any,str]): A reverse lookup table. default (Any): A default return value when bits is 0. Returns: str: A printable version of the bits variable.
juraj-google-style
def download_from_url(path, url):
    """Download content from a url.

    Args:
        path: string directory where file will be downloaded.
        url: string url.

    Returns:
        Full path to downloaded file.
    """
    filename = url.split('/')[-1]
    found_file = find_file(path, filename, max_depth=0)
    if found_file is not None:
        tf.logging.info('Already downloaded: %s (at %s).' % (url, found_file))
        return found_file
    filename = os.path.join(path, filename)
    tf.logging.info('Downloading from %s to %s.' % (url, filename))
    # Download to a temporary name, then rename once complete so partial
    # downloads are never mistaken for finished files.
    inprogress_filepath = filename + '.incomplete'
    inprogress_filepath, _ = urllib.request.urlretrieve(
        url, inprogress_filepath, reporthook=download_report_hook)
    print()
    tf.gfile.Rename(inprogress_filepath, filename)
    return filename
Download content from a url. Args: path: string directory where file will be downloaded url: string url Returns: Full path to downloaded file
codesearchnet
def from_fortran_src(cls, fortran_src: str, dir: str = '.'):
    """Create a GroundedFunctionNetwork from raw Fortran source code.

    Args:
        fortran_src: A string with Fortran source code.
        dir: (Optional) - the directory in which the temporary Fortran
            file will be created (make sure you have write permission!).
            Defaults to the current directory.

    Returns:
        A GroundedFunctionNetwork instance.
    """
    import tempfile
    # Write the source to a named temp file so the file-based loader can
    # pick it up; the file is removed afterwards.
    tmp = tempfile.NamedTemporaryFile('w+t', delete=False, dir=dir)
    tmp.writelines(fortran_src)
    tmp.close()
    network = cls.from_fortran_file(tmp.name, dir)
    os.remove(tmp.name)
    return network
Create a GroundedFunctionNetwork instance from a string with raw Fortran code. Args: fortran_src: A string with Fortran source code. dir: (Optional) - the directory in which the temporary Fortran file will be created (make sure you have write permission!) Defaults to the current directory. Returns: A GroundedFunctionNetwork instance
codesearchnet
def read(self, offset, size):
    """Reads a byte string from the image object at the specified offset.

    Args:
        offset (int): offset where to start reading.
        size (int): number of bytes to read.

    Returns:
        bytes: data read.
    """
    file_object = self._file_object
    file_object.seek(offset, os.SEEK_SET)
    return file_object.read(size)
Reads a byte string from the image object at the specified offset. Args: offset (int): offset where to start reading. size (int): number of bytes to read. Returns: bytes: data read.
codesearchnet
def tz_convert(dt, to_tz, from_tz=None) -> str:
    """Convert a date/time between time zones.

    Args:
        dt: date time.
        to_tz: target tz.
        from_tz: source tz - will be ignored if tz from dt is given.

    Returns:
        str: date & time.
    """
    logger = logs.get_logger(tz_convert, level='info')
    source_tz = get_tz(from_tz)
    target_tz = get_tz(to_tz)
    # Localize first (no-op if dt already carries tz info), then convert.
    localized = pd.Timestamp(str(dt), tz=source_tz)
    logger.debug(f'converting {str(localized)} from {source_tz} to {target_tz} ...')
    return str(pd.Timestamp(str(localized), tz=target_tz))
Convert to tz Args: dt: date time to_tz: to tz from_tz: from tz - will be ignored if tz from dt is given Returns: str: date & time Examples: >>> dt_1 = pd.Timestamp('2018-09-10 16:00', tz='Asia/Hong_Kong') >>> tz_convert(dt_1, to_tz='NY') '2018-09-10 04:00:00-04:00' >>> dt_2 = pd.Timestamp('2018-01-10 16:00') >>> tz_convert(dt_2, to_tz='HK', from_tz='NY') '2018-01-11 05:00:00+08:00' >>> dt_3 = '2018-09-10 15:00' >>> tz_convert(dt_3, to_tz='NY', from_tz='JP') '2018-09-10 02:00:00-04:00'
codesearchnet
def _dqdv_split_frames(cell, tidy=False, **kwargs):
    """Return dqdv data as pandas.DataFrames for all cycles.

    Args:
        cell (CellpyData-object).
        tidy (bool): return in wide format if False (default), long
            (tidy) format if True.

    Returns:
        (charge_ica_frame, discharge_ica_frame): pandas.DataFrames whose
        first column is voltage ('v') and whose following columns are the
        incremental capacity for each cycle (multi-indexed, where cycle
        number is on the top level).

    Example:
        >>> from cellpy.utils import ica
        >>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell)
        >>> charge_ica_df.plot(x=("voltage", "v"))
    """
    # Charge and discharge are processed identically; share one helper
    # instead of duplicating the pipeline.
    ica_charge_df = _dqdv_direction_frame(cell, 'charge', **kwargs)
    ica_discharge_df = _dqdv_direction_frame(cell, 'discharge', **kwargs)
    if tidy:
        ica_charge_df = ica_charge_df.melt(
            'voltage', var_name='cycle', value_name='dq', col_level=0)
        ica_discharge_df = ica_discharge_df.melt(
            'voltage', var_name='cycle', value_name='dq', col_level=0)
    return (ica_charge_df, ica_discharge_df)


def _dqdv_direction_frame(cell, direction, **kwargs):
    """Build the wide-format ICA frame for one direction ('charge'/'discharge')."""
    dfs, cycles, minimum_v, maximum_v = _collect_capacity_curves(
        cell, direction=direction)
    ica_dfs = _make_ica_charge_curves(dfs, cycles, minimum_v, maximum_v, **kwargs)
    frame = pd.concat(ica_dfs, axis=1, keys=[k.name for k in ica_dfs])
    frame.columns.names = ['cycle', 'value']
    return frame
Returns dqdv data as pandas.DataFrames for all cycles. Args: cell (CellpyData-object). tidy (bool): return in wide format if False (default), long (tidy) format if True. Returns: (charge_ica_frame, discharge_ica_frame) where the frames are pandas.DataFrames where the first column is voltage ('v') and the following columns are the incremental capcaity for each cycle (multi-indexed, where cycle number is on the top level). Example: >>> from cellpy.utils import ica >>> charge_ica_df, dcharge_ica_df = ica.ica_frames(my_cell) >>> charge_ica_df.plot(x=("voltage", "v"))
codesearchnet
def load_intent(self, name, file_name, reload_cache=False):
    """Load an intent, optionally checking the cache first.

    Args:
        name (str): The associated name of the intent.
        file_name (str): The location of the intent file.
        reload_cache (bool): Whether to refresh all of cache.
    """
    # Cache-aware loading is delegated to the intent container.
    self.intents.load(name, file_name, reload_cache)
    with open(file_name) as intent_file:
        lines = intent_file.read().split('\n')
    # Mirror the raw intent lines into the exact-match (padaos) parser.
    self.padaos.add_intent(name, lines)
    # A newly loaded intent invalidates any previously trained model.
    self.must_train = True
Loads an intent, optionally checking the cache first Args: name (str): The associated name of the intent file_name (str): The location of the intent file reload_cache (bool): Whether to refresh all of cache
codesearchnet
def copy(self, name=None):
    """Return a shallow copy of the instruction.

    Args:
        name (str): name to be given to the copied circuit; if None the
            name stays the same.

    Returns:
        Instruction: a shallow copy of the current instruction, with the
        name updated if it was provided.
    """
    duplicate = copy.copy(self)
    # Only a truthy name replaces the existing one (empty string is kept).
    if name:
        duplicate.name = name
    return duplicate
shallow copy of the instruction. Args: name (str): name to be given to the copied circuit, if None then the name stays the same Returns: Instruction: a shallow copy of the current instruction, with the name updated if it was provided
juraj-google-style
def add_connection(self, connection_id, internal_id, context):
    """Register an already created connection.

    Used to register devices connected before starting the device adapter.
    The call is a no-op when either identifier is already tracked in a
    non-disconnected state.

    Args:
        connection_id (int): The external connection id.
        internal_id (string): An internal identifier for the connection.
        context (dict): Additional information to associate with this
            context.
    """
    # Refuse to overwrite a live entry for either identifier.
    for existing_id in (connection_id, internal_id):
        if self._get_connection_state(existing_id) != self.Disconnected:
            return
    record = {
        'state': self.Idle,
        'microstate': None,
        'connection_id': connection_id,
        'internal_id': internal_id,
        'context': context,
    }
    # Both lookup tables share the same mutable record.
    self._connections[connection_id] = record
    self._int_connections[internal_id] = record
Add an already created connection. Used to register devices connected before starting the device adapter. Args: connection_id (int): The external connection id internal_id (string): An internal identifier for the connection context (dict): Additional information to associate with this context
juraj-google-style
def lint(exclude, skip_untracked, commit_only):
    """Lint python files.

    Args:
        exclude (list[str]): A list of glob string patterns to test
            against. If the file/path matches any of those patterns, it
            will be filtered out.
        skip_untracked (bool): If set to **True** it will skip all files
            not tracked by git.
        commit_only (bool): Only lint files that are staged for commit.
    """
    # Merge CLI excludes with the project-configured ones.
    patterns = list(exclude) + conf.get('lint.exclude', [])
    runner = LintRunner(patterns, skip_untracked, commit_only)
    # A failed lint run terminates the process with a non-zero status.
    if not runner.run():
        exit(1)
Lint python files. Args: exclude (list[str]): A list of glob string patterns to test against. If the file/path matches any of those patters, it will be filtered out. skip_untracked (bool): If set to **True** it will skip all files not tracked by git. commit_only (bool): Only lint files that are staged for commit.
juraj-google-style
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the Get request payload and decode it into
    its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.
    """
    # Let the base payload consume its header and establish self.length.
    super(GetRequestPayload, self).read(
        input_stream,
        kmip_version=kmip_version
    )
    # Work on a bounded sub-stream so an oversized encoding can be
    # detected at the end.
    local_stream = utils.BytearrayStream(input_stream.read(self.length))
    # Each field of the Get payload is optional; decode a field only when
    # its tag is next in the stream.
    if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
        self._unique_identifier = primitives.TextString(
            tag=enums.Tags.UNIQUE_IDENTIFIER
        )
        self._unique_identifier.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.KEY_FORMAT_TYPE, local_stream):
        self._key_format_type = primitives.Enumeration(
            enum=enums.KeyFormatType,
            tag=enums.Tags.KEY_FORMAT_TYPE
        )
        self._key_format_type.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(enums.Tags.KEY_COMPRESSION_TYPE, local_stream):
        self._key_compression_type = primitives.Enumeration(
            enum=enums.KeyCompressionType,
            tag=enums.Tags.KEY_COMPRESSION_TYPE
        )
        self._key_compression_type.read(
            local_stream,
            kmip_version=kmip_version
        )
    if self.is_tag_next(
            enums.Tags.KEY_WRAPPING_SPECIFICATION,
            local_stream
    ):
        self._key_wrapping_specification = \
            objects.KeyWrappingSpecification()
        self._key_wrapping_specification.read(
            local_stream,
            kmip_version=kmip_version
        )
    # Raise if the declared payload length exceeds what was consumed.
    self.is_oversized(local_stream)
Read the data encoding the Get request payload and decode it into its constituent parts. Args: input_stream (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def modify_ack_deadline(self, items):
    """Modify the ack deadline for the given messages.

    Args:
        items(Sequence[ModAckRequest]): The items to modify.
    """
    ack_ids = []
    seconds = []
    # Split the requests into the two parallel lists the proto expects.
    for item in items:
        ack_ids.append(item.ack_id)
        seconds.append(item.seconds)
    request = types.StreamingPullRequest(
        modify_deadline_ack_ids=ack_ids,
        modify_deadline_seconds=seconds,
    )
    self._manager.send(request)
Modify the ack deadline for the given messages. Args: items(Sequence[ModAckRequest]): The items to modify.
juraj-google-style
def run_graph_optimizations(graph_def, input_arrays, output_arrays, config, graph=None):
    """Apply standard TensorFlow optimizations to the graph_def.

    Args:
        graph_def: Frozen GraphDef to be optimized.
        input_arrays: List of arrays that are considered inputs of the graph.
        output_arrays: List of arrays that are considered outputs of the graph.
        config: tf.ConfigProto.
        graph: TensorFlow Graph. Required when Eager mode is enabled.
            (default None)

    Returns:
        A new, optimized GraphDef.
    """
    meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)
    # Build a signature describing the graph's inputs/outputs so the
    # optimizer knows the interface boundary.
    signature = _meta_graph_pb2.SignatureDef()
    for array in input_arrays:
        signature.inputs[array.name].name = array.name
        signature.inputs[array.name].dtype = array.dtype.as_datatype_enum
        signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
    for array in output_arrays:
        signature.outputs[array.name].name = array.name
        signature.outputs[array.name].dtype = array.dtype.as_datatype_enum
        signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())
    # The key itself is irrelevant; only the signature content is used.
    meta_graph.signature_def['not_used_key'].CopyFrom(signature)
    # Register every input/output as a fetch (via the 'train_op'
    # collection) so the optimizer does not prune them away.
    fetch_collection = _meta_graph_pb2.CollectionDef()
    for array in input_arrays + output_arrays:
        fetch_collection.node_list.value.append(array.name)
    meta_graph.collection_def['train_op'].CopyFrom(fetch_collection)
    return tf_optimizer.OptimizeGraph(config, meta_graph)
Apply standard TensorFlow optimizations to the graph_def. Args: graph_def: Frozen GraphDef to be optimized. input_arrays: List of arrays that are considered inputs of the graph. output_arrays: List of arrays that are considered outputs of the graph. config: tf.ConfigProto. graph: TensorFlow Graph. Required when Eager mode is enabled. (default None) Returns: A new, optimized GraphDef.
github-repos
def add_to_buffer(self, content, read_position):
    """Add additional bytes content as read from the read_position.

    Args:
        content (bytes): data to be added to buffer working BufferWorkSpac.
        read_position (int): where in the file pointer the data was read
            from.
    """
    self.read_position = read_position
    # The new content was read from before the current buffer in the
    # file, so it is prepended.
    existing = self.read_buffer
    self.read_buffer = content if existing is None else content + existing
Add additional bytes content as read from the read_position. Args: content (bytes): data to be added to buffer working BufferWorkSpac. read_position (int): where in the file pointer the data was read from.
juraj-google-style
def run_and_gather_logs(name, test_name, test_args, benchmark_type, skip_processing_logs=False):
    """Run the bazel test given by test_name. Gather and return the logs.

    Args:
        name: Benchmark target identifier.
        test_name: A unique bazel target, e.g. "//path/to:test"
        test_args: A string containing all arguments to run the target with.
        benchmark_type: A string representing the BenchmarkType enum; the
            benchmark type for this target.
        skip_processing_logs: Whether to skip processing test results from
            log files.

    Returns:
        A tuple (test_results, test_adjusted_name), where
        test_results: A test_log_pb2.TestResults proto, or None if log
            processing is skipped.
        test_adjusted_name: Unique benchmark name that consists of the
            benchmark name optionally followed by GPU type.

    Raises:
        ValueError: If the test_name is not a valid target.
        subprocess.CalledProcessError: If the target itself fails.
        IOError: If there are problems gathering test log output from the
            test.
        MissingLogsError: If we couldn't find benchmark logs.
    """
    # NOTE(review): the original string literals here were truncated at
    # '//' during extraction; the label check and example below were
    # reconstructed from the docstring ("//path/to:test") -- confirm
    # against the upstream file, which may validate the label further.
    if not (test_name and test_name.startswith('//')):
        raise ValueError('Expected test_name parameter with a unique test, e.g.: --test_name=//path/to:test')
    # Map the bazel label to its on-disk executable path.
    test_executable = test_name.rstrip().strip('/').replace(':', '/')
    if gfile.Exists(os.path.join('bazel-bin', test_executable)):
        test_executable = os.path.join('bazel-bin', test_executable)
    else:
        test_executable = os.path.join('.', test_executable)
    # Suffix the benchmark name with the GPU model, if one is present.
    test_adjusted_name = name
    gpu_config = gpu_info_lib.gather_gpu_devices()
    if gpu_config:
        gpu_name = gpu_config[0].model
        gpu_short_name_match = re.search('(Tesla|NVIDIA) (K40|K80|P100|V100|A100)', gpu_name)
        if gpu_short_name_match:
            gpu_short_name = gpu_short_name_match.group(0)
            test_adjusted_name = name + '|' + gpu_short_name.replace(' ', '_')
    # Benchmark logs are written under a unique, sanitized prefix.
    temp_directory = tempfile.mkdtemp(prefix='run_and_gather_logs')
    mangled_test_name = test_adjusted_name.strip('/').replace('|', '_').replace('/', '_').replace(':', '_')
    test_file_prefix = os.path.join(temp_directory, mangled_test_name)
    test_file_prefix = '%s.' % test_file_prefix
    try:
        if not gfile.Exists(test_executable):
            # Fall back to the python3-suffixed binary if present.
            test_executable_py3 = test_executable + '.python3'
            if not gfile.Exists(test_executable_py3):
                raise ValueError('Executable does not exist: %s' % test_executable)
            test_executable = test_executable_py3
        test_args = shlex.split(test_args)
        # The benchmark framework reads this env var to decide where to
        # write its logs.
        os.environ['TEST_REPORT_FILE_PREFIX'] = test_file_prefix
        start_time = time.time()
        subprocess.check_call([test_executable] + test_args)
        if skip_processing_logs:
            return (None, test_adjusted_name)
        run_time = time.time() - start_time
        log_files = gfile.Glob('{}*'.format(test_file_prefix))
        if not log_files:
            raise MissingLogsError('No log files found at %s.' % test_file_prefix)
        return (process_test_logs(test_adjusted_name, test_name=test_name, test_args=test_args, benchmark_type=benchmark_type, start_time=int(start_time), run_time=run_time, log_files=log_files), test_adjusted_name)
    finally:
        try:
            gfile.DeleteRecursively(temp_directory)
        except OSError:
            pass
Run the bazel test given by test_name. Gather and return the logs. Args: name: Benchmark target identifier. test_name: A unique bazel target, e.g. "//path/to:test" test_args: A string containing all arguments to run the target with. benchmark_type: A string representing the BenchmarkType enum; the benchmark type for this target. skip_processing_logs: Whether to skip processing test results from log files. Returns: A tuple (test_results, mangled_test_name), where test_results: A test_log_pb2.TestResults proto, or None if log processing is skipped. test_adjusted_name: Unique benchmark name that consists of benchmark name optionally followed by GPU type. Raises: ValueError: If the test_name is not a valid target. subprocess.CalledProcessError: If the target itself fails. IOError: If there are problems gathering test log output from the test. MissingLogsError: If we couldn't find benchmark logs.
github-repos
def Parse(text, message):
    """Parses a JSON representation of a protocol message into a message.

    Args:
        text: Message JSON representation.
        message: A protocol buffer message to merge into.

    Returns:
        The same message passed as argument.

    Raises:
        ParseError: On JSON parsing problems.
    """
    if (not isinstance(text, six.text_type)):
        text = text.decode('utf-8')
    try:
        # object_pairs_hook (used for duplicate-key detection) is not
        # available before Python 2.7.
        if (sys.version_info < (2, 7)):
            js = json.loads(text)
        else:
            js = json.loads(text, object_pairs_hook=_DuplicateChecker)
    except ValueError as e:
        raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
    _ConvertMessage(js, message)
    return message
Parses a JSON representation of a protocol message into a message.

    Args:
        text: Message JSON representation.
        message: A protocol buffer message to merge into.

    Returns:
        The same message passed as argument.

    Raises:
        ParseError: On JSON parsing problems.
codesearchnet
def unregister(self, alias):
    """Unregisters a service instance.

    Stops a service and removes it from the manager.

    Args:
        alias: string, the alias of the service instance to unregister.
    """
    if alias not in self._service_objects:
        raise Error(self._device, 'No service is registered with alias "%s".' % alias)
    service_obj = self._service_objects.pop(alias)
    # A service that is not running needs no stop call.
    if not service_obj.is_alive:
        return
    with expects.expect_no_raises('Failed to stop service instance "%s".' % alias):
        service_obj.stop()
Unregisters a service instance. Stops a service and removes it from the manager. Args: alias: string, the alias of the service instance to unregister.
github-repos
def _get_config_files():
    """Load the list of file paths for fedmsg configuration files.

    Returns:
        list: List of files containing fedmsg configuration.
    """
    # The FEDMSG_CONFIG environment variable overrides the default
    # configuration directory.
    config_location = os.environ.get('FEDMSG_CONFIG') or '/etc/fedmsg.d'
    config_paths = []
    if os.path.isfile(config_location):
        config_paths.append(config_location)
    elif os.path.isdir(config_location):
        # A directory contributes every regular *.py file it contains.
        for entry in os.listdir(config_location):
            if not entry.endswith('.py'):
                continue
            candidate = os.path.join(config_location, entry)
            if os.path.isfile(candidate):
                config_paths.append(candidate)
    if not config_paths:
        _log.info('No configuration files found in %s', config_location)
    return config_paths
Load the list of file paths for fedmsg configuration files. Returns: list: List of files containing fedmsg configuration.
codesearchnet
def sphere(radius=0.5, sectors=32, rings=16) -> VAO:
    """Creates a sphere.

    Keyword Args:
        radius (float): Radius or the sphere
        sectors (int): number of vertical segments
        rings (int): number of horizontal rings

    Returns:
        A :py:class:`demosys.opengl.vao.VAO` instance
    """
    # Normalized step sizes along rings (latitude) and sectors (longitude).
    R = (1.0 / (rings - 1))
    S = (1.0 / (sectors - 1))
    vertices = ([0] * ((rings * sectors) * 3))
    normals = ([0] * ((rings * sectors) * 3))
    uvs = ([0] * ((rings * sectors) * 2))
    (v, n, t) = (0, 0, 0)
    # Generate one vertex (position, normal, uv) per ring/sector pair.
    for r in range(rings):
        for s in range(sectors):
            y = math.sin((((- math.pi) / 2) + ((math.pi * r) * R)))
            x = (math.cos((((2 * math.pi) * s) * S)) * math.sin(((math.pi * r) * R)))
            z = (math.sin((((2 * math.pi) * s) * S)) * math.sin(((math.pi * r) * R)))
            uvs[t] = (s * S)
            uvs[(t + 1)] = (r * R)
            vertices[v] = (x * radius)
            vertices[(v + 1)] = (y * radius)
            vertices[(v + 2)] = (z * radius)
            # The unit-sphere position doubles as the surface normal.
            normals[n] = x
            normals[(n + 1)] = y
            normals[(n + 2)] = z
            t += 2
            v += 3
            n += 3
    # Two triangles (six indices) per quad of the ring/sector grid.
    indices = ((([0] * rings) * sectors) * 6)
    i = 0
    for r in range((rings - 1)):
        for s in range((sectors - 1)):
            indices[i] = ((r * sectors) + s)
            indices[(i + 1)] = (((r + 1) * sectors) + (s + 1))
            indices[(i + 2)] = ((r * sectors) + (s + 1))
            indices[(i + 3)] = ((r * sectors) + s)
            indices[(i + 4)] = (((r + 1) * sectors) + s)
            indices[(i + 5)] = (((r + 1) * sectors) + (s + 1))
            i += 6
    vbo_vertices = numpy.array(vertices, dtype=numpy.float32)
    vbo_normals = numpy.array(normals, dtype=numpy.float32)
    vbo_uvs = numpy.array(uvs, dtype=numpy.float32)
    vbo_elements = numpy.array(indices, dtype=numpy.uint32)
    vao = VAO('sphere', mode=mlg.TRIANGLES)
    vao.buffer(vbo_vertices, '3f', ['in_position'])
    vao.buffer(vbo_normals, '3f', ['in_normal'])
    vao.buffer(vbo_uvs, '2f', ['in_uv'])
    vao.index_buffer(vbo_elements, index_element_size=4)
    return vao
Creates a sphere. Keyword Args: radius (float): Radius or the sphere rings (int): number or horizontal rings sectors (int): number of vertical segments Returns: A :py:class:`demosys.opengl.vao.VAO` instance
codesearchnet
def cmap_from_color(color, dark=False):
    """Generates a matplotlib colormap from a single color.

    Colormap will be built, by default, from white to ``color``.

    Args:
        color: Can be one of several things:
            1. Hex code
            2. HTML color name
            3. RGB tuple
        dark (bool): If ``True``, colormap will be built from ``color`` to
            black. Default is ``False``, which builds a colormap from
            white to ``color``.

    Returns:
        colormap: A matplotlib colormap
    """
    palette = sns.dark_palette if dark else sns.light_palette
    return palette(color, as_cmap=True)
Generates a matplotlib colormap from a single color. Colormap will be built, by default, from white to ``color``. Args: color: Can be one of several things: 1. Hex code 2. HTML color name 3. RGB tuple dark (bool): If ``True``, colormap will be built from ``color`` to black. Default is ``False``, which builds a colormap from white to ``color``. Returns: colormap: A matplotlib colormap
codesearchnet
def _getFuncArgs(func): r code = func.func_code Defaults = func.func_defaults nargs = code.co_argcount ArgNames = code.co_varnames[:nargs] Args = OrderedDict() argCount = len(ArgNames) defCount = len(Defaults) if Defaults else 0 diff = argCount - defCount for i in range(0, diff): Args[ArgNames[i]] = {} for i in range(diff, argCount): Args[ArgNames[i]] = {'default': Defaults[i - diff]} return Args
r"""Gives the details on the args of the given func. Args: func (function): The function to get details on.
juraj-google-style
def _extract_response_xml(self, domain, response): attributes = {} alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'} try: xml_root = ET.fromstring(response._content) for xml_child in xml_root.findall('SD if ((xml_child.tag in alexa_keys) and (alexa_keys[xml_child.tag] in xml_child.attrib)): attributes[xml_child.tag.lower()] = xml_child.attrib[alexa_keys[xml_child.tag]] except ParseError: pass attributes['domain'] = domain return {'attributes': attributes}
Extract XML content of an HTTP response into dictionary format. Args: response: HTML Response objects Returns: A dictionary: {alexa-ranking key : alexa-ranking value}.
codesearchnet
def functional_pulse(func):
    """A decorator for generating SamplePulse from python callable.

    Args:
        func (callable): A function describing pulse envelope.

    Raises:
        PulseError: when invalid function is specified.
    """
    @functools.wraps(func)
    def to_pulse(duration, *args, name=None, **kwargs):
        # The duration must be a strictly positive integer sample count.
        if not (isinstance(duration, int) and duration > 0):
            raise PulseError('The first argument must be an integer value representing duration.')
        envelope = np.asarray(func(duration, *args, **kwargs), dtype=np.complex128)
        return SamplePulse(samples=envelope, name=name)
    return to_pulse
A decorator for generating SamplePulse from python callable. Args: func (callable): A function describing pulse envelope. Raises: PulseError: when invalid function is specified.
juraj-google-style
def get_rebind_dict(rebinder: Callable, target: Symbolic) -> Dict[str, Any]:
    """Generate rebind dict using rebinder on target value.

    Args:
        rebinder: A callable object with signature:
            (key_path, value) -> Any or (key_path, value, parent) -> Any.
            If rebinder returns the same value from input, the value is
            considered unchanged. Otherwise it will be put into the
            returning rebind dict. See `Symbolic.rebind` for more details.
        target: Upon which value the rebind dict is computed.

    Returns:
        An ordered dict of key path string to updated value.
    """
    # Normalize 2-arg rebinders to the 3-arg (key, value, parent) form.
    signature = pg_typing.signature(rebinder, auto_typing=False, auto_doc=False)
    if len(signature.args) == 2:
        select_fn = lambda k, v, p: rebinder(k, v)
    elif len(signature.args) == 3:
        select_fn = rebinder
    else:
        raise TypeError(f"Rebinder function '{signature.id}' should accept 2 or 3 arguments (key_path, value, [parent]). Encountered: {signature.args}.")
    path_value_pairs = dict()

    def _fill_rebind_dict(path, value, parent):
        new_value = select_fn(path, value, parent)
        if new_value is not value:
            # Value changed: record it and do not descend into it.
            path_value_pairs[str(path)] = new_value
            return TraverseAction.CONTINUE
        return TraverseAction.ENTER
    traverse(target, _fill_rebind_dict)
    return path_value_pairs
Generate rebind dict using rebinder on target value. Args: rebinder: A callable object with signature: (key_path: utils.KeyPath, value: Any) -> Any or (key_path: utils.KeyPath, value: Any, parent: Any) -> Any. If rebinder returns the same value from input, the value is considered unchanged. Otherwise it will be put into the returning rebind dict. See `Symbolic.rebind` for more details. target: Upon which value the rebind dict is computed. Returns: An ordered dict of key path string to updated value.
github-repos
def attribute_label(self, attribute_id, label, action='GET', params=None):
    """Gets or deletes a security label on an attribute.

    Args:
        attribute_id: The id of the attribute.
        label: The security label name.
        action: 'GET' or 'DELETE'; any other value is reported as an error.
        params: Optional query parameters (GET only).

    Returns:
        Security label json for 'GET'/'DELETE', or None when the action is
        unsupported (after reporting error 925).
    """
    if params is None:
        params = {}
    # An object that cannot be updated has no usable unique id (error 910).
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    if action == 'GET':
        return self.tc_requests.get_attribute_label(
            self.api_type,
            self.api_sub_type,
            self.unique_id,
            attribute_id,
            label,
            owner=self.owner,
            params=params,
        )
    if action == 'DELETE':
        return self.tc_requests.delete_attribute_label(
            self.api_type,
            self.api_sub_type,
            self.unique_id,
            attribute_id,
            label,
            owner=self.owner,
        )
    # Unsupported action: report and return nothing.
    self._tcex.handle_error(925, ['action', 'attribute_label', 'action', 'action', action])
    return None
Gets a security labels from a attribute Args: attribute_id: label: action: params: Returns: Security label json
juraj-google-style
def Serialize(self, writer):
    """Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    super(AssetState, self).Serialize(writer)
    writer.WriteUInt256(self.AssetId)
    writer.WriteByte(self.AssetType)
    writer.WriteVarString(self.Name)
    # Non-negative amounts are written unsigned; negative values (used as
    # sentinels) are written signed.
    if self.Amount.value > -1:
        writer.WriteFixed8(self.Amount, unsigned=True)
    else:
        writer.WriteFixed8(self.Amount)
    if type(self.Available) is not Fixed8:
        raise Exception("AVAILABLE IS NOT FIXED 8!")
    writer.WriteFixed8(self.Available, unsigned=True)
    writer.WriteByte(self.Precision)
    # Reserved/padding byte.
    writer.WriteByte(b'\x00')
    writer.WriteFixed8(self.Fee)
    writer.WriteUInt160(self.FeeAddress)
    self.Owner.Serialize(writer)
    writer.WriteUInt160(self.Admin)
    writer.WriteUInt160(self.Issuer)
    writer.WriteUInt32(self.Expiration)
    writer.WriteBool(self.IsFrozen)
Serialize full object. Args: writer (neo.IO.BinaryWriter):
juraj-google-style
def _measure_tensor_list_column_widths(self, data):
    """Determine the maximum widths of the timestamp, dump-size and
    op-type columns.

    This method assumes that data is sorted in the default order, i.e.,
    by ascending timestamps.

    Args:
        data: (list of DebugTensorDatum) the data based on which the
            maximum column widths will be determined.

    Returns:
        (int) maximum width of the timestamp column.
        (int) maximum width of the dump size column.
        (int) maximum width of the op type column.
    """
    max_timestamp_width = 0
    if data:
        # Data is sorted by timestamp, so the last datum has the widest
        # relative-time string.
        max_rel_time_ms = (data[-1].timestamp - self._debug_dump.t0) / 1000.0
        max_timestamp_width = len('[%.3f] ' % max_rel_time_ms) + 1
    max_timestamp_width = max(max_timestamp_width, len(self._TIMESTAMP_COLUMN_HEAD) + 1)
    max_dump_size_width = 0
    for dump in data:
        dump_size_str = cli_shared.bytes_to_readable_str(dump.dump_size_bytes)
        if len(dump_size_str) + 1 > max_dump_size_width:
            max_dump_size_width = len(dump_size_str) + 1
    max_dump_size_width = max(max_dump_size_width, len(self._DUMP_SIZE_COLUMN_HEAD) + 1)
    max_op_type_width = 0
    for dump in data:
        op_type = self._debug_dump.node_op_type(dump.node_name)
        if len(op_type) + 1 > max_op_type_width:
            max_op_type_width = len(op_type) + 1
    max_op_type_width = max(max_op_type_width, len(self._OP_TYPE_COLUMN_HEAD) + 1)
    return (max_timestamp_width, max_dump_size_width, max_op_type_width)
Determine the maximum widths of the timestamp, dump-size and op-type columns.

This method assumes that data is sorted in the default order, i.e.,
by ascending timestamps.

Args:
    data: (list of DebugTensorDatum) the data based on which the maximum
        column widths will be determined.

Returns:
    (int) maximum width of the timestamp column. 0 if data is empty.
    (int) maximum width of the dump size column. 0 if data is empty.
    (int) maximum width of the op type column. 0 if data is empty.
github-repos
def _check_not_finalized(self) -> None: if self._finalized: raise RuntimeError('Graph is finalized and cannot be modified.')
Check if the graph is finalized. Raises: RuntimeError: If the graph finalized.
github-repos
def iter_predict(self, X, include_init=False):
    """Yield the predicted classes for ``X`` at every boosting stage.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples. Sparse matrices are accepted only if they
            are supported by the weak model.
        include_init (bool, default=False): If ``True`` then the
            prediction from ``init_estimator`` will also be returned.

    Returns:
        iterator of arrays of shape (n_samples,) containing the predicted
        classes at each stage.
    """
    for probas in self.iter_predict_proba(X, include_init=include_init):
        # Harden the per-class probabilities, then map the class indices
        # back to the original labels.
        hardened = np.argmax(probas, axis=1)
        yield self.encoder_.inverse_transform(hardened)
Returns the predicted classes for ``X`` at every stage of the boosting procedure. Arguments: X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples. Sparse matrices are accepted only if they are supported by the weak model. include_init (bool, default=False): If ``True`` then the prediction from ``init_estimator`` will also be returned. Returns: iterator of arrays of shape (n_samples, n_classes) containing the predicted classes at each stage.
codesearchnet
def set_video_pos(self, x1, y1, x2, y2):
    """Set the video position on the screen.

    Args:
        x1 (int): Top left x coordinate (px)
        y1 (int): Top left y coordinate (px)
        x2 (int): Bottom right x coordinate (px)
        y2 (int): Bottom right y coordinate (px)
    """
    # The D-Bus call takes the four corner coordinates as one
    # space-separated string.
    coords = ' '.join(str(coord) for coord in (x1, y1, x2, y2))
    self._player_interface.VideoPos(ObjectPath('/not/used'), String(coords))
Set the video position on the screen Args: x1 (int): Top left x coordinate (px) y1 (int): Top left y coordinate (px) x2 (int): Bottom right x coordinate (px) y2 (int): Bottom right y coordinate (px)
codesearchnet
def qualifyContracts(self, *contracts: List[Contract]) -> List[Contract]:
    """Fully qualify the given contracts in-place.

    This will fill in the missing fields in the contract, especially the
    conId. Returns a list of contracts that have been successfully
    qualified. This method is blocking.

    Args:
        contracts: Contracts to qualify.
    """
    # Blocking wrapper around the async implementation.
    return self._run(self.qualifyContractsAsync(*contracts))
Fully qualify the given contracts in-place. This will fill in the missing fields in the contract, especially the conId. Returns a list of contracts that have been successfully qualified. This method is blocking. Args: contracts: Contracts to qualify.
juraj-google-style
def from_sub_models_config(cls, text_encoder_config: PretrainedConfig, audio_encoder_config: PretrainedConfig, decoder_config: MusicgenMelodyDecoderConfig, **kwargs):
    """Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from
    text encoder, audio encoder and decoder configurations.

    Returns:
        [`MusicgenMelodyConfig`]: An instance of a configuration object
    """
    # Each sub-config is serialized to a plain dict before being embedded.
    sub_configs = {
        'text_encoder': text_encoder_config.to_dict(),
        'audio_encoder': audio_encoder_config.to_dict(),
        'decoder': decoder_config.to_dict(),
    }
    return cls(**sub_configs, **kwargs)
Instantiate a [`MusicgenMelodyConfig`] (or a derived class) from text encoder, audio encoder and decoder configurations. Returns: [`MusicgenMelodyConfig`]: An instance of a configuration object
github-repos
def apply(self, pts: torch.Tensor) -> torch.Tensor:
    """Apply the current Rotation as a rotation matrix to a set of 3D
    coordinates.

    Args:
        pts: A [*, 3] set of points

    Returns:
        [*, 3] rotated points
    """
    # Materialize the rotation matrices once, then rotate the points.
    return rot_vec_mul(self.get_rot_mats(), pts)
Apply the current Rotation as a rotation matrix to a set of 3D coordinates. Args: pts: A [*, 3] set of points Returns: [*, 3] rotated points
github-repos
def init_logger(self, log_dir=None, level=logging.INFO):
    """Init the logger.

    Args:
        log_dir(str, optional): Log file directory. If not specified, no
            log file will be used.
        level (int or str): See the built-in python logging module.

    Returns:
        :obj:`~logging.Logger`: Python logger.
    """
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s', level=level)
    logger = logging.getLogger(__name__)
    # Only the master process (rank 0) gets a file handler.
    if log_dir and self.rank == 0:
        log_file = osp.join(log_dir, '{}.log'.format(self.timestamp))
        self._add_file_handler(logger, log_file, level=level)
    return logger
Init the logger. Args: log_dir(str, optional): Log file directory. If not specified, no log file will be used. level (int or str): See the built-in python logging module. Returns: :obj:`~logging.Logger`: Python logger.
juraj-google-style
def trimpath(attributes):
    """Simplifies the given path.

    If pathdepth is in attributes, the last pathdepth elements will be
    returned. If pathdepth is "full", the full path will be returned.
    Otherwise the filename only will be returned.

    Args:
        attributes: The element attributes.

    Returns:
        The trimmed path.
    """
    # No pathdepth requested: keep just the filename.
    if 'pathdepth' not in attributes:
        return os.path.basename(attributes['file'])
    depth = attributes['pathdepth']
    if depth == 'full':
        return attributes['file']
    # Peel off trailing path components until the requested depth is met
    # or the path is exhausted.
    limit = int(depth)
    pathelements = []
    remainder = attributes['file']
    while len(pathelements) < limit and remainder:
        remainder, tail = os.path.split(remainder)
        pathelements.insert(0, tail)
    return os.path.join(*pathelements)
Simplifies the given path. If pathdepth is in attributes, the last pathdepth elements will be returned. If pathdepth is "full", the full path will be returned. Otherwise the filename only will be returned. Args: attributes: The element attributes. Returns: The trimmed path.
juraj-google-style
def _build(self, inputs, is_training=True, dropout_keep_prob=0.5):
    """Assembles the `MLP` and connects it to the graph.

    Args:
        inputs: A 2D Tensor of size `[batch_size, input_size]`.
        is_training: A bool or tf.Bool Tensor. Indicates whether we are
            currently training. Defaults to `True`.
        dropout_keep_prob: The probability that each element is kept when
            both `use_dropout` and `is_training` are True. Defaults to 0.5.

    Returns:
        A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
    """
    self._input_shape = tuple(inputs.get_shape().as_list())
    net = inputs
    final_index = self._num_layers - 1
    for layer_id in xrange(self._num_layers):
        net = self._layers[layer_id](net)
        # Dropout/activation are skipped after the last layer unless the
        # final activation was explicitly requested.
        if final_index != layer_id or self._activate_final:
            if self._use_dropout:
                # Dropout is only active while training; otherwise keep
                # probability collapses to 1.0.
                keep_prob = utils.smart_cond(
                    is_training, true_fn=lambda: dropout_keep_prob,
                    false_fn=lambda: tf.constant(1.0)
                )
                net = tf.nn.dropout(net, keep_prob=keep_prob)
            net = self._activation(net)
    return net
Assembles the `MLP` and connects it to the graph. Args: inputs: A 2D Tensor of size `[batch_size, input_size]`. is_training: A bool or tf.Bool Tensor. Indicates whether we are currently training. Defaults to `True`. dropout_keep_prob: The probability that each element is kept when both `use_dropout` and `is_training` are True. Defaults to 0.5. Returns: A 2D Tensor of size `[batch_size, output_sizes[-1]]`.
juraj-google-style
def content_type(self):
    """Return the value of Content-Type header field.

    The value for the Content-Type header field is determined from the
    :attr:`media_type` and :attr:`charset` data attributes.

    Returns:
        str: Value of Content-Type header field
    """
    media = self.media_type
    # Only text/* media types with a known charset carry the charset
    # parameter; everything else is returned as-is.
    if media is None or not media.startswith('text/') or self.charset is None:
        return media
    return media + '; charset=' + self.charset
Return the value of Content-Type header field. The value for the Content-Type header field is determined from the :attr:`media_type` and :attr:`charset` data attributes. Returns: str: Value of Content-Type header field
codesearchnet
def union(self, other):
    """Constructs an unminimized DFA recognizing the union of the
    languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used for the union
            operation

    Returns:
        DFA: The resulting DFA
    """
    # The union is the cross product combined with logical OR.
    self.cross_product(other, bool.__or__)
    return self
Constructs an unminimized DFA recognizing the union of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the union operation Returns: DFA: The resulting DFA
juraj-google-style
def __call__(self, *args, **kwargs):
    """Executes this callable.

    This behaves like a regular op - in eager mode, it immediately starts
    execution, returning results. In graph mode, it creates ops which
    return symbolic TensorFlow values (like `tf.Tensor`,
    `tf.data.Dataset`, etc.). For example, `tf.function` callables
    typically generate a `tf.raw_ops.PartitionedCall` op, but not always
    - the exact operations being generated are an internal implementation
    detail.

    Args:
        *args: positional argument for this call
        **kwargs: keyword arguments for this call

    Returns:
        The execution results.
    """
Executes this callable. This behaves like a regular op - in eager mode, it immediately starts execution, returning results. In graph mode, it creates ops which return symbolic TensorFlow values (like `tf.Tensor`, `tf.data.Dataset`, etc.). For example, `tf.function` callables typically generate a `tf.raw_ops.PartitionedCall` op, but not always - the exact operations being generated are an internal implementation detail. Args: *args: positional argument for this call **kwargs: keyword arguments for this call Returns: The execution results.
github-repos
def _SetExtractionParsersAndPlugins(self, configuration, session):
    """Sets the parsers and plugins before extraction.

    Args:
        configuration (ProcessingConfiguration): processing configuration.
        session (Session): session.
    """
    expression = configuration.parser_filter_expression
    # Resolve the filter expression to the concrete parser/plugin names.
    names = parsers_manager.ParsersManager.GetParserAndPluginNames(
        parser_filter_expression=expression)
    session.enabled_parser_names = list(names)
    session.parser_filter_expression = expression
Sets the parsers and plugins before extraction. Args: configuration (ProcessingConfiguration): processing configuration. session (Session): session.
codesearchnet
def commit_offsets_sync(self, offsets):
    """Commit specific offsets synchronously.

    This method will retry until the commit completes successfully or an
    unrecoverable error is encountered.

    Arguments:
        offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit

    Raises error on failure
    """
    assert self.config['api_version'] >= (0, 8, 1), 'Unsupported Broker API'
    assert all(map(lambda k: isinstance(k, TopicPartition), offsets))
    assert all(map(lambda v: isinstance(v, OffsetAndMetadata), offsets.values()))
    # Flush any callbacks from previously completed async commits first.
    self._invoke_completed_offset_commit_callbacks()
    if not offsets:
        return
    # Retry loop: re-resolve the coordinator and resend until the commit
    # succeeds or a non-retriable error is raised.
    while True:
        self.ensure_coordinator_ready()
        future = self._send_offset_commit_request(offsets)
        self._client.poll(future=future)
        if future.succeeded():
            return future.value
        if not future.retriable():
            raise future.exception
        # Back off before retrying a retriable failure.
        time.sleep(self.config['retry_backoff_ms'] / 1000)
Commit specific offsets synchronously. This method will retry until the commit completes successfully or an unrecoverable error is encountered. Arguments: offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit Raises error on failure
juraj-google-style
def random_weights(n, bounds=(0.0, 1.0), total=1.0):
    """Generate pseudo-random weights.

    Returns a list of random weights that is of length n, where each
    weight is in the range bounds, and where the weights sum up to total.
    Useful for creating random portfolios when benchmarking.

    Args:
        * n (int): number of random weights
        * bounds ((low, high)): bounds for each weight
        * total (float): total sum of the weights
    """
    low, high = bounds
    if high < low:
        raise ValueError('Higher bound must be greater or equal to lower bound')
    # A feasible solution requires total to lie within [n*low, n*high].
    if n * high < total or n * low > total:
        raise ValueError('solution not possible with given n and bounds')
    weights = [0] * n
    # Track the (negated) remaining mass still to be distributed.
    remaining = -float(total)
    for i in range(n):
        slots_left = n - i - 1
        # Clamp each draw so the remaining slots can still reach `total`
        # without leaving [low, high].
        lower_bound = max(-(slots_left * high) - remaining, low)
        upper_bound = min(-(slots_left * low) - remaining, high)
        drawn = random.uniform(lower_bound, upper_bound)
        weights[i] = drawn
        remaining += drawn
    # Shuffle to remove the positional bias of the sequential draws.
    random.shuffle(weights)
    return weights
Generate pseudo-random weights. Returns a list of random weights that is of length n, where each weight is in the range bounds, and where the weights sum up to total. Useful for creating random portfolios when benchmarking. Args: * n (int): number of random weights * bounds ((low, high)): bounds for each weight * total (float): total sum of the weights
codesearchnet
def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):
    """Apply profile on a read tuple name and update read tuple ID.

    Args:
        read_tuple_name (str): Read tuple name to be updated.
        read_tuple_id (id): New read tuple ID.
        synchronize_widths (bool): Update widths (in accordance to this
            profile).
    """
    # A read tuple name has '__'-separated parts: prefix, hex id,
    # segments, and a trailing part.
    parts = read_tuple_name.split('__')
    parts[0] = self._fill_right(parts[0], '-', self.prefix_width)
    if (read_tuple_id is not None):
        parts[1] = '{:x}'.format(read_tuple_id)
    # Zero-pad the (possibly new) hex id to the profile's width.
    parts[1] = self._fill_left(parts[1], '0', self.read_tuple_id_width)
    if synchronize_widths:
        new_segments = []
        # Segments look like "(a,b,c,d,e),(...)"; strip the outer parens
        # and split on the separators between them.
        segments = parts[2][1:(- 1)].split('),(')
        for segment in segments:
            values = segment.split(',')
            # Pad genome id, chromosome id and the two coordinates;
            # values[2] is left untouched.
            values[0] = values[0].zfill(self.genome_id_width)
            values[1] = values[1].zfill(self.chr_id_width)
            values[3] = values[3].zfill(self.coor_width)
            values[4] = values[4].zfill(self.coor_width)
            new_segments.append((('(' + ','.join(values)) + ')'))
        parts[2] = ','.join(new_segments)
    return '__'.join(parts)
Apply profile on a read tuple name and update read tuple ID. Args: read_tuple_name (str): Read tuple name to be updated. read_tuple_id (id): New read tuple ID. synchronize_widths (bool): Update widths (in accordance to this profile).
codesearchnet
def scandir(path='.'):
    """Return an iterator of os.DirEntry objects corresponding to the
    entries in the directory given by path.

    The entries are yielded in arbitrary order, and the special entries
    '.' and '..' are not included. Equivalent to "os.scandir".

    Args:
        path (path-like object): Path or URL. If path is of type bytes
            (directly or indirectly through the PathLike interface), the
            type of the name and path attributes of each os.DirEntry will
            be bytes; in all other circumstances, they will be of type
            str.

    Returns:
        Generator of os.DirEntry: Entries information.
    """
    normalized = fsdecode(path).replace('\\', '/')
    # Storage URLs go through the cloud backend; everything else uses the
    # regular os.scandir.
    if is_storage(normalized):
        return _scandir_generator(
            is_bytes=isinstance(fspath(path), (bytes, bytearray)),
            scandir_path=normalized,
            system=get_instance(normalized))
    return os_scandir(normalized)
Return an iterator of os.DirEntry objects corresponding to the entries in the directory given by path. The entries are yielded in arbitrary order, and the special entries '.' and '..' are not included. Equivalent to "os.scandir". Args: path (path-like object): Path or URL. If path is of type bytes (directly or indirectly through the PathLike interface), the type of the name and path attributes of each os.DirEntry will be bytes; in all other circumstances, they will be of type str. Returns: Generator of os.DirEntry: Entries information.
codesearchnet
def _free_up_space(self, size, this_rel_path=None):
    """If there are not ``size`` bytes of space left, delete files until
    there are.

    Args:
        size: size of the current file.
        this_rel_path: rel_path to the current file, so we don't delete
            it.
    """
    # How many bytes must be freed to stay under maxsize after adding
    # `size` bytes.
    space = ((self.size + size) - self.maxsize)
    if (space <= 0):
        return
    # Collect the oldest entries first until enough space is reclaimed.
    removes = []
    for row in self.database.execute('SELECT path, size, time FROM files ORDER BY time ASC'):
        if (space > 0):
            removes.append(row[0])
            space -= row[1]
        else:
            break
    for rel_path in removes:
        # Never evict the file currently being written.
        if (rel_path != this_rel_path):
            global_logger.debug('Deleting {}'.format(rel_path))
            self.remove(rel_path)
If there are not size bytes of space left, delete files until there are.

Args:
    size: size of the current file.
    this_rel_path: rel_path to the current file, so we don't delete it.
codesearchnet
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
    """Setup data while splitting into a training, validation, and test
    set.

    Args:
        X: text data,
        y: data labels,
        tokenizer: A Tokenizer instance
        proc_data_dir: Directory for the split and processed data
    """
    X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
    # The vocabulary must be built from the training split only.
    tokenizer.build_vocab(X_train)
    splits = (
        ('train.bin', X_train, y_train, {'train': True}),
        ('val.bin', X_val, y_val, {}),
        ('test.bin', X_test, y_test, {}),
    )
    for file_name, X_split, y_split, extra in splits:
        process_save(X_split, y_split, tokenizer,
                     path.join(proc_data_dir, file_name), **extra, **kwargs)
Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data
codesearchnet
def batch_dot(x0: FloatArray['... n'], x1: FloatArray['... n'], *, keepdims: bool=False, xnp: numpy_utils.NpModule=...) -> FloatArray['... 1?']:
    """Dot product on the last dimension, with broadcasting support.

    Contrary to `np.dot`, the behavior is consistent for 1-dim vs n-dim
    (while dot acts as matmul). First dimensions are always broadcasted.

    Args:
        x0: Vector array
        x1: Vector array
        keepdims: If True, returns `FloatArray['... 1']`
        xnp: Numpy module to use

    Returns:
        The dot product along the last axis.
    """
    # einsum contracts the last axis while broadcasting the leading ones.
    result = xnp.einsum('...m,...m->...', x0, x1)
    if keepdims:
        return result[..., None]
    return result
Dot product on the last dimension, with broadcasting support. Contrary to `np.dot`, the behavior is consistent for 1-dim vs n-dim (while dot act as matmul). First dimensions are always broadcasted. Args: x0: Vector array x1: Vector array keepdims: If True, returns `FloatArray['... 1']` xnp: Numpy module to use Returns: The dot product along the last axis.
github-repos
def _get_mutation(self, node: cfg.CFGNode, arg_dict: dict[str, cfg.Variable], subst: datatypes.AliasingDict[str, cfg.Variable], retvar: cfg.Variable) -> list[function.Mutation]:
    """Mutation for changing the type parameters of mutable arguments.

    This will adjust the type parameters as needed for pytd functions
    like:
        def append_float(x: list[int]):
            x = list[int or float]

    This is called after all the signature matching has succeeded, and we
    know we're actually calling this function.

    Args:
        node: The current CFG node.
        arg_dict: A map of strings to cfg.Variable instances.
        subst: Current type parameters.
        retvar: A variable of the return value.

    Returns:
        A list of Mutation instances.
    """
    mutations = []
    # Only materialize empty substitutions when some parameter actually
    # declares a mutated type.
    if any((f.mutated_type for f in self.pytd_sig.params)):
        subst = abstract_utils.with_empty_substitutions(subst, self.pytd_sig, node, self.ctx)
    for formal in self.pytd_sig.params:
        actual = arg_dict[formal.name]
        if formal.mutated_type is None:
            continue
        args = actual.data
        for arg in args:
            if isinstance(arg, _instance_base.SimpleValue):
                # Record one mutation per (type parameter, new type)
                # pair declared for this formal parameter.
                for names_actuals in self.mutated_type_parameters[formal]:
                    for tparam, type_actual in names_actuals:
                        log.info('Mutating %s to %s', tparam.name, pytd_utils.Print(type_actual))
                        type_actual_val = self.ctx.convert.pytd_cls_to_instance_var(type_actual, subst, node, discard_concrete_values=True)
                        mutations.append(function.Mutation(arg, tparam.full_name, type_actual_val))
    if self.name == '__new__':
        # __new__ also mutates the type parameters of the returned
        # instance based on the current substitutions.
        for ret in retvar.data:
            if ret.cls.full_name != 'builtins.type':
                for t in ret.cls.template:
                    if t.full_name in subst:
                        mutations.append(function.Mutation(ret, t.full_name, subst[t.full_name]))
    return mutations
Mutation for changing the type parameters of mutable arguments. This will adjust the type parameters as needed for pytd functions like: def append_float(x: list[int]): x = list[int or float] This is called after all the signature matching has succeeded, and we know we're actually calling this function. Args: node: The current CFG node. arg_dict: A map of strings to cfg.Variable instances. subst: Current type parameters. retvar: A variable of the return value. Returns: A list of Mutation instances. Raises: ValueError: If the pytd contains invalid information for mutated params.
github-repos
def is_legal_object(self, c: OntologyClass) -> bool: ranges = self.included_ranges() return not ranges or c in ranges or c.super_classes_closure() & ranges
is_legal_object(c) = true if - c in included_ranges(self) or - super_classes_closure(c) intersection included_ranges(self) is not empty Args: c: Returns:
juraj-google-style
def _create_variables(self, values, trainable):
    """Create layer variables from a JAX pytree of values.

    Tensor-like leaves become weights (via `self.add_weight`); plain Python
    scalars become rank-0 weights; any other leaf is passed through
    unchanged, as it is assumed to be non-variable configuration. The
    resulting structure is stored on `self.params` (trainable) or
    `self.state` (non-trainable), and a flat leaf list is returned.

    Args:
      values: pytree of values to traverse.
      trainable: whether to create trainable variables.

    Returns:
      Flat list of the created variables (and pass-through leaves).
    """
    def create_variable(value):
        if backend.is_tensor(value) or isinstance(value, (np.ndarray, np.generic)):
            dtype = value.dtype
            # Float dtypes are reset to None so the layer's default float
            # dtype policy applies instead of the value's own dtype.
            if is_float_dtype(dtype):
                dtype = None
            return self.add_weight(value.shape, initializer=value, dtype=dtype, trainable=trainable)
        elif isinstance(value, (bool, int, float)):
            dtype = standardize_dtype(type(value))
            if is_float_dtype(dtype):
                dtype = None
            # Scalars become rank-0 weights initialized from the value.
            return self.add_weight((), initializer=backend.convert_to_tensor(value), dtype=dtype, trainable=trainable)
        else:
            # Non-tensor, non-scalar leaves are configuration: keep as-is.
            return value
    variables = jax.tree_util.tree_map(create_variable, values)
    if trainable:
        self.params = variables
    else:
        self.state = variables
    # Flatten for tracking; the structured version stays on params/state.
    flat_variables, _ = jax.tree_util.tree_flatten(variables)
    return flat_variables
Create a structure of variables from a structure of JAX arrays. `values` is traversed via JAX's `tree_map`. When a leaf is a JAX array or a tensor-like object, a corresponding variable is created with it as the initial value. The resulting structure of variables is assigned to `self.params` or `self.state` depending on `trainable`. Then, a flattened version of the variables is returned for tracking. `self.params` or `self.state` are intentionally not tracked because structures like `TrackedList` interfere with `jax.tree_utils`. Note that leaf objects that are not JAX arrays and not tensor-like are left intact as they are assumed to be configuration used by the model. Args: values: the structure of values to traverse. trainable: whether to create trainable variables. Returns: flat list of variables initialized with `values` for tracking.
github-repos
def tracers(tracersfile):
    """Extract tracers data from a binary tracers file.

    Args:
      tracersfile (pathlib.Path): path of the binary tracers file.

    Returns:
      dict mapping tracer-info name to a list (one entry per block) of
      per-tracer value arrays, or None if the file does not exist.
    """
    if not tracersfile.is_file():
        return None
    tra = {}
    with tracersfile.open('rb') as fid:
        readbin = partial(_readbin, fid)
        magic = readbin()
        # Magic numbers above 8000 flag a 64-bit file layout; the offset is
        # stripped and subsequent reads switch to 64-bit words.
        if magic > 8000:
            magic -= 8000
            readbin()
            readbin = partial(readbin, file64=True)
        if magic < 100:
            raise ParsingError(tracersfile, 'magic > 100 expected to get tracervar info')
        # Last two digits of the magic encode the number of blocks.
        nblk = magic % 100
        # NOTE(review): the unnamed reads below skip header words whose
        # meaning is not visible here — confirm against the format spec.
        readbin('f', 2)
        readbin()
        readbin('f')
        ninfo = readbin()
        # Per-block tracer counts.
        ntra = readbin(nwords=nblk, unpack=False)
        readbin('f')
        curv = readbin()
        if curv:
            # Curvature flag set: one extra float to skip.
            readbin('f')
        infos = []
        for _ in range(ninfo):
            # Info names are stored as 16 raw bytes, space-padded.
            infos.append(b''.join(readbin('b', 16)).strip().decode())
            tra[infos[-1]] = []
        if magic > 200:
            # Newer format versions append trace elements to skip over.
            ntrace_elt = readbin()
            if ntrace_elt > 0:
                readbin('f', ntrace_elt)
        for ntrab in ntra:
            # Data is interleaved per tracer: ninfo values for each tracer,
            # so attribute idx is recovered by striding with step ninfo.
            data = readbin('f', ntrab * ninfo)
            for idx, info in enumerate(infos):
                tra[info].append(data[idx::ninfo])
    return tra
Extract tracers data. Args: tracersfile (:class:`pathlib.Path`): path of the binary tracers file. Returns: dict of list of numpy.array: Tracers data organized by attribute and block.
juraj-google-style
def _int_to_pos(self, flat_position): return flat_position % self.env.action_space.screen_shape[0],\ flat_position % self.env.action_space.screen_shape[1]
Returns x, y from flat_position integer. Args: flat_position: flattened position integer Returns: x, y
juraj-google-style
def _prepare_init_params_from_job_description(cls, job_details, model_channel_name=None):
    """Convert a training job description into constructor init params.

    Args:
      job_details: the returned job details from a DescribeTrainingJob call.
      model_channel_name (str): Name of the channel where pre-trained model
        data will be downloaded.

    Returns:
      dict: The transformed init params.

    Raises:
      ValueError: If the job's image does not match this framework.
    """
    init_params = super(Chainer, cls)._prepare_init_params_from_job_description(job_details, model_channel_name)
    # Chainer-specific launcher settings are stored as hyperparameters with a
    # 'sagemaker_' prefix; move them back to top-level init params.
    for argument in [Chainer._use_mpi, Chainer._num_processes, Chainer._process_slots_per_host, Chainer._additional_mpi_options]:
        value = init_params['hyperparameters'].pop(argument, None)
        if value:
            init_params[argument[len('sagemaker_'):]] = value
    image_name = init_params.pop('image')
    framework, py_version, tag, _ = framework_name_from_image(image_name)
    if not framework:
        # Not a recognized framework image: treat it as a custom image and
        # skip framework/version extraction entirely.
        init_params['image_name'] = image_name
        return init_params
    init_params['py_version'] = py_version
    init_params['framework_version'] = framework_version_from_tag(tag)
    training_job_name = init_params['base_job_name']
    if framework != cls.__framework_name__:
        raise ValueError("Training job: {} didn't use image for requested framework".format(training_job_name))
    return init_params
Convert the job description to init params that can be handled by the class constructor Args: job_details: the returned job details from a describe_training_job API call. model_channel_name (str): Name of the channel where pre-trained model data will be downloaded. Returns: dictionary: The transformed init_params
juraj-google-style
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read and decode the RevokeRequestPayload from a data stream.

    Args:
      istream (Stream): A data stream containing encoded object data,
        supporting a read method; usually a BytearrayStream object.
      kmip_version (KMIPVersion): KMIP version with which the object will
        be decoded. Optional, defaults to KMIP 1.0.
    """
    # Read the payload's own TTLV header first (tag + length).
    super(RevokeRequestPayload, self).read(
        istream, kmip_version=kmip_version
    )
    # Work on a sub-stream bounded by the decoded payload length so field
    # reads cannot run past the end of this payload.
    tstream = BytearrayStream(istream.read(self.length))
    # Unique identifier and revocation reason are required fields.
    self.unique_identifier = attributes.UniqueIdentifier()
    self.unique_identifier.read(tstream, kmip_version=kmip_version)
    self.revocation_reason = objects.RevocationReason()
    self.revocation_reason.read(tstream, kmip_version=kmip_version)
    # Compromise occurrence date is optional; only decode it if present.
    if self.is_tag_next(enums.Tags.COMPROMISE_OCCURRENCE_DATE, tstream):
        self.compromise_occurrence_date = primitives.DateTime(
            tag=enums.Tags.COMPROMISE_OCCURRENCE_DATE)
        self.compromise_occurrence_date.read(
            tstream, kmip_version=kmip_version
        )
    # Ensure no unexpected trailing bytes remain, then validate fields.
    self.is_oversized(tstream)
    self.validate()
Read the data encoding the RevokeRequestPayload object and decode it into its constituent parts. Args: istream (Stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 1.0.
juraj-google-style
def clean_for_serialization(self, data):
    """Clean a data dictionary in preparation for serialization.

    Removes entries whose key starts with '__' or whose value is a BSON
    ObjectId, converts datetime values to ISO-8601 strings (with a trailing
    'Z'), and recursively cleans nested dicts and lists.

    Args:
      data: Sample data to be serialized.

    Returns:
      The cleaned data dictionary (mutated in place and returned).
    """
    if isinstance(data, dict):
        # Iterate over a snapshot of the keys: entries are deleted inside
        # the loop, and mutating a dict while iterating its live key view
        # raises RuntimeError in Python 3.
        for k in list(data.keys()):
            if k.startswith('__'):
                del data[k]
            elif isinstance(data[k], bson.objectid.ObjectId):
                # ObjectIds are not JSON-serializable; drop them.
                del data[k]
            elif isinstance(data[k], datetime.datetime):
                # NOTE(review): appends 'Z' assuming the datetime is UTC —
                # confirm against the callers.
                data[k] = (data[k].isoformat() + 'Z')
            elif isinstance(data[k], dict):
                data[k] = self.clean_for_serialization(data[k])
            elif isinstance(data[k], list):
                data[k] = [self.clean_for_serialization(item) for item in data[k]]
    return data
Clean data in preparation for serialization. Deletes items whose key starts with __ or whose value is a BSON ObjectId, converts datetime values to ISO-8601 strings, and recursively cleans nested dicts and lists. Args: data: Sample data to be serialized. Returns: Cleaned data dictionary.
codesearchnet
def add_scales_bar(img, bbox):
    """Draw a 1 km scale bar with end whiskers and a label onto the map image.

    The bar length is derived from the tile resolution at the bounding box's
    minimum corner, so it is only exact at that latitude/zoom.

    Args:
      img (Image): PIL Image the scale bar is drawn onto (modified in place).
      bbox (TileBB): boundaries of the map.
    """
    tc = TileCoordinate(bbox.min.zoom, bbox.min.x, bbox.min.y)
    # Meters covered by one pixel at this zoom/latitude.
    meters_per_pixel = tc.resolution()
    # Pixel length of a 1000 m bar.
    one_km_bar = int((1000 * (1 / meters_per_pixel)))
    col_black = (0, 0, 0)
    # Bar is anchored 100 px from the left edge, 100 px above the bottom.
    line_start = (100, (img.size[1] - 100))
    line_end = ((line_start[0] + one_km_bar), line_start[1])
    # Vertical end whiskers, 15 px above/below the bar line.
    whiskers_left = [line_start[0], (line_start[1] - 15), line_start[0], (line_start[1] + 15)]
    whiskers_right = [line_end[0], (line_end[1] - 15), line_end[0], (line_end[1] + 15)]
    draw = ImageDraw.Draw(img)
    draw.line([line_start, line_end], fill=col_black, width=5)
    draw.line(whiskers_left, fill=col_black, width=2)
    draw.line(whiskers_right, fill=col_black, width=2)
    draw.text(((line_start[0] + 10), (line_start[1] + 10)), fill=col_black, text='1 km')
    # Release the drawing context explicitly.
    del draw
Add a scales bar to the map. Calculates the resolution at the current latitude and inserts the corresponding scales bar on the map. Args: img (Image): Image object to which the scales bar will be added. bbox (TileBB): boundaries of the map
codesearchnet
def remove(path, follow_symlink=False):
    """Delete a file, a symlink (optionally with its target), or a directory tree.

    Behavior:
      1. A symlink is unlinked; if follow_symlink is True, whatever it
         points at is recursively removed first.
      2. A regular file is removed with os.remove.
      3. Anything else (a directory) is removed with shutil.rmtree.

    Args:
      path (str): path to remove.
      follow_symlink (bool): follow symlinks and remove their targets too.
    """
    # Check links FIRST: os.path.isfile() follows symlinks, so in the
    # original ordering a link pointing at a regular file was handled by the
    # file branch and follow_symlink was silently ignored.
    if os.path.islink(path):
        if follow_symlink:
            # NOTE: os.readlink may return a path relative to the link's
            # directory; callers are expected to pass absolute links.
            remove(os.readlink(path))
        os.unlink(path)
    elif os.path.isfile(path):
        os.remove(path)
    else:
        shutil.rmtree(path)
Implements a remove function that will delete files, folder trees and symlink trees 1.) Remove a file 2.) Remove a symlink, recursively removing its target first if follow_symlink 3.) Remove directory with rmtree Args: path (str): path to remove follow_symlink (bool): follow symlinks and remove whatever they point to
juraj-google-style
def _make_query_from_terms(self, terms):
    """Create an FTS query string for datasets from decomposed search terms.

    Args:
      terms (dict or unicode or string): raw search terms.

    Returns:
      str: the assembled FTS query (doc terms AND-joined, then combined
      with the keyword terms).
    """
    # Split raw terms into 'doc' (full-text) and 'keywords' groups.
    expanded_terms = self._expand_terms(terms)
    cterms = ''
    if expanded_terms['doc']:
        cterms = self.backend._and_join(expanded_terms['doc'])
    if expanded_terms['keywords']:
        # Keywords are appended to any existing doc query; otherwise they
        # form the query on their own.
        if cterms:
            cterms = self.backend._and_join(
                cterms,
                self.backend._join_keywords(expanded_terms['keywords']))
        else:
            cterms = self.backend._join_keywords(expanded_terms['keywords'])
    logger.debug('Dataset terms conversion: `{}` terms converted to `{}` query.'.format(terms, cterms))
    return cterms
Creates a query for dataset from decomposed search terms. Args: terms (dict or unicode or string): Returns: tuple: First element is str with FTS query, second is parameters of the query.
juraj-google-style
def sca_xsect(scatterer, h_pol=True):
    """Scattering cross section for the current setup, with polarization.

    When a PSD integrator is attached, the precomputed angular-integrated
    value is returned; otherwise the cross section is integrated
    numerically over the full solid angle.

    Args:
      scatterer: a Scatterer instance.
      h_pol: If True (default), use horizontal polarization.
        If False, use vertical polarization.

    Returns:
      The scattering cross section.
    """
    if scatterer.psd_integrator is not None:
        return scatterer.psd_integrator.get_angular_integrated(
            scatterer.psd, scatterer.get_geometry(), "sca_xsect")
    # The integrand mutates the scatterer's geometry, so remember it here
    # and restore it afterwards even if integration fails.
    old_geom = scatterer.get_geometry()
    def d_xsect(thet, phi):
        # Point the scattering direction at (thet, phi), in degrees.
        (scatterer.phi, scatterer.thet) = (phi*rad_to_deg, thet*rad_to_deg)
        Z = scatterer.get_Z()
        I = sca_intensity(scatterer, h_pol)
        # sin(theta) is the solid-angle Jacobian.
        return I * np.sin(thet)
    try:
        # Integrate over phi in [0, 2*pi] and theta in [0, pi].
        xsect = dblquad(d_xsect, 0.0, 2*np.pi, lambda x: 0.0, lambda x: np.pi)[0]
    finally:
        scatterer.set_geometry(old_geom)
    return xsect
Scattering cross section for the current setup, with polarization. Args: scatterer: a Scatterer instance. h_pol: If True (default), use horizontal polarization. If False, use vertical polarization. Returns: The scattering cross section.
juraj-google-style
def default_metric_definitions(cls, toolkit):
    """Provide default metric definitions based on the given toolkit.

    Args:
      toolkit (sagemaker.rl.RLToolkit): RL toolkit to be used for training.

    Returns:
      list: metric definitions (name + extraction regex).

    Raises:
      ValueError: If the toolkit is not a supported RLToolkit member.
    """
    if toolkit is RLToolkit.COACH:
        return [
            {'Name': 'reward-training', 'Regex': '^Training>.*Total reward=(.*?),'},
            {'Name': 'reward-testing', 'Regex': '^Testing>.*Total reward=(.*?),'}
        ]
    elif toolkit is RLToolkit.RAY:
        # Raw string: '\.' in a plain literal is an invalid escape sequence
        # (DeprecationWarning on modern Pythons).
        float_regex = r"[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?"
        return [
            {'Name': 'episode_reward_mean', 'Regex': 'episode_reward_mean: (%s)' % float_regex},
            {'Name': 'episode_reward_max', 'Regex': 'episode_reward_max: (%s)' % float_regex}
        ]
    # Previously an unknown toolkit fell through and returned None silently;
    # fail loudly instead so misconfiguration is caught early.
    raise ValueError('Unsupported RL toolkit: {}'.format(toolkit))
Provides default metric definitions based on provided toolkit. Args: toolkit(sagemaker.rl.RLToolkit): RL Toolkit to be used for training. Returns: list: metric definitions
juraj-google-style
def starting_wall_time(self):
    """Return the starting timestamp of the instrumented program.

    Returns:
      Starting timestamp in seconds since the epoch, as a float.
    """
    return self._starting_wall_time
Get the starting timestamp of the instrumented TensorFlow program. When there are multiple hosts (i.e., multiple tfdbg file sets), the earliest timestamp among the file sets is returned. It is assumed to be the job that starts first (e.g., the coordinator). Returns: Starting timestamp in seconds since the epoch, as a float.
github-repos
def _load_from_hdx(self, object_type, id_field): (success, result) = self._read_from_hdx(object_type, id_field) if success: self.old_data = self.data self.data = result return True logger.debug(result) return False
Helper method to load the HDX object given by identifier from HDX Args: object_type (str): Description of HDX object type (for messages) id_field (str): HDX object identifier Returns: bool: True if loaded, False if not
codesearchnet
def get_latest_package_from_string(txt, paths=None, error=False):
    """Get the latest package found within the given request string.

    Args:
      txt (str): Request, eg 'foo-1.2+'.
      paths (list of str, optional): paths to search for package families,
        defaults to `config.packages_path`.
      error (bool): If True, raise an error if no package is found.

    Returns:
      `Package` object, or None if no package is found.
    """
    from rez.utils.formatting import PackageRequest
    request = PackageRequest(txt)
    return get_latest_package(
        name=request.name,
        range_=request.range_,
        paths=paths,
        error=error,
    )
Get the latest package found within the given request string. Args: txt (str): Request, eg 'foo-1.2+' paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. error (bool): If True, raise an error if no package is found. Returns: `Package` object, or None if no package is found.
juraj-google-style
def get_token(wallet: 'Wallet', token_str: str) -> 'NEP5Token.NEP5Token':
    """Try to get a NEP-5 token based on the symbol or script hash.

    Args:
      wallet: wallet instance.
      token_str: symbol or script hash (with or without the 0x prefix).

    Raises:
      ValueError: if the token is not found.

    Returns:
      NEP5Token instance if found.
    """
    # Normalize: strip an optional 0x prefix from script hashes.
    query = token_str[2:] if token_str.startswith('0x') else token_str
    found = next(
        (t for t in wallet.GetTokens().values()
         if query in (t.symbol, t.ScriptHash.ToString())),
        None,
    )
    if not isinstance(found, NEP5Token.NEP5Token):
        raise ValueError("The given token argument does not represent a known NEP5 token")
    return found
Try to get a NEP-5 token based on the symbol or script_hash Args: wallet: wallet instance token_str: symbol or script_hash (accepts script hash with or without 0x prefix) Raises: ValueError: if token is not found Returns: NEP5Token instance if found.
juraj-google-style
def _read_marcxml(xml):
    """Read MARC XML or OAI content and parse it into an lxml element tree.

    Args:
      xml (str): Filename or XML string. Don't use ``\\n`` in case of
        filename.

    Returns:
      obj: XML parsed with ``lxml.etree``.
    """
    # Accepts either a path or raw XML content; the helper decides which.
    marc_xml = _read_content_or_path(xml)
    # Normalize OAI wrapping and make sure the MARC namespace is present
    # before handing the document to the parser.
    marc_xml = _oai_to_xml(marc_xml)
    marc_xml = _add_namespace(marc_xml)
    # Python 2 StringIO: wrap the string so ET.parse gets a file object.
    file_obj = StringIO.StringIO(marc_xml)
    return ET.parse(file_obj)
Read MARC XML or OAI file, convert, add namespace and return XML in required format with all necessities. Args: xml (str): Filename or XML string. Don't use ``\\n`` in case of filename. Returns: obj: Required XML parsed with ``lxml.etree``.
codesearchnet
def multiplicative_jitter(x, epsilon=1e-2):
    """Multiply values by a random factor in [1 - epsilon, 1 + epsilon].

    Makes models more resilient to rounding errors introduced by bfloat16;
    this seems particularly important for logits.

    Args:
      x: a mtf.Tensor.
      epsilon: a floating point value.

    Returns:
      a mtf.Tensor with the same type and shape as x.
    """
    # Zero epsilon means no jitter at all: return the input untouched.
    if epsilon == 0:
        return x
    noise = mtf.random_uniform(
        x.mesh,
        x.shape,
        minval=1.0 - epsilon,
        maxval=1.0 + epsilon,
        dtype=x.dtype,
    )
    return x * noise
Multiply values by a random number between 1-epsilon and 1+epsilon. Makes models more resilient to rounding errors introduced by bfloat16. This seems particularly important for logits. Args: x: a mtf.Tensor epsilon: a floating point value Returns: a mtf.Tensor with the same type and shape as x.
juraj-google-style
def tinsel(to_patch, module_name, decorator=mock_decorator):
    """Decorator for simple in-place decorator mocking for tests.

    Args:
      to_patch: the string path of the function (decorator) to patch.
      module_name: complete string path of the module to reload.
      decorator (optional): replacement decorator. By default a
        pass-through will be used.

    Returns:
      A wrapped test function; during its execution the specified path is
      patched and the target module is reloaded so the patched decorator
      takes effect.
    """
    import functools

    def fn_decorator(function):
        # functools.wraps preserves the test function's name/docstring,
        # which test runners rely on for discovery and reporting.
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            with patch(to_patch, decorator):
                m = importlib.import_module(module_name)
                # Reload so module-level uses of the decorator pick up the
                # patched version. Bare `reload` is not a builtin in
                # Python 3; use importlib.reload explicitly.
                importlib.reload(m)
                function(*args, **kwargs)
            # Reload again after the patch is removed to restore the module
            # to its unpatched state for subsequent tests.
            importlib.reload(m)
        return wrapper
    return fn_decorator
Decorator for simple in-place decorator mocking for tests Args: to_patch: the string path of the function to patch module_name: complete string path of the module to reload decorator (optional): replacement decorator. By default a pass-through will be used. Returns: A wrapped test function, during the context of execution the specified path is patched.
juraj-google-style
def create_config_profile(msg_type):
    """Interactively create a profile for the given message type.

    Validates the message type, shows the user what is required, and — if
    the user acknowledges — prompts for a profile name, data and auth, then
    writes the profile.

    Args:
      msg_type (str): message type to create a config entry for.

    Raises:
      UnsupportedMessageTypeError: if msg_type is not a known type.
    """
    msg_type = msg_type.lower()
    # Membership test against the dict directly; `.keys()` is redundant.
    if msg_type not in CONFIG:
        raise UnsupportedMessageTypeError(msg_type)
    display_required_items(msg_type)
    if get_user_ack():
        profile_name = input("Profile Name: ")
        data = get_data_from_user(msg_type)
        auth = get_auth_from_user(msg_type)
        configure_profile(msg_type, profile_name, data, auth)
Create a profile for the given message type. Args: :msg_type: (str) message type to create config entry.
juraj-google-style
def OptimizeGraph(config_proto, metagraph, verbose=True, graph_id=b'graph_to_optimize', cluster=None, strip_default_attributes=False):
    """Optimize the provided metagraph with Grappler.

    Args:
      config_proto: a ConfigProto protobuf.
      metagraph: a MetaGraphDef protobuf.
      verbose: whether to log optimization results.
      graph_id: a bytes string identifying this graph.
      cluster: a grappler cluster object representing hardware resources
        available to run this graph; if None, a temporary cluster is created.
      strip_default_attributes: whether node attributes with default values
        should be removed after all optimization passes.

    Returns:
      The optimized GraphDef.

    Raises:
      TypeError: if config_proto is not a ConfigProto.
    """
    if not isinstance(config_proto, config_pb2.ConfigProto):
        raise TypeError(f'Argument `config_proto` should be a tf.ConfigProto, received type: {type(config_proto).__name__}')
    # The OSS build exposes a serialized-protobuf entry point, so the
    # metagraph must be serialized first and the result deserialized below.
    if is_oss:
        optimize_method = tf_opt.TF_OptimizeGraphSerialized
        metagraph = metagraph.SerializeToString()
    else:
        optimize_method = tf_opt.TF_OptimizeGraph
    if cluster is not None:
        out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)
    else:
        # No cluster supplied: create a temporary one under a lock (cluster
        # creation/shutdown is not safe to run concurrently) and always shut
        # it down afterwards.
        with _OPTIMIZE_GRAPH_CLUSTER_LOCK:
            cluster = gcluster.Cluster()
            try:
                out_graph = optimize_method(cluster.tf_cluster, config_proto.SerializeToString(), metagraph, verbose, graph_id, strip_default_attributes)
            finally:
                cluster.Shutdown()
    if is_oss:
        # Deserialize back into a GraphDef to match the non-OSS return type.
        out_graph = graph_pb2.GraphDef.FromString(out_graph)
    return out_graph
Optimize the provided metagraph. For best results, the signature_def field in `metagraph` should be populated with information about input (feed) and output (fetch) tensors. Args: config_proto: a ConfigProto protobuf. metagraph: a MetagraphDef protobuf. verbose: whether to log optimization results. graph_id: a string identifying this graph. cluster: a grappler cluster object representing hardware resources available to run this graph. strip_default_attributes: whether graph node attributes having default values should be removed after all the optimization passes. This option is useful if the resulting graph will be executed by an older process that might not know some of the recently added attributes.
github-repos