code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def __init__(self, value: 'ProcessorPartTypes', *, role: str='', substream_name: str='', mimetype: str | None=None, metadata: dict[str, Any] | None=None) -> None:
    """Constructs a ProcessorPart.

    Args:
        value: Content used to construct the part: a `genai_types.Part`,
            another `ProcessorPart` (whose role/substream/mimetype/metadata
            are inherited unless overridden here), a text string, raw bytes
            (requires `mimetype`) or a PIL image.
        role: Optional. Producer of the content (e.g. 'user' or 'model').
        substream_name: Optional. Name of the independent substream this
            part belongs to; prefer the default empty-name substream.
        mimetype: Mime type of the data. Required for bytes input.
        metadata: Optional. Auxiliary information about the part.

    Raises:
        ValueError: for unsupported value types, a missing mimetype on bytes
            input, or an image/mimetype mismatch.
    """
    super().__init__()
    match value:
        case genai_types.Part():
            self._part = value
        case ProcessorPart():
            # Copy the wrapped part; inherit attributes not explicitly given.
            self._part = value.part
            role = role or value.role
            substream_name = substream_name or value.substream_name
            mimetype = mimetype or value.mimetype
            metadata = metadata or value.metadata
        case str():
            self._part = genai_types.Part(text=value)
        case bytes():
            if not mimetype:
                raise ValueError('MIME type must be specified when constructing a ProcessorPart from bytes.')
            self._part = genai_types.Part.from_bytes(data=value, mime_type=mimetype)
        case PIL.Image.Image():
            if mimetype:
                # An explicit mimetype must be an image type consistent with
                # the PIL image's own format (when the image declares one).
                if not mimetype.startswith('image/'):
                    raise ValueError(f"Can't convert image of mimetype {mimetype}.")
                suffix = mimetype[len('image/'):]
                if value.format:
                    if suffix != value.format.lower():
                        raise ValueError(f'The image format {value.format} and does not match the mimetype {suffix}.')
            else:
                # No mimetype given: derive it from the image format,
                # defaulting to webp for unformatted (in-memory) images.
                suffix = value.format.lower() if value.format else 'webp'
                mimetype = f'image/{suffix}'
            bytes_io = io.BytesIO()
            value.save(bytes_io, suffix.upper())
            self._part = genai_types.Part.from_bytes(data=bytes_io.getvalue(), mime_type=mimetype)
        case _:
            raise ValueError(f"Can't construct ProcessorPart from {type(value)}.")
    self._role = role
    self._substream_name = substream_name
    self._metadata = metadata
    # Resolve the effective mimetype: explicit value, then the inline data's
    # declared type, then plain text, else empty.
    if mimetype:
        self._mimetype = mimetype
    elif self._part.inline_data and self._part.inline_data.mime_type:
        self._mimetype = self._part.inline_data.mime_type
    elif self._part.text:
        self._mimetype = 'text/plain'
    else:
        self._mimetype = ''
Constructs a ProcessorPart using a `Part` or `ProcessorPart`. Args: value: The content to use to construct the ProcessorPart. role: Optional. The producer of the content. In Genai models, must be either 'user' or 'model', but the user can set their own semantics. Useful to set for multi-turn conversations, otherwise can be empty. substream_name: (Optional) ProcessorPart stream can be split into multiple independent streams. They may have specific semantics, e.g. a song and its lyrics, or can be just alternative responses. Prefer using a default substream with an empty name. If the `ProcessorPart` is created using another `ProcessorPart`, this ProcessorPart inherits the existing substream_name, unless it is overridden in this argument. mimetype: Mime type of the data. metadata: (Optional) Auxiliary information about the part. If the `ProcessorPart` is created using another `ProcessorPart`, this ProcessorPart inherits the existing metadata, unless it is overridden in this argument.
github-repos
def set_unit_desired_state(self, unit, desired_state):
    """Update the desired state of a unit running in the cluster.

    Args:
        unit (str, Unit): The Unit, or name of the unit, to update.
        desired_state: State the user wishes the Unit to be in
            ("inactive", "loaded", or "launched").

    Returns:
        Unit: The unit that was updated.

    Raises:
        ValueError: An invalid value was provided for ``desired_state``.
    """
    if desired_state not in self._STATES:
        raise ValueError('state must be one of: {0}'.format(self._STATES))
    # Accept either a Unit object or a plain (string-convertible) name.
    unit_name = unit.name if isinstance(unit, Unit) else str(unit)
    self._single_request('Units.Set', unitName=unit_name, body={'desiredState': desired_state})
    return self.get_unit(unit_name)
Update the desired state of a unit running in the cluster Args: unit (str, Unit): The Unit, or name of the unit to update desired_state: State the user wishes the Unit to be in ("inactive", "loaded", or "launched") Returns: Unit: The unit that was updated Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400 ValueError: An invalid value was provided for ``desired_state``
codesearchnet
def get_variant_genotypes(self, variant):
    """Get the genotypes from a well formed variant instance.

    Args:
        variant (Variant): A Variant instance.

    Returns:
        A list of Genotypes instances containing a pointer to the variant
        as well as a vector of encoded genotypes.
    """
    chrom = variant.chrom.name
    # When the reader is restricted to one chromosome, the query uses "NA"
    # for that chromosome — presumably matching how the BGEN index stores
    # it; TODO confirm.
    if self.chrom is not None and chrom == self.chrom:
        chrom = "NA"
    results = []
    iterator = self._bgen.iter_variants_in_region(
        CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos,
    )
    for info, dosage in iterator:
        # Keep variants whose alleles match; a query with no alleles
        # matches everything at the position.
        if (variant.alleles is None or
                variant.iterable_alleles_eq([info.a1, info.a2])):
            results.append(Genotypes(
                Variant(
                    info.name,
                    CHROM_STR_ENCODE.get(info.chrom, info.chrom),
                    info.pos,
                    [info.a1, info.a2],
                ),
                dosage,
                reference=info.a1,
                coded=info.a2,
                multiallelic=True,
            ))
    if not results:
        logging.variant_name_not_found(variant)
    return results
Get the genotypes from a well formed variant instance. Args: variant (Variant): A Variant instance. Returns: A list of Genotypes instances containing a pointer to the variant as well as a vector of encoded genotypes.
juraj-google-style
def get_module_docstring(module_name, package, api_name):
    """Get docstring for the given module.

    Docstrings are resolved in the following order:
      1. An explicit docstring registered in doc_srcs.
      2. A docstring-source module named in doc_srcs.
      3. The ``__doc__`` of the already-imported module under ``package``.
      4. A default one-line docstring.

    Args:
        module_name: module name relative to tensorflow (excluding the
            'tensorflow.' prefix) to get a docstring for.
        package: Base python package containing the target modules.
        api_name: API to generate docs for. Currently, only `tensorflow`.

    Returns:
        One-line docstring to describe the module.
    """
    # Strip any compat.vN prefix so lookups use the canonical module name.
    for version in _API_VERSIONS:
        compat_prefix = _COMPAT_MODULE_TEMPLATE % version
        if module_name.startswith(compat_prefix):
            module_name = module_name[len(compat_prefix):].strip('.')
    docstring_module_name = module_name
    doc_sources = doc_srcs.get_doc_sources(api_name)
    if module_name in doc_sources:
        docsrc = doc_sources[module_name]
        if docsrc.docstring:
            return docsrc.docstring
        if docsrc.docstring_module_name:
            docstring_module_name = docsrc.docstring_module_name
    if package != 'tf_keras':
        docstring_module_name = package + '.' + docstring_module_name
    # Fall back to the docstring of the module itself, if it is importable
    # and already imported.
    if docstring_module_name in sys.modules and sys.modules[docstring_module_name].__doc__:
        return sys.modules[docstring_module_name].__doc__
    return 'Public API for tf.%s namespace.' % module_name
Get docstring for the given module. This method looks for docstring in the following order: 1. Checks if module has a docstring specified in doc_srcs. 2. Checks if module has a docstring source module specified in doc_srcs. If it does, gets docstring from that module. 3. Checks if module with module_name exists under base package. If it does, gets docstring from that module. 4. Returns a default docstring. Args: module_name: module name relative to tensorflow (excluding 'tensorflow.' prefix) to get a docstring for. package: Base python package containing python with target tf_export decorators. api_name: API you want to generate Currently, only `tensorflow`. Returns: One-line docstring to describe the module.
github-repos
def _prewarm_versatileimagefield(size_key, versatileimagefieldfile):
    """Pre-warm (render) a single sized image for `versatileimagefieldfile`.

    Args:
        size_key: A VersatileImageField size key, e.g. 'crop__800x450' or
            'thumbnail__800x800'.
        versatileimagefieldfile: A VersatileImageFieldFile instance.

    Returns:
        A 2-tuple:
        0: bool signifying whether the image was successfully pre-warmed.
        1: The URL of the successfully created image OR the storage path of
           the image that could not be created.
    """
    versatileimagefieldfile.create_on_demand = True
    try:
        url = get_url_from_image_key(versatileimagefieldfile, size_key)
    except Exception:
        # Best effort: any rendering failure is logged and reported via the
        # return value rather than propagated.
        success = False
        url_or_filepath = versatileimagefieldfile.name
        logger.exception('Thumbnail generation failed', extra={'path': url_or_filepath})
    else:
        success = True
        url_or_filepath = url
    return (success, url_or_filepath)
Returns a 2-tuple: 0: bool signifying whether the image was successfully pre-warmed 1: The url of the successfully created image OR the path on storage of the image that was not able to be successfully created. Arguments: `size_key`: A single VersatileImageField size key. Examples: * 'crop__800x450' * 'thumbnail__800x800' `versatileimagefieldfile`: A VersatileImageFieldFile instance
juraj-google-style
def StatResultFromStatEntry(stat_entry):
    """Returns an `os.stat_result` with most information from `StatEntry`.

    This is a lossy conversion: only the first 10 stat_result fields are
    populated, because the os.stat_result constructor is inflexible.

    Args:
        stat_entry: An instance of rdf_client_fs.StatEntry.

    Returns:
        An instance of `os.stat_result` with basic fields populated.
    """
    return os.stat_result([stat_entry.Get(attr) for attr in _STAT_ATTRS[:10]])
Returns a `os.stat_result` with most information from `StatEntry`. This is a lossy conversion, only the 10 first stat_result fields are populated, because the os.stat_result constructor is inflexible. Args: stat_entry: An instance of rdf_client_fs.StatEntry. Returns: An instance of `os.stat_result` with basic fields populated.
codesearchnet
def _Assert3DImage(image):
    """Assert that we are working with a properly shaped image.

    Performs the check statically if possible (i.e. if the shape is
    statically known). Otherwise adds a control dependency to an assert op
    that checks the dynamic shape.

    Args:
        image: 3-D Tensor of shape [height, width, channels].

    Raises:
        ValueError: if `image.shape` is not a 3-vector.

    Returns:
        If the shape of `image` could be verified statically, `image` is
        returned unchanged, otherwise there will be a control dependency
        added that asserts the correct dynamic shape.
    """
    return control_flow_ops.with_dependencies(_Check3DImage(image, require_static=False), image)
Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if `image.shape` is not a 3-vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape.
github-repos
def RelayDirectly(self, inventory):
    """Relay the inventory to the connected remote peers.

    Args:
        inventory (neo.Network.Inventory): the inventory to relay.

    Returns:
        bool: True if relayed to at least one peer. False otherwise.
    """
    relayed = False
    # Cache the inventory by hash so it can be served to requesting peers.
    self.RelayCache[inventory.Hash.ToBytes()] = inventory
    for peer in self.Peers:
        relayed |= peer.Relay(inventory)
    if len(self.Peers) == 0:
        # Test chains run without peers; report success there so callers
        # proceed as if the relay happened.
        if type(BC.Default()) is TestLevelDBBlockchain:
            return True
        logger.info("no connected peers")
    return relayed
Relay the inventory to the remote client. Args: inventory (neo.Network.Inventory): Returns: bool: True if relayed successfully. False otherwise.
juraj-google-style
def __init__(self, row_class=Row):
    """Initialises a new table.

    Args:
        row_class: A class to use as the row object. This should be a
            subclass of this module's Row() class.
    """
    self.separator = ', '
    self.row_class = row_class
    self.Reset()
Initialises a new table. Args: row_class: A class to use as the row object. This should be a subclass of this module's Row() class.
juraj-google-style
def get_soa_record(client, zone_id, zone_name):
    """Gets the SOA record for zone_name from zone_id.

    Args:
        client (botocore.client.Route53): The connection used to interact
            with Route53's API.
        zone_id (str): The AWS Route53 zone id of the hosted zone to query.
        zone_name (str): The name of the DNS hosted zone to query.

    Returns:
        SOARecord: An object representing the parsed SOA record returned
        from AWS Route53.
    """
    query = {
        'HostedZoneId': zone_id,
        'StartRecordName': zone_name,
        'StartRecordType': 'SOA',
        'MaxItems': '1',
    }
    response = client.list_resource_record_sets(**query)
    return SOARecord(response['ResourceRecordSets'][0])
Gets the SOA record for zone_name from zone_id. Args: client (:class:`botocore.client.Route53`): The connection used to interact with Route53's API. zone_id (string): The AWS Route53 zone id of the hosted zone to query. zone_name (string): The name of the DNS hosted zone to create. Returns: :class:`stacker.util.SOARecord`: An object representing the parsed SOA record returned from AWS Route53.
codesearchnet
def __init__(self, dir=None, options=None, upstream=None, prefix='', **kwargs):
    """Init a new FileSystem Cache.

    Args:
        dir: Absolute path of the cache directory (required).
        options: Unused here; accepted for interface compatibility.
        upstream: Optional upstream cache, passed to the base class.
        prefix: Optional path prefix applied within the cache.
        **kwargs: Forwarded to the base class.

    Raises:
        ConfigurationError: if `dir` is missing or not an absolute path.
    """
    from ambry.dbexceptions import ConfigurationError
    super(FsCache, self).__init__(upstream, **kwargs)
    self._cache_dir = dir
    # A missing `dir` previously crashed os.path.isabs(None) with a raw
    # TypeError; report it as a configuration problem instead.
    if self._cache_dir is None or not os.path.isabs(self._cache_dir):
        raise ConfigurationError(
            "Filesystem cache must have an absolute path. Got: '{}' ".format(
                self._cache_dir))
    self.prefix = prefix
Init a new FileSystem Cache Args: dir: Absolute path of the cache directory. upstream: Optional upstream cache. prefix: Optional path prefix applied within the cache.
juraj-google-style
def __call__(self, fn):
    """Implement __call__ function for decorator.

    Args:
        fn (function): The decorated function.

    Returns:
        function: A wrapper that runs `fn` and logs its wall-clock duration.
    """
    def benchmark(app, *args, **kwargs):
        # Time the wrapped call and log the elapsed delta at debug level.
        before = datetime.datetime.now()
        data = fn(app, *args, **kwargs)
        after = datetime.datetime.now()
        app.tcex.log.debug(
            'function: "{}", benchmark_time: "{}"'.format(
                self.__class__.__name__, after - before
            )
        )
        return data
    return benchmark
Implement __call__ function for decorator. Args: fn (function): The decorated function. Returns: function: The custom decorator function.
juraj-google-style
def upsample_filters(filters, rate):
    """Upsamples the filters by a factor of rate along the spatial dimensions.

    Args:
        filters: spatial_shape + [in_channels, out_channels]
            Original filters.
        rate: A list of len(spatial_shape) positive ints, specifying the
            upsampling rate.

    Returns:
        filters_up: output_spatial_shape + [in_channels, out_channels].
            Upsampled filters with
            output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1,
            containing (rate[i] - 1) zeros between consecutive filter values
            along spatial dimension i.
    """
    num_spatial_dims = len(rate)
    spatial_shape = np.array(filters.shape[:num_spatial_dims])
    upsampled_spatial_shape = (spatial_shape - 1) * rate + 1
    result = np.zeros(
        tuple(upsampled_spatial_shape) + tuple(filters.shape[-2:]),
        filters.dtype)
    # Scatter the original values at strided positions; the gaps stay zero.
    strided = tuple(slice(None, None, r) for r in rate)
    result[strided] = filters
    return result
Upsamples the filters by a factor of rate along the spatial dimensions. Args: filters: spatial_shape + [in_channels, out_channels] Original filters. rate: A list of len(spatial_shape) positive ints, specifying the upsampling rate. Returns: filters_up: output_spatial_shape + [in_channels, out_channels]. Upsampled filters with output_spatial_shape[i] = (spatial_shape[i] - 1) * rate[i] + 1 containing (rate[i] - 1) zeros between consecutive filter values along spatial dimension i.
github-repos
def __init__(self, optimizer, fraction=0.1, scope='subsampling-step', summary_labels=()):
    """Creates a new subsampling-step meta optimizer instance.

    Args:
        optimizer: The optimizer which is modified by this meta optimizer.
        fraction: The fraction of instances of the batch to subsample;
            must be a float > 0.0.
        scope: TensorFlow scope name.
        summary_labels: Optional summary labels.

    Raises:
        ValueError: if `fraction` is not a positive float.
    """
    # `assert` statements are stripped under `python -O`; validate the
    # argument explicitly so a bad fraction always fails fast.
    if not isinstance(fraction, float) or fraction <= 0.0:
        raise ValueError('fraction must be a float > 0.0, got {!r}'.format(fraction))
    self.fraction = fraction
    super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)
Creates a new subsampling-step meta optimizer instance. Args: optimizer: The optimizer which is modified by this meta optimizer. fraction: The fraction of instances of the batch to subsample.
juraj-google-style
def _get_scope(node_name):
    """Extract the scope name from a node name.

    The scope name is everything before the final slash, not including any
    ``^`` prefix denoting a control dependency.

    Args:
        node_name: the full name of an Op or a Tensor in the graph.

    Returns:
        The deepest named scope containing the node, or '' if the node is
        not inside a scope.

    Raises:
        ValueError: if node_name is None or empty.
    """
    if not node_name:
        raise ValueError(f'Node name cannot be empty or None. Received: {node_name}.')
    # Drop the control-dependency marker before splitting off the scope.
    name = node_name[1:] if node_name.startswith('^') else node_name
    scope, sep, _ = name.rpartition('/')
    return scope if sep else ''
Extract the scope name from a node name. The scope name is everything before the final slash, not including any ^ prefix denoting a control dependency. Args: node_name: the full name of an Op or a Tensor in the graph. Returns: The deepest named scope containing the node. Raises: ValueError: if tensor_name is None or empty
github-repos
def __request_message_descriptor(self, request_kind, message_type, method_id, path):
    """Describes the parameters and body of the request.

    Args:
        request_kind: The type of request being made.
        message_type: messages.Message or ResourceContainer class. The
            message to describe.
        method_id: string, unique method identifier
            (e.g. 'myapi.items.method').
        path: string, HTTP path to method.

    Returns:
        Dictionary describing the request parameters.

    Raises:
        api_exceptions.ApiConfigurationError: if a body message is declared
            for an HTTP method type that cannot accept a body.
    """
    if isinstance(message_type, resource_container.ResourceContainer):
        base_message_type = message_type.body_message_class()
        if (request_kind == self.__NO_BODY and
                base_message_type != message_types.VoidMessage()):
            msg = ('Method %s specifies a body message in its ResourceContainer, but '
                   'is a HTTP method type that cannot accept a body.') % method_id
            raise api_exceptions.ApiConfigurationError(msg)
    else:
        base_message_type = message_type
    # Only register a request schema when the method actually has a body.
    if (request_kind != self.__NO_BODY and
            base_message_type != message_types.VoidMessage()):
        self.__request_schema[method_id] = self.__parser.add_message(
            base_message_type.__class__)
    params = self.__params_descriptor(message_type, request_kind, path, method_id)
    return params
Describes the parameters and body of the request. Args: request_kind: The type of request being made. message_type: messages.Message or ResourceContainer class. The message to describe. method_id: string, Unique method identifier (e.g. 'myapi.items.method') path: string, HTTP path to method. Returns: Dictionary describing the request. Raises: ValueError: if the method path and request required fields do not match
juraj-google-style
def write(self, vendor_id=None, log_type=None, json=None, **kwargs):
    """Write log records to the Logging Service.

    The request body is a JSON array, each element of which represents a
    single log record.

    Args:
        vendor_id (str): Vendor ID.
        log_type (str): Log type.
        json (list): Payload/request body.
        **kwargs: Additional parameters forwarded to the HTTP client's
            ``request`` method.

    Returns:
        requests.Response: Requests Response() object.
    """
    endpoint = '/logging-service/v1/logs/{}/{}'.format(vendor_id, log_type)
    return self._httpclient.request(
        method='POST',
        url=self.url,
        json=json,
        path=endpoint,
        **kwargs
    )
Write log records to the Logging Service. This API requires a JSON array in its request body, each element of which represents a single log record. Log records are provided as JSON objects. Every log record must include the primary timestamp field that you identified when you registered your app. Every log record must also identify the log type. Args: vendor_id (str): Vendor ID. log_type (str): Log type. json (list): Payload/request body. **kwargs: Supported :meth:`~pancloud.httpclient.HTTPClient.request` parameters. Returns: requests.Response: Requests Response() object. Examples: Refer to ``logging_write.py`` example.
codesearchnet
def _set_subject(self, subject):
    """Sets the subject value for the class instance.

    Args:
        subject (dict, Uri, str): the subject for the class instance. A dict
            is expected to carry 's', 'p' and 'o' keys; its object(s) are
            added as properties.
    """
    def test_uri(value):
        # Coerce `value` into a Uri or BlankNode. Values that are already
        # nodes pass through; "_:"-prefixed strings become blank nodes; any
        # non-string (no .startswith) falls back to an anonymous blank node.
        if not isinstance(value, (Uri, BlankNode)):
            try:
                if value.startswith("_:"):
                    return BlankNode(value)
                else:
                    return Uri(value)
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed here.
            except Exception:
                return BlankNode()
        else:
            return value

    if isinstance(subject, dict):
        self.subject = test_uri(subject['s'])
        if isinstance(subject['o'], list):
            for item in subject['o']:
                self.add_property(subject['p'], item)
        else:
            self.add_property(subject['p'], subject['o'])
    else:
        self.subject = test_uri(subject)
sets the subject value for the class instance Args: subject(dict, Uri, str): the subject for the class instance
juraj-google-style
def join_tokens_to_sentences(tokens):
    """Correctly joins tokens to multiple sentences.

    Instead of always placing white-space between the tokens, it
    distinguishes the next symbol and does *not* insert whitespace if it is
    a sentence stop symbol (e.g. '.' or '?').

    Args:
        tokens: array of string tokens.

    Returns:
        Joint sentences as one string; '' for an empty token list.
    """
    # Previously this crashed with an IndexError on an empty token list.
    if not tokens:
        return ""
    # Collect pieces and join once, avoiding quadratic string concatenation.
    parts = []
    for entry, next_entry in zip(tokens, tokens[1:]):
        parts.append(entry)
        if next_entry not in SENTENCE_STOPS:
            parts.append(" ")
    parts.append(tokens[-1])
    return "".join(parts)
Correctly joins tokens to multiple sentences Instead of always placing white-space between the tokens, it will distinguish between the next symbol and *not* insert whitespace if it is a sentence symbol (e.g. '.', or '?') Args: tokens: array of string tokens Returns: Joint sentences as one string
juraj-google-style
def path_to_zip(path):
    """Compress `path` to a ZIP archive.

    Args:
        path (str): Path to the directory.

    Returns:
        str: Path to the zipped file (a named temporary file that is not
        deleted on close).

    Raises:
        IOError: if `path` does not exist.
    """
    if not os.path.exists(path):
        raise IOError("%s doesn't exists!" % path)
    # Reserve a temporary filename; the file survives the context exit.
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        archive_path = tmp.name
    with zipfile.ZipFile(archive_path, mode='w') as archive:
        for dirpath, _subdirs, filenames in os.walk(path):
            for filename in filenames:
                archive.write(os.path.join(dirpath, filename))
    return archive_path
Compress `path` to the ZIP. Args: path (str): Path to the directory. Returns: str: Path to the zipped file (in /tmp).
codesearchnet
def read(self, auth, resource, options, defer=False):
    """Read value(s) from a dataport.

    Builds a request to read the dataport specified by an alias or rid and
    returns timeseries data as defined by the options.

    Args:
        auth: Takes the device cik.
        resource: Takes the dataport alias or rid.
        options: Takes a list of options for what to return.
        defer: Whether to defer the request (passed through to `_call`).
    """
    arguments = [resource, options]
    return self._call('read', auth, arguments, defer)
Read value(s) from a dataport. Calls a function that builds a request to read the dataport specified by an alias or rid and returns timeseries data as defined by the options. Args: auth: Takes the device cik resource: Takes the dataport alias or rid. options: Takes a list of options for what to return.
juraj-google-style
def log_run_info(self, model_name):
    """Collect most of the TF runtime information for the local env.

    The schema of the run info follows official/benchmark/datastore/schema.

    Args:
        model_name: string, the name of the model.
    """
    run_info = {
        'model_name': model_name,
        'machine_config': {},
        'run_date': datetime.datetime.now().strftime(_DATE_TIME_FORMAT_PATTERN)}
    # Each collector fills its section of run_info in place.
    _collect_tensorflow_info(run_info)
    _collect_tensorflow_environment_variables(run_info)
    _collect_cpu_info(run_info)
    _collect_gpu_info(run_info)
    _collect_memory_info(run_info)
    with tf.gfile.GFile(os.path.join(
            self._logging_dir, BENCHMARK_RUN_LOG_FILE_NAME), 'w') as f:
        try:
            json.dump(run_info, f)
            f.write('\n')
        except (TypeError, ValueError) as e:
            # Logging must never break the run; warn and continue.
            tf.logging.warning('Failed to dump benchmark run info to log file: %s', e)
Collect most of the TF runtime information for the local env. The schema of the run info follows official/benchmark/datastore/schema. Args: model_name: string, the name of the model.
codesearchnet
def _populate_calibration_options(quantization_options: quant_opts_pb2.QuantizationOptions):
    """Populates default values for CalibrationOptions.

    Args:
        quantization_options: An instance of QuantizationOptions with a
            field specifying CalibrationOptions; mutated in place.

    Raises:
        ValueError: if an HISTOGRAM_MSE calibration method is combined with
            an activation tensor type other than TENSORTYPE_INT_8.
    """
    calib_opts = quantization_options.calibration_options
    if calib_opts.calibration_method == _CalibrationMethod.CALIBRATION_METHOD_UNSPECIFIED:
        # Default to simple min/max calibration.
        calib_opts.calibration_method = _CalibrationMethod.CALIBRATION_METHOD_MIN_MAX
    elif calib_opts.calibration_method == _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_PERCENTILE:
        # Fill in unset histogram-percentile parameters with defaults.
        if not calib_opts.calibration_parameters.num_bins:
            calib_opts.calibration_parameters.num_bins = 512
        if not calib_opts.calibration_parameters.min_percentile:
            calib_opts.calibration_parameters.min_percentile = 0.001
        if not calib_opts.calibration_parameters.max_percentile:
            calib_opts.calibration_parameters.max_percentile = 99.999
    elif calib_opts.calibration_method in [
            _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_BRUTEFORCE,
            _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_MAX_FREQUENCY,
            _CalibrationMethod.CALIBRATION_METHOD_HISTOGRAM_MSE_SYMMETRIC]:
        activation_tensor_type = quantization_options.quantization_method.quantization_component_specs[_QuantizationComponent.COMPONENT_ACTIVATION].tensor_type
        # MSE-based histogram calibration only supports int8 activations.
        if activation_tensor_type != _TensorType.TENSORTYPE_INT_8:
            raise ValueError(f'Only TENSORTYPE_INT_8 is supported for HISTOGRAM_MSE calibration methods. calibration_method={calib_opts.calibration_method}')
        if not calib_opts.calibration_parameters.num_bins:
            calib_opts.calibration_parameters.num_bins = 512
    if calib_opts.calibration_data_dir:
        # Prepare (or wipe, when regeneration is forced) the directory that
        # receives calibration data dumps.
        save_model.create_empty_output_dir(calib_opts.calibration_data_dir, overwrite=calib_opts.force_regenerate_calibration_data)
Populates default values for CalibrationOptions. Args: quantization_options: An instance of QuantizationOptions with a field specifying CalibrationOptions
github-repos
def __init__(self, structure, transformations=None, history=None, other_parameters=None):
    """Initializes a transformed structure from a structure.

    Args:
        structure (Structure): Input structure.
        transformations ([Transformations]): List of transformations to
            apply.
        history (list): Previous history.
        other_parameters (dict): Additional parameters to be added.
    """
    self.final_structure = structure
    self.history = history or []
    self.other_parameters = other_parameters or {}
    self._undone = []
    # Apply each requested transformation in order.
    for transformation in (transformations or []):
        self.append_transformation(transformation)
Initializes a transformed structure from a structure. Args: structure (Structure): Input structure transformations ([Transformations]): List of transformations to apply. history (list): Previous history. other_parameters (dict): Additional parameters to be added.
juraj-google-style
def set_parent(self, node):
    """Attach node to its parent.

    Args:
        node: Parent node, or ``None`` to detach the node from its previous
            parent (resetting its depth to 0).
    """
    self._parent = node
    self._depth = 0 if node is None else node.get_depth() + 1
Attach node to its parent. Args: node: Parent node. Note: ``node`` can be ``None``. In that case, the node is detached from its previous parent.
codesearchnet
def create_source_map(nodes, code, filepath):
    """Creates a source map between an annotated AST and the code it compiles to.

    Note: this function assumes nodes, code and filepath correspond to the
    same code.

    Args:
        nodes: Iterable[ast.AST, ...], one or more AST modes.
        code: Text, the source code in which nodes are found.
        filepath: Text

    Returns:
        Dict[LineLocation, OriginInfo], mapping locations in code to
        locations indicated by origin annotations in node.

    Raises:
        ValueError: if the AST reparsed from `code` is inconsistent with
            `nodes`; the message includes a diff of the two trees.
    """
    reparsed_nodes = parser.parse(code, preamble_len=0, single_node=False)
    for node in reparsed_nodes:
        resolve(node, code, filepath, node.lineno, node.col_offset)
    source_map = {}
    try:
        for before, after in ast_util.parallel_walk(nodes, reparsed_nodes):
            # Need both the original and the reparsed location to map a line.
            origin_info = anno.getanno(before, anno.Basic.ORIGIN, default=None)
            final_info = anno.getanno(after, anno.Basic.ORIGIN, default=None)
            if origin_info is None or final_info is None:
                continue
            line_loc = LineLocation(final_info.loc.filename, final_info.loc.lineno)
            existing_origin = source_map.get(line_loc)
            if existing_origin is not None:
                # Multiple nodes can map to the same output line; keep the
                # existing entry when it appears to be from an earlier line
                # or outer column — TODO confirm the intended precedence.
                if existing_origin.loc.line_loc == origin_info.loc.line_loc:
                    if existing_origin.loc.lineno >= origin_info.loc.lineno:
                        continue
                    if existing_origin.loc.col_offset <= origin_info.loc.col_offset:
                        continue
            source_map[line_loc] = origin_info
    except ValueError as err:
        # Build a detailed diff of the two ASTs to aid debugging.
        new_msg = 'Inconsistent ASTs detected. This is a bug. Cause: \n'
        new_msg += str(err)
        new_msg += 'Diff:\n'
        for n, rn in zip(nodes, reparsed_nodes):
            nodes_str = pretty_printer.fmt(n, color=False, noanno=True)
            reparsed_nodes_str = pretty_printer.fmt(rn, color=False, noanno=True)
            diff = difflib.context_diff(
                nodes_str.split('\n'), reparsed_nodes_str.split('\n'),
                fromfile='Original nodes', tofile='Reparsed nodes', n=7)
            diff = '\n'.join(diff)
            new_msg += diff + '\n'
        raise ValueError(new_msg)
    return source_map
Creates a source map between an annotated AST and the code it compiles to. Note: this function assumes nodes nodes, code and filepath correspond to the same code. Args: nodes: Iterable[ast.AST, ...], one or more AST modes. code: Text, the source code in which nodes are found. filepath: Text Returns: Dict[LineLocation, OriginInfo], mapping locations in code to locations indicated by origin annotations in node.
github-repos
def swd_read16(self, offset):
    """Gets a unit of ``16`` bits from the input buffer.

    Args:
        self (JLink): the ``JLink`` instance.
        offset (int): the offset (in bits) from which to start reading.

    Returns:
        The integer read from the input buffer, truncated to 16 bits.
    """
    raw = self._dll.JLINK_SWD_GetU16(offset)
    # Coerce through a C uint16 so the result wraps into [0, 65535].
    return ctypes.c_uint16(raw).value
Gets a unit of ``16`` bits from the input buffer. Args: self (JLink): the ``JLink`` instance offset (int): the offset (in bits) from which to start reading Returns: The integer read from the input buffer.
codesearchnet
def __init__(self, _args):
    """Initialize Class properties.

    Args:
        _args (namespace): The argparser args Namespace.
    """
    self.args = _args
    # Lazily-populated caches (DB/redis connections and App config files).
    self._db_conn = None
    self._install_json = None
    self._install_json_params = None
    self._install_json_output_variables = None
    self._layout_json = None
    self._layout_json_names = None
    self._layout_json_params = None
    self._layout_json_outputs = None
    self._redis = None
    self._tcex_json = None
    self.app_path = os.getcwd()
    self.exit_code = 0
    self.input_table = 'inputs'
    self.output = []
    # Initialize terminal coloring (presumably colorama — TODO confirm).
    c.init(autoreset=True, strip=False)
Initialize Class properties. Args: _args (namespace): The argparser args Namespace.
juraj-google-style
def __init__(self, states, internals, actions, include_next_states, capacity, scope='queue', summary_labels=None):
    """Queue memory.

    Args:
        states: States specification, passed to the base class.
        internals: Internal states specification, passed to the base class.
        actions: Actions specification, passed to the base class.
        include_next_states: Whether retrieved experiences include next
            states, passed to the base class.
        capacity: Memory capacity.
        scope: Scope name.
        summary_labels: Optional summary labels.
    """
    self.capacity = capacity
    self.scope = scope
    # Storage slots, populated elsewhere — presumably as TF variables
    # (TODO confirm in the base-class setup).
    self.states_memory = dict()
    self.internals_memory = dict()
    self.actions_memory = dict()
    self.terminal_memory = None
    self.reward_memory = None
    self.memory_index = None
    self.episode_indices = None
    self.episode_count = None
    self.retrieve_indices = None
    super(Queue, self).__init__(
        states=states,
        internals=internals,
        actions=actions,
        include_next_states=include_next_states,
        scope=scope,
        summary_labels=summary_labels
    )
Queue memory. Args: capacity: Memory capacity.
juraj-google-style
def write_genotypes(self, genotypes):
    """Write genotypes to binary file.

    Args:
        genotypes (numpy.ndarray): The genotypes to write in the BED file.

    Raises:
        UnsupportedOperation: if the file was opened in read mode.
        ValueError: if the number of samples differs from previous writes.
    """
    if (self._mode != 'w'):
        raise UnsupportedOperation("not available in 'r' mode")
    # The first write fixes the expected number of samples per marker.
    if (self._nb_values is None):
        self._nb_values = len(genotypes)
    if (self._nb_values != len(genotypes)):
        raise ValueError('{:,d} samples expected, got {:,d}'.format(
            self._nb_values, len(genotypes)))
    # Pack four 2-bit recoded genotypes into each byte (low bits first) —
    # presumably the PLINK BED encoding; the exact code mapping lives in
    # _byte_recode (TODO confirm).
    byte_array = [(((g[0] | (g[1] << 2)) | (g[2] << 4)) | (g[3] << 6))
                  for g in self._grouper((_byte_recode[geno] for geno in genotypes), 4)]
    self._bed.write(bytearray(byte_array))
Write genotypes to binary file. Args: genotypes (numpy.ndarray): The genotypes to write in the BED file.
codesearchnet
def node_exists(self, node_name, device_name=None):
    """Test if a node exists in the partition graphs.

    Args:
        node_name: (`str`) name of the node to be checked.
        device_name: optional device name. If None, will search for the
            node on all available devices. Otherwise, search for the node
            only on the given device.

    Returns:
        A boolean indicating whether the node exists.

    Raises:
        LookupError: If no partition graphs have been loaded yet.
        ValueError: If device_name is specified but cannot be found.
    """
    if not self._debug_graphs:
        raise LookupError('Nodes have not been loaded from partition graphs yet.')
    if device_name is not None and device_name not in self._debug_graphs:
        raise ValueError("The specified device_name '%s' cannot be found." % device_name)
    return any(node_name in graph.node_inputs
               for graph in self._debug_graphs.values())
Test if a node exists in the partition graphs. Args: node_name: (`str`) name of the node to be checked. device_name: optional device name. If None, will search for the node on all available devices. Otherwise, search for the node only on the given device. Returns: A boolean indicating whether the node exists. Raises: LookupError: If no partition graphs have been loaded yet. ValueError: If device_name is specified but cannot be found.
github-repos
def body(self, body: str):
    """Set body of the message.

    Args:
        body (str): The body of the message, or None to clear it.

    Raises:
        TypeError: if `body` is neither a string nor None.
    """
    if not (body is None or isinstance(body, str)):
        raise TypeError("'body' MUST be a string")
    self._body = body
Set body of the message Args: body (str): The body of the message
juraj-google-style
def _parse_target(target):
    """Parse a binary targeting information structure.

    This function only supports extracting the slot number or controller
    from the target and will raise an ArgumentError if more complicated
    targeting is desired.

    Args:
        target (bytes): The binary targeting data blob (8 bytes).

    Returns:
        dict: The parsed targeting data with 'controller' and 'slot' keys.
    """
    if len(target) != 8:
        raise ArgumentError("Invalid targeting data length", expected=8, length=len(target))
    # Layout: slot byte, 6 padding bytes, match-op byte.
    slot, match_op = struct.unpack("<B6xB", target)
    if match_op == _MATCH_CONTROLLER:
        return {'controller': True, 'slot': 0}
    if match_op == _MATCH_SLOT:
        return {'controller': False, 'slot': slot}
    raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data
juraj-google-style
def _FetchMostRecentGraphSeriesFromTheLegacyDB(label, report_type, token=None):
    """Fetches the latest graph-series for a client label from the legacy DB.

    Args:
        label: Client label to fetch data for.
        report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data
            for.
        token: ACL token to use for reading from the DB.

    Raises:
        AFF4AttributeTypeError: If an unexpected report-data type is
            encountered.

    Returns:
        The graph series for the given label and report type that was last
        written to the DB, or None if no series for that label and
        report-type exist.
    """
    try:
        stats_for_label = aff4.FACTORY.Open(
            GetAFF4ClientReportsURN().Add(label),
            aff4_type=aff4_stats.ClientFleetStats, mode='r', token=token)
    except aff4.InstantiationError:
        # Stats have never been written for this label.
        return None
    aff4_attr = _GetAFF4AttributeForReportType(report_type)
    graph_series = rdf_stats.ClientGraphSeries(report_type=report_type)
    # The AFF4 attribute may hold either a series of graphs or one graph.
    if (aff4_attr.attribute_type == rdf_stats.GraphSeries):
        graphs = stats_for_label.Get(aff4_attr)
        if (graphs is None):
            return None
        for graph in graphs:
            graph_series.graphs.Append(graph)
    elif (aff4_attr.attribute_type == rdf_stats.Graph):
        graph = stats_for_label.Get(aff4_attr)
        if (graph is None):
            return None
        graph_series.graphs.Append(graph)
    else:
        raise AFF4AttributeTypeError(aff4_attr.attribute_type)
    return graph_series
Fetches the latest graph-series for a client label from the legacy DB. Args: label: Client label to fetch data for. report_type: rdf_stats.ClientGraphSeries.ReportType to fetch data for. token: ACL token to use for reading from the DB. Raises: AFF4AttributeTypeError: If an unexpected report-data type is encountered. Returns: The graph series for the given label and report type that was last written to the DB, or None if no series for that label and report-type exist.
codesearchnet
def recode(self, table: pd.DataFrame, validate=False) -> pd.DataFrame:
    """Pass the appropriate columns through each recoder function and return the result.

    Args:
        table (pd.DataFrame): A dataframe on which to apply recoding logic.
        validate (bool): If ``True``, the recoded table must pass
            validation tests (semantics defined by subclasses).

    Raises:
        NotImplementedError: always; subclasses must override this method.
    """
    raise NotImplementedError("This method must be defined for each subclass.")
Pass the appropriate columns through each recoder function sequentially and return the final result. Args: table (pd.DataFrame): A dataframe on which to apply recoding logic. validate (bool): If ``True``, recoded table must pass validation tests.
juraj-google-style
def cause_repertoire(self, mechanism, purview):
    """Return the cause repertoire of a mechanism over a purview.

    Args:
        mechanism (tuple[int]): The mechanism for which to calculate the
            cause repertoire.
        purview (tuple[int]): The purview over which to calculate the
            cause repertoire.

    Returns:
        np.ndarray: The cause repertoire of the mechanism over the purview.

    .. note::
        The returned repertoire is a distribution over purview node states,
        not the states of the whole network.
    """
    # An empty purview has a trivial (unit) distribution.
    if (not purview):
        return np.array([1.0])
    # An empty mechanism constrains nothing: maximum entropy over purview.
    if (not mechanism):
        return max_entropy_distribution(purview, self.tpm_size)
    purview = frozenset(purview)
    joint = np.ones(repertoire_shape(purview, self.tpm_size))
    # The joint repertoire is the (normalized) product of the single-node
    # cause repertoires of the mechanism nodes.
    joint *= functools.reduce(np.multiply, [self._single_node_cause_repertoire(m, purview) for m in mechanism])
    return distribution.normalize(joint)
Return the cause repertoire of a mechanism over a purview. Args: mechanism (tuple[int]): The mechanism for which to calculate the cause repertoire. purview (tuple[int]): The purview over which to calculate the cause repertoire. Returns: np.ndarray: The cause repertoire of the mechanism over the purview. .. note:: The returned repertoire is a distribution over purview node states, not the states of the whole network.
codesearchnet
def ResourcePath(package_name, filepath):
    """Computes a path to the specified package resource.

    Args:
        package_name: A name of the package where the resource is located.
        filepath: A path to the resource relative to the package location.

    Returns:
        A path to the resource or `None` if the resource cannot be found.
    """
    # When not running from a frozen binary, try package resources first.
    if not getattr(sys, "frozen", None):
        target = _GetPkgResources(package_name, filepath)
        if target and os.access(target, os.R_OK):
            return target
    # Fall back to a path relative to the interpreter prefix.
    target = os.path.join(sys.prefix, filepath)
    if target and os.access(target, os.R_OK):
        return target
    return None
Computes a path to the specified package resource. Args: package_name: A name of the package where the resource is located. filepath: A path to the resource relative to the package location. Returns: A path to the resource or `None` if the resource cannot be found.
juraj-google-style
def AddFilesWithUnknownHashes(
    client_path_blob_refs,
    use_external_stores = True
):
    """Adds new files consisting of given blob references.

    Args:
        client_path_blob_refs: A dictionary mapping `db.ClientPath`
            instances to lists of blob references.
        use_external_stores: A flag indicating if the files should also be
            added to external file stores.

    Returns:
        A dictionary mapping `db.ClientPath` to hash ids of the file.

    Raises:
        BlobNotFoundError: If one of the referenced blobs cannot be found.
        ValueError: If a blob reference's size or offset disagrees with the
            actual blob contents.
    """
    hash_id_blob_refs = dict()
    client_path_hash_id = dict()
    metadatas = dict()
    all_client_path_blob_refs = list()
    for client_path, blob_refs in iteritems(client_path_blob_refs):
        if len(blob_refs) <= 1:
            # Zero or one blob: the file hash is derivable directly from
            # the blob id (or the empty-data hash) without reading blobs.
            if blob_refs:
                hash_id = rdf_objects.SHA256HashID.FromBytes(
                    blob_refs[0].blob_id.AsBytes())
            else:
                hash_id = rdf_objects.SHA256HashID.FromData(b"")
            client_path_hash_id[client_path] = hash_id
            hash_id_blob_refs[hash_id] = blob_refs
            metadatas[hash_id] = FileMetadata(
                client_path=client_path, blob_refs=blob_refs)
        else:
            # Multi-blob files must be read back to compute the file hash.
            for blob_ref in blob_refs:
                all_client_path_blob_refs.append((client_path, blob_ref))
    client_path_offset = collections.defaultdict(lambda: 0)
    client_path_sha256 = collections.defaultdict(hashlib.sha256)
    verified_client_path_blob_refs = collections.defaultdict(list)
    # Read the referenced blobs in batches to bound memory usage.
    client_path_blob_ref_batches = collection.Batch(
        items=all_client_path_blob_refs, size=_BLOBS_READ_BATCH_SIZE)
    for client_path_blob_ref_batch in client_path_blob_ref_batches:
        blob_id_batch = set(
            blob_ref.blob_id for _, blob_ref in client_path_blob_ref_batch)
        blobs = data_store.BLOBS.ReadBlobs(blob_id_batch)
        for client_path, blob_ref in client_path_blob_ref_batch:
            blob = blobs[blob_ref.blob_id]
            if blob is None:
                message = "Could not find one of referenced blobs: {}".format(
                    blob_ref.blob_id)
                raise BlobNotFoundError(message)
            offset = client_path_offset[client_path]
            # Sanity-check the reference metadata against the actual blob.
            if blob_ref.size != len(blob):
                raise ValueError(
                    "Got conflicting size information for blob %s: %d vs %d." %
                    (blob_ref.blob_id, blob_ref.size, len(blob)))
            if blob_ref.offset != offset:
                raise ValueError(
                    "Got conflicting offset information for blob %s: %d vs %d."
                    % (blob_ref.blob_id, blob_ref.offset, offset))
            verified_client_path_blob_refs[client_path].append(blob_ref)
            client_path_offset[client_path] = offset + len(blob)
            client_path_sha256[client_path].update(blob)
    # Finalize hashes for the multi-blob files that were just verified.
    for client_path in iterkeys(client_path_sha256):
        sha256 = client_path_sha256[client_path].digest()
        hash_id = rdf_objects.SHA256HashID.FromBytes(sha256)
        client_path_hash_id[client_path] = hash_id
        hash_id_blob_refs[hash_id] = verified_client_path_blob_refs[client_path]
    data_store.REL_DB.WriteHashBlobReferences(hash_id_blob_refs)
    if use_external_stores:
        for client_path in iterkeys(verified_client_path_blob_refs):
            metadatas[client_path_hash_id[client_path]] = FileMetadata(
                client_path=client_path,
                blob_refs=verified_client_path_blob_refs[client_path])
        EXTERNAL_FILE_STORE.AddFiles(metadatas)
    return client_path_hash_id
Adds new files consisting of given blob references. Args: client_path_blob_refs: A dictionary mapping `db.ClientPath` instances to lists of blob references. use_external_stores: A flag indicating if the files should also be added to external file stores. Returns: A dictionary mapping `db.ClientPath` to hash ids of the file. Raises: BlobNotFoundError: If one of the referenced blobs cannot be found.
juraj-google-style
def dvds_current_releases(self, **kwargs):
    """Gets the current-release DVDs from the API.

    Args:
        page_limit (optional): number of movies to show per page,
            default=16.
        page (optional): results page number, default=1.
        country (optional): localized data for selected country,
            default="us".

    Returns:
        A dict representation of the JSON returned from the API.
    """
    endpoint = self._get_path('dvds_current_releases')
    response = self._GET(endpoint, kwargs)
    self._set_attrs_to_values(response)
    return response
Gets the upcoming movies from the API. Args: page_limit (optional): number of movies to show per page, default=16 page (optional): results page number, default=1 country (optional): localized data for selected country, default="us" Returns: A dict respresentation of the JSON returned from the API.
juraj-google-style
def from_bytes(cls, bt):
    """Create a new instance by parsing a raw email given as bytes.

    Args:
        bt (bytes-like object): raw email as bytes-like object

    Returns:
        Instance of MailParser wrapping the parsed message.

    Raises:
        MailParserEnvironmentError: when running under Python 2, where
            bytes parsing is not supported.
    """
    log.debug('Parsing email from bytes')
    if six.PY2:
        raise MailParserEnvironmentError(
            'Parsing from bytes is valid only for Python 3.x version')
    parsed_message = email.message_from_bytes(bt)
    return cls(parsed_message)
Init a new object from bytes. Args: bt (bytes-like object): raw email as bytes-like object Returns: Instance of MailParser
codesearchnet
def _normalize_array(array, domain=(0, 1)):
    """Given an arbitrary rank<=3 NumPy array, produce a uint8 image array.

    Ensures the result has dtype uint8 and values in [0, 255].

    Args:
        array: NumPy array representing the image.
        domain: expected range of values in `array`; defaults to (0, 1). If
            explicitly set to None, the array's own measured range is used.

    Returns:
        A uint8 NumPy array suitable for image display.
    """
    array = np.array(array)
    array = np.squeeze(array)
    assert (len(array.shape) <= 3)
    assert np.issubdtype(array.dtype, np.number)
    assert (not np.isnan(array).any())
    (low, high) = (np.min(array), np.max(array))
    # With no explicit domain, normalize using the array's own value range.
    if (domain is None):
        message = 'No domain specified, normalizing from measured (~%.2f, ~%.2f)'
        log.debug(message, low, high)
        domain = (low, high)
    # Values outside the declared domain are clipped (with a notice).
    if ((low < domain[0]) or (high > domain[1])):
        message = 'Clipping domain from (~{:.2f}, ~{:.2f}) to (~{:.2f}, ~{:.2f}).'
        log.info(message.format(low, high, domain[0], domain[1]))
        array = array.clip(*domain)
    (min_value, max_value) = (np.iinfo(np.uint8).min, np.iinfo(np.uint8).max)
    # Only float/complex ("inexact") arrays are shifted/rescaled to [0, 255];
    # integer arrays are assumed to already be in output units and are just
    # clipped and cast below.
    if np.issubdtype(array.dtype, np.inexact):
        offset = domain[0]
        if (offset != 0):
            array -= offset
            log.debug('Converting inexact array by subtracting -%.2f.', offset)
        scalar = (max_value / (domain[1] - domain[0]))
        if (scalar != 1):
            array *= scalar
            log.debug('Converting inexact array by scaling by %.2f.', scalar)
    return array.clip(min_value, max_value).astype(np.uint8)
Given an arbitrary rank-3 NumPy array, produce one representing an image. This ensures the resulting array has a dtype of uint8 and a domain of 0-255. Args: array: NumPy array representing the image domain: expected range of values in array, defaults to (0, 1), if explicitly set to None will use the array's own range of values and normalize them. Returns: normalized PIL.Image
codesearchnet
def normalize(image: np.ndarray, mean: Union[float, Collection[float]], std: Union[float, Collection[float]], data_format: Optional[ChannelDimension]=None, input_data_format: Optional[Union[str, ChannelDimension]]=None) -> np.ndarray:
    """Normalizes `image` channel-wise as ``(image - mean) / std``.

    Args:
        image (`np.ndarray`):
            The image to normalize.
        mean (`float` or `Collection[float]`):
            The mean to use for normalization; a scalar is broadcast to all
            channels.
        std (`float` or `Collection[float]`):
            The standard deviation to use for normalization; a scalar is
            broadcast to all channels.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If unset, the
            input's format is kept.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, it is
            inferred from the input.
    """
    if not isinstance(image, np.ndarray):
        raise ValueError('image must be a numpy array')

    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(image)
    channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format)
    num_channels = image.shape[channel_axis]

    # Arithmetic below requires a floating dtype.
    if not np.issubdtype(image.dtype, np.floating):
        image = image.astype(np.float32)

    # Broadcast scalars to all channels; validate per-channel iterables.
    if not isinstance(mean, Collection):
        mean = [mean] * num_channels
    elif len(mean) != num_channels:
        raise ValueError(f'mean must have {num_channels} elements if it is an iterable, got {len(mean)}')
    mean = np.array(mean, dtype=image.dtype)

    if not isinstance(std, Collection):
        std = [std] * num_channels
    elif len(std) != num_channels:
        raise ValueError(f'std must have {num_channels} elements if it is an iterable, got {len(std)}')
    std = np.array(std, dtype=image.dtype)

    # Channels-last broadcasts directly; channels-first normalizes on the
    # transposed view so the channel axis lines up with mean/std.
    if input_data_format == ChannelDimension.LAST:
        image = (image - mean) / std
    else:
        image = ((image.T - mean) / std).T

    if data_format is not None:
        image = to_channel_dimension_format(image, data_format, input_data_format)
    return image
Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image - mean) / std Args: image (`np.ndarray`): The image to normalize. mean (`float` or `Collection[float]`): The mean to use for normalization. std (`float` or `Collection[float]`): The standard deviation to use for normalization. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input.
github-repos
def batch_decode(self, sequences):
    """Decode char/bpe/wp prediction sequences and fuse them by best score.

    Args:
        sequences (`torch.Tensor`): Tuple of (char, bpe, wp) prediction
            tensors for the batch.

    Returns:
        `Dict[str, any]`: Dictionary with the decoded results:
            generated_text (`List[str]`): best-scoring decoding per element.
            scores (`List[float]`): the corresponding best scores.
            char_preds (`List[str]`): character-level decodings.
            bpe_preds (`List[str]`): bpe-level decodings.
            wp_preds (`List[str]`): wordpiece-level decodings.
    """
    char_preds, bpe_preds, wp_preds = sequences
    char_strs, char_scores = self._decode_helper(char_preds, 'char')
    bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
    wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

    fused_strs = []
    fused_scores = []
    # Per batch element, keep the decoding with the highest score; ties
    # prefer char, then bpe, then wp (first listed wins).
    for c_str, b_str, w_str, c_sc, b_sc, w_sc in zip(
            char_strs, bpe_strs, wp_strs, char_scores, bpe_scores, wp_scores):
        candidates = ((c_sc, c_str), (b_sc, b_str), (w_sc, w_str))
        best_score, best_str = max(candidates, key=lambda pair: pair[0])
        fused_strs.append(best_str)
        fused_scores.append(best_score)

    return {
        'generated_text': fused_strs,
        'scores': fused_scores,
        'char_preds': char_strs,
        'bpe_preds': bpe_strs,
        'wp_preds': wp_strs,
    }
Convert a list of lists of token ids into a list of strings by calling decode. Args: sequences (`torch.Tensor`): List of tokenized input ids. Returns: `Dict[str, any]`: Dictionary of all the outputs of the decoded results. generated_text (`List[str]`): The final results after fusion of char, bpe, and wp. scores (`List[float]`): The final scores after fusion of char, bpe, and wp. char_preds (`List[str]`): The list of character decoded sentences. bpe_preds (`List[str]`): The list of bpe decoded sentences. wp_preds (`List[str]`): The list of wp decoded sentences. This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
github-repos
def create_local_server(config=None, start=True):
    """Creates a new single-process cluster running on the local host.

    Convenience wrapper building a `Server` whose cluster spec contains a
    single task in a job called `"local"`, bound to an OS-chosen port.

    Args:
        config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies
            default configuration options for all sessions that run on this
            server.
        start: (Optional.) Boolean, indicating whether to start the server
            after creating it. Defaults to `True`.

    Returns:
        A local `tf.distribute.Server`.
    """
    cluster_spec = {'localhost': ['localhost:0']}
    return Server(cluster_spec, protocol='grpc', config=config, start=start)
Creates a new single-process cluster running on the local host.

This method is a convenience wrapper for creating a `tf.distribute.Server` with a `tf.train.ServerDef` that specifies a single-process cluster containing a single task in a job called `"local"`.

Args:
    config: (Optional.) A `tf.compat.v1.ConfigProto` that specifies default configuration options for all sessions that run on this server.
    start: (Optional.) Boolean, indicating whether to start the server after creating it. Defaults to `True`.

Returns:
    A local `tf.distribute.Server`.
github-repos
def _translate_fhir_path_expression(self, builder: expressions.Builder) -> Tuple[Optional[str], Optional[str]]:
    """Translate a FHIRPath builder to SQL, with and without array wrapping.

    Args:
        builder: Builder containing the information to be encoded to
            Standard SQL.

    Returns:
        A tuple ``(expression, expression_as_array)``; both elements are
        ``None`` if translation failed, in which case the failure has been
        reported to the error reporter.
    """
    try:
        visited = self._bq_interpreter.visit(builder.node, use_resource_alias=False)
        plain_sql = f'{visited.as_operand()}'
        # Array form filters out NULL elements from the sub-select.
        array_sql = f'ARRAY(SELECT {visited.sql_alias}\nFROM {visited.to_subquery()}\nWHERE {visited.sql_alias} IS NOT NULL)'
        return (plain_sql, array_sql)
    except Exception as e:  # pylint: disable=broad-except
        # Any failure is surfaced through the error reporter rather than
        # raised, so callers can continue encoding other expressions.
        self._error_reporter.report_fhir_path_error(self._abs_path_invocation(builder), str(builder), self._error_message_for_exception(e))
        return (None, None)
Returns a tuple containing both the SQL translation of a FHIRPath expression with array wrapping and the SQL translation without array wrapping. If an error is encountered during encoding, the associated error reporter will be notified, and this method will return [`None`, `None`]. Args: builder: Builder containing the information to be encoded to Standard SQL. Returns: A tuple (expression, expression_as_array) where `expression` is the SQL translation of the FHIRPath expression without array wrapping and `expression_as_array` is the SQL translation with array wrapping.
github-repos
def _validate_isvalid_history(self, isvalid_history, field, value):
    """Checks that the given time history is properly formatted.

    Args:
        isvalid_history (`bool`): flag from schema indicating units to be
            checked.
        field (`str`): property associated with history in question.
        value (`dict`): dictionary of values from file associated with this
            property.
    """
    # Normalize subtyped history names (e.g. "*emission"/"*absorption") to
    # the base property used for unit lookups.
    history_type = value['type']
    if history_type.endswith('emission'):
        history_type = 'emission'
    elif history_type.endswith('absorption'):
        history_type = 'absorption'
    # The quantity's units must be convertible to the expected units for
    # this history type (checked via a unit-only pint conversion).
    quantity = (1.0 * units(value['quantity']['units']))
    try:
        quantity.to(property_units[history_type])
    except pint.DimensionalityError:
        self._error(field, ('incompatible units; should be consistent with ' + property_units[history_type]))
    # The time units must be convertible to the expected time units.
    time = (1.0 * units(value['time']['units']))
    try:
        time.to(property_units['time'])
    except pint.DimensionalityError:
        self._error(field, ('incompatible units; should be consistent with ' + property_units['time']))
    # The values table must have exactly enough columns to cover the largest
    # declared column index (time, quantity, and optional uncertainty).
    n_cols = len(value['values'][0])
    max_cols = (max(value['time']['column'], value['quantity']['column'], value.get('uncertainty', {}).get('column', 0)) + 1)
    if (n_cols > max_cols):
        self._error(field, 'too many columns in the values')
    elif (n_cols < max_cols):
        self._error(field, 'not enough columns in the values')
Checks that the given time history is properly formatted. Args: isvalid_history (`bool`): flag from schema indicating units to be checked. field (`str`): property associated with history in question. value (`dict`): dictionary of values from file associated with this property. The rule's arguments are validated against this schema: {'isvalid_history': {'type': 'bool'}, 'field': {'type': 'str'}, 'value': {'type': 'dict'}}
codesearchnet
def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size):
  """Process boxes in the range [idx*tile_size, (idx+1)*tile_size).

  Args:
    boxes: a tensor with a shape of [batch_size, anchors, 4].
    iou_threshold: a float representing the threshold for deciding whether
      boxes overlap too much with respect to IOU.
    output_size: an int32 tensor of size [batch_size]. Representing the
      number of selected boxes for each batch.
    idx: an integer scalar representing induction variable.
    tile_size: an integer representing the number of boxes in a tile.

  Returns:
    boxes: updated boxes.
    iou_threshold: pass down iou_threshold to the next iteration.
    output_size: the updated output_size.
    idx: the updated induction variable.
  """
  with ops.name_scope('suppression_loop_body'):
    num_tiles = array_ops.shape(boxes)[1]
    batch_size = array_ops.shape(boxes)[0]

    def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx):
      # Suppress the current tile against one earlier tile.
      return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size)

    # Current tile of boxes: [batch_size, tile_size, 4].
    box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0], [batch_size, tile_size, 4])
    # Suppress this tile against all tiles that precede it.
    _, box_slice, _, _ = while_loop.while_loop(lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, cross_suppression_func, [boxes, box_slice, iou_threshold, constant_op.constant(0)])
    # Pairwise IOU within the tile, masked to the strict upper triangle so a
    # box is only suppressed by earlier (higher-priority) boxes, and zeroed
    # where IOU is below the threshold.
    iou = _bbox_overlap(box_slice, box_slice)
    mask = array_ops.expand_dims(array_ops.reshape(math_ops.range(tile_size), [1, -1]) > array_ops.reshape(math_ops.range(tile_size), [-1, 1]), 0)
    iou *= math_ops.cast(math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype)
    # Iterate self-suppression inside the tile until it stabilizes.
    suppressed_iou, _, _, _ = while_loop.while_loop(lambda _iou, loop_condition, _iou_sum, _: loop_condition, _self_suppression, [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]), iou_threshold])
    suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0
    # Zero out boxes suppressed within the tile.
    box_slice *= array_ops.expand_dims(1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2)
    # Write the processed tile back into `boxes` at tile position `idx`,
    # leaving all other tiles unchanged.
    mask = array_ops.reshape(math_ops.cast(math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
    boxes = array_ops.tile(array_ops.expand_dims(box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape(boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask)
    boxes = array_ops.reshape(boxes, [batch_size, -1, 4])
    # Count the surviving (non-zero) boxes of this tile per batch element.
    output_size += math_ops.reduce_sum(math_ops.cast(math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1])
  return (boxes, iou_threshold, output_size, idx + 1)
Process boxes in the range [idx*tile_size, (idx+1)*tile_size). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing induction variable. tile_size: an integer representing the number of boxes in a tile Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable.
github-repos
def download_structure(pdb_id, file_type, outdir='', only_header=False, force_rerun=False):
    """Download a structure from the RCSB PDB by ID, in the requested format.

    Args:
        pdb_id: PDB ID.
        file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz.
        outdir: Optional output directory.
        only_header: If only the header file should be downloaded.
        force_rerun: If the file should be downloaded again even if it exists.

    Returns:
        str: Path to outfile.

    Raises:
        ValueError: If `file_type` is not one of the supported formats.
    """
    pdb_id = pdb_id.lower()
    file_type = file_type.lower()
    file_types = ['pdb', 'pdb.gz', 'mmcif', 'cif', 'cif.gz', 'xml.gz', 'mmtf', 'mmtf.gz']
    if (file_type not in file_types):
        raise ValueError('Invalid file type, must be either: pdb, pdb.gz, cif, cif.gz, xml.gz, mmtf, mmtf.gz')
    # mmtf is always fetched gzipped; mmcif is an alias for cif.
    if (file_type == 'mmtf'):
        file_type = 'mmtf.gz'
    if file_type.endswith('.gz'):
        gzipped = True
    else:
        gzipped = False
    if (file_type == 'mmcif'):
        file_type = 'cif'
    if only_header:
        folder = 'header'
        outfile = op.join(outdir, '{}.header.{}'.format(pdb_id, file_type))
    else:
        folder = 'download'
        outfile = op.join(outdir, '{}.{}'.format(pdb_id, file_type))
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        if ((file_type == 'mmtf.gz') or (file_type == 'mmtf')):
            mmtf_api = '1.0'
            # NOTE(review): the URL string literal below appears truncated by
            # dataset preprocessing (unterminated); it presumably built the
            # MMTF download URL from `mmtf_api` and `pdb_id` — verify against
            # the original source.
            download_link = 'http:
        else:
            # NOTE(review): likewise truncated; presumably the RCSB download
            # URL built from `folder`, `pdb_id` and `file_type`.
            download_link = 'http:
        urlretrieve(download_link, outfile)
        if gzipped:
            # NOTE(review): `outfile.strip('.gz')` strips the CHARACTERS
            # '.', 'g', 'z' from both ends, not the '.gz' suffix — ids
            # beginning/ending with those characters would be mangled.
            # Consider a suffix-removal instead; left unchanged here.
            outfile = ssbio.utils.gunzip_file(infile=outfile, outfile=outfile.strip('.gz'), outdir=outdir, delete_original=False, force_rerun_flag=force_rerun)
        log.debug('{}: saved structure file'.format(outfile))
    else:
        if (file_type == 'mmtf.gz'):
            outfile = op.join(outdir, '{}.{}'.format(pdb_id, 'mmtf'))
        log.debug('{}: structure file already saved'.format(outfile))
    return outfile
Download a structure from the RCSB PDB by ID. Specify the file type desired. Args: pdb_id: PDB ID file_type: pdb, pdb.gz, mmcif, cif, cif.gz, xml.gz, mmtf, mmtf.gz outdir: Optional output directory only_header: If only the header file should be downloaded force_rerun: If the file should be downloaded again even if it exists Returns: str: Path to outfile
codesearchnet
def kill(self, container, signal=None):
    """Kill a container or send a signal to a container.

    Args:
        container (str): The container to kill
        signal (str or int): The signal to send. Defaults to ``SIGKILL``

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    params = {}
    if signal is not None:
        # String signal names pass through; anything else is coerced to int.
        if isinstance(signal, six.string_types):
            params['signal'] = signal
        else:
            params['signal'] = int(signal)
    url = self._url('/containers/{0}/kill', container)
    res = self._post(url, params=params)
    self._raise_for_status(res)
Kill a container or send a signal to a container. Args: container (str): The container to kill signal (str or int): The signal to send. Defaults to ``SIGKILL`` Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def streaming_restore(status, session=None):
  """When graph building, runs restore ops as soon as they come in.

  Args:
    status: A _LoadStatus object from an object-based saver's restore().
      Streaming restore from name-based checkpoints is not currently
      supported.
    session: A session to run new restore ops in.

  Raises:
    NotImplementedError: If `status` comes from a name-based saver while
      graph building.
  """
  # Eager execution restores immediately; there is nothing to stream.
  if context.executing_eagerly():
    return
  if session is None:
    session = get_session()
  if isinstance(status, NameBasedSaverStatus):
    raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.')
  status.run_restore_ops(session=session)
  # Any restore ops created after this point are run immediately via the
  # checkpoint's callback (accesses private _LoadStatus internals).
  status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict=status._feed_dict)
When graph building, runs restore ops as soon as they come in. Args: status: A _LoadStatus objects from an object-based saver's restore(). Streaming restore from name-based checkpoints is not currently supported. session: A session to run new restore ops in.
github-repos
def __init__(self, element_id, driver):
    """Initialize the WebElement.

    Args:
        element_id(str): The UDID returned by remote servers.
        driver(WebDriver): The WebDriver object.
    """
    self._driver = driver
    # Remote servers may return non-string ids; always store a str.
    self.element_id = str(element_id)
Initialize the WebElement Args: element_id(str): The UDID returned by remote servers. driver(WebDriver): The WebDriver Object.
juraj-google-style
def __init__(self, sink: DataSink, store_type: Type[S], transform: Callable[[T], S]) -> None:
    """Initializes a handler for a data sink.

    Args:
        sink: The data sink to write to.
        store_type: The type of the records stored in the sink.
        transform: Callable converting an incoming item of type ``T`` to the
            stored type ``S``.
    """
    self._store_type = store_type
    self._transform = transform
    self._sink = sink
Initializes a handler for a data sink.

Args:
    sink: The data sink to write to.
    store_type: The type of the records stored in the sink.
    transform: A callable converting an incoming item to the stored type.
juraj-google-style
def user_avatar_url(username, size=64, default='retro'):
    """Get the avatar URL of the provided Fedora username via Libravatar.

    Args:
        username (str): The username to get the avatar of.
        size (int): Size of the avatar in pixels (it's a square).
        default (str): Default avatar to return if not found.

    Returns:
        str: The URL to the avatar image.
    """
    # NOTE(review): the OpenID URL literal below appears truncated by dataset
    # preprocessing (unterminated string); the original presumably built a
    # Fedora OpenID identity URL from `username` — verify against the source.
    openid = 'http:
    return libravatar_url(openid=openid, size=size, default=default)
Get the avatar URL of the provided Fedora username. The URL is returned from the Libravatar service. Args: username (str): The username to get the avatar of. size (int): Size of the avatar in pixels (it's a square). default (str): Default avatar to return if not found. Returns: str: The URL to the avatar image.
codesearchnet
def print_stats(self, reset=True):
    """Manually print the collected profiling result.

    Does nothing if the profiled function has not been called yet.

    Args:
        reset (bool): If False is specified, the profiling statistics so far
            is maintained. If ``True`` (default), :obj:`~reset_stats` is
            called to reset the profiling statistics.
    """
    if not self.ncalls:
        return
    location_fmt = 'File "{}", line {}, function {}'
    code_obj = self.fn.__code__
    print('--- Function Profiling ---')
    print(location_fmt.format(code_obj.co_filename, code_obj.co_firstlineno, self.fn.__name__))
    profile_stats = self.stats
    profile_stats.sort_stats(*self.sort_keys)
    profile_stats.print_stats(*self.print_restrictions)
    print('--------------------------')
    if reset:
        self.reset_stats()
Manually print profiling result. Args: reset (bool): If False is specified, the profiling statistics so far is maintained. If ``True`` (default), :obj:`~reset_stats` is called to reset the profiling statistics.
codesearchnet
def ephemeris(self, **kwargs):
    """Generator giving the propagation of the orbit at different dates.

    Args:
        start (Date)
        stop (Date or timedelta)
        step (timedelta)

    Yield:
        Orbit
    """
    # Delegate directly to the inclusive iterator over propagation dates.
    yield from self.iter(inclusive=True, **kwargs)
Generator giving the propagation of the orbit at different dates Args: start (Date) stop (Date or timedelta) step (timedelta) Yield: Orbit
codesearchnet
def create_predictable_zip(path):
    """Create a zip file with predictable sort order and metadata so that MD5
    will stay consistent if zipping the same content twice.

    Args:
        path (str): absolute path either to a directory to zip up, or to an
            existing zip file to convert.

    Returns:
        path (str) to the output zip file

    Raises:
        Exception: if `path` is neither a directory nor a ``.zip`` file.
    """
    inputzip = None
    if os.path.isdir(path):
        paths = []
        for root, directories, filenames in os.walk(path):
            # Store entry names relative to `path` (strip the leading prefix
            # plus the path separator).
            paths += [os.path.join(root, filename)[len(path) + 1:]
                      for filename in filenames]
        reader = lambda x: _read_file(os.path.join(path, x))
    elif os.path.isfile(path) and os.path.splitext(path)[1] == ".zip":
        inputzip = zipfile.ZipFile(path)
        paths = inputzip.namelist()
        reader = lambda x: inputzip.read(x)
    else:
        raise Exception("The `path` must either point to a directory or to a zip file.")

    zippathfd, zippath = tempfile.mkstemp(suffix=".zip")
    try:
        with zipfile.ZipFile(zippath, "w") as outputzip:
            # Sorted entry order + neutral metadata => byte-identical archives
            # for identical content, so checksums are reproducible.
            for filepath in sorted(paths):
                write_file_to_zip_with_neutral_metadata(outputzip, filepath, reader(filepath))
    finally:
        # mkstemp returns an open OS-level descriptor that must be closed to
        # avoid leaking it (previously closed via os.fdopen(...).close(), and
        # only on success); also close the input archive, if any.
        os.close(zippathfd)
        if inputzip is not None:
            inputzip.close()
    return zippath
Create a zip file with predictable sort order and metadata so that MD5 will stay consistent if zipping the same content twice. Args: path (str): absolute path either to a directory to zip up, or an existing zip file to convert. Returns: path (str) to the output zip file
juraj-google-style
def tokeninfo(self, jwt):
    """Return the user profile associated with the given JWT.

    Validates a JSON Web Token (signature and expiration) and returns the
    user information associated with the user id (``sub`` property) of the
    token.

    Args:
        jwt (str): User's jwt.

    Returns:
        The user profile.
    """
    warnings.warn('/tokeninfo will be deprecated in future releases', DeprecationWarning)
    # NOTE(review): the URL literal below appears truncated by dataset
    # preprocessing (unterminated string); the original presumably posted to
    # the provider's /tokeninfo endpoint with the jwt — verify against source.
    return self.post(url='https:
Returns user profile based on the user's jwt Validates a JSON Web Token (signature and expiration) and returns the user information associated with the user id (sub property) of the token. Args: jwt (str): User's jwt Returns: The user profile.
codesearchnet
def make_pool_tests(pool_op_in, allow_fully_quantize=False):
  """Make a set of tests to do average pooling.

  Args:
    pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.
    allow_fully_quantize: bool, whether fully_quantize is allowed.

  Returns:
    A function representing the true generator (after curried pool_op_in).
  """
  pool_op = pool_op_in

  def f(options, expected_tf_failures=0):
    # Three parameter groups: float, fully-quantized int8, and 16x8
    # quantization; the quantized groups are filtered out below when
    # `allow_fully_quantize` is False.
    test_parameters = [{'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [False], 'quant_16x8': [False]}, {'ksize': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'strides': [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]], 'input_shape': [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [False]}, {'ksize': [[1, 1, 1, 1]], 'strides': [[1, 1, 1, 1]], 'input_shape': [[1, 1, 1, 1]], 'padding': ['SAME', 'VALID'], 'data_format': ['NHWC'], 'fully_quantize': [True], 'quant_16x8': [True]}]
    if not allow_fully_quantize:
      test_parameters = [test_parameter for test_parameter in test_parameters if True not in test_parameter['fully_quantize']]

    def build_graph(parameters):
      # One placeholder input fed through the curried pooling op.
      input_tensor = tf.compat.v1.placeholder(dtype=tf.float32, name='input', shape=parameters['input_shape'])
      out = pool_op(input_tensor, ksize=parameters['ksize'], strides=parameters['strides'], data_format=parameters['data_format'], padding=parameters['padding'])
      return ([input_tensor], [out])

    def build_inputs(parameters, sess, inputs, outputs):
      # Quantized tests need inputs bounded to [-1, 1] for calibration.
      if allow_fully_quantize:
        input_values = create_tensor_data(tf.float32, parameters['input_shape'], min_value=-1, max_value=1)
      else:
        input_values = create_tensor_data(tf.float32, parameters['input_shape'])
      return ([input_values], sess.run(outputs, feed_dict=dict(zip(inputs, [input_values]))))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=expected_tf_failures)
  return f
Make a set of tests to do average pooling. Args: pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`. allow_fully_quantize: bool, whether fully_quantize is allowed. Returns: A function representing the true generator (after curried pool_op_in).
github-repos
def dict_get_path(data, path, default=None):
    """Return the value nested inside `data` at a period-delimited path.

    When traversing a list, each item (expected to be a dict) is matched
    against the current path key via its "name" value, falling back to its
    "type" value.

    Args:
        data (dict or list): data to traverse
        path (str): '.' delimited string

    Kwargs:
        default: value to return if path does not exist

    Returns:
        The value found at `path`, or `default` when any step is missing or
        an intermediate value is neither a dict nor a list.
    """
    for key in path.split("."):
        if isinstance(data, list):
            # Match list items by their "name" (or "type") field.
            for item in data:
                if item.get("name", item.get("type")) == key:
                    data = item
                    break
            else:
                # No list item matched the key.
                return default
        elif isinstance(data, dict):
            if key not in data:
                return default
            data = data[key]
        else:
            # Cannot descend into scalars.
            return default
    return data
Returns the value inside nested structure of data located at period delimited path When traversing a list, as long as that list is containing objects of type dict, items in that list will have their "name" and "type" values tested against the current key in the path. Args: data (dict or list): data to traverse path (str): '.' delimited string Kwargs: default: value to return if path does not exist
juraj-google-style
def NeedsCustomDescription(component):
  """Whether the component should use a custom description and summary.

  Components of primitive type, such as ints, floats, dicts, lists, and
  others have messy builtin docstrings that are inappropriate for display as
  descriptions and summaries in a CLI, so they are flagged for custom
  handling. Note the check is on the exact type, not isinstance: `3` has the
  same docstring as `int`, which is fine for `int` but not for `3`.

  Args:
    component: The component of interest.

  Returns:
    Whether the component should use a custom description and summary.
  """
  primitive_types = (str, int, bytes, float, complex, bool,
                     dict, tuple, list, set, frozenset)
  return type(component) in primitive_types
Whether the component should use a custom description and summary. Components of primitive type, such as ints, floats, dicts, lists, and others have messy builtin docstrings. These are inappropriate for display as descriptions and summaries in a CLI. This function determines whether the provided component has one of these docstrings. Note that an object such as `int` has the same docstring as an int like `3`. The docstring is OK for `int`, but is inappropriate as a docstring for `3`. Args: component: The component of interest. Returns: Whether the component should use a custom description and summary.
github-repos
class FocalNetLayer(nn.Module):
    """Focal Modulation Network layer (block).

    Pre/post-layernorm block: modulation (replacing attention) plus an MLP,
    each with a residual connection, optional LayerScale, and stochastic
    depth.

    Args:
        config (`FocalNetConfig`): Model config.
        index (`int`): Layer index.
        dim (`int`): Number of input channels.
        input_resolution (`Tuple[int]`): Input resolution.
        drop_path (`float`, *optional*, defaults to 0.0): Stochastic depth
            rate.
    """

    def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
        super().__init__()
        self.config = config
        self.dim = dim
        self.input_resolution = input_resolution
        self.drop = config.hidden_dropout_prob
        # Post-layernorm applies the norms after modulation/MLP instead of
        # before (see forward()).
        self.use_post_layernorm = config.use_post_layernorm
        self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.modulation = FocalNetModulation(config=config, index=index, dim=dim, projection_dropout=self.drop)
        self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        mlp_hidden_dim = int(dim * config.mlp_ratio)
        self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)
        # LayerScale: learnable per-channel residual scaling, or fixed 1.0.
        self.gamma_1 = 1.0
        self.gamma_2 = 1.0
        if config.use_layerscale:
            self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)
            self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)

    def forward(self, hidden_state, input_dimensions):
        # hidden_state: (batch, height*width, channels);
        # input_dimensions: (height, width) of the token grid.
        height, width = input_dimensions
        batch_size, _, num_channels = hidden_state.shape
        shortcut = hidden_state
        # Focal modulation branch (norm placement depends on pre/post mode).
        hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
        hidden_state = hidden_state.view(batch_size, height, width, num_channels)
        hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
        hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)
        hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
        # MLP branch with its own residual connection.
        hidden_state = hidden_state + self.drop_path(self.gamma_2 * (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state))))
        return hidden_state
Focal Modulation Network layer (block). Args: config (`FocalNetConfig`): Model config. index (`int`): Layer index. dim (`int`): Number of input channels. input_resolution (`Tuple[int]`): Input resolution. drop_path (`float`, *optional*, defaults to 0.0): Stochastic depth rate.
github-repos
def add_email(self, email_path, source, reference, method='', upload_type='raw', campaign='', confidence='', description='', bucket_list=None, password=''):
    """Add an email object to CRITs. Only RAW, MSG, and EML are supported
    currently.

    Args:
        email_path: The path on disk of the email.
        source: Source of the information.
        reference: A reference where more information can be found.
        method: The method for obtaining the email.
        upload_type: 'raw', 'eml', or 'msg'.
        campaign: An associated campaign.
        confidence: The campaign confidence.
        description: A description of the email.
        bucket_list: A list of bucket list items to add.
        password: A password for a 'msg' type.

    Returns:
        A JSON email object from CRITs or None if there was an error.
    """
    # Avoid the shared-mutable-default-argument pitfall; callers passing a
    # list are unaffected.
    if bucket_list is None:
        bucket_list = []
    if not os.path.isfile(email_path):
        log.error('{} is not a file'.format(email_path))
        return None
    with open(email_path, 'rb') as fdata:
        data = {
            'api_key': self.api_key,
            'username': self.username,
            'source': source,
            'reference': reference,
            'method': method,
            'upload_type': upload_type,
            'campaign': campaign,
            'confidence': confidence,
            'bucket_list': bucket_list,
            'description': description,
        }
        # A password is only meaningful for encrypted 'msg' uploads.
        if password:
            data['password'] = password
        r = requests.post('{0}/emails/'.format(self.url), data=data,
                          files={'filedata': fdata}, verify=self.verify,
                          proxies=self.proxies)
        if r.status_code == 200:
            result_data = json.loads(r.text)
            return result_data
        # Log the failure through the module logger, consistent with the
        # not-a-file branch above (previously printed to stdout).
        log.error('Error with status code {0} and message {1}'.format(r.status_code, r.text))
        return None
Add an email object to CRITs. Only RAW, MSG, and EML are supported currently. Args: email_path: The path on disk of the email. source: Source of the information reference: A reference where more information can be found method: The method for obtaining the email. upload_type: 'raw', 'eml', or 'msg' campaign: An associated campaign confidence: The campaign confidence description: A description of the email bucket_list: A list of bucket list items to add password: A password for a 'msg' type. Returns: A JSON email object from CRITs or None if there was an error.
codesearchnet
def __init__(self, engine):
    """Constructor.

    Args:
        engine: The StatikTemplateEngine to which this template provider
            belongs.
    """
    super(StatikJinjaTemplateProvider, self).__init__(engine)
    project = engine.project
    logger.debug("Instantiating Jinja2 template provider")

    # Import any project-local template tag modules so their filters/tags
    # register themselves before the environment is built.
    self.templatetags_path = os.path.join(project.path, project.TEMPLATETAGS_DIR)
    if os.path.exists(self.templatetags_path) and os.path.isdir(self.templatetags_path):
        import_python_modules_by_path(self.templatetags_path)

    # Built-in Statik extensions plus standard Jinja2 ones; projects may add
    # more via the `jinja2.extensions` config key.
    extensions = [
        'statik.jinja2ext.StatikUrlExtension',
        'statik.jinja2ext.StatikAssetExtension',
        'statik.jinja2ext.StatikLoremIpsumExtension',
        'statik.jinja2ext.StatikTemplateTagsExtension',
        'jinja2.ext.do',
        'jinja2.ext.loopcontrols',
        'jinja2.ext.with_',
        'jinja2.ext.autoescape',
    ]
    jinja2_config = project.config.vars.get('jinja2', dict())
    extensions.extend(jinja2_config.get('extensions', list()))

    self.env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            engine.template_paths, encoding=project.config.encoding
        ),
        extensions=extensions
    )
    # Register custom filters collected during templatetags module import.
    if templatetags.store.filters:
        logger.debug(
            "Loaded custom template tag filters: %s",
            ", ".join(templatetags.store.filters)
        )
        self.env.filters.update(templatetags.store.filters)

    # Expose project data the Statik extensions need on the environment.
    self.env.statik_views = project.views
    self.env.statik_base_url = project.config.base_path
    self.env.statik_base_asset_url = add_url_path_component(
        project.config.base_path, project.config.assets_dest_path
    )
Constructor. Args: engine: The StatikTemplateEngine to which this template provider belongs.
juraj-google-style
def PrivateKeyFromNEP2(nep2_key, passphrase):
    """Gets the private key from a NEP-2 encrypted private key.

    Args:
        nep2_key (str): The nep-2 encrypted private key.
        passphrase (str): The password to decrypt the private key with, as
            unicode string.

    Returns:
        bytes: The private key.

    Raises:
        ValueError: If the key is malformed, base58-check validation fails,
            or the passphrase is wrong.
    """
    if not nep2_key or len(nep2_key) != 58:
        raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))

    ADDRESS_HASH_SIZE = 4
    ADDRESS_HASH_OFFSET = len(NEP_FLAG) + len(NEP_HEADER)

    try:
        decoded_key = base58.b58decode_check(nep2_key)
    except Exception as e:
        raise ValueError("Invalid nep2_key")

    # Layout after base58-check decode: header/flag, then 4-byte address
    # hash (scrypt salt), then 32 bytes of AES-encrypted key material.
    address_hash = decoded_key[ADDRESS_HASH_OFFSET:ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE]
    encrypted = decoded_key[-32:]

    # NEP-2 requires NFC-normalized UTF-8 passphrases before key derivation.
    pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
    derived = scrypt.hash(pwd_normalized, address_hash, N=SCRYPT_ITERATIONS, r=SCRYPT_BLOCKSIZE, p=SCRYPT_PARALLEL_FACTOR, buflen=SCRYPT_KEY_LEN_BYTES)
    # First half is XORed with the decrypted data; second half is the AES key.
    derived1 = derived[:32]
    derived2 = derived[32:]

    cipher = AES.new(derived2, AES.MODE_ECB)
    decrypted = cipher.decrypt(encrypted)
    private_key = xor_bytes(decrypted, derived1)

    # Verify the passphrase: the double-SHA256 of the derived address must
    # reproduce the 4-byte address hash embedded in the NEP-2 key.
    kp_new = KeyPair(priv_key=private_key)
    kp_new_address = kp_new.GetAddress()
    kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode("utf-8")).digest()
    kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()
    kp_new_address_hash = kp_new_address_hash_tmp2[:4]
    if (kp_new_address_hash != address_hash):
        raise ValueError("Wrong passphrase")

    return private_key
Gets the private key from a NEP-2 encrypted private key Args: nep2_key (str): The nep-2 encrypted private key passphrase (str): The password to encrypt the private key with, as unicode string Returns: bytes: The private key
juraj-google-style
def is_native_ion_gate(gate: ops.Gate) -> bool:
    """Check if a gate is a native ion gate.

    Args:
        gate: Input gate.

    Returns:
        True if the gate is native to the ion, false otherwise.
    """
    native_gate_types = (
        ops.XXPowGate,
        ops.MeasurementGate,
        ops.XPowGate,
        ops.YPowGate,
        ops.ZPowGate,
    )
    return isinstance(gate, native_gate_types)
Check if a gate is a native ion gate. Args: gate: Input gate. Returns: True if the gate is native to the ion, false otherwise.
juraj-google-style
def _relative_attention_inner(x, y, z, transpose):
    """Relative position-aware dot-product attention inner calculation.

    This batches matrix multiply calculations to avoid unnecessary
    broadcasting.

    Args:
        x: Tensor with shape [batch_size, heads, length or 1, length or
            depth].
        y: Tensor with shape [batch_size, heads, length or 1, depth].
        z: Tensor with shape [length or 1, length, depth].
        transpose: Whether to transpose inner matrices of y and z. Should be
            true if last dimension of x is depth, not length.

    Returns:
        A Tensor with shape [batch_size, heads, length, length or depth].
    """
    batch_size = tf.shape(x)[0]
    heads = x.get_shape().as_list()[1]
    length = tf.shape(x)[2]
    # Standard content-content term: xy_matmul is
    # [batch_size, heads, length, length or depth].
    xy_matmul = tf.matmul(x, y, transpose_b=transpose)
    # Fold batch and heads together so the relative-position term can be a
    # single batched matmul against z (which has no batch/heads dims):
    # x -> [length, batch*heads, depth-or-length].
    x_t = tf.transpose(x, [2, 0, 1, 3])
    x_t_r = tf.reshape(x_t, [length, (heads * batch_size), (- 1)])
    x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose)
    # Unfold back to [batch_size, heads, length, length or depth].
    x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, (- 1)])
    x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3])
    return (xy_matmul + x_tz_matmul_r_t)
Relative position-aware dot-product attention inner calculation. This batches matrix multiply calculations to avoid unnecessary broadcasting. Args: x: Tensor with shape [batch_size, heads, length or 1, length or depth]. y: Tensor with shape [batch_size, heads, length or 1, depth]. z: Tensor with shape [length or 1, length, depth]. transpose: Whether to transpose inner matrices of y and z. Should be true if last dimension of x is depth, not length. Returns: A Tensor with shape [batch_size, heads, length, length or depth].
codesearchnet
def build_info_string(info):
    """Build a VCF INFO column string from a mapping of annotations.

    Keys with a truthy value are rendered as ``key=v1,v2,...``; keys whose
    value is falsy (flag annotations without values) are rendered as the
    bare key.

    Args:
        info (dict): A dictionary with information from the vcf file

    Returns:
        String: A string that is on the proper vcf format for the INFO
        column
    """
    entries = []
    for key, values in info.items():
        if values:
            entries.append('='.join([key, ','.join(values)]))
        else:
            entries.append(key)
    return ';'.join(entries)
Build a new vcf INFO string based on the information in the info dict.

The info is a dictionary with vcf INFO keys as keys and lists of vcf values
as values. Flag annotations carry no values; their entry in info is falsy
and they are emitted as the bare key.

Args:
    info (dict): A dictionary with information from the vcf file

Returns:
    String: A string that is on the proper vcf format for the INFO column
juraj-google-style
def _get_ref_args(self, node):
    """Collect the names of `node`'s ref-typed outputs.

    Args:
        node: A `NodeDef`.

    Returns:
        A list of the arg names (as strs) that are ref-type; empty if the
        op is not registered.
    """
    op_def = op_def_registry.get(node.op)
    if op_def is None:
        return []
    ref_args = []
    for index, output_arg in enumerate(op_def.output_arg):
        if not output_arg.is_ref:
            continue
        # Output 0 is addressed by the bare node name; later outputs carry
        # an explicit ":<slot>" suffix.
        if index == 0:
            ref_args.append(node.name)
        else:
            ref_args.append('%s:%d' % (node.name, index))
    return ref_args
Determine whether an input of an op is ref-type. Args: node: A `NodeDef`. Returns: A list of the arg names (as strs) that are ref-type.
github-repos
async def puts(self, items, seqn=None):
    """Add the structured data from items to the CryoTank.

    Args:
        items (list): A list of objects to store in the CryoTank.
        seqn (iden, offs): An iden / offset pair to record.

    Returns:
        int: The ending offset of the items or seqn.
    """
    size = 0
    # Persist in fixed-size chunks, firing an event per chunk.
    for chunk in s_common.chunks(items, 1000):
        metrics = self._items.save(chunk)
        self._metrics.add(metrics)
        (await self.fire('cryotank:puts', numrecords=len(chunk)))
        size += len(chunk)
        # Yield to the event loop between chunks so large puts don't block.
        (await asyncio.sleep(0))
    if (seqn is not None):
        (iden, offs) = seqn
        # Record the advanced offset for the given sequence iden.
        self.setOffset(iden, (offs + size))
    return size
Add the structured data from items to the CryoTank. Args: items (list): A list of objects to store in the CryoTank. seqn (iden, offs): An iden / offset pair to record. Returns: int: The ending offset of the items or seqn.
codesearchnet
def visualize_conv_activations(activation, name):
    """Visualize activations for convolution layers.

    Tiles channels of the activation map into one (almost) square image
    summary. Trailing channels that do not fill a complete row are dropped.

    Args:
        activation: tensor with the activation [B,H,W,C]
        name: label for tensorboard
    """
    import math
    with tf.name_scope('visualize_act_' + name):
        _, h, w, c = activation.get_shape().as_list()
        rows = []
        # Lay out roughly sqrt(c) channels per row to approximate a square.
        c_per_row = int(math.sqrt(c))
        for y in range(0, c - c_per_row, c_per_row):
            # Concatenate this row's channels horizontally.
            row = activation[:, :, :, y:y + c_per_row]
            cols = tf.unstack(row, axis=3)
            row = tf.concat(cols, 1)
            rows.append(row)
        # Stack rows side by side and emit a single-channel image summary.
        viz = tf.concat(rows, 2)
    tf.summary.image('visualize_act_' + name, tf.expand_dims(viz, -1))
Visualize activations for convolution layers. Remarks: This tries to place all activations into a square. Args: activation: tensor with the activation [B,H,W,C] name: label for tensorboard Returns: image of almost all activations
juraj-google-style
def memory_read32(self, addr, num_words, zone=None):
    """Reads memory from the target system in units of 32-bits.

    Args:
        self (JLink): the ``JLink`` instance
        addr (int): start address to read from
        num_words (int): number of words to read
        zone (str): memory zone to read from

    Returns:
        List of words read from the target system.

    Raises:
        JLinkException: if memory could not be read
    """
    # Thin wrapper: fix the element width at 32 bits.
    return self.memory_read(addr, num_words, nbits=32, zone=zone)
Reads memory from the target system in units of 32-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_words (int): number of words to read zone (str): memory zone to read from Returns: List of words read from the target system. Raises: JLinkException: if memory could not be read
codesearchnet
def _process_health_pill_value(self, wall_time, step, device_name, output_slot, node_name, tensor_proto, node_name_set=None):
    """Creates a HealthPillEvent containing various properties of a health pill.

    Args:
      wall_time: The wall time in seconds.
      step: The session run step of the event.
      device_name: The name of the node's device.
      output_slot: The numeric output slot.
      node_name: The name of the node (without the output slot).
      tensor_proto: A tensor proto of data.
      node_name_set: An optional set of node names that are relevant. If not
        provided, no filtering by relevance occurs.

    Returns:
      An event_accumulator.HealthPillEvent. Or None if one could not be
      created.
    """
    # Skip nodes outside the relevance filter, when a filter is supplied.
    if (node_name_set and (node_name not in node_name_set)):
        return None
    elements = list(tensor_util.make_ndarray(tensor_proto))
    # NOTE(review): the health-pill layout is positional -- element 12 is
    # presumably the dtype enum and elements 14+ the tensor shape; confirm
    # against the producer of these tensor protos.
    return HealthPillEvent(wall_time=wall_time, step=step, device_name=device_name, output_slot=output_slot, node_name=node_name, dtype=repr(tf.as_dtype(elements[12])), shape=elements[14:], value=elements)
Creates a HealthPillEvent containing various properties of a health pill. Args: wall_time: The wall time in seconds. step: The session run step of the event. device_name: The name of the node's device. output_slot: The numeric output slot. node_name: The name of the node (without the output slot). tensor_proto: A tensor proto of data. node_name_set: An optional set of node names that are relevant. If not provided, no filtering by relevance occurs. Returns: An event_accumulator.HealthPillEvent. Or None if one could not be created.
codesearchnet
def build_vocab(self, texts, verbose=1, **kwargs):
    """Builds the internal vocabulary and computes various statistics.

    Args:
        texts: The list of text items to encode.
        verbose: The verbosity level for progress. Can be 0, 1, 2.
            (Default value = 1)
        **kwargs: The kwargs for `token_generator`.
    """
    if self.has_vocab:
        # Fix: `Logger.warn` is deprecated; `warning` is the supported API.
        logger.warning(
            "Tokenizer already has existing vocabulary. Overriding and building new vocabulary.")
    progbar = Progbar(len(texts), verbose=verbose, interval=0.25)
    count_tracker = utils._CountTracker()
    self._token_counts.clear()
    self._num_texts = len(texts)

    for token_data in self.token_generator(texts, **kwargs):
        # token_data is (index_0, ..., index_k, token).
        indices, token = token_data[:-1], token_data[-1]
        count_tracker.update(indices)
        self._token_counts[token] += 1
        # Progress is keyed by the index of the text being processed.
        progbar.update(indices[0])

    # Build the token <-> index maps from the observed tokens.
    self.create_token_indices(self._token_counts.keys())
    count_tracker.finalize()
    self._counts = count_tracker.counts
    progbar.update(len(texts))
Builds the internal vocabulary and computes various statistics. Args: texts: The list of text items to encode. verbose: The verbosity level for progress. Can be 0, 1, 2. (Default value = 1) **kwargs: The kwargs for `token_generator`.
juraj-google-style
def on_smart_contract_created(self, sc_event: SmartContractEvent):
    """Listener for SmartContractEvent.

    Args:
        sc_event (SmartContractEvent): event to check for a newly created
            NEP5 token contract.
    """
    # Only contract-creation events carry a ContractState payload.
    if not isinstance(sc_event.contract, ContractState):
        return
    if not sc_event.test_mode:
        sc_event.CheckIsNEP5()
    if sc_event.token:
        self._new_contracts_to_write.append(sc_event)
Listener for SmartContractEvent Args: sc_event (SmartContractEvent): event to check for a newly created NEP5 token
juraj-google-style
def _extract_hunt_results(self, output_file_path):
    """Open a hunt output archive and extract files.

    Args:
      output_file_path: The path where the hunt archive is downloaded to.

    Returns:
      list: tuples containing:
          str: The name of the client from where the files were downloaded.
          str: The directory where the files were downloaded to.
    """
    # (client_id, extraction_directory) pairs; client IDs are later
    # translated to FQDNs for the return value.
    collection_paths = []
    # Client IDs already recorded, to avoid duplicate collection paths.
    client_ids = set()
    # Maps a client ID to its fully qualified domain name.
    client_id_to_fqdn = {}
    # Top-level directory inside the archive; taken from the first entry.
    hunt_dir = None
    try:
        with zipfile.ZipFile(output_file_path) as archive:
            items = archive.infolist()
            for f in items:
                if (not hunt_dir):
                    hunt_dir = f.filename.split('/')[0]
                # client_info.yaml maps a client ID to its FQDN; record the
                # mapping instead of extracting the file.
                if (f.filename.split('/')[(- 1)] == 'client_info.yaml'):
                    (client_id, fqdn) = self._get_client_fqdn(archive.read(f))
                    client_id_to_fqdn[client_id] = fqdn
                    continue
                # Archive layout is <hunt_dir>/<client_id>/...; GRR client
                # IDs start with 'C.'.
                client_id = f.filename.split('/')[1]
                if client_id.startswith('C.'):
                    if (client_id not in client_ids):
                        client_directory = os.path.join(self.output_path, hunt_dir, client_id)
                        collection_paths.append((client_id, client_directory))
                        client_ids.add(client_id)
                    try:
                        archive.extract(f, self.output_path)
                    # A corrupt archive member raises KeyError; abort.
                    except KeyError as exception:
                        print('Extraction error: {0:s}'.format(exception))
                        return []
    except OSError as exception:
        msg = 'Error manipulating file {0:s}: {1!s}'.format(output_file_path, exception)
        self.state.add_error(msg, critical=True)
        return []
    except zipfile.BadZipfile as exception:
        msg = 'Bad zipfile {0:s}: {1!s}'.format(output_file_path, exception)
        self.state.add_error(msg, critical=True)
        return []
    # Best-effort cleanup of the downloaded archive; failure is non-fatal.
    try:
        os.remove(output_file_path)
    except OSError as exception:
        print('Output path {0:s} could not be removed: {1:s}'.format(output_file_path, exception))
    # Translate client IDs to FQDNs, falling back to the raw client ID when
    # no client_info.yaml entry was seen for it.
    fqdn_collection_paths = []
    for (client_id, path) in collection_paths:
        fqdn = client_id_to_fqdn.get(client_id, client_id)
        fqdn_collection_paths.append((fqdn, path))
    if (not fqdn_collection_paths):
        self.state.add_error('Nothing was extracted from the hunt archive', critical=True)
        return []
    return fqdn_collection_paths
Open a hunt output archive and extract files. Args: output_file_path: The path where the hunt archive is downloaded to. Returns: list: tuples containing: str: The name of the client from where the files were downloaded. str: The directory where the files were downloaded to.
codesearchnet
def prod(x, axis=None, keepdims=False):
    """Reduction along axes with product operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which the
            product is calculated. The default value `None` reduces all
            dimensions.
        keepdims (bool): Flag whether the reduced axes are kept as a
            dimension with 1 element.

    Returns:
        ~nnabla.Variable: N-D array.

    Note:
        Backward computation is not accurate in a zero value input.
    """
    from .function_bases import prod as prod_base
    # Normalize `axis` into an iterable of axis indices.
    if axis is None:
        reduce_axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        reduce_axes = axis
    else:
        reduce_axes = [axis]
    return prod_base(x, reduce_axes, keepdims)
Reduction along axes with product operation. Args: x (Variable): An input variable. axis (None, int or tuple of ints): Axis or axes along which product is calculated. Passing the default value `None` will reduce all dimensions. keepdims (bool): Flag whether the reduced axes are kept as a dimension with 1 element. Returns: ~nnabla.Variable: N-D array. Note: Backward computation is not accurate in a zero value input.
codesearchnet
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool=True):
    """Save this instance to a JSON file.

    Args:
        json_file_path (`str` or `os.PathLike`):
            Path to the JSON file in which this configuration instance's
            parameters will be saved.
        use_diff (`bool`, *optional*, defaults to `True`):
            If set to `True`, only the difference between the config
            instance and the default `PretrainedConfig()` is serialized to
            the JSON file.
    """
    serialized = self.to_json_string(use_diff=use_diff)
    with open(json_file_path, 'w', encoding='utf-8') as writer:
        writer.write(serialized)
Save this instance to a JSON file. Args: json_file_path (`str` or `os.PathLike`): Path to the JSON file in which this configuration instance's parameters will be saved. use_diff (`bool`, *optional*, defaults to `True`): If set to `True`, only the difference between the config instance and the default `PretrainedConfig()` is serialized to JSON file.
github-repos
def _get_task_id(source):
    """Return the task id associated to the generic source of the signal.

    Args:
        source: source of the signal; either an object id returned by a
            task, a task id, or an actor handle.

    Returns:
        - If source is an object id, the id of the task which created it.
        - If source is an actor handle, the id of the actor's creator task.
        - If source is a task id, the same task id.
    """
    # Exact type checks (not isinstance) preserve the original dispatch.
    if type(source) is ray.actor.ActorHandle:
        return source._ray_actor_id
    if type(source) is ray.TaskID:
        return source
    return ray._raylet.compute_task_id(source)
Return the task id associated to the generic source of the signal. Args: source: source of the signal, it can be either an object id returned by a task, a task id, or an actor handle. Returns: - If source is an object id, return id of task which created object. - If source is an actor handle, return id of actor's task creator. - If source is a task id, return same task id.
juraj-google-style
def squad_v2_exact_match(y_true: List[List[str]], y_predicted: List[str]) -> float:
    """Calculates Exact Match score between y_true and y_predicted.

    The EM score uses the best matching y_true answer: if the prediction
    equals at least one answer in y_true then EM = 1, else EM = 0.
    The same as in SQuAD-v2.0.

    Args:
        y_true: list of correct answers (each answer is a list of strings)
        y_predicted: list of predicted answers

    Returns:
        exact match score : float
    """
    if not y_true:
        return 0
    hits = 0
    for ground_truth, prediction in zip(y_true, y_predicted):
        normalized_answers = {normalize_answer(answer) for answer in ground_truth}
        if normalize_answer(prediction) in normalized_answers:
            hits += 1
    return 100 * hits / len(y_true)
Calculates Exact Match score between y_true and y_predicted EM score uses the best matching y_true answer: if y_pred equals at least one answer in y_true then EM = 1, else EM = 0 The same as in SQuAD-v2.0 Args: y_true: list of correct answers (correct answers are represented by list of strings) y_predicted: list of predicted answers Returns: exact match score : float
juraj-google-style
def _destructively_move(self, dest_doc):
    """Move all data in this doc to the dest_doc, leaving this doc empty.

    Args:
        dest_doc (Document) :
            The Bokeh document to populate with data from this one

    Returns:
        None
    """
    if dest_doc is self:
        raise RuntimeError("Attempted to overwrite a document with itself")
    dest_doc.clear()
    roots = []
    # Freeze model-change events while roots are detached so observers
    # never see an inconsistent intermediate state.
    self._push_all_models_freeze()
    try:
        while self.roots:
            r = next(iter(self.roots))
            self.remove_root(r)
            roots.append(r)
    finally:
        self._pop_all_models_freeze()
    # Sanity checks: every root must be fully detached before re-adding.
    for r in roots:
        if r.document is not None:
            raise RuntimeError("Somehow we didn't detach %r" % (r))
    if len(self._all_models) != 0:
        raise RuntimeError("_all_models still had stuff in it: %r" % (self._all_models))
    for r in roots:
        dest_doc.add_root(r)
    dest_doc.title = self.title
Move all data in this doc to the dest_doc, leaving this doc empty. Args: dest_doc (Document) : The Bokeh document to populate with data from this one Returns: None
juraj-google-style
def movie(self, **kwargs):
    """Search for movies by title.

    Keyword arguments are forwarded as query parameters; see the remote
    API for the supported options (query, page, language, include_adult,
    year, primary_release_year, search_type).

    Returns:
        A dict representation of the JSON returned from the API.
    """
    response = self._GET(self._get_path('movie'), kwargs)
    self._set_attrs_to_values(response)
    return response
Search for movies by title. Args: query: CGI escaped string. page: (optional) Minimum value of 1. Expected value is an integer. language: (optional) ISO 639-1 code. include_adult: (optional) Toggle the inclusion of adult titles. Expected value is True or False. year: (optional) Filter the results release dates to matches that include this value. primary_release_year: (optional) Filter the results so that only the primary release dates have this value. search_type: (optional) By default, the search type is 'phrase'. This is almost guaranteed the option you will want. It's a great all purpose search type and by far the most tuned for every day querying. For those wanting more of an "autocomplete" type search, set this option to 'ngram'. Returns: A dict representation of the JSON returned from the API.
codesearchnet
def remove(self, **kwargs):
    """Remove this container. Similar to the ``docker rm`` command.

    Args:
        v (bool): Remove the volumes associated with the container
        link (bool): Remove the specified link and not the underlying
            container
        force (bool): Force the removal of a running container (uses
            ``SIGKILL``)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    api_client = self.client.api
    return api_client.remove_container(self.id, **kwargs)
Remove this container. Similar to the ``docker rm`` command. Args: v (bool): Remove the volumes associated with the container link (bool): Remove the specified link and not the underlying container force (bool): Force the removal of a running container (uses ``SIGKILL``) Raises: :py:class:`docker.errors.APIError` If the server returns an error.
codesearchnet
def _maybe_refresh_metadata(self, wakeup=False):
    """Send a metadata request if needed.

    Args:
        wakeup (bool): whether to wake the selector after sending.

    Returns:
        int: milliseconds until next refresh
    """
    # Time until the cached cluster metadata expires.
    ttl = self.cluster.ttl()
    # If a refresh is already in flight, wait up to the request timeout
    # for it to finish before considering another one.
    wait_for_in_progress_ms = (self.config['request_timeout_ms'] if self._metadata_refresh_in_progress else 0)
    metadata_timeout = max(ttl, wait_for_in_progress_ms)
    if (metadata_timeout > 0):
        return metadata_timeout
    node_id = self.least_loaded_node()
    if (node_id is None):
        log.debug('Give up sending metadata request since no node is available')
        return self.config['reconnect_backoff_ms']
    if self._can_send_request(node_id):
        topics = list(self._topics)
        # During bootstrap, restrict to the configured topic filter.
        if ((not topics) and self.cluster.is_bootstrap(node_id)):
            topics = list(self.config['bootstrap_topics_filter'])
        # "All topics" is encoded as [] for v0 requests and None for v1+.
        if (self.cluster.need_all_topic_metadata or (not topics)):
            topics = ([] if (self.config['api_version'] < (0, 10)) else None)
        api_version = (0 if (self.config['api_version'] < (0, 10)) else 1)
        request = MetadataRequest[api_version](topics)
        log.debug('Sending metadata request %s to node %s', request, node_id)
        future = self.send(node_id, request, wakeup=wakeup)
        future.add_callback(self.cluster.update_metadata)
        future.add_errback(self.cluster.failed_update)
        self._metadata_refresh_in_progress = True

        def refresh_done(val_or_error):
            # Clear the in-progress flag on success or failure alike.
            self._metadata_refresh_in_progress = False
        future.add_callback(refresh_done)
        future.add_errback(refresh_done)
        return self.config['request_timeout_ms']
    # A connection attempt is underway; check again after the backoff.
    if self._connecting:
        return self.config['reconnect_backoff_ms']
    if self.maybe_connect(node_id, wakeup=wakeup):
        log.debug('Initializing connection to node %s for metadata request', node_id)
        return self.config['reconnect_backoff_ms']
    # No connection and no way to start one: nothing to wait for here.
    return float('inf')
Send a metadata request if needed. Returns: int: milliseconds until next refresh
codesearchnet
def to_dense(self, sampling_rate):
    """Convert the current sparse column to a dense representation.

    Args:
        sampling_rate (int, str): Sampling rate (in Hz) to use when
            constructing the DenseRunVariable.

    Returns:
        A DenseRunVariable.
    """
    duration = int(math.ceil(sampling_rate * self.get_duration()))
    ts = np.zeros(duration, dtype=self.values.dtype)
    onsets = np.round(self.onset * sampling_rate).astype(int)
    durations = np.round(self.duration * sampling_rate).astype(int)

    run_i, start, last_ind = 0, 0, 0
    for i, val in enumerate(self.values.values):
        # A decreasing onset means we crossed into the next run; shift the
        # start offset by the duration of the run just finished.
        if onsets[i] < last_ind:
            start += self.run_info[run_i].duration * sampling_rate
            run_i += 1
        _onset = int(start + onsets[i])
        _offset = int(_onset + durations[i])
        if _onset >= duration:
            # Fix: the adjacent string literals previously concatenated
            # without a separating space ("...the runsduration...").
            warnings.warn("The onset time of a variable seems to exceed the "
                          "runs duration, hence runs are incremented by one "
                          "internally.")
        ts[_onset:_offset] = val
        last_ind = onsets[i]

    run_info = list(self.run_info)
    return DenseRunVariable(
        name=self.name, values=ts, run_info=run_info, source=self.source,
        sampling_rate=sampling_rate)
Convert the current sparse column to a dense representation. Returns: A DenseRunVariable. Args: sampling_rate (int, str): Sampling rate (in Hz) to use when constructing the DenseRunVariable. Returns: A DenseRunVariable.
juraj-google-style
def xavier_init(n_inputs, n_outputs, uniform=True):
    """Set the parameter initialization using the method described in
    Xavier Glorot and Yoshua Bengio (2010): Understanding the difficulty
    of training deep feedforward neural networks.

    This method is designed to keep the scale of the gradients roughly the
    same in all layers.

    Args:
        n_inputs: The number of input nodes into each output.
        n_outputs: The number of output nodes for each input.
        uniform: If true use a uniform distribution, otherwise use a normal.

    Returns:
        An initializer.
    """
    fan_sum = n_inputs + n_outputs
    if uniform:
        limit = math.sqrt(6.0 / fan_sum)
        return tf.random_uniform_initializer(-limit, limit)
    # sqrt(3/fan_sum) gives a truncated normal of comparable scale.
    return tf.truncated_normal_initializer(stddev=math.sqrt(3.0 / fan_sum))
Set the parameter initialization using the method described. This method is designed to keep the scale of the gradients roughly the same in all layers. Xavier Glorot and Yoshua Bengio (2010): Understanding the difficulty of training deep feedforward neural networks. International conference on artificial intelligence and statistics. Args: n_inputs: The number of input nodes into each output. n_outputs: The number of output nodes for each input. uniform: If true use a uniform distribution, otherwise use a normal. Returns: An initializer.
codesearchnet
def process(self, element):
    """Process the PredictionResult and print the translated text.

    Args:
        element: The RunInference output to be processed.
    """
    source_text = self._tokenizer.decode(element.example, skip_special_tokens=True)
    translated_text = self._tokenizer.decode(element.inference, skip_special_tokens=True)
    print(f'{source_text} \t Output: {translated_text}')
Process the PredictionResult to print the translated texts Args: element: The RunInference output to be processed.
github-repos
def compute_transpose_output_shape(input_shape, axes):
    """Compute the output shape for the `transpose` operation.

    Args:
        input_shape: Input shape.
        axes: Permutation of the dimensions for the `transpose` operation,
            or `None` to reverse all dimensions.

    Returns:
        Tuple of ints: The output shape after the `transpose` operation.
    """
    shape = list(input_shape)
    # Default permutation: reverse every dimension.
    if axes is None:
        return tuple(reversed(shape))
    if len(axes) != len(shape):
        raise ValueError(f'axis must be a list of the same length as the input shape, expected {len(input_shape)}, but received {len(axes)}.')
    return tuple(shape[axis] for axis in axes)
Compute the output shape for the `transpose` operation. Args: input_shape: Input shape. axes: Permutation of the dimensions for the `transpose` operation. Returns: Tuple of ints: The output shape after the `transpose` operation.
github-repos
def from_json(cls, raw):
    """Helper to construct a blob from a dict.

    Args:
        raw (dict): Raw blob representation.

    Returns:
        NodeBlob: A NodeBlob object or None.
    """
    if (raw is None):
        return None
    bcls = None
    _type = raw.get('type')
    try:
        # Resolve the concrete blob class from the declared blob type.
        bcls = cls._blob_type_map[BlobType(_type)]
    except (KeyError, ValueError) as e:
        logger.warning('Unknown blob type: %s', _type)
        # In debug mode, surface unknown types as a parse error instead of
        # silently returning None.
        if DEBUG:
            raise_from(exception.ParseException(('Parse error for %s' % _type), raw), e)
        return None
    blob = bcls()
    blob.load(raw)
    return blob
Helper to construct a blob from a dict. Args: raw (dict): Raw blob representation. Returns: NodeBlob: A NodeBlob object or None.
codesearchnet
def __setstate__(self, state):
    """Restore state as part of deserialization/unpickling.

    Args:
      state: the dictionary produced by a __getstate__ call
    """
    self._api = state['api']
    self._path_with_token = state['path_token']
    self._path = state['path']
    self._buffer = state['buffer']
    self._buffered = state['buffered']
    self._written = state['written']
    self._offset = state['offset']
    self.closed = state['closed']
    # Recompute the display name from the (quoted) stored path.
    self.name = api_utils._unquote_filename(self._path)
Restore state as part of deserialization/unpickling. Args: state: the dictionary from a __getstate__ call
juraj-google-style
def get_enabled_references(self, datas, meta_references):
    """Get enabled manifest references declarations.

    Enabled references are defined through meta references declaration;
    every other reference is ignored.

    Arguments:
        datas (dict): Data where to search for reference declarations.
            This is commonly the fully parsed manifest.
        meta_references (list): List of enabled reference names.

    Returns:
        collections.OrderedDict: Serialized enabled references datas.
    """
    # Preserve the declaration order of the enabled reference names.
    return OrderedDict(
        (section, self.get_reference(datas, section))
        for section in meta_references
    )
Get enabled manifest references declarations. Enabled references are defined through meta references declaration, every other references are ignored. Arguments: datas (dict): Data where to search for reference declarations. This is commonly the fully parsed manifest. meta_references (list): List of enabled reference names. Returns: collections.OrderedDict: Serialized enabled references datas.
codesearchnet
def dodge(field_name, value, range=None):
    """Create a ``DataSpec`` dict that applies a client-side ``Dodge``
    transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with
        value (float) : the fixed offset to add to column data
        range (Range, optional) : a range to use for computing synthetic
            coordinates when necessary, e.g. a ``FactorRange`` when the
            column data is categorical (default: None)

    Returns:
        dict
    """
    offset_transform = Dodge(value=value, range=range)
    return field(field_name, offset_transform)
Create a ``DataSpec`` dict that applies a client-side ``Jitter`` transformation to a ``ColumnDataSource`` column. Args: field_name (str) : a field name to configure ``DataSpec`` with value (float) : the fixed offset to add to column data range (Range, optional) : a range to use for computing synthetic coordinates when necessary, e.g. a ``FactorRange`` when the column data is categorical (default: None) Returns: dict
juraj-google-style
def DEFINE_float(name, default, help, lower_bound=None, upper_bound=None, flag_values=_flagvalues.FLAGS, **args):
    """Registers a flag whose value must be a float.

    If lower_bound or upper_bound are set, then this flag must be within
    the given range.

    Args:
      name: str, the flag name.
      default: float|str|None, the default value of the flag.
      help: str, the help message.
      lower_bound: float, min value of the flag.
      upper_bound: float, max value of the flag.
      flag_values: FlagValues, the FlagValues instance with which the flag
        will be registered. This should almost never need to be overridden.
      **args: dict, the extra keyword args that are passed to DEFINE.
    """
    bounded_parser = _argument_parser.FloatParser(lower_bound, upper_bound)
    DEFINE(bounded_parser, name, default, help, flag_values,
           _argument_parser.ArgumentSerializer(), **args)
    # Attach a range validator when bounds were supplied.
    _register_bounds_validator_if_needed(bounded_parser, name, flag_values=flag_values)
Registers a flag whose value must be a float. If lower_bound or upper_bound are set, then this flag must be within the given range. Args: name: str, the flag name. default: float|str|None, the default value of the flag. help: str, the help message. lower_bound: float, min value of the flag. upper_bound: float, max value of the flag. flag_values: FlagValues, the FlagValues instance with which the flag will be registered. This should almost never need to be overridden. **args: dict, the extra keyword args that are passed to DEFINE.
codesearchnet
def pack_image_features(self, image_features, image_sizes, image_newline=None, vision_aspect_ratio='anyres_max_9'):
    """Reshape, unpad and then pack each image_feature into a single
    image_features tensor containing all visual vectors.

    Args:
        image_features (`List[torch.Tensor]` of length num_images, each of
            shape `(num_patches, image_length, embed_dim)`): List of image
            feature tensors, each containing all the visual features of all
            patches.
        image_sizes (`torch.Tensor` of shape `(num_images, 2)`): Actual
            image size of each image (H, W).
        image_newline (`torch.Tensor` of shape `(embed_dim)`): New-line
            embedding vector.
        vision_aspect_ratio (`str`, *optional*, defaults to
            `"anyres_max_9"`): Aspect ratio used when processing image
            features; the trailing integer bounds the total patch budget.

    Returns:
        new_image_features (`List[torch.Tensor]`): per-image feature
            tensors of shape `(feat_len, embed_dim)`.
        feature_lens (`torch.Tensor` of shape `(num_images,)`): token
            length of each image in image_features.
    """
    new_image_features = []
    feature_lens = []
    for image_idx, image_feature in enumerate(image_features):
        if image_feature.shape[0] > 1:
            # Patch 0 is the low-resolution base view; the rest are the
            # high-resolution anyres tiles.
            base_image_feature = image_feature[0]
            image_feature = image_feature[1:]
            height = width = self.config.vision_config.image_size
            if height * width != base_image_feature.shape[0]:
                raise ValueError('The number of patches is not consistent with the image size.')
            num_patch_height, num_patch_width = get_anyres_image_grid_shape(image_sizes[image_idx], self.config.image_grid_pinpoints, self.config.vision_config.image_size)
            image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1)
            image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous()
            image_feature = image_feature.flatten(1, 2).flatten(2, 3)
            image_feature = unpad_image(image_feature, image_sizes[image_idx])
            max_num_patches = int(vision_aspect_ratio.strip('anyres_max_'))
            channels, curr_height, curr_width = image_feature.shape
            ratio = math.sqrt(curr_height * curr_width / (max_num_patches * height ** 2))
            if ratio > 1.1:
                # Fix: the interpolate call was truncated in the original
                # text; restored per upstream LLaVA-OneVision — bilinear
                # downsample of both spatial dims by 1/ratio so the token
                # count stays within the max_num_patches budget.
                image_feature = image_feature[None]
                image_feature = nn.functional.interpolate(image_feature, [int(curr_height // ratio), int(curr_width // ratio)], mode='bilinear')[0]
            if image_newline is not None:
                image_feature = torch.cat((image_feature, image_newline[:, None, None].expand(*image_feature.shape[:-1], 1).to(image_feature.device, image_feature.dtype)), dim=-1)
            image_feature = image_feature.flatten(1, 2).transpose(0, 1)
            image_feature = torch.cat((base_image_feature, image_feature), dim=0)
        else:
            # Single-patch image: no tiling, optionally append the newline
            # embedding as an extra row.
            image_feature = image_feature[0]
            if image_newline is not None:
                image_feature = torch.cat((image_feature, image_newline[None].to(image_feature)), dim=0)
        # NOTE(review): the garbled original contained a stray
        # `flatten(0, 1)` here that would collapse the documented 2-D
        # (tokens, embed_dim) output to 1-D; dropped per upstream — confirm.
        new_image_features.append(image_feature)
        feature_lens.append(image_feature.size(0))
    feature_lens = torch.tensor(feature_lens, dtype=torch.long, device=image_features[0].device)
    return (new_image_features, feature_lens)
Reshape, unpad and then pack each image_feature into a single image_features tensor containing all visual vectors. Args: image_features (`List[torch.Tensor]` of length num_images, each of shape `(num_patches, image_length, embed_dim)`) List of image feature tensor, each contains all the visual feature of all patches. image_sizes (`torch.Tensor` of shape `(num_images, 2)`) Actual image size of each images (H, W). image_newline (`torch.Tensor` of shape `(embed_dim)`) New line embedding vector. vision_aspect_ratio (`str`, *optional*, "anyres_max_9"): Aspect ratio used when processing image features. The default value is "anyres_max_9". Returns: image_features (`torch.Tensor` of shape `(all_feat_len, embed_dim)`) feature_lens (`List[int]`) token length of each image in image_features
github-repos
def run_parallel(self, para_func):
    """Run parallel calculation.

    This will run the parallel calculation on self.num_processors.

    Args:
        para_func (obj): Function object to be used in parallel.

    Returns:
        (dict): Dictionary with parallel results.
    """
    if self.timer:
        start_timer = time.time()

    with mp.Pool(self.num_processors) as pool:
        print('start pool with {} processors: {} total processes.\n'.format(
            self.num_processors, len(self.args)))
        # Dispatch one async task per pre-built argument tuple.
        results = [pool.apply_async(para_func, arg) for arg in self.args]
        # Collect results while the pool is still alive; the context
        # manager terminates the pool on exit.
        out = [r.get() for r in results]

    # Each worker returns a dict of arrays; concatenate per key so the
    # result looks like a single sequential run.
    out = {key: np.concatenate([out_i[key] for out_i in out]) for key in out[0].keys()}
    if self.timer:
        print("SNR calculation time:", time.time()-start_timer)
    return out
Run parallel calculation This will run the parallel calculation on self.num_processors. Args: para_func (obj): Function object to be used in parallel. Returns: (dict): Dictionary with parallel results.
juraj-google-style
def __init__(self, coupling_map, layout=None):
    """Extends a Layout with the idle nodes from coupling_map.

    Args:
        coupling_map (Coupling): directed graph representing a coupling
            map.
        layout (Layout): an existing layout. Ancilla allocation occurs if
            the layout is smaller than the coupling_map.
    """
    super().__init__()
    # Register name under which idle (ancilla) qubits are allocated.
    self.ancilla_name = 'ancilla'
    self.layout = layout
    self.coupling_map = coupling_map
Extends a Layout with the idle nodes from coupling_map. Args: coupling_map (Coupling): directed graph representing a coupling map. layout (Layout): an existing layout. ancilla allocation occurs if the layout is smaller than the coupling_map.
juraj-google-style
def exportUsufy(data, ext, fileH):
    """Method that exports the different structures onto different formats.

    Args:
    -----
        data: Data to export.
        ext: One of the supported extensions: csv, gml, json, ods, png,
            txt, xls, xlsx. Any other value is silently ignored.
        fileH: File header (prefix) for the output files.

    Returns:
    --------
        Performs the export as requested by parameter.
    """
    exporters = {
        "csv": usufyToCsvExport,
        "gml": usufyToGmlExport,
        "json": usufyToJsonExport,
        "ods": usufyToOdsExport,
        "png": usufyToPngExport,
        "txt": usufyToTextExport,
        "xls": usufyToXlsExport,
        "xlsx": usufyToXlsxExport,
    }
    exporter = exporters.get(ext)
    # Unknown extensions are a no-op, matching the original if/elif chain.
    if exporter is not None:
        exporter(data, fileH+"."+ext)
Method that exports the different structures onto different formats. Args: ----- data: Data to export. ext: One of the following: csv, gml, json, ods, png, txt, xls, xlsx. fileH: File header (prefix) for the output files. Returns: -------- Performs the export as requested by parameter.
juraj-google-style
def get_enumerations_from_bit_mask(enumeration, mask):
    """A utility function that creates a list of enumeration values from a
    bit mask for a specific mask enumeration class.

    Args:
        enumeration (class): The enumeration class from which to draw
            enumeration values.
        mask (int): The bit mask from which to identify enumeration values.

    Returns:
        list: A list of enumeration values corresponding to the bit mask.
    """
    matches = []
    for candidate in enumeration:
        # Keep a value only when all of its bits are present in the mask.
        if candidate.value & mask == candidate.value:
            matches.append(candidate)
    return matches
A utility function that creates a list of enumeration values from a bit mask for a specific mask enumeration class. Args: enumeration (class): The enumeration class from which to draw enumeration values. mask (int): The bit mask from which to identify enumeration values. Returns: list: A list of enumeration values corresponding to the bit mask.
juraj-google-style
def remove_observer(self, callback):
    """Remove an observer from this event.

    Args:
        callback: A function or coroutine callback to remove from this
            event.

    Raises:
        ValueError: If the callback is not an observer of this event.
    """
    # Check membership first so the error message is ours, not list.remove's.
    if callback not in self._observers:
        raise ValueError('{} is not an observer of {}'.format(callback, self))
    self._observers.remove(callback)
Remove an observer from this event. Args: callback: A function or coroutine callback to remove from this event. Raises: ValueError: If the callback is not an observer of this event.
juraj-google-style
def localize_file(path_or_buffer):
    """Ensure the target file is available locally.

    If the target is remote (a URL) or a file-like object, its contents
    are copied into a local PDF file named after the current PID.

    Args:
        path_or_buffer (str): File path, file-like object, or URL of the
            target file.

    Returns:
        filename (str): file name in local storage
        temporary_file_flag (bool): True when a temporary local copy was
            created (caller is responsible for cleanup).
    """
    path_or_buffer = _stringify_path(path_or_buffer)
    if _is_url(path_or_buffer):
        req = urlopen(path_or_buffer)
        filename = os.path.basename(req.geturl())
        # Fix: the original used `is not '.pdf'`, an identity comparison
        # that is effectively always True for a computed string; use `!=`
        # for the intended equality test.
        if os.path.splitext(filename)[-1] != '.pdf':
            pid = os.getpid()
            filename = '{0}.pdf'.format(pid)
        with open(filename, 'wb') as f:
            shutil.copyfileobj(req, f)
        return (filename, True)
    elif is_file_like(path_or_buffer):
        pid = os.getpid()
        filename = '{0}.pdf'.format(pid)
        with open(filename, 'wb') as f:
            shutil.copyfileobj(path_or_buffer, f)
        return (filename, True)
    else:
        # Already a local path; expand '~' and do not mark as temporary.
        return (os.path.expanduser(path_or_buffer), False)
Ensure the target file is available locally. If the target file is remote, this function fetches it into local storage. Args: path (str): File path or file like object or URL of target file. Returns: filename (str): file name in local storage temporary_file_flag (bool): temporary file flag
codesearchnet
def _GetAction(self, action, text):
    """Parse the well known actions for easy reading.

    Args:
        action (str): the function or action called by the agent.
        text (str): mac Wifi log text.

    Returns:
        str: a formatted string representing the known (or common) action.
            If the action is not known the original log text is returned.
    """
    if 'airportdProcessDLILEvent' in action:
        interface = text.split()[0]
        return 'Interface {0:s} turn up.'.format(interface)

    if 'doAutoJoin' in action:
        match = self._CONNECTED_RE.match(text)
        # The captured SSID is quoted; strip the surrounding quotes.
        ssid = match.group(1)[1:-1] if match else 'Unknown'
        return 'Wifi connected to SSID {0:s}'.format(ssid)

    if 'processSystemPSKAssoc' in action:
        wifi_parameters = self._WIFI_PARAMETERS_RE.search(text)
        if wifi_parameters:
            # Missing capture groups (None or '') fall back to 'Unknown'.
            ssid = wifi_parameters.group(1) or 'Unknown'
            bssid = wifi_parameters.group(2) or 'Unknown'
            security = wifi_parameters.group(3) or 'Unknown'
            return (
                'New wifi configured. BSSID: {0:s}, SSID: {1:s}, '
                'Security: {2:s}.').format(bssid, ssid, security)

    return text
Parse the well known actions for easy reading. Args: action (str): the function or action called by the agent. text (str): mac Wifi log text. Returns: str: a formatted string representing the known (or common) action. If the action is not known the original log text is returned.
juraj-google-style