code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def df_numeric_column(min_value=0, max_value=1, num_rows=100):
    """Generate a numeric column (pandas Series) of uniform random data.

    Args:
        min_value (float): Lower bound of the uniform distribution (default 0).
        max_value (float): Upper bound of the uniform distribution (default 1).
        num_rows (int): Number of rows to generate (default 100).

    Returns:
        pd.Series: ``num_rows`` random floats drawn from [min_value, max_value).
    """
    values = np.random.uniform(min_value, max_value, num_rows)
    return pd.Series(values)
Generate a numeric column with random data Args: min_value (float): Minimum value (default = 0) max_value (float): Maximum value (default = 1) num_rows (int): The number of rows to generate (default = 100)
juraj-google-style
def SetAndLoadTagFile(self, tagging_file_path):
    """Sets the tag file to be used by the plugin.

    Loads the file immediately and caches its event tagging rules.

    Args:
        tagging_file_path (str): path of the tagging file.
    """
    tag_file = tagging_file.TaggingFile(tagging_file_path)
    self._tagging_rules = tag_file.GetEventTaggingRules()
Sets the tag file to be used by the plugin. Args: tagging_file_path (str): path of the tagging file.
codesearchnet
def validate_bagit_file(bagit_path):
    """Check if a BagIt file is valid.

    Args:
        bagit_path: path to the BagIt zip archive.

    Returns:
        bool: True if all checks pass.

    Raises:
        ServiceFailure: If the BagIt zip archive file fails any of the
            following checks:
            - Is a valid zip file.
            - The tag and manifest files are correctly formatted.
            - Contains all the files listed in the manifests.
            - The file checksums match the manifests.
    """
    _assert_zip_file(bagit_path)
    archive = zipfile.ZipFile(bagit_path)
    manifest_entries = _get_manifest_info_list(archive)
    _validate_checksums(archive, manifest_entries)
    return True
Check if a BagIt file is valid. Raises: ServiceFailure If the BagIt zip archive file fails any of the following checks: - Is a valid zip file. - The tag and manifest files are correctly formatted. - Contains all the files listed in the manifests. - The file checksums match the manifests.
codesearchnet
def token_validate_with_login(self, **kwargs):
    """Authenticate a user with a TMDb username and password.

    The user must have a verified email address and be registered on TMDb.

    Args:
        request_token: The token you generated for the user to approve.
        username: The user's username on TMDb.
        password: The user's password on TMDb.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('token_validate_with_login')
    response = self._GET(path, kwargs)
    # Mirror the API response fields onto this object for convenience.
    self._set_attrs_to_values(response)
    return response
Authenticate a user with a TMDb username and password. The user must have a verified email address and be registered on TMDb. Args: request_token: The token you generated for the user to approve. username: The user's username on TMDb. password: The user's password on TMDb. Returns: A dict representation of the JSON returned from the API.
juraj-google-style
def Serialize(self, writer):
    """Serialize this token data to bytes.

    Writes name and symbol as var-strings followed by the decimals byte.

    Args:
        writer (neocore.IO.BinaryWriter): binary writer to write
            serialization data to.
    """
    for text_field in (self.name, self.symbol):
        writer.WriteVarString(text_field)
    writer.WriteUInt8(self.decimals)
Serialize this token data to bytes Args: writer (neocore.IO.BinaryWriter): binary writer to write serialization data to
juraj-google-style
def slicewise(self, fn, *inputs):
    """Execute a function in parallel on all slices.

    Args:
        fn: a function from tf.Tensors to tf.Tensor or a tuple of
            tf.Tensors.
        *inputs: a list of inputs.  Each input is either a LaidOutTensor
            or is convertible to a tf.Tensor.

    Returns:
        a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
    """
    # Special-case addition to a LazyAllreduceSum so the allreduce can stay
    # lazy instead of being materialized just to add a term.
    if fn == tf.add:
        assert len(inputs) == 2
        if isinstance(inputs[0], mtf.LazyAllreduceSum):
            return inputs[0] + inputs[1]
    inputs = mtf.convert_args_to_laid_out_tensors(inputs)
    # Non-laid-out values are replicated to every device.
    inputs = [x.tensor_list if isinstance(x, self.LaidOutTensor) else [x] * len(self.devices) for x in inputs]
    ret = mtf.parallel(self.devices, fn, *inputs)
    if isinstance(ret[0], tuple):
        # fn returned a tuple per device: regroup per-output, not per-device.
        ret = mtf.transpose_list_of_lists(ret)
        return tuple([self.LaidOutTensor(t) for t in ret])
    else:
        return self.LaidOutTensor(ret)
Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
juraj-google-style
def generate_custom(self, cpu, vcpu_num, fill_topology):
    """Generate custom CPU model.

    This method attempts to convert the dict to XML, as defined by
    ``xmltodict.unparse``.

    Args:
        cpu (dict): CPU spec.
        vcpu_num (int): number of virtual cpus.
        fill_topology (bool): if topology is not defined in ``cpu``, add a
            CPU topology to the generated CPU.

    Returns:
        lxml.etree.Element: CPU XML node.

    Raises:
        :exc:`~LagoInitException`: when failed to convert dict to XML.
    """
    try:
        cpu = utils.dict_to_xml({'cpu': cpu})
    except Exception as err:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow it and chain the cause for debugging.
        raise LagoInitException("conversion of 'cpu' to XML failed") from err
    if (not cpu.xpath('topology')) and fill_topology:
        cpu.append(self.generate_topology(vcpu_num))
    return cpu
Generate custom CPU model. This method attempts to convert the dict to XML, as defined by ``xmltodict.unparse`` method. Args: cpu(dict): CPU spec vcpu_num(int): number of virtual cpus fill_topology(bool): if topology is not defined in ``cpu`` and ``vcpu`` was not set, will add CPU topology to the generated CPU. Returns: lxml.etree.Element: CPU XML node Raises: :exc:`~LagoInitException`: when failed to convert dict to XML
codesearchnet
def import_descriptor_loader(definition_name, importer=__import__):
    """Find objects by importing modules as needed.

    A definition loader is a function that resolves a definition name to a
    descriptor.  The import finder resolves definitions to their names by
    importing modules when necessary.

    Args:
        definition_name: Name of definition to find.
        importer: Import function used for importing new modules.

    Returns:
        Appropriate descriptor for any describable type located by name.

    Raises:
        DefinitionNotFoundError: when a name does not refer to either a
            definition or a module.
    """
    # Strip a single leading dot (relative-style name).
    if definition_name.startswith('.'):
        definition_name = definition_name[1:]
    if (not definition_name.startswith('.')):
        leaf = definition_name.split('.')[(- 1)]
        if definition_name:
            # First attempt: the name is itself an importable module.
            try:
                module = importer(definition_name, '', '', [leaf])
            except ImportError:
                pass
            else:
                return describe(module)
    # Second attempt: the name is a message/enum definition.
    try:
        return describe(messages.find_definition(definition_name, importer=__import__))
    except messages.DefinitionNotFoundError as err:
        # Final attempt: the name may point at a member (field or enum
        # value) of a parent definition; resolve the parent, then search it.
        split_name = definition_name.rsplit('.', 1)
        if (len(split_name) > 1):
            (parent, child) = split_name
            try:
                parent_definition = import_descriptor_loader(parent, importer=importer)
            except messages.DefinitionNotFoundError:
                pass
            else:
                if isinstance(parent_definition, EnumDescriptor):
                    search_list = (parent_definition.values or [])
                elif isinstance(parent_definition, MessageDescriptor):
                    search_list = (parent_definition.fields or [])
                else:
                    search_list = []
                for definition in search_list:
                    if (definition.name == child):
                        return definition
        raise err
Find objects by importing modules as needed. A definition loader is a function that resolves a definition name to a descriptor. The import finder resolves definitions to their names by importing modules when necessary. Args: definition_name: Name of definition to find. importer: Import function used for importing new modules. Returns: Appropriate descriptor for any describable type located by name. Raises: DefinitionNotFoundError when a name does not refer to either a definition or a module.
codesearchnet
def get_model_hash(model):
    """Calculate a 64-bit integer hash for a TensorFlow Lite model based on
    its structure.

    Only structural features are folded in: operator counts and their
    input/output indices, tensor counts, buffer sizes, tensor shapes and
    subgraph input/output counts -- not the buffer contents themselves.

    Args:
        model: A TensorFlow Lite model object.

    Returns:
        int: A 64-bit integer hash value representing the model structure.
    """
    hash_value = 0
    for subgraph in model.subgraphs:
        if subgraph.operators is not None:
            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.operators))
            for operator in subgraph.operators:
                if operator.inputs is not None:
                    hash_value = update_hash_with_array(hash_value, operator.inputs)
                if operator.outputs is not None:
                    hash_value = update_hash_with_array(hash_value, operator.outputs)
        if subgraph.tensors is not None:
            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.tensors))
            for tensor in subgraph.tensors:
                if tensor.buffer is not None:
                    buffer = model.buffers[tensor.buffer]
                    if buffer.data is not None:
                        # Only the buffer's length contributes, not its data.
                        hash_value = update_hash_with_primitive_value(hash_value, len(buffer.data))
                if tensor.shape is not None:
                    hash_value = update_hash_with_array(hash_value, tensor.shape)
        if subgraph.inputs is not None:
            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.inputs))
        if subgraph.outputs is not None:
            hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.outputs))
    return hash_value
Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure. Args: model: A TensorFlow Lite model object. Returns: int: A 64-bit integer hash value representing the model structure.
github-repos
def remove_node_by_value(self, value):
    """Delete all nodes in ``self.node_list`` with the value ``value``.

    Also removes links in the remaining nodes that targeted a deleted node.

    Args:
        value (Any): The value to find and delete owners of.

    Returns:
        None
    """
    surviving = []
    for node in self.node_list:
        if node.value != value:
            surviving.append(node)
    self.node_list = surviving
    # Prune dangling links that pointed at any removed node.
    for node in self.node_list:
        node.link_list = [link for link in node.link_list
                          if link.target.value != value]
Delete all nodes in ``self.node_list`` with the value ``value``. Args: value (Any): The value to find and delete owners of. Returns: None Example: >>> from blur.markov.node import Node >>> node_1 = Node('One') >>> graph = Graph([node_1]) >>> graph.remove_node_by_value('One') >>> len(graph.node_list) 0
juraj-google-style
def setup(self, target_directory=None):
    """Sets up the _target_directory attribute.

    Args:
        target_directory: Directory in which collected files will be
            dumped.  If falsy, a fresh temporary directory is created
            instead.
    """
    self._target_directory = target_directory
    if (not target_directory):
        self._target_directory = tempfile.mkdtemp()
    elif (not os.path.exists(target_directory)):
        try:
            os.makedirs(target_directory)
        except OSError as exception:
            # Failure is reported through the module state, not raised.
            message = 'An unknown error occurred: {0!s}'.format(exception)
            self.state.add_error(message, critical=True)
Sets up the _target_directory attribute. Args: target_directory: Directory in which collected files will be dumped.
codesearchnet
def normalize(p):
    """Normalize a 2-D point/vector to unit length.

    Args:
        p ([float, float]): x and y coordinates.

    Returns:
        [float, float]: the unit-length vector, or ``[0.0, 0.0]`` when
        the input is the zero vector.
    """
    x, y = p[0], p[1]
    length = math.sqrt(x ** 2 + y ** 2)
    if length == 0:
        return [0.0, 0.0]
    return [x / length, y / length]
Normalizes a point/vector. Args: p ([float, float]): x and y coordinates Returns: [float, float]: the normalized (unit-length) vector, or [0.0, 0.0] if the input is the zero vector
juraj-google-style
def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=''):
    """Dynamically add syntactical elements to a query.

    Adds separators and an optional trailing clause to the query string
    and report title, based on what has been added so far.

    Args:
        flag_id (bool): optional - instance-id was specified.
        qry_string (str): portion of the query constructed thus far.
        param_str (str): the title to display before the list.
        flag_filt (bool): at least one filter item specified.
        filt_st (str): optional - syntax to add on end if no filter
            was specified.

    Returns:
        tuple: (qry_string, param_str) with the appropriate syntactical
        elements appended.
    """
    needs_separator = flag_id or flag_filt
    if needs_separator:
        qry_string = qry_string + ', '
        param_str = param_str + ', '
    if not flag_filt:
        qry_string = qry_string + filt_st
    return (qry_string, param_str)
Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list.
codesearchnet
def _parse_resources(resource_values: dict, resource_name: str) -> dict:
    """Parse resources key.

    Converts cpu values to nano-cpus and memory values (given in "M"
    megabytes) to bytes, as expected by the Docker API.

    Args:
        resource_values (dict): resource configuration values.
        resource_name (str): resource name.

    Returns:
        docker.types.Resources: resources specification.
    """
    resources = {}
    for r_values in resource_values[resource_name]:
        if 'limits' in r_values:
            for r_key, r_value in \
                    resource_values[resource_name][r_values].items():
                if 'cpu' in r_key:
                    # Docker expects nano-cpus (1 cpu = 10**9).
                    cpu_value = float(r_value) * 10 ** 9
                    cpu_key = r_key[:3] + '_limit'
                    resources[cpu_key] = int(cpu_value)
                if 'mem' in r_key:
                    # Value like "512M" -> bytes (1 M = 1048576 bytes).
                    mem_value = re.sub('M', '', r_value)
                    mem_key = r_key[:3] + '_limit'
                    resources[mem_key] = int(mem_value) * 1048576
    resources_spec = docker.types.Resources(**resources)
    return resources_spec
Parse resources key. Args: resource_values (dict): resource configurations values resource_name (string): Resource name Returns: dict, resources specification
juraj-google-style
def _add_parameters(self, parameter_map, parameter_list): for parameter in parameter_list: if parameter.get('$ref'): parameter = self.specification['parameters'].get(parameter.get('$ref').split('/')[-1]) parameter_map[parameter['name']] = parameter
Populates the given parameter map with the list of parameters provided, resolving any reference objects encountered. Args: parameter_map: mapping from parameter names to parameter objects parameter_list: list of either parameter objects or reference objects
juraj-google-style
def parse_arguments(argv):
    """Parses command-line arguments and returns them as a namespace.

    Args:
        argv: The arguments passed to the command line.

    Returns:
        argparse.Namespace: the known parsed arguments (unknown
        arguments are silently ignored).
    """
    parser = argparse.ArgumentParser(description='benchmark-runinference')
    parser.add_argument('-m', '--mode', help='Mode to run pipeline in.', choices=['local', 'cloud'], default='local')
    parser.add_argument('-p', '--project', help='GCP project to run pipeline on.', default=cfg.PROJECT_ID)
    parser.add_argument('-d', '--device', help='Device to run the dataflow job on', choices=['CPU', 'GPU'], default='CPU')
    args, _ = parser.parse_known_args(args=argv)
    return args
Parses the arguments passed to the command line and returns them as an object Args: argv: The arguments passed to the command line. Returns: The arguments that are being passed in.
github-repos
def transfer(self, data):
    """Shift out `data` and return shifted in data.

    Args:
        data (bytes, bytearray, list): a byte array or list of 8-bit
            integers to shift out.

    Returns:
        bytes, bytearray, list: data shifted in, in the same container
        type as the input.

    Raises:
        SPIError: if an I/O or OS error occurs.
        TypeError: if `data` type is invalid.
        ValueError: if data is not valid bytes.
    """
    if not isinstance(data, (bytes, bytearray, list)):
        raise TypeError("Invalid data type, should be bytes, bytearray, or list.")
    try:
        buf = array.array('B', data)
    except OverflowError:
        raise ValueError("Invalid data bytes.")
    buf_addr, buf_len = buf.buffer_info()
    spi_xfer = _CSpiIocTransfer()
    # Full-duplex: the same buffer is used for transmit and receive, so
    # the received bytes overwrite the transmitted ones in place.
    spi_xfer.tx_buf = buf_addr
    spi_xfer.rx_buf = buf_addr
    spi_xfer.len = buf_len
    try:
        fcntl.ioctl(self._fd, SPI._SPI_IOC_MESSAGE_1, spi_xfer)
    except OSError as e:
        raise SPIError(e.errno, "SPI transfer: " + e.strerror)
    # Mirror the caller's container type in the return value.
    if isinstance(data, bytes):
        return bytes(bytearray(buf))
    elif isinstance(data, bytearray):
        return bytearray(buf)
    elif isinstance(data, list):
        return buf.tolist()
Shift out `data` and return shifted in data. Args: data (bytes, bytearray, list): a byte array or list of 8-bit integers to shift out. Returns: bytes, bytearray, list: data shifted in. Raises: SPIError: if an I/O or OS error occurs. TypeError: if `data` type is invalid. ValueError: if data is not valid bytes.
juraj-google-style
def remove(self, name):
    """Remove a column of data.

    Args:
        name (str): name of the column to remove.

    Returns:
        None

    .. note::
        If the column name does not exist, a warning is issued.
    """
    missing = False
    try:
        del self.data[name]
    except (ValueError, KeyError):
        missing = True
    if missing:
        import warnings
        warnings.warn("Unable to find column '%s' in data source" % name)
Remove a column of data. Args: name (str) : name of the column to remove Returns: None .. note:: If the column name does not exist, a warning is issued.
juraj-google-style
def save_config(self, out_file_name):
    """Saves the GUI configuration to ``out_file_name`` as JSON.

    The output contains the gui settings, per-script hidden-parameter
    visibility flags, and serialized instruments, scripts and probes.

    Args:
        out_file_name: name of the file to write (directories are
            created if missing).
    """
    def get_hidden_parameter(item):
        # Recursively map a tree item to {name: visible} (leaf) or
        # {name: {child maps...}} (non-leaf).
        numer_of_sub_elements = item.childCount()
        if numer_of_sub_elements == 0:
            dictator = {item.name : item.visible}
        else:
            dictator = {item.name:{}}
            for child_id in range(numer_of_sub_elements):
                dictator[item.name].update(get_hidden_parameter(item.child(child_id)))
        return dictator
    out_file_name = str(out_file_name)
    if not os.path.exists(os.path.dirname(out_file_name)):
        os.makedirs(os.path.dirname(out_file_name))
    # Collect visibility flags for every top-level script item.
    dictator = {}
    for index in range(self.tree_scripts.topLevelItemCount()):
        script_item = self.tree_scripts.topLevelItem(index)
        dictator.update(get_hidden_parameter(script_item))
    dictator = {"gui_settings": self.gui_settings, "scripts_hidden_parameters":dictator}
    # Sync script objects with the current tree state before serializing.
    for index in range(self.tree_scripts.topLevelItemCount()):
        script_item = self.tree_scripts.topLevelItem(index)
        self.update_script_from_item(script_item)
    dictator.update({'instruments': {}, 'scripts': {}, 'probes': {}})
    for instrument in self.instruments.values():
        dictator['instruments'].update(instrument.to_dict())
    for script in self.scripts.values():
        dictator['scripts'].update(script.to_dict())
    for instrument, probe_dict in self.probes.items():
        dictator['probes'].update({instrument: ','.join(list(probe_dict.keys()))})
    with open(out_file_name, 'w') as outfile:
        tmp = json.dump(dictator, outfile, indent=4)
saves gui configuration to out_file_name Args: out_file_name: name of file
juraj-google-style
def _create_extra_packages(extra_packages, temp_dir) -> List[beam_runner_api_pb2.ArtifactInformation]:
    """Creates a list of local extra packages.

    Args:
        extra_packages: Ordered list of local paths to extra packages to be
            staged.  Only packages on the local file system and remote
            (e.g. GCS) paths are supported.
        temp_dir: Temporary folder where the resource building can happen.
            Caller is responsible for cleaning up this folder after this
            function returns.

    Returns:
        A list of ArtifactInformation for the staged resources, including
        the EXTRA_PACKAGES_FILE manifest listing their basenames.

    Raises:
        RuntimeError: If files specified are not found or do not have
            expected name patterns.
    """
    resources: List[beam_runner_api_pb2.ArtifactInformation] = []
    staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)
    local_packages: List[str] = []
    for package in extra_packages:
        if not (os.path.basename(package).endswith('.tar') or os.path.basename(package).endswith('.tar.gz') or os.path.basename(package).endswith('.whl') or os.path.basename(package).endswith('.zip')):
            raise RuntimeError('The --extra_package option expects a full path ending with ".tar", ".tar.gz", ".whl" or ".zip" instead of %s' % package)
        if os.path.basename(package).endswith('.whl'):
            _LOGGER.warning('The .whl package "%s" provided in --extra_package must be binary-compatible with the worker runtime environment.' % package)
        if not os.path.isfile(package):
            if Stager._is_remote_path(package):
                # Remote packages are first downloaded into the staging dir.
                _LOGGER.info('Downloading extra package: %s locally before staging', package)
                _, last_component = FileSystems.split(package)
                local_file_path = FileSystems.join(staging_temp_dir, last_component)
                Stager._download_file(package, local_file_path)
            else:
                raise RuntimeError('The file %s cannot be found. It was specified in the --extra_packages command line option.' % package)
        else:
            local_packages.append(package)
    # Fold the downloaded remote packages back into the local list.
    local_packages.extend([FileSystems.join(staging_temp_dir, f) for f in os.listdir(staging_temp_dir)])
    for package in local_packages:
        basename = os.path.basename(package)
        resources.append(Stager._create_file_stage_to_artifact(package, basename))
    # Write a manifest of package basenames so the worker knows what to install.
    with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:
        for package in local_packages:
            f.write('%s\n' % os.path.basename(package))
    resources.append(Stager._create_file_stage_to_artifact(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), EXTRA_PACKAGES_FILE))
    return resources
Creates a list of local extra packages. Args: extra_packages: Ordered list of local paths to extra packages to be staged. Only packages on localfile system and GCS are supported. temp_dir: Temporary folder where the resource building can happen. Caller is responsible for cleaning up this folder after this function returns. Returns: A list of ArtifactInformation of local file paths and file names (no paths) for the resources staged. All the files are assumed to be staged in staging_location. Raises: RuntimeError: If files specified are not found or do not have expected name patterns.
github-repos
def convert_data_to_dtype(data, data_type, mot_float_type='float'):
    """Convert the given input data to the correct numpy type.

    Args:
        data (ndarray): The value to convert to the correct numpy type.
        data_type (str): the data type we need to convert the data to.
        mot_float_type (str): the data type of the current
            ``mot_float_type``.

    Returns:
        ndarray: the input data converted to the desired numpy data type,
        C-contiguous, aligned and writeable.
    """
    scalar_dtype = ctype_to_dtype(data_type, mot_float_type)
    if isinstance(data, numbers.Number):
        data = scalar_dtype(data)
    if is_vector_ctype(data_type):
        # Vector ctypes: the last axis of ``data`` is packed into the
        # vector components of a structured dtype.
        shape = data.shape
        dtype = ctype_to_dtype(data_type, mot_float_type)
        ve = np.zeros(shape[:-1], dtype=dtype)
        if len(shape) == 1:
            for vector_ind in range(shape[0]):
                # NOTE(review): indexing ``ve[0]`` on what looks like a 0-d
                # result of ``np.zeros(shape[:-1])`` -- presumably the
                # structured dtype makes this valid; confirm against
                # ctype_to_dtype's vector layout.
                ve[0][vector_ind] = data[vector_ind]
        elif len(shape) == 2:
            for i in range(data.shape[0]):
                for vector_ind in range(data.shape[1]):
                    ve[i][vector_ind] = data[i, vector_ind]
        elif len(shape) == 3:
            for i in range(data.shape[0]):
                for j in range(data.shape[1]):
                    for vector_ind in range(data.shape[2]):
                        ve[i, j][vector_ind] = data[i, j, vector_ind]
        return np.require(ve, requirements=['C', 'A', 'O'])
    return np.require(data, scalar_dtype, ['C', 'A', 'O'])
Convert the given input data to the correct numpy type. Args: data (ndarray): The value to convert to the correct numpy type data_type (str): the data type we need to convert the data to mot_float_type (str): the data type of the current ``mot_float_type`` Returns: ndarray: the input data but then converted to the desired numpy data type
juraj-google-style
def touch(path, content='', encoding='utf-8', overwrite=False):
    """Create a file at the given path if it does not already exist.

    Args:
        path (str): Path to the file.
        content (str or bytes): Optional content that will be written in
            the file.
        encoding (str): Encoding in which to write the content.
            Default: ``utf-8``.
        overwrite (bool): Overwrite the file if it exists.

    Returns:
        bool: True if the operation is successful, False otherwise.
    """
    path = os.path.abspath(path)
    if (not overwrite) and os.path.exists(path):
        logger.warning('touch: "%s" already exists', path)
        return False
    try:
        logger.info('touch: %s', path)
        # `six.binary_type` is just `bytes` on Python 3, and the builtin
        # `open` is `io.open`; drop the redundant third-party dependency.
        with open(path, 'wb') as f:
            if not isinstance(content, bytes):
                content = content.encode(encoding)
            f.write(content)
        return True
    except Exception as e:
        logger.error('touch: %s failed. Error: %s', path, e)
        return False
Create a file at the given path if it does not already exists. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if exists. Returns: bool: True if the operation is successful, False otherwise.
codesearchnet
def decode_proto(proto):
    """Decodes proto representing a nested structure.

    Args:
        proto: Proto to decode.

    Returns:
        Decoded structure.

    Raises:
        NotEncodableError: For values for which there are no encoders.
    """
    return _map_structure(proto, _get_decoders())
Decodes proto representing a nested structure. Args: proto: Proto to decode. Returns: Decoded structure. Raises: NotEncodableError: For values for which there are no encoders.
github-repos
def scan_devices(self, devnames, timeout=DEF_TIMEOUT, interval=DEF_SCAN_INTERVAL, window=DEF_SCAN_WINDOW):
    """Run a BLE scan for a defined interval and return results.

    Alternative to :meth:`begin_scan`/:meth:`end_scan`.

    Args:
        devnames: device names to scan for.
        timeout (float): time in seconds to run the scanning process for.
        interval (int): BLE scan interval, in units of 625us.
        window (int): BLE scan window, in units of 625us.

    Returns:
        a :class:`ScanResults` object containing the scan results.
    """
    logger.debug('configuring scan parameters')
    self.api.ble_cmd_gap_set_scan_parameters(interval, window, 1)
    self._set_state(self._STATE_CONFIGURE_SCAN)
    self.api.ble_cmd_gap_discover(1)
    # Block until the configure-scan state has been acknowledged.
    self._wait_for_state(self._STATE_CONFIGURE_SCAN)
    logger.debug('starting scan for devices {}'.format(devnames))
    self.scan_targets = devnames
    self._set_state(self._STATE_SCANNING)
    # Scan until all targets are found or the timeout expires.
    self._wait_for_state(self._STATE_SCANNING, timeout)
    self._set_state(self._STATE_GAP_END)
    self.api.ble_cmd_gap_end_procedure()
    self._wait_for_state(self._STATE_GAP_END)
    logger.debug('scanning completed')
    return self.scan_responses
Run a BLE scan for a defined interval and return results. Alternative to :meth:`begin_scan`/:meth:`end_scan`. Args: devnames: device names to scan for timeout (float): time in seconds to run the scanning process for interval (int): BLE scan interval, in units of 625us window (int): BLE scan window, in units of 625us Returns: a :class:`ScanResults` object containing the scan results.
juraj-google-style
def _build_file_writer(cls, session: AppSession):
    """Create the File Writer.

    Chooses the writer class and path-naming policy from the session's
    command-line arguments.

    Returns:
        FileWriter: An instance of :class:`.writer.BaseFileWriter`.
    """
    args = session.args
    if args.delete_after:
        # Nothing will be kept on disk; the default factory writer is used.
        return session.factory.new('FileWriter')
    elif args.output_document:
        # All output is concatenated into a single document.
        session.factory.class_map['FileWriter'] = SingleDocumentWriter
        return session.factory.new('FileWriter', args.output_document, headers_included=args.save_headers)
    # Directory layout: implied by multi-URL/recursive modes unless forced.
    use_dir = ((len(args.urls) != 1) or args.page_requisites or args.recursive)
    if (args.use_directories == 'force'):
        use_dir = True
    elif (args.use_directories == 'no'):
        use_dir = False
    # Filename restriction flags.
    os_type = ('windows' if ('windows' in args.restrict_file_names) else 'unix')
    ascii_only = ('ascii' in args.restrict_file_names)
    no_control = ('nocontrol' not in args.restrict_file_names)
    if ('lower' in args.restrict_file_names):
        case = 'lower'
    elif ('upper' in args.restrict_file_names):
        case = 'upper'
    else:
        case = None
    path_namer = session.factory.new('PathNamer', args.directory_prefix, index=args.default_page, use_dir=use_dir, cut=args.cut_dirs, protocol=args.protocol_directories, hostname=args.host_directories, os_type=os_type, ascii_only=ascii_only, no_control=no_control, case=case, max_filename_length=args.max_filename_length)
    # Clobber policy depends on download mode.
    if (args.recursive or args.page_requisites or args.continue_download):
        if (args.clobber_method == 'disable'):
            file_class = OverwriteFileWriter
        else:
            file_class = IgnoreFileWriter
    elif args.timestamping:
        file_class = TimestampingFileWriter
    else:
        file_class = AntiClobberFileWriter
    session.factory.class_map['FileWriter'] = file_class
    return session.factory.new('FileWriter', path_namer, file_continuing=args.continue_download, headers_included=args.save_headers, local_timestamping=args.use_server_timestamps, adjust_extension=args.adjust_extension, content_disposition=args.content_disposition, trust_server_names=args.trust_server_names)
Create the File Writer. Returns: FileWriter: An instance of :class:`.writer.BaseFileWriter`.
codesearchnet
def get_namespaces(start=None, end=None):
    """Return all namespaces in the specified range.

    Args:
        start: only return namespaces >= start if start is not None.
        end: only return namespaces < end if end is not None.

    Returns:
        A list of namespace names between the (optional) start and end
        values.
    """
    q = Namespace.query()
    if (start is not None):
        q = q.filter((Namespace.key >= Namespace.key_for_namespace(start)))
    if (end is not None):
        q = q.filter((Namespace.key < Namespace.key_for_namespace(end)))
    return [x.namespace_name for x in q]
Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values.
codesearchnet
def __init__(self, output_names, force_strict=False):
    """Initializer.

    Args:
        output_names: The list of required output names that will be
            strictly enforced by this class.
        force_strict: If True, force this future to be in strict mode.

    Raises:
        UnexpectedPipelineError: if an output name collides with the
            reserved 'default' slot.
    """
    self._after_all_pipelines = set()
    # A 'default' slot always exists, even in strict mode.
    self._output_dict = {
        'default': Slot(name='default'),
    }
    # Strict mode is implied by any named outputs, or can be forced.
    self._strict = len(output_names) > 0 or force_strict
    if self._strict:
        for name in output_names:
            if name in self._output_dict:
                raise UnexpectedPipelineError('Output name reserved: "%s"' % name)
            self._output_dict[name] = Slot(name=name, strict=True)
Initializer. Args: output_names: The list of require output names that will be strictly enforced by this class. force_strict: If True, force this future to be in strict mode.
juraj-google-style
def depth_february_average_ground_temperature(self, value=None):
    """Corresponds to IDD Field `depth_february_average_ground_temperature`.

    Args:
        value (float): value for IDD Field
            `depth_february_average_ground_temperature`.  Unit: C.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value.
    """
    if value is None:
        # None marks a missing value and is stored unchecked.
        self._depth_february_average_ground_temperature = None
        return
    try:
        converted = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float for field `depth_february_average_ground_temperature`'.format(value))
    self._depth_february_average_ground_temperature = converted
Corresponds to IDD Field `depth_february_average_ground_temperature` Args: value (float): value for IDD Field `depth_february_average_ground_temperature` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def get_num_bytes(self, batch: Sequence[np.ndarray]) -> int:
    """Returns:
      The number of bytes of data for a batch of Tensors.
    """
    # ``itemsize`` is the size of a SINGLE element, so summing it counted
    # only one element per array.  ``nbytes`` (itemsize * element count)
    # is the actual byte size of each array's data.
    return sum(np_array.nbytes for np_array in batch)
Returns: The number of bytes of data for a batch of Tensors.
github-repos
def num_records_produced(self, name=None):
    """Returns the number of records this reader has produced.

    This is the same as the number of Read executions that have succeeded.

    Args:
        name: A name for the operation (optional).

    Returns:
        An int64 Tensor.
    """
    # Resource-based readers (V2) use the _v2 kernel; ref-based use V1.
    if self._reader_ref.dtype == dtypes.resource:
        return gen_io_ops.reader_num_records_produced_v2(self._reader_ref, name=name)
    else:
        return gen_io_ops.reader_num_records_produced(self._reader_ref, name=name)
Returns the number of records this reader has produced. This is the same as the number of Read executions that have succeeded. Args: name: A name for the operation (optional). Returns: An int64 Tensor.
github-repos
def reset_sequence(self, topic):
    """Reset the expected sequence number for a topic.

    If the topic is unknown, this does nothing.  This behaviour is useful
    when you have wildcard topics that only create queues once they
    receive the first message matching the topic.

    Args:
        topic (string): The topic to reset the packet queue on.
    """
    try:
        queue = self.queues[topic]
    except KeyError:
        return
    queue.reset()
Reset the expected sequence number for a topic If the topic is unknown, this does nothing. This behaviour is useful when you have wildcard topics that only create queues once they receive the first message matching the topic. Args: topic (string): The topic to reset the packet queue on
juraj-google-style
def write_file(self, filename, file_format="xyz"):
    """Uses OpenBabel to output all supported formats.

    Args:
        filename: Filename of file to output.
        file_format: String specifying any OpenBabel supported format.
    """
    mol = pb.Molecule(self._obmol)
    # Existing files are overwritten.
    return mol.write(file_format, filename, overwrite=True)
Uses OpenBabel to output all supported formats. Args: filename: Filename of file to output file_format: String specifying any OpenBabel supported formats.
juraj-google-style
def intersect(self, other):
    """Constructs an unminimized DFA recognizing the intersection of the
    languages of two given DFAs.

    Args:
        other (DFA): The other DFA that will be used for the intersect
            operation.

    Returns:
        DFA: self, mutated in place into the resulting product DFA.
    """
    # Intersection = cross product with logical AND on accepting states.
    operation = bool.__and__
    self.cross_product(other, operation)
    return self
Constructs an unminimized DFA recognizing the intersection of the languages of two given DFAs. Args: other (DFA): The other DFA that will be used for the intersect operation Returns: DFA: The resulting DFA
juraj-google-style
def raster_to_gtiff(tif, geotif, change_nodata=False, change_gdal_type=False):
    """Converting Raster format to GeoTIFF.

    Args:
        tif: source raster file path.
        geotif: output raster file path.
        change_nodata: change NoDataValue to DEFAULT_NODATA (-9999) or not.
        change_gdal_type: If True, output with the GDT_Float32 data type.
    """
    rst_file = RasterUtilClass.read_raster(tif)
    nodata = rst_file.noDataValue
    if change_nodata:
        # Remap only when the current nodata differs from the default.
        if not MathClass.floatequal(rst_file.noDataValue, DEFAULT_NODATA):
            nodata = DEFAULT_NODATA
            rst_file.data[rst_file.data == rst_file.noDataValue] = DEFAULT_NODATA
    gdal_type = rst_file.dataType
    if change_gdal_type:
        gdal_type = GDT_Float32
    RasterUtilClass.write_gtiff_file(geotif, rst_file.nRows, rst_file.nCols, rst_file.data, rst_file.geotrans, rst_file.srs, nodata, gdal_type)
Converting Raster format to GeoTIFF. Args: tif: source raster file path. geotif: output raster file path. change_nodata: change NoDataValue to -9999 or not. change_gdal_type: If True, output with the GDT_Float32 (:obj:`pygeoc.raster.GDALDataType`) data type instead of the source data type.
juraj-google-style
def from_tensor(tensor, validate=True):
    """Creates DateTensor from a single tensor containing years, months and
    days.

    This function is complementary to DateTensor.to_tensor: given an int32
    Tensor of shape (..., 3), creates a DateTensor.  The three elements of
    the last dimension are years, months and days, in this order.

    Args:
        tensor: Tensor of type int32 and shape (..., 3).
        validate: Whether to validate the dates.

    Returns:
        DateTensor object.
    """
    tensor = tf.convert_to_tensor(tensor, dtype=tf.int32)
    # Split the last axis into the three date components.
    return from_year_month_day(year=tensor[..., 0], month=tensor[..., 1], day=tensor[..., 2], validate=validate)
Creates DateTensor from a single tensor containing years, months and days. This function is complementary to DateTensor.to_tensor: given an int32 Tensor of shape (..., 3), creates a DateTensor. The three elements of the last dimension are years, months and days, in this order. Args: tensor: Tensor of type int32 and shape (..., 3). validate: Whether to validate the dates. Returns: DateTensor object. #### Example ```python tensor = tf.constant([[2015, 4, 15], [2017, 12, 30]], dtype=tf.int32) date_tensor = tff.datetime.dates_from_tensor(tensor) ```
github-repos
def _has_tf_decorator_attr(obj): return hasattr(obj, '_tf_decorator') and isinstance(getattr(obj, '_tf_decorator'), TFDecorator)
Checks if object has _tf_decorator attribute. This check would work for mocked object as well since it would check if returned attribute has the right type. Args: obj: Python object.
github-repos
def get_attribute_label(
    self, main_type, sub_type, unique_id, attribute_id, label, owner=None, params=None
):
    """Retrieve a label on an attribute (delegates to ``attribute_label``).

    Args:
        main_type: the main entity type.
        sub_type: the entity sub type.
        unique_id: unique id of the entity.
        attribute_id: id of the attribute the label is on.
        label: the label value.
        owner: optional owner name.
        params: optional request parameters.

    Returns:
        The response from ``attribute_label``.
    """
    return self.attribute_label(
        main_type, sub_type, unique_id, attribute_id, label, owner=owner, params=params
    )
Args: owner: main_type: sub_type: unique_id: attribute_id: label: params: Return:
juraj-google-style
def _collect_leaf_level_keys(cross):
    """Collects base keys by expanding all nested crosses.

    Args:
        cross: A `_CrossedColumn`.

    Returns:
        A list of strings or `_CategoricalColumn` instances.
    """
    leaf_level_keys = []
    for k in cross.keys:
        if isinstance(k, _CrossedColumn):
            # Nested cross: recurse so only leaf keys remain.
            leaf_level_keys.extend(_collect_leaf_level_keys(k))
        else:
            leaf_level_keys.append(k)
    return leaf_level_keys
Collects base keys by expanding all nested crosses. Args: cross: A `_CrossedColumn`. Returns: A list of strings or `_CategoricalColumn` instances.
github-repos
def _wait_time(self, shard_state, secs, now=datetime.datetime.now): assert shard_state.slice_start_time is not None delta = now() - shard_state.slice_start_time duration = datetime.timedelta(seconds=secs) if delta < duration: return util.total_seconds(duration - delta) else: return 0
Time to wait until slice_start_time is secs ago from now. Args: shard_state: shard state. secs: duration in seconds. now: a func that gets now. Returns: 0 if no wait. A positive int in seconds otherwise. Always around up.
juraj-google-style
def wavelength_match(a, b):
    """Return if two wavelengths are equal.

    Args:
        a (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl.
        b (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl.

    Raises:
        ValueError: if the arguments are neither both comparable values
            nor a scalar/3-tuple combination.
    """
    # Fix: the original condition was ``type(a) == (type(b) or ...)``,
    # which reduces to ``type(a) == type(b)`` because ``type(b)`` is
    # truthy, so mixed int/float comparisons fell through and raised.
    if type(a) == type(b) or (isinstance(a, numbers.Number) and isinstance(b, numbers.Number)):
        return a == b
    elif a is None or b is None:
        return False
    elif isinstance(a, (list, tuple)) and len(a) == 3:
        # a is a (min, nominal, max) range; test b against it.
        return a[0] <= b <= a[2]
    elif isinstance(b, (list, tuple)) and len(b) == 3:
        return b[0] <= a <= b[2]
    else:
        raise ValueError("Can only compare wavelengths of length 1 or 3")
Return if two wavelengths are equal. Args: a (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl b (tuple or scalar): (min wl, nominal wl, max wl) or scalar wl
juraj-google-style
async def play(self, author, text_channel, query, index=None, stop_current=False, shuffle=False):
    """The play command.

    Args:
        author (discord.Member): The member that called the command.
        text_channel (discord.Channel): The channel where the command was
            called.
        query (str): The argument that was passed with the command.
        index (str): Whether to play next or at the end of the queue.
        stop_current (bool): Whether to stop the currently playing song.
        shuffle (bool): Whether to shuffle the queue after starting.
    """
    if (self.state == 'off'):
        # Cold start: set up the message and voice channels first.
        self.state = 'starting'
        self.prev_queue = []
        (await self.set_topic(''))
        (await self.msetup(text_channel))
        (await self.enqueue(query, index, stop_current, shuffle))
        (await self.vsetup(author))
        # Only fully ready when both message and voice setup succeeded.
        self.state = ('ready' if (self.mready and self.vready) else 'off')
    else:
        (await self.enqueue(query, index, stop_current, shuffle))
    if (self.state == 'ready'):
        if (self.streamer is None):
            (await self.vplay())
The play command Args: author (discord.Member): The member that called the command text_channel (discord.Channel): The channel where the command was called query (str): The argument that was passed with the command index (str): Whether to play next or at the end of the queue stop_current (bool): Whether to stop the currently playing song shuffle (bool): Whether to shuffle the queue after starting
codesearchnet
def exists(self, path):
    """Check if the provided path exists on the FileSystem.

    Args:
        path: string path that needs to be checked.

    Returns:
        boolean flag indicating if path exists.
    """
    return self._gcsIO().exists(path)
Check if the provided path exists on the FileSystem. Args: path: string path that needs to be checked. Returns: boolean flag indicating if path exists
github-repos
def get_label(self, main_type, sub_type, unique_id, label, owner=None, params=None):
    """Retrieve a label on an entity (delegates to ``label`` with GET).

    Args:
        main_type: the main entity type.
        sub_type: the entity sub type.
        unique_id: unique id of the entity.
        label: the label value.
        owner: optional owner name.
        params: optional request parameters.

    Returns:
        The response from ``label``.
    """
    params = params or {}
    return self.label(
        main_type, sub_type, unique_id, label, action='GET', owner=owner, params=params
    )
Args: owner: main_type: sub_type: unique_id: label: params: Return:
juraj-google-style
def routerify(obj):
    """Scan through attributes of ``obj`` looking for any which match a
    route signature.  A router will be created and added to the object.

    Args:
        obj (object): The object (with attributes) from which to setup a
            router.

    Returns:
        Router: The router created from attributes in the object.
    """
    router = Router()
    for info in get_routing_attributes(obj):
        router.add_route(*info)
    # NOTE: no name mangling here -- mangling only applies inside class
    # bodies, so this sets the literal attribute '__growler_router'.
    obj.__growler_router = router
    return router
Scan through attributes of object parameter looking for any which match a route signature. A router will be created and added to the object with parameter. Args: obj (object): The object (with attributes) from which to setup a router Returns: Router: The router created from attributes in the object.
codesearchnet
def insert(self, optional_root_locations_path):
    """Insert a path of optional Locations into the tree.

    Each OptionalTraversalTree object contains child Location objects as
    keys mapping to other OptionalTraversalTree objects.

    Args:
        optional_root_locations_path: list of optional root Locations all
            except the last of which must be present in
            complex_optional_roots.

    Raises:
        AssertionError: if a simple optional (a location with no child
            mapping) appears anywhere except the end of the path.
    """
    encountered_simple_optional = False
    parent_location = self._root_location
    for optional_root_location in optional_root_locations_path:
        if encountered_simple_optional:
            # Fixed message: previously read "butfurther" (missing space).
            raise AssertionError(u'Encountered simple optional root location {} in path, but '
                                 u'further locations are present. This should not happen: {}'
                                 .format(optional_root_location, optional_root_locations_path))
        if (optional_root_location not in self._location_to_children):
            # Simple optionals have no children; nothing may follow them.
            encountered_simple_optional = True
        else:
            self._location_to_children[parent_location].add(optional_root_location)
            parent_location = optional_root_location
Insert a path of optional Locations into the tree. Each OptionalTraversalTree object contains child Location objects as keys mapping to other OptionalTraversalTree objects. Args: optional_root_locations_path: list of optional root Locations all except the last of which must be present in complex_optional_roots
codesearchnet
def git_checkout(branch_name, create=False):
    """Checkout or create a given branch.

    Args:
        branch_name (str): The name of the branch to checkout or create.
        create (bool): If set to **True** it will create the branch
            instead of checking it out.
    """
    log.info("Checking out <33>{}".format(branch_name))
    # '-b' makes git create the branch before switching to it.
    shell.run('git checkout {} {}'.format('-b' if create else '', branch_name))
Checkout or create a given branch Args: branch_name (str): The name of the branch to checkout or create. create (bool): If set to **True** it will create the branch instead of checking it out.
juraj-google-style
def get_submission_filenames(self, tournament=None, round_num=None): query = '\n query {\n user {\n submissions {\n filename\n selected\n round {\n tournament\n number\n }\n }\n }\n }\n ' data = self.raw_query(query, authorization=True)['data']['user'] filenames = [{'round_num': item['round']['number'], 'tournament': item['round']['tournament'], 'filename': item['filename']} for item in data['submissions'] if item['selected']] if (round_num is not None): filenames = [f for f in filenames if (f['round_num'] == round_num)] if (tournament is not None): filenames = [f for f in filenames if (f['tournament'] == tournament)] filenames.sort(key=(lambda f: (f['round_num'], f['tournament']))) return filenames
Get filenames of the submission of the user. Args: tournament (int): optionally filter by ID of the tournament round_num (int): optionally filter round number Returns: list: list of user filenames (`dict`) Each filenames in the list as the following structure: * filename (`str`) * round_num (`int`) * tournament (`int`) Example: >>> NumerAPI().get_submission_filenames(3, 111) [{'filename': 'model57-dMpHpYMPIUAF.csv', 'round_num': 111, 'tournament': 3}]
codesearchnet
def restore(self, state): reading = state.get(u'reading') if reading is not None: reading = IOTileReading.FromDict(reading) selector = DataStreamSelector.FromString(state.get(u'selector')) if self.selector != selector: raise ArgumentError("Attempted to restore a VirtualStreamWalker with a different selector", selector=self.selector, serialized_data=state) self.reading = reading
Restore the contents of this virtual stream walker. Args: state (dict): The previously serialized state. Raises: ArgumentError: If the serialized state does not have a matching selector.
juraj-google-style
def ProcessClientResourcesStats(self, client_id, status): if hasattr(status, 'child_session_id'): flow_path = status.child_session_id else: flow_path = ('aff4:/%s/flows/%s' % (status.client_id, status.flow_id)) resources = rdf_client_stats.ClientResources() resources.client_id = client_id resources.session_id = flow_path resources.cpu_usage.user_cpu_time = status.cpu_time_used.user_cpu_time resources.cpu_usage.system_cpu_time = status.cpu_time_used.system_cpu_time resources.network_bytes_sent = status.network_bytes_sent self.context.usage_stats.RegisterResources(resources)
Process status message from a client and update the stats. Args: client_id: Client id. status: The status object returned from the client.
codesearchnet
def coordinate_filter(self, query, mongo_query):
    """Add genomic coordinate-related filters to the Mongo query.

    Always constrains the chromosome; when both ``start`` and ``end`` are
    supplied, adds an interval-overlap condition as well.

    Args:
        query (dict): a dictionary of query filters specified by the users.
            Must contain 'chrom'; may contain 'start' and 'end'.
        mongo_query (dict): the query that is going to be submitted to the
            database; mutated in place.

    Returns:
        dict: ``mongo_query`` with the coordinate filters added.
    """
    LOG.debug('Adding genomic coordinates to the query')
    chromosome = query['chrom']
    mongo_query['chromosome'] = chromosome
    # Only add positional constraints when both interval ends are present.
    # Overlap test: variant.position <= query.end AND variant.end >= query.start,
    # i.e. the variant interval intersects the requested interval.
    if (query.get('start') and query.get('end')):
        mongo_query['position'] = {'$lte': int(query['end'])}
        mongo_query['end'] = {'$gte': int(query['start'])}
    return mongo_query
Adds genomic coordinated-related filters to the query object Args: query(dict): a dictionary of query filters specified by the users mongo_query(dict): the query that is going to be submitted to the database Returns: mongo_query(dict): returned object contains coordinate filters
juraj-google-style
def get_box_files(self, box_key): uri = '/'.join([self.api_uri, self.boxes_suffix, box_key, self.files_suffix ]) return self._req('get', uri)
Gets the file infos in a single box. Args: box_key: key of the box whose files are fetched. Returns: (status code, list of file info dicts)
juraj-google-style
def set_dimension(tensor, axis, value): shape = tensor.shape.as_list() if shape[axis] not in (value, None): message = 'Cannot set dimension {} of tensor {} to {}; is already {}.' raise ValueError(message.format(axis, tensor.name, value, shape[axis])) shape[axis] = value tensor.set_shape(shape)
Set the length of a tensor along the specified dimension. Args: tensor: Tensor to define shape of. axis: Dimension to set the static shape for. value: Integer holding the length. Raises: ValueError: When the tensor already has a different length specified.
juraj-google-style
def send_file(self, file_name, remote_destination=None, **kwargs): if (not remote_destination): remote_destination = file_name return SubprocessTask((self._rsync_cmd() + ['-ut', file_name, ('%s:%s' % (self.hostname, remote_destination))]), **kwargs)
Send a file to a remote host with rsync. Args: file_name (str): The relative location of the file on the local host. remote_destination (str): The destination for the file on the remote host. If `None`, will be assumed to be the same as **file_name**. Default `None`. **kwargs: Passed to ``SubprocessTask``'s init method. Return: ``pyrem.task.SubprocessTask``: The resulting task.
codesearchnet
def __len__(self): if not context.executing_eagerly(): raise TypeError('`tf.data.Dataset` only supports `len` in eager mode. Use `tf.data.Dataset.cardinality()` instead.') length = self.cardinality() if length.numpy() == INFINITE: raise TypeError('The dataset is infinite.') if length.numpy() == UNKNOWN: raise TypeError('The dataset length is unknown.') return length
Returns the length of the dataset if it is known and finite. This method requires that you are running in eager mode, and that the length of the dataset is known and non-infinite. When the length may be unknown or infinite, or if you are running in graph mode, use `tf.data.Dataset.cardinality` instead. Returns: An integer representing the length of the dataset. Raises: RuntimeError: If the dataset length is unknown or infinite, or if eager execution is not enabled.
github-repos
def column(self, index_or_label): if (isinstance(index_or_label, str) and (index_or_label not in self.labels)): raise ValueError('The column "{}" is not in the table. The table contains these columns: {}'.format(index_or_label, ', '.join(self.labels))) if (isinstance(index_or_label, int) and (not (0 <= index_or_label < len(self.labels)))): raise ValueError('The index {} is not in the table. Only indices between 0 and {} are valid'.format(index_or_label, (len(self.labels) - 1))) return self._columns[self._as_label(index_or_label)]
Return the values of a column as an array. table.column(label) is equivalent to table[label]. >>> tiles = Table().with_columns( ... 'letter', make_array('c', 'd'), ... 'count', make_array(2, 4), ... ) >>> list(tiles.column('letter')) ['c', 'd'] >>> tiles.column(1) array([2, 4]) Args: label (int or str): The index or label of a column Returns: An instance of ``numpy.array``. Raises: ``ValueError``: When the ``index_or_label`` is not in the table.
codesearchnet
def sample(self, num_samples: int): return self._sample(num_samples)
Returns samples from the EBM corresponding to `self.energy`. Args: num_samples: Number of samples to draw from the EBM.
github-repos
def minhash(self, v):
    """Create a new weighted MinHash given a weighted Jaccard vector.

    Each dimension is a non-negative weight (e.g. an integer frequency) of
    the corresponding element in the multi-set represented by the vector.

    Args:
        v (numpy.array): The weighted Jaccard vector.

    Returns:
        WeightedMinHash: The hash of the input vector.

    Raises:
        TypeError: If the input is not iterable.
        ValueError: If the dimension does not match ``self.dim`` or the
            input is all zeros.
    """
    # `collections.Iterable` was removed in Python 3.10; use collections.abc.
    if not isinstance(v, collections.abc.Iterable):
        raise TypeError("Input vector must be an iterable")
    if not len(v) == self.dim:
        raise ValueError("Input dimension mismatch, expecting %d" % self.dim)
    # Always build a float32 copy so the NaN-masking below never mutates
    # the caller's array (the original mutated float32 ndarrays in place).
    v = np.array(v, dtype=np.float32)
    # `np.int` was removed in NumPy 1.24; use an explicit width instead.
    hashvalues = np.zeros((self.sample_size, 2), dtype=np.int64)
    vzeros = (v == 0)
    if vzeros.all():
        raise ValueError("Input is all zeros")
    # Zero weights are excluded from the argmin by mapping them to NaN.
    v[vzeros] = np.nan
    vlog = np.log(v)
    for i in range(self.sample_size):
        t = np.floor((vlog / self.rs[i]) + self.betas[i])
        ln_y = (t - self.betas[i]) * self.rs[i]
        ln_a = self.ln_cs[i] - ln_y - self.rs[i]
        k = np.nanargmin(ln_a)
        hashvalues[i][0], hashvalues[i][1] = k, int(t[k])
    return WeightedMinHash(self.seed, hashvalues)
Create a new weighted MinHash given a weighted Jaccard vector. Each dimension is an integer frequency of the corresponding element in the multi-set represented by the vector. Args: v (numpy.array): The Jaccard vector.
juraj-google-style
def load_pickle(file, encoding=None):
    """Load an object from a pickle file.

    Args:
        file (str): Path to the pickle file.
        encoding (str, optional): Text encoding forwarded to
            :func:`pickle.load` when given.

    Returns:
        object: The object loaded from the pickle file.
    """
    # Forward the encoding only when one was supplied, collapsing the two
    # nearly identical open/load branches into a single code path.
    load_kwargs = {'encoding': encoding} if encoding else {}
    with open(file, 'rb') as handle:
        return pickle.load(handle, **load_kwargs)
Load a pickle file. Args: file (str): Path to pickle file Returns: object: Loaded object from pickle file
codesearchnet
def _construct_adb_cmd(self, raw_name, args, shell):
    """Constructs an adb command with arguments for a subprocess call.

    Args:
        raw_name: string, the raw unsanitized name of the adb command to
            format (underscores are converted to dashes).
        args: string or list of strings, arguments to the adb command.
        shell: bool, True to run this command through the system shell,
            False to invoke it directly.

    Returns:
        The adb command in a format appropriate for subprocess. If shell
        is True, this is a single string; otherwise a list of strings.
    """
    args = (args or '')
    name = raw_name.replace('_', '-')
    if shell:
        # Shell invocation takes a single command string, so the arguments
        # must be quoted/escaped into one string first.
        args = utils.cli_cmd_to_string(args)
        if self.serial:
            adb_cmd = ('"%s" -s "%s" %s %s' % (ADB, self.serial, name, args))
        else:
            adb_cmd = ('"%s" %s %s' % (ADB, name, args))
    else:
        adb_cmd = [ADB]
        if self.serial:
            adb_cmd.extend(['-s', self.serial])
        adb_cmd.append(name)
        if args:
            # `basestring` only exists on Python 2; `str` is the correct
            # check on Python 3 (NOTE(review): drop this comment once the
            # codebase is confirmed Python-3-only).
            if isinstance(args, str):
                adb_cmd.append(args)
            else:
                adb_cmd.extend(args)
    return adb_cmd
Constructs an adb command with arguments for a subprocess call. Args: raw_name: string, the raw unsanitized name of the adb command to format. args: string or list of strings, arguments to the adb command. See subprocess.Proc() documentation. shell: bool, True to run this command through the system shell, False to invoke it directly. See subprocess.Proc() docs. Returns: The adb command in a format appropriate for subprocess. If shell is True, then this is a string; otherwise, this is a list of strings.
codesearchnet
def __init__(self, graduation_year): if graduation_year is None: self._number = 13 else: self._number = settings.SENIOR_GRADUATION_YEAR - int(graduation_year) + 12 if 9 <= self._number <= 12: self._name = Grade.names[self._number - 9] else: self._name = "graduate"
Initialize the Grade object. Args: graduation_year The numerical graduation year of the user
juraj-google-style
def _expand_place_ids(self, terms): place_vids = [] first_type = None for result in self.backend.identifier_index.search(terms): if (not first_type): first_type = result.type if (result.type != first_type): continue place_vids.append(result.vid) if place_vids: all_set = set(itertools.chain.from_iterable((iallval(GVid.parse(x)) for x in place_vids))) place_vids += list((str(x) for x in all_set)) return place_vids else: return terms
Lookups all of the place identifiers to get gvids Args: terms (str or unicode): terms to lookup Returns: str or list: given terms if no identifiers found, otherwise list of identifiers.
codesearchnet
def __init__(self, ports_to_serve): self._port_pool = _PortPool() self._total_allocations = 0 self._denied_allocations = 0 self._client_request_errors = 0 for port in ports_to_serve: self._port_pool.add_port_to_free_pool(port)
Initialize a new port server. Args: ports_to_serve: A sequence of unique port numbers to test and offer up to clients.
juraj-google-style
def all(self, data=None, **kwargs):
    """Fetch all Settlement entities.

    Args:
        data (dict, optional): Query parameters forwarded to the API call.

    Returns:
        Dictionary of Settlement data.
    """
    # Avoid a mutable default argument: a shared module-level dict would
    # leak state across calls if any layer ever mutated it.
    if data is None:
        data = {}
    return super(Settlement, self).all(data, **kwargs)
Fetch all Settlement entities Returns: Dictionary of Settlement data
codesearchnet
def CaptureFrameLocals(self, frame): variables = {n: self.CaptureNamedVariable(n, v, 1, self.default_capture_limits) for (n, v) in six.viewitems(frame.f_locals)} nargs = frame.f_code.co_argcount if (frame.f_code.co_flags & inspect.CO_VARARGS): nargs += 1 if (frame.f_code.co_flags & inspect.CO_VARKEYWORDS): nargs += 1 frame_arguments = [] for argname in frame.f_code.co_varnames[:nargs]: if (argname in variables): frame_arguments.append(variables.pop(argname)) return (frame_arguments, list(six.viewvalues(variables)))
Captures local variables and arguments of the specified frame. Args: frame: frame to capture locals and arguments. Returns: (arguments, locals) tuple.
codesearchnet
def safe_filename(filename, os_type='unix', no_control=True, ascii_only=True, case=None, encoding='utf8', max_length=None): assert isinstance(filename, str), 'Expect str. Got {}.'.format(type(filename)) if (filename in ('.', os.curdir)): new_filename = '%2E' elif (filename in ('.', os.pardir)): new_filename = '%2E%2E' else: unix = (os_type == 'unix') windows = (os_type == 'windows') encoder_args = (unix, no_control, windows, ascii_only) if (encoder_args not in _encoder_cache): _encoder_cache[encoder_args] = PercentEncoder(unix=unix, control=no_control, windows=windows, ascii_=ascii_only) encoder = _encoder_cache[encoder_args] encoded_filename = filename.encode(encoding) new_filename = encoder.quote(encoded_filename).decode(encoding) if (os_type == 'windows'): if (new_filename[(- 1)] in ' .'): new_filename = '{0}{1:02X}'.format(new_filename[:(- 1)], new_filename[(- 1)]) if (max_length and (len(new_filename) > max_length)): hash_obj = hashlib.sha1(new_filename.encode(encoding)) new_length = max(0, (max_length - 8)) new_filename = '{0}{1}'.format(new_filename[:new_length], hash_obj.hexdigest()[:8]) if (case == 'lower'): new_filename = new_filename.lower() elif (case == 'upper'): new_filename = new_filename.upper() return new_filename
Return a safe filename or path part. Args: filename (str): The filename or path component. os_type (str): If ``unix``, escape the slash. If ``windows``, escape extra Windows characters. no_control (bool): If True, escape control characters. ascii_only (bool): If True, escape non-ASCII characters. case (str): If ``lower``, lowercase the string. If ``upper``, uppercase the string. encoding (str): The character encoding. max_length (int): The maximum length of the filename. This function assumes that `filename` has not already been percent-encoded. Returns: str
codesearchnet
def write(self, destination, filename, content):
    """Write *content* to *filename* inside the *destination* directory.

    The destination directory (including parents) is created if it does
    not already exist. Any existing file is overwritten.

    Args:
        destination (string): the destination directory
        filename (string): the filename that will be written
        content (string): the content of the file
    """
    # exist_ok avoids the check-then-create race that the original
    # bare `except: pass` silently papered over.
    os.makedirs(destination, exist_ok=True)
    filepath = os.path.join(destination, filename)
    # Context manager guarantees the handle is closed even on error.
    with open(filepath, "w") as f:
        f.write(content)
Write a file at the specific destination with the content. Args: destination (string): the destination location filename (string): the filename that will be written content (string): the content of the filename
juraj-google-style
def __diff_internal(self): assert (self.p > 0), 'order of Bspline must be > 0' t = self.knot_vector p = self.p Bi = Bspline(t[:(- 1)], (p - 1)) Bip1 = Bspline(t[1:], (p - 1)) numer1 = (+ p) numer2 = (- p) denom1 = (t[p:(- 1)] - t[:(- (p + 1))]) denom2 = (t[(p + 1):] - t[1:(- p)]) with np.errstate(divide='ignore', invalid='ignore'): ci = np.where((denom1 != 0.0), (numer1 / denom1), 0.0) cip1 = np.where((denom2 != 0.0), (numer2 / denom2), 0.0) return ((ci, Bi), (cip1, Bip1))
Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0
codesearchnet
def get_values(self, k, v):
    """Collect values from the key/value metadata attribute.

    Args:
        k (str): Key to look up in ``self.metadata``.
        v (str): Key to extract from each item stored under ``k``.

    Returns:
        list: All ``v`` values found under the ``k`` key of
        ``self.metadata``; empty if the metadata or key is absent.
    """
    values = []
    metadata = self.metadata
    # `is not None` instead of `!= None`; flatten the nested conditionals.
    if metadata is not None and k in metadata:
        for metav in metadata[k]:
            if v in metav:
                values.append(metav[v])
    return values
Get a list of values from the key value metadata attribute. Args: k (str): Key in :class:`api.results`.metadata v (str): Values from each item in the key of :class:`api.results`.metadata Returns: A list containing all the ``v`` values in the ``k`` key for the :class:`api.results`.metadata attribute.
juraj-google-style
def unwrap_arguments(xml_response):
    """Extract arguments and their values from a SOAP response.

    Args:
        xml_response (str): SOAP/xml response text (unicode, not utf-8).

    Returns:
        dict: a dict of ``{argument_name: value}`` items.
    """
    xml_response = xml_response.encode('utf-8')
    try:
        tree = XML.fromstring(xml_response)
    except XML.ParseError:
        # Some responses contain characters that are illegal in XML; strip
        # them out and retry the parse.
        filtered = illegal_xml_re.sub(
            '', xml_response.decode('utf-8')).encode('utf-8')
        tree = XML.fromstring(filtered)
    # NOTE(review): the namespace URI below was truncated in the original
    # source ("tree.find('{http:"); reconstructed as the standard SOAP 1.1
    # envelope Body lookup -- confirm against the upstream project.
    action_response = tree.find(
        ".//{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
    return dict((i.tag, (i.text or '')) for i in action_response)
Extract arguments and their values from a SOAP response. Args: xml_response (str): SOAP/xml response text (unicode, not utf-8). Returns: dict: a dict of ``{argument_name: value}`` items.
codesearchnet
def StartService(service_name): try: win32serviceutil.StartService(service_name) logging.info("Service '%s' started.", service_name) except pywintypes.error as e: if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST: logging.debug("Tried to start '%s', but the service is not installed.", service_name) else: logging.exception("Encountered error trying to start '%s':", service_name)
Start a Windows service with the given name. Args: service_name: string The name of the service to be started.
juraj-google-style
def local_variables(scope=None): return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)
Returns local variables. Local variables - per process variables, usually not saved/restored to checkpoint and used for temporary or intermediate values. For example, they can be used as counters for metrics computation or number of epochs this machine has read data. The `tf.contrib.framework.local_variable()` function automatically adds the new variable to `GraphKeys.LOCAL_VARIABLES`. This convenience function returns the contents of that collection. An alternative to local variables are global variables. See `tf.compat.v1.global_variables` Args: scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: A list of local `Variable` objects.
github-repos
def get_dialect_name(mixed: Union[(SQLCompiler, Engine, Dialect)]) -> str:
    """Return the name of the SQLAlchemy dialect in use.

    Args:
        mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or
            :class:`Dialect` object

    Returns:
        the SQLAlchemy dialect name being used
    """
    return get_dialect(mixed).name
Finds the name of the SQLAlchemy dialect in use. Args: mixed: an SQLAlchemy :class:`SQLCompiler`, :class:`Engine`, or :class:`Dialect` object Returns: the SQLAlchemy dialect name being used
codesearchnet
def are_debian_packages_installed(packages: List[str]) -> Dict[str, bool]: assert len(packages) >= 1 require_executable(DPKG_QUERY) args = [ DPKG_QUERY, "-W", "-f=${Package} ${Status}\n", ] + packages completed_process = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) encoding = sys.getdefaultencoding() stdout = completed_process.stdout.decode(encoding) stderr = completed_process.stderr.decode(encoding) present = OrderedDict() for line in stdout.split("\n"): if line: words = line.split() assert len(words) >= 2 package = words[0] present[package] = "installed" in words[1:] for line in stderr.split("\n"): if line: words = line.split() assert len(words) >= 2 package = words[-1] present[package] = False log.debug("Debian package presence: {}", present) return present
Check which of a list of Debian packages are installed, via ``dpkg-query``. Args: packages: list of Debian package names Returns: dict: mapping from package name to boolean ("present?")
juraj-google-style
def load_raw_type(self, typ: type[Any]) -> abstract.BaseValue: if typ is type(None): return self.consts[None] pytd_node = self._pytd_loader.lookup_pytd(typ.__module__, typ.__name__) return self._load_pytd_node(pytd_node)
Converts a raw type to an abstract value. For convenience, this method can also be called via ctx.types[typ]. Args: typ: The type. Returns: The abstract representation of the type. For example, when passed `int`, this function returns `abstract.SimpleClass(int)`.
github-repos
def build_schedule(inputs: Optional[Set[EventSetNode]], outputs: Set[EventSetNode], verbose: int=0) -> Schedule: graph = infer_graph(inputs, outputs) schedule = Schedule(input_nodes=graph.inputs) if verbose >= 2: print('Graph:\n', graph, file=sys.stderr) ready_ops: List[Operator] = [] ready_ops_set: Set[Operator] = set() node_to_op: Dict[EventSetNode, List[Operator]] = defaultdict(lambda: []) op_to_num_pending_inputs: Dict[Operator, int] = defaultdict(lambda: 0) for op in graph.operators: num_pending_inputs = 0 for input_node in op.inputs.values(): node_to_op[input_node].append(op) if input_node in graph.inputs: continue num_pending_inputs += 1 if num_pending_inputs == 0: ready_ops.append(op) ready_ops_set.add(op) else: op_to_num_pending_inputs[op] = num_pending_inputs ready_ops.sort(key=lambda op: op._internal_ordered_id, reverse=True) while ready_ops: op = ready_ops.pop() ready_ops_set.remove(op) released_nodes = [] for input in op.inputs.values(): if input in outputs: continue if input not in node_to_op: continue input_usage = node_to_op[input] input_usage.remove(op) if not input_usage: released_nodes.append(input) del node_to_op[input] schedule.steps.append(ScheduleStep(op=op, released_nodes=released_nodes)) for output in op.outputs.values(): if output not in node_to_op: continue for new_op in node_to_op[output]: assert new_op in op_to_num_pending_inputs num_missing_inputs = op_to_num_pending_inputs[new_op] - 1 op_to_num_pending_inputs[new_op] = num_missing_inputs assert num_missing_inputs >= 0 if num_missing_inputs == 0: ready_ops.append(new_op) ready_ops_set.add(new_op) del op_to_num_pending_inputs[new_op] assert not op_to_num_pending_inputs return schedule
Calculates which operators need to be executed in which order to compute a set of output EventSetNodes given a set of input EventSetNodes. This implementation is based on Kahn's algorithm. Args: inputs: Input EventSetNodes. outputs: Output EventSetNodes. verbose: If >0, prints details about the execution on the standard error output. The larger the number, the more information is displayed. Returns: Tuple of: - Ordered list of operators, such that the first operator should be computed before the second, second before the third, etc. - Mapping of EventSetNode name inputs to EventSetNodes. The keys are the string values in the `inputs` argument, and the values are the EventSetNodes corresponding to each one. If a value was already an EventSetNode, it won't be present in the returned dictionary.
github-repos
def ParseGenericRow( self, parser_mediator, query, row, **unused_kwargs): query_hash = hash(query) event_data = WindowsTimelineGenericEventData() payload_json_bytes = bytes(self._GetRowValue(query_hash, row, 'Payload')) payload_json_string = payload_json_bytes.decode('utf-8') appid_entries_string = self._GetRowValue(query_hash, row, 'AppId') payload = json.loads(payload_json_string) appid_entries = json.loads(appid_entries_string) package_id_locations = [ 'packageId', 'x_exe_path', 'windows_win32', 'windows_universal', 'alternateId'] for location in package_id_locations: for entry in appid_entries: if entry['platform'] == location and entry['application'] != '': event_data.package_identifier = entry['application'] break if event_data.package_identifier is None: break if 'description' in payload: event_data.description = payload['description'] else: event_data.description = '' if 'appDisplayName' in payload and payload['appDisplayName'] != '': event_data.application_display_name = payload['appDisplayName'] elif 'displayText' in payload and payload['displayText'] != '': event_data.application_display_name = payload['displayText'] timestamp = self._GetRowValue(query_hash, row, 'StartTime') date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_START) parser_mediator.ProduceEventWithEventData(event, event_data)
Parses a generic windows timeline row. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. query (str): query that created the row. row (sqlite3.Row): row.
juraj-google-style
def description(self, description): self._data['description'] = description request = self._base_request request['description'] = description return self._tc_requests.update(request, owner=self.owner)
Updates the security labels description. Args: description:
juraj-google-style
def create_single_token(self, *, payer_id, name, identification_number, payment_method, number, expiration_date): payload = { "language": self.client.language.value, "command": PaymentCommand.CREATE_TOKEN.value, "merchant": { "apiLogin": self.client.api_login, "apiKey": self.client.api_key }, "creditCardToken": { "payerId": payer_id, "name": name, "identificationNumber": identification_number, "paymentMethod": payment_method, "number": number, "expirationDate": expiration_date }, "test": self.client.is_test } return self.client._post(self.url, json=payload)
Using this feature you can register a customer’s credit card data and get a token sequential number. Args: payer_id: name: identification_number: payment_method: number: expiration_date: Returns:
juraj-google-style
def avg(vals, count=None):
    """Return the arithmetic mean of *vals*.

    Args:
        vals: Iterable of numbers to average.
        count: Optional total count that *vals* was part of; defaults to
            ``len(vals)``.

    Returns:
        float: The average value over *count*.

    Raises:
        ZeroDivisionError: If *count* (or ``len(vals)``) is zero.
    """
    if count is None:
        count = len(vals)
    # Built-in sum() replaces the manual accumulation loop.
    return float(sum(vals)) / count
Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count.
codesearchnet
def CopyToProto(self, proto): if (self.file is not None and self._serialized_start is not None and self._serialized_end is not None): proto.ParseFromString(self.file.serialized_pb[ self._serialized_start:self._serialized_end]) else: raise Error('Descriptor does not contain serialization.')
Copies this to the matching proto in descriptor_pb2. Args: proto: An empty proto instance from descriptor_pb2. Raises: Error: If self couldnt be serialized, due to to few constructor arguments.
juraj-google-style
def DeserializeForImport(self, reader):
    """Deserialize the full block, including all of its transactions.

    Args:
        reader (neo.IO.BinaryReader): stream to read the block data from.

    Raises:
        Exception: If the deserialized block contains no transactions.
    """
    super(Block, self).Deserialize(reader)
    tx_count = reader.ReadVarInt()
    transactions = []
    for _ in range(tx_count):
        transactions.append(Transaction.DeserializeFrom(reader))
    self.Transactions = transactions
    if len(self.Transactions) < 1:
        raise Exception('Invalid format %s ' % self.Index)
Deserialize full object. Args: reader (neo.IO.BinaryReader):
juraj-google-style
def _handle_problem_status(self, message, future): try: _LOGGER.trace("Handling response: %r", message) _LOGGER.debug("Handling response for %s with status %s", message.get('id'), message.get('status')) if 'error_code' in message and 'error_msg' in message: raise SolverFailureError(message['error_msg']) if 'status' not in message: raise InvalidAPIResponseError("'status' missing in problem description response") if 'id' not in message: raise InvalidAPIResponseError("'id' missing in problem description response") future.id = message['id'] future.remote_status = status = message['status'] with future._single_cancel_lock: if future._cancel_requested: if not future._cancel_sent and status == self.STATUS_PENDING: self._cancel(message['id'], future) future._cancel_sent = True if not future.time_received and message.get('submitted_on'): future.time_received = parse_datetime(message['submitted_on']) if not future.time_solved and message.get('solved_on'): future.time_solved = parse_datetime(message['solved_on']) if not future.eta_min and message.get('earliest_estimated_completion'): future.eta_min = parse_datetime(message['earliest_estimated_completion']) if not future.eta_max and message.get('latest_estimated_completion'): future.eta_max = parse_datetime(message['latest_estimated_completion']) if status == self.STATUS_COMPLETE: if 'answer' in message: future._set_message(message) else: self._load(future) elif status in self.ANY_STATUS_ONGOING: self._poll(future) elif status == self.STATUS_CANCELLED: raise CanceledFutureError() else: errmsg = message.get('error_message', 'An unknown error has occurred.') if 'solver is offline' in errmsg.lower(): raise SolverOfflineError(errmsg) else: raise SolverFailureError(errmsg) except Exception as error: future._set_error(error, sys.exc_info())
Handle the results of a problem submission or results request. This method checks the status of the problem and puts it in the correct queue. Args: message (dict): Update message from the SAPI server wrt. this problem. future `Future`: future corresponding to the problem Note: This method is always run inside of a daemon thread.
juraj-google-style
def _read_range(self, start, end=0): try: with _handle_client_error(): response = self._client.get_object( Range=self._http_range(start, end), **self._client_kwargs) except _ClientError as exception: if exception.response['Error']['Code'] == 'InvalidRange': return bytes() raise return response['Body'].read()
Read a range of bytes in stream. Args: start (int): Start stream position. end (int): End stream position. 0 To not specify end. Returns: bytes: number of bytes read
juraj-google-style
def merge(self, other):
    """Merge another ontology into the current one.

    Raises:
        TypeError: When the argument is not an Ontology object.

    Example:
        >>> from pronto import Ontology
        >>> nmr = Ontology('tests/resources/nmrCV.owl', False)
        >>> po = Ontology('tests/resources/po.obo.gz', False)
        >>> po.merge(nmr)
        >>> 'NMR:1000271' in po
        True
    """
    if not isinstance(other, Ontology):
        message = "'merge' requires an Ontology as argument, not {}"
        raise TypeError(message.format(type(other)))
    # Pull in the other ontology's terms, then rebuild derived state.
    self.terms.update(other.terms)
    self._empty_cache()
    self.adopt()
    self.reference()
Merge another ontology into the current one. Raises: TypeError: When argument is not an Ontology object. Example: >>> from pronto import Ontology >>> nmr = Ontology('tests/resources/nmrCV.owl', False) >>> po = Ontology('tests/resources/po.obo.gz', False) >>> 'NMR:1000271' in nmr True >>> 'NMR:1000271' in po False >>> po.merge(nmr) >>> 'NMR:1000271' in po True
codesearchnet
def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: Optional[dict]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: Optional[dict]=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] if encoder_attention_mask is None: batch_size, sequence_length = encoder_hidden_states.shape[:2] encoder_attention_mask = jnp.ones((batch_size, sequence_length)) batch_size, sequence_length = decoder_input_ids.shape if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), 
decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=jnp.array(encoder_attention_mask, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: outputs, past = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs, past = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs
Returns: Example: ```python >>> import jax.numpy as jnp >>> from transformers import AutoTokenizer, FlaxBlenderbotSmallForConditionalGeneration >>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M") >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") >>> text = "My friends are cool but they eat too many carbs." >>> inputs = tokenizer(text, max_length=1024, return_tensors="np") >>> encoder_outputs = model.encode(**inputs) >>> decoder_start_token_id = model.config.decoder_start_token_id >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id >>> outputs = model.decode(decoder_input_ids, encoder_outputs) >>> last_decoder_hidden_states = outputs.last_hidden_state ```
github-repos
def _QueryHashes(self, digests):
    """Queries VirusTotal for specific hashes.

    Args:
      digests (list[str]): hashes to look up.

    Returns:
      dict[str, object]: JSON response or None on error.
    """
    url_parameters = {'apikey': self._api_key, 'resource': ', '.join(digests)}
    try:
        # Any successful request returns the decoded JSON directly.
        return self.MakeRequestAndDecodeJSON(
            self._VIRUSTOTAL_API_REPORT_URL, 'GET', params=url_parameters)
    except errors.ConnectionError as exception:
        logger.error('Unable to query VirusTotal with error: {0!s}.'.format(
            exception))
        return None
Queries VirusTotal for specific hashes. Args: digests (list[str]): hashes to look up. Returns: dict[str, object]: JSON response or None on error.
juraj-google-style
def run(argv=None):
    """Runs the New York City trips pipeline.

    Args:
      argv: Pipeline options as a list of arguments.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): the original default GCS path was truncated to "gs:" in
    # this copy, leaving an unterminated string literal (a syntax error).
    # Restored with a placeholder path — confirm the intended sample bucket.
    parser.add_argument(
        '--input',
        dest='input',
        default='gs://your-bucket/path/to/trips*.avro',
        help='Input Avro file(s) to process.')
    parser.add_argument(
        '--output',
        dest='output',
        help='Output file to write results to.',
        required=True)
    known_args, pipeline_args = parser.parse_known_args(argv)
    pipeline_options = PipelineOptions(pipeline_args)
    # Save the main session so module-level names (SCHEMA, DoFns, helpers)
    # are pickled and available on the workers.
    pipeline_options.view_as(SetupOptions).save_main_session = True
    with beam.Pipeline(options=pipeline_options) as p:
        _ = (
            p
            | ReadFromAvro(known_args.input)
            # Keep records that have every fare-related field and at least
            # one of the trip timestamps.
            | beam.Filter(
                lambda record: all(
                    record[k] is not None
                    for k in ('hvfhs_license_num', 'trip_miles', 'trip_time',
                              'base_passenger_fare', 'tips', 'driver_pay'))
                and any(
                    record[k] is not None
                    for k in ('request_datetime', 'on_scene_datetime',
                              'pickup_datetime', 'dropoff_datetime')))
            | beam.ParDo(CreateKeyWithServiceAndDay())
            | beam.CombinePerKey(CalculatePricePerAttribute())
            | beam.Map(flatten_group)
            | WriteToAvro(known_args.output, SCHEMA, file_name_suffix='.avro'))
Runs the New York City trips pipeline. Args: argv: Pipeline options as a list of arguments.
github-repos
def as_dataframe(self, pattern='*', max_rows=None):
    """Creates a pandas dataframe from the descriptors that match the filters.

    Args:
      pattern: An optional pattern to further filter the descriptors. This
          can include Unix shell-style wildcards. E.g. ``"aws*"``,
          ``"*cluster*"``.
      max_rows: The maximum number of descriptors to return. If None,
          return all.

    Returns:
      A pandas dataframe containing matching resource descriptors.
    """
    rows = []
    for index, descriptor in enumerate(self.list(pattern)):
        if max_rows is not None and index >= max_rows:
            break
        # One display row per descriptor: type, name, comma-joined label keys.
        label_keys = ', '.join(label.key for label in descriptor.labels)
        rows.append([descriptor.type, descriptor.display_name, label_keys])
    return pandas.DataFrame(rows, columns=self._DISPLAY_HEADERS)
Creates a pandas dataframe from the descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. max_rows: The maximum number of descriptors to return. If None, return all. Returns: A pandas dataframe containing matching resource descriptors.
juraj-google-style
def setup(self, disk_name, project, turbinia_zone):
    """Sets up the object attributes.

    Args:
      disk_name (str): Name of the disk to process.
      project (str): The project containing the disk to process.
      turbinia_zone (str): The zone containing the disk to process.
    """
    # Both the project and the zone are mandatory; bail out early otherwise.
    if project is None or turbinia_zone is None:
        self.state.add_error(
            'project or turbinia_zone are not all specified, bailing out',
            critical=True)
        return
    self.disk_name = disk_name
    self.project = project
    self.turbinia_zone = turbinia_zone
    try:
        turbinia_config.LoadConfig()
        self.turbinia_region = turbinia_config.TURBINIA_REGION
        self.instance = turbinia_config.PUBSUB_TOPIC
        # Cross-project processing is not supported by this recipe.
        if turbinia_config.PROJECT != self.project:
            message = (
                'Specified project {0:s} does not match Turbinia configured '
                'project {1:s}. Use gcp_turbinia_import recipe to copy the disk '
                'into the same project.').format(
                    self.project, turbinia_config.PROJECT)
            self.state.add_error(message, critical=True)
            return
        self._output_path = tempfile.mkdtemp()
        self.client = turbinia_client.TurbiniaClient()
    except TurbiniaException as exception:
        self.state.add_error(exception, critical=True)
        return
Sets up the object attributes. Args: disk_name (string): Name of the disk to process project (string): The project containing the disk to process turbinia_zone (string): The zone containing the disk to process
juraj-google-style
def persist_project(project):
    """Persist this project in the benchbuild database.

    Args:
        project: The project we want to persist.

    Returns:
        A tuple (query, session) where the query selects the persisted row.
    """
    from benchbuild.utils.schema import Project, Session

    session = Session()
    projects = session.query(Project).filter(
        Project.name == project.name).filter(
            Project.group_name == project.group)
    # `version` may be a plain attribute or a callable.
    version = project.version() if callable(project.version) else project.version
    try:
        src_url = project.src_uri
    except AttributeError:
        src_url = 'unknown'
    values = {
        'name': project.name,
        'description': project.__doc__,
        'src_url': src_url,
        'domain': project.domain,
        'group_name': project.group,
        'version': version,
    }
    if projects.count() == 0:
        # No matching row yet: insert a new one.
        row = Project()
        row.name = values['name']
        row.description = values['description']
        row.src_url = values['src_url']
        row.domain = values['domain']
        row.group_name = values['group_name']
        row.version = values['version']
        session.add(row)
    else:
        # Row exists: update it in place.
        projects.update(values)
    session.commit()
    return (projects, session)
Persist this project in the benchbuild database. Args: project: The project we want to persist.
codesearchnet
def drawdown_details(drawdown, index_type=pd.DatetimeIndex):
    """Returns a data frame with start, end, length and depth of each drawdown.

    .. note:: with a DatetimeIndex, lengths are actual calendar days, not
        trading days; otherwise they are raw index differences.

    Args:
        drawdown (pandas.Series): A drawdown series (can be obtained with
            ``drawdown(prices)``); zero marks "not in drawdown".
        index_type: The index class of the series. When it is
            ``pd.DatetimeIndex`` durations are reported in days.

    Returns:
        pandas.DataFrame with columns Start, End, Length, drawdown, or None
        if the series contains no drawdown periods.
    """
    is_zero = drawdown == 0
    # A drawdown starts where the series leaves zero...
    start = ~is_zero & is_zero.shift(1)
    start = list(start[start == True].index)
    # ...and ends where it returns to zero.
    end = is_zero & (~is_zero).shift(1)
    end = list(end[end == True].index)
    # BUG FIX: the original used `len(start) is 0` / `len(end) is 0`;
    # identity comparison with an int literal is implementation-defined
    # (and a SyntaxWarning since Python 3.8) — use equality instead.
    if len(start) == 0:
        return None
    if len(end) == 0:
        end.append(drawdown.index[-1])
    # The series may begin or end mid-drawdown; pad the boundaries.
    if start[0] > end[0]:
        start.insert(0, drawdown.index[0])
    if start[-1] > end[-1]:
        end.append(drawdown.index[-1])
    result = pd.DataFrame(
        columns=('Start', 'End', 'Length', 'drawdown'),
        index=range(0, len(start))
    )
    for i in range(0, len(start)):
        dd = drawdown[start[i]:end[i]].min()
        if index_type is pd.DatetimeIndex:
            result.iloc[i] = (start[i], end[i], (end[i] - start[i]).days, dd)
        else:
            result.iloc[i] = (start[i], end[i], (end[i] - start[i]), dd)
    return result
Returns a data frame with start, end, days (duration) and drawdown for each drawdown in a drawdown series. .. note:: days are actual calendar days, not trading days Args: * drawdown (pandas.Series): A drawdown Series (can be obtained with drawdown(prices)). Returns: * pandas.DataFrame -- A data frame with the following columns: start, end, days, drawdown.
juraj-google-style
def modify_lattice(self, new_lattice):
    """Modify the lattice of the structure.

    Mainly used for changing the basis.

    Args:
        new_lattice (Lattice): New lattice
    """
    # Replace the structure-level lattice, then propagate it to every site
    # so sites and structure stay consistent.
    self._lattice = new_lattice
    for each_site in self._sites:
        each_site.lattice = new_lattice
Modify the lattice of the structure. Mainly used for changing the basis. Args: new_lattice (Lattice): New lattice
juraj-google-style
def get_image_features(self, pixel_values: torch.FloatTensor, return_attentions: Optional[bool]=False, interpolate_pos_encoding: Optional[bool]=False):
    """Encodes images into continuous embeddings that can be forwarded to the
    language model.

    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
            The tensors corresponding to the input images.
        return_attentions (`bool`, *optional*, defaults to `False`):
            Whether to return `projection_attentions` or not.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate positional embeddings or not.
    """
    vision_outputs = self.vision_model(
        pixel_values=pixel_values,
        interpolate_pos_encoding=interpolate_pos_encoding)
    # Layer-normalize the vision hidden states, L2-normalize them, and
    # project them into the text embedding space.
    embeddings = self.vision_model.model.post_layernorm(vision_outputs[0])
    embeddings = nn.functional.normalize(embeddings, dim=-1)
    embeddings, projection_attentions = self.image_to_text_projection(embeddings)
    if return_attentions:
        return (embeddings, projection_attentions)
    return embeddings
Encodes images into continuous embeddings that can be forwarded to the language model. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`): The tensors corresponding to the input images. return_attentions (`bool`, *optional*, defaults to `False`): Whether to return `projection_attentions` or not. interpolate_pos_encoding (`bool`, *optional*, defaults to `False`): Whether to interpolate positional embeddings or not.
github-repos
def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    """Returns a tuple of tensors representing the initial state fluents.

    Args:
        batch_size (Optional[int]): The batch size; when None the unbatched
            fluents are returned as-is.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    """
    # All initial-state ops live under a dedicated name scope in self.graph.
    with self.graph.as_default(), tf.name_scope('initial_state'):
        self._initialize_initial_state_fluents()
        if batch_size is None:
            return self.initial_state_fluents
        return self._compile_batch_fluents(self.initial_state_fluents, batch_size)
Returns a tuple of tensors representing the initial state fluents. Args: batch_size (Optional[int]): The batch size. Returns: Sequence[tf.Tensor]: A tuple of tensors.
juraj-google-style
def extractUnits(self, inp):
    """Collects all the valid units from an input string.

    Works by accumulating consecutive words from the string and
    cross-referencing them against the set of valid units.

    Args:
        inp (str): Some text which hopefully contains descriptions of
            different units.

    Returns:
        A list of strings, each entry in which is a valid quantities unit.
    """
    text = self._preprocess(inp)
    units = []
    current = ''
    for word in text.split(' '):
        if word == '/' or self.isValidUnit(word):
            # Extend the unit description currently being built.
            current = word if not current else current + ' ' + word
        elif current:
            # A non-unit word terminates the current description.
            units.append(current)
            current = ''
    if current:
        units.append(current)
    return units
Collects all the valid units from an input string. Works by appending consecutive words from the string and cross-referencing them with a set of valid units. Args: inp (str): Some text which hopefully contains descriptions of different units. Returns: A list of strings, each entry in which is a valid quantities unit.
codesearchnet
def _ApproxTopKGradient(op: ops.Operation, grad, _):
    """Return the gradients for ApproxTopK.

    Scatters the incoming gradient back to the positions of the original
    input that produced the top-k values, using the index output of the op.

    Args:
        op: The ApproxTopK op for which we need to generate gradients.
        grad: The gradient w.r.t. the values output (same shape as the
            indices output).

    Returns:
        Gradient w.r.t. the op's input, scattered at the top-k indices.
    """
    idx_shape = op.outputs[1].shape
    # Each index entry is "lifted" to a rank-(rank+1) coordinate vector.
    lifted_idx_shape = idx_shape + [1]
    flat_shape_len = functools.reduce(operator.mul, idx_shape)
    rank = idx_shape.rank
    reduction_dim = op.get_attr('reduction_dimension')
    if reduction_dim < 0:
        # Normalize a negative axis to its positive equivalent.
        reduction_dim = rank + reduction_dim

    def GetLiftedIdx(d):
        # Along the reduction dimension, the coordinate is the top-k index
        # reported by the op; along every other dimension it is a broadcast
        # iota (the position itself).
        if d == reduction_dim:
            return array_ops.reshape(op.outputs[1], lifted_idx_shape)
        iota_len = idx_shape[d]
        iota_shape = list(itertools.repeat(1, rank + 1))
        iota_shape[d] = iota_len
        iota = array_ops.reshape(math_ops.range(iota_len), iota_shape)
        return array_ops.broadcast_to(iota, lifted_idx_shape)

    # Concatenate per-dimension coordinates into full nd-indices, then
    # flatten so each row is one coordinate of length `rank`.
    lifted_idx = array_ops.concat(list((GetLiftedIdx(d) for d in range(rank))), axis=rank)
    flat_idx = array_ops.reshape(lifted_idx, [flat_shape_len, rank])
    flat_grad = array_ops.reshape(grad, [flat_shape_len])
    # Scatter the flat gradients into a zero tensor shaped like the input.
    return array_ops.scatter_nd(flat_idx, flat_grad, op.inputs[0].shape)
Return the gradients for ApproxTopK. Args: op: The ApproxTopK for which we need to generate gradients. grad: The gradients for backprop. Returns: Scattered gradient based on the top-k indices.
github-repos
def CreateSignatureScanner(cls, specification_store):
    """Creates a signature scanner for format specifications with signatures.

    Args:
      specification_store (FormatSpecificationStore): format specifications
          with signatures.

    Returns:
      pysigscan.scanner: signature scanner.
    """
    scanner_object = pysigscan.scanner()
    for format_specification in specification_store.specifications:
        for signature in format_specification.signatures:
            offset = signature.offset
            if offset is None:
                # No offset: the pattern may occur anywhere.
                signature_flags = pysigscan.signature_flags.NO_OFFSET
            elif offset < 0:
                # Negative offsets are relative to the end of the data.
                offset = -offset
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_END
            else:
                signature_flags = pysigscan.signature_flags.RELATIVE_FROM_START
            scanner_object.add_signature(
                signature.identifier, offset, signature.pattern,
                signature_flags)
    return scanner_object
Creates a signature scanner for format specifications with signatures. Args: specification_store (FormatSpecificationStore): format specifications with signatures. Returns: pysigscan.scanner: signature scanner.
codesearchnet
def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # All Maintenance RPCs share the same service path prefix and the
    # usual <Name>Request/<Name>Response message pairing, so build the
    # unary-unary stubs from a table (in the original declaration order).
    unary_rpcs = (
        ('Alarm', rpc__pb2.AlarmRequest, rpc__pb2.AlarmResponse),
        ('Status', rpc__pb2.StatusRequest, rpc__pb2.StatusResponse),
        ('Defragment', rpc__pb2.DefragmentRequest, rpc__pb2.DefragmentResponse),
        ('Hash', rpc__pb2.HashRequest, rpc__pb2.HashResponse),
        ('HashKV', rpc__pb2.HashKVRequest, rpc__pb2.HashKVResponse),
    )
    for name, request_cls, response_cls in unary_rpcs:
        setattr(self, name, channel.unary_unary(
            '/etcdserverpb.Maintenance/' + name,
            request_serializer=request_cls.SerializeToString,
            response_deserializer=response_cls.FromString,
        ))
    # Snapshot streams its responses, so it uses unary_stream.
    self.Snapshot = channel.unary_stream(
        '/etcdserverpb.Maintenance/Snapshot',
        request_serializer=rpc__pb2.SnapshotRequest.SerializeToString,
        response_deserializer=rpc__pb2.SnapshotResponse.FromString,
    )
    self.MoveLeader = channel.unary_unary(
        '/etcdserverpb.Maintenance/MoveLeader',
        request_serializer=rpc__pb2.MoveLeaderRequest.SerializeToString,
        response_deserializer=rpc__pb2.MoveLeaderResponse.FromString,
    )
Constructor. Args: channel: A grpc.Channel.
juraj-google-style
def __init__(self, fname):
    """Loads a mujoco xml from file.

    Args:
        fname (str): path to the MJCF xml file.
    """
    self.file = fname
    self.folder = os.path.dirname(fname)
    self.tree = ET.parse(fname)
    self.root = self.tree.getroot()
    self.name = self.root.get("model")
    # Ensure every standard MJCF section exists (in this fixed order) so
    # later code can assume their presence.
    for section in ("worldbody", "actuator", "asset",
                    "equality", "contact", "default"):
        setattr(self, section, self.create_default_element(section))
    self.resolve_asset_dependency()
Loads a mujoco xml from file. Args: fname (str): path to the MJCF xml file.
juraj-google-style
def dump(self, file, payload):
    """Dump a JSON object to an open file.

    Writes JSON with 2-space indentation, keeping non-ASCII characters
    unescaped.

    Args:
        file: Open file-like object. Must be open for writing.
        payload: The JSON object to write to file.

    Returns:
        None.
    """
    file.write(json.dumps(payload, indent=2, ensure_ascii=False))
Dump a JSON object to an open file output. Writes JSON with 2-space indentation. Args: file: Open file-like object. Must be open for writing. payload: The JSON object to write to file. Returns: None.
codesearchnet