code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def assert_pipeline_equal(test_case, expected_pipeline, actual_pipeline):
    """Asserts the equivalence between two apache_beam.Pipeline instances.

    Both pipelines are converted to their runner-API proto form (with fake
    coders, so coder ids need not match) and compared proto-to-proto.

    Args:
        test_case: (unittest.TestCase) the unittest testcase where it asserts.
        expected_pipeline: (Pipeline) the pipeline instance expected.
        actual_pipeline: (Pipeline) the actual pipeline instance to be
            asserted.
    """
    expected_proto, actual_proto = (
        pipeline.to_runner_api(use_fake_coders=True)
        for pipeline in (expected_pipeline, actual_pipeline))
    assert_pipeline_proto_equal(test_case, expected_proto, actual_proto)
Asserts the equivalence between two given apache_beam.Pipeline instances. Args: test_case: (unittest.TestCase) the unittest testcase where it asserts. expected_pipeline: (Pipeline) the pipeline instance expected. actual_pipeline: (Pipeline) the actual pipeline instance to be asserted.
github-repos
def to_json(self, is_admin=False):
    """Returns a dict representation of the object.

    Args:
        is_admin (`bool`): If true, include information about the account
            that should be available only to admins.

    Returns:
        `dict`
    """
    data = {
        'accountId': self.account_id,
        'accountName': self.account_name,
        'contacts': self.contacts,
    }
    if not is_admin:
        return data

    # Admin view adds type, enabled flag, roles and camel-cased properties.
    data.update({
        'accountType': self.account_type,
        'enabled': self.enabled == 1,
        'requiredRoles': self.required_roles,
        'properties': {
            to_camelcase(prop.name): prop.value
            for prop in self.account.properties
        },
    })
    return data
Returns a dict representation of the object Args: is_admin (`bool`): If true, include information about the account that should be available only to admins Returns: `dict`
juraj-google-style
def subscribe(self, callback, filter_):
    """Create and register a metric subscriber.

    Finds metrics already known to this instance that match ``filter_``
    and immediately subscribes the callback to them.

    Args:
        callback (object method): subscriber's callback.
        filter_ (dict): filter dict; sample:
            {'type': 'metrics', 'source': 'gun'}.
    """
    sub_id = "subscriber_{uuid}".format(uuid=uuid.uuid4())
    sub = pd.DataFrame({sub_id: filter_}).T
    sub['callback'] = callback
    # FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat returns a new frame exactly as append did.
    self.subscribers = pd.concat([self.subscribers, sub])
    this_subscriber_metrics = self.__filter(self.metrics_meta, filter_)
    if this_subscriber_metrics.empty:
        logger.debug('Metrics for subscriber %s not found', sub_id)
    else:
        logger.debug('Found metrics for this subscriber, subscribing...: %s',
                     this_subscriber_metrics)
        this_subscriber_metrics['callback'] = callback
        prepared_callbacks = this_subscriber_metrics[['callback']]
        self.callbacks = pd.concat([self.callbacks, prepared_callbacks])
Create and register metric subscriber, find metrics for this subscriber (using filter_) and subscribe Args: callback (object method): subscriber's callback filter_ (dict): filter dict filter sample: {'type': 'metrics', 'source': 'gun'}
juraj-google-style
def zip_ll_row(params, data_row):
    """Returns the negative log-likelihood of a row given ZIP data.

    Args:
        params (list): [lambda, zero-inflation probability pi].
        data_row (array): 1d array.

    Returns:
        negative log-likelihood
    """
    lam = params[0]
    pi = params[1]
    is_zero = (data_row == 0)
    # Mixture: zeros may come from the point mass (prob pi); all counts,
    # zeros included, may come from Poisson(lam) with prob 1 - pi.
    likelihood = is_zero * pi + (1 - pi) * poisson.pmf(data_row, lam)
    # eps (module-level constant) keeps log() away from zero.
    return -np.log(likelihood + eps).sum()
Returns the negative log-likelihood of a row given ZIP data. Args: params (list): [lambda zero-inf] data_row (array): 1d array Returns: negative log-likelihood
codesearchnet
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None):
    """Makes multiple GETs to an OpenDNS endpoint.

    Args:
        cache_api_name: string api_name for caching.
        fmt_url_path: format string for building URL paths.
        url_params: An enumerable of strings used in building URLs.
        query_params: None / dict / list of dicts containing query params.

    Returns:
        A dict of {url_param: api_result}.
    """
    all_responses = {}

    if self._cache:
        # Serve whatever we can from the cache, then only fetch the rest.
        all_responses = self._cache.bulk_lookup(cache_api_name, url_params)
        url_params = [param for param in url_params if param not in all_responses]

    if url_params:
        urls = self._to_urls(fmt_url_path, url_params)
        responses = self._requests.multi_get(urls, query_params)
        for url_param, response in zip(url_params, responses):
            if self._cache:
                self._cache.cache_value(cache_api_name, url_param, response)
            all_responses[url_param] = response

    return all_responses
Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result}
codesearchnet
def retrieve_products(self, reviewer):
    """Retrieve products reviewed by a given reviewer.

    Args:
        reviewer: A reviewer.

    Returns:
        A list of products which the reviewer reviews.

    Raises:
        TypeError: when given reviewer isn't an instance of the reviewer
            class specified when this graph was constructed.
    """
    if not isinstance(reviewer, self._reviewer_cls):
        raise TypeError(
            "Type of given reviewer isn't acceptable:", reviewer,
            ', expected:', self._reviewer_cls)
    return [product for product in self.graph.successors(reviewer)]
Retrieve products reviewed by a given reviewer. Args: reviewer: A reviewer. Returns: A list of products which the reviewer reviews. Raises: TypeError: when given reviewer isn't instance of specified reviewer class when this graph is constructed.
codesearchnet
def AppendContent(self, src_fd):
    """Create new blob hashes and append to BlobImage.

    We don't support writing at arbitrary file offsets, but this method
    provides a convenient way to add blobs for a new file, or append
    content to an existing one.

    Args:
        src_fd: source file handle open for read.

    Raises:
        IOError: if blob has already been finalized.
    """
    while True:
        chunk = src_fd.read(self.chunksize)
        if not chunk:
            break
        blob_id = data_store.BLOBS.WriteBlobWithUnknownHash(chunk)
        self.AddBlob(blob_id, len(chunk))

    self.Flush()
Create new blob hashes and append to BlobImage. We don't support writing at arbitrary file offsets, but this method provides a convenient way to add blobs for a new file, or append content to an existing one. Args: src_fd: source file handle open for read Raises: IOError: if blob has already been finalized.
codesearchnet
def _ExpectedKeysForEntry(self, entry):
    """Generate a list of expected cache keys for this type of map.

    This map type is keyed solely by the entry's name.

    Args:
        entry: A PasswdMapEntry.

    Returns:
        A list of strings.
    """
    return [entry.name]
Generate a list of expected cache keys for this type of map. Args: entry: A PasswdMapEntry Returns: A list of strings
github-repos
def __init__(self, generator_function, *args, **kwargs):
    """Init a new GeneratorContainer.

    Stores the generator function together with its call arguments
    resolved into a name -> value mapping.

    Args:
        generator_function(func): The generator function.
        *args: The arguments passed to the generator function.
        **kwargs: The keyword arguments passed to the generator function.

    Raises:
        TypeError: if ``generator_function`` is not a generator function.
    """
    if not inspect.isgeneratorfunction(generator_function):
        raise TypeError("generator_function must be a generator function.")

    self.generator_function = generator_function
    if sys.version_info[0] < 3:
        # Python 2: inspect.signature does not exist; getcallargs yields a
        # comparable name -> value mapping.
        self.arguments = inspect.getcallargs(
            self.generator_function, *args, **kwargs
        )
    else:
        signature = inspect.signature(self.generator_function)
        bound_arguments = signature.bind(*args, **kwargs)
        self.arguments = bound_arguments.arguments
Init a new GeneratorContainer. Args: generator_function(func): The generator function. *args: The arguments passed to the generator function. **kwargs: The keyword arguments passed to the generator function.
juraj-google-style
def from_utc_datetime(cls, dt: datetime.datetime) -> 'Timestamp':
    """Create a ``Timestamp`` instance from a ``datetime.datetime`` object.

    Args:
        dt: A ``datetime.datetime`` object in UTC (offset-aware).

    Returns:
        A ``Timestamp`` holding the seconds since the UTC epoch.

    Raises:
        ValueError: if ``dt`` is naive or not in UTC.
    """
    if dt.tzinfo is None:
        # FIX: the URL in this message was truncated in the source,
        # leaving an unterminated string literal; restored.
        raise ValueError(
            'dt has no timezone info ' +
            '(https://docs.python.org/3/library/datetime.html' +
            '#aware-and-naive-objects): %s' % dt)
    if dt.tzinfo != pytz.utc and dt.tzinfo != datetime.timezone.utc:
        raise ValueError('dt not in UTC: %s' % dt)
    duration = dt - cls._epoch_datetime_utc()
    return Timestamp(duration.total_seconds())
Create a ``Timestamp`` instance from a ``datetime.datetime`` object. Args: dt: A ``datetime.datetime`` object in UTC (offset-aware).
github-repos
def line_init(xo: int, yo: int, xd: int, yd: int) -> None:
    """Initialize a line whose points will be returned by `line_step`.

    This function does not return anything on its own.

    Does not include the origin point.

    Args:
        xo (int): X starting point.
        yo (int): Y starting point.
        xd (int): X destination point.
        yd (int): Y destination point.

    .. deprecated:: 2.0
        Use `line_iter` instead.
    """
    lib.TCOD_line_init(xo, yo, xd, yd)
Initialize a line whose points will be returned by `line_step`. This function does not return anything on its own. Does not include the origin point. Args: xo (int): X starting point. yo (int): Y starting point. xd (int): X destination point. yd (int): Y destination point. .. deprecated:: 2.0 Use `line_iter` instead.
codesearchnet
def simulate_w(self, index: int, half_turns: float, axis_half_turns: float):
    """Simulate a single qubit rotation gate about a X + b Y.

    The gate simulated is U = exp(-i pi/2 W half_turns) where
    W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y.

    Args:
        index: The qubit to act on.
        half_turns: The amount of the overall rotation, see the formula
            above.
        axis_half_turns: The angle between the pauli X and Y operators,
            see the formula above.
    """
    args = self._shard_num_args({
        'index': index,
        'half_turns': half_turns,
        'axis_half_turns': axis_half_turns
    })
    if index >= self._num_shard_qubits:
        # Gate spans shard boundaries: compute into scratch space across
        # all shards, then copy scratch back into the state.
        self._pool.map(_clear_scratch, args)
        self._pool.map(_w_between_shards, args)
        self._pool.map(_copy_scratch_to_state, args)
    else:
        # Qubit lives entirely within a shard; apply locally per shard.
        self._pool.map(_w_within_shard, args)

    # Renormalize: accumulate the squared norm across shards, then scale.
    norm_squared = np.sum(self._pool.map(_norm_squared, args))
    args = self._shard_num_args({
        'norm_squared': norm_squared
    })
    self._pool.map(_renorm, args)
Simulate a single qubit rotation gate about a X + b Y. The gate simulated is U = exp(-i pi/2 W half_turns) where W = cos(pi axis_half_turns) X + sin(pi axis_half_turns) Y Args: index: The qubit to act on. half_turns: The amount of the overall rotation, see the formula above. axis_half_turns: The angle between the pauli X and Y operators, see the formula above.
juraj-google-style
async def with_call(self, request_iterator, timeout=None, metadata=None, credentials=None):
    """Invokes the underlying RPC on the client and awaits the result.

    Args:
        request_iterator: An ASYNC iterator that yields request values for
            the RPC.
        timeout: An optional duration of time in seconds to allow for the
            RPC. If None, the timeout is considered infinite.
        metadata: Optional :term:`metadata` to be transmitted to the
            service-side of the RPC.
        credentials: An optional CallCredentials for the RPC.

    Returns:
        The response value for the RPC and a Call object for the RPC.

    Raises:
        RpcError: Indicating that the RPC terminated with non-OK status.
            The raised RpcError will also be a Call for the RPC affording
            the RPC's metadata, status code, and details.
    """
    fut = self.future(request_iterator, timeout, metadata, credentials)
    try:
        result = (await fut)
        return (result, fut)
    finally:
        # Ensure an abandoned or failed call does not leak: cancel the
        # future if it has not completed by the time we leave.
        if (not fut.done()):
            fut.cancel()
Asynchronously invokes the underlying RPC on the client and awaits its completion. Args: request_iterator: An ASYNC iterator that yields request values for the RPC. timeout: An optional duration of time in seconds to allow for the RPC. If None, the timeout is considered infinite. metadata: Optional :term:`metadata` to be transmitted to the service-side of the RPC. credentials: An optional CallCredentials for the RPC. Returns: The response value for the RPC and a Call object for the RPC. Raises: RpcError: Indicating that the RPC terminated with non-OK status. The raised RpcError will also be a Call for the RPC affording the RPC's metadata, status code, and details.
codesearchnet
def get_image_path(image_lists, label_name, index, image_dir, category):
    """Returns a path to an image for a label at the given index.

    Args:
        image_lists: OrderedDict of training images for each label.
        label_name: Label string we want to get an image for.
        index: Int offset of the image we want. This is taken modulo the
            available number of images for the label, so it can be
            arbitrarily large.
        image_dir: Root folder string of the subfolders containing the
            training images.
        category: Name string of the set to pull images from: training,
            testing, or validation.

    Returns:
        File system path string to an image that meets the requested
        parameters.
    """
    if label_name not in image_lists:
        tf.logging.fatal('Label does not exist %s.', label_name)
    label_lists = image_lists[label_name]

    if category not in label_lists:
        tf.logging.fatal('Category does not exist %s.', category)
    category_list = label_lists[category]
    if not category_list:
        tf.logging.fatal('Label %s has no images in the category %s.',
                         label_name, category)

    base_name = category_list[index % len(category_list)]
    return os.path.join(image_dir, label_lists['dir'], base_name)
Returns a path to an image for a label at the given index. Args: image_lists: OrderedDict of training images for each label. label_name: Label string we want to get an image for. index: Int offset of the image we want. This will be moduloed by the available number of images for the label, so it can be arbitrarily large. image_dir: Root folder string of the subfolders containing the training images. category: Name string of set to pull images from - training, testing, or validation. Returns: File system path string to an image that meets the requested parameters.
codesearchnet
def filter_by_hoys(self, hoys):
    """Filter the Data Collection based on a list of hoys.

    Args:
        hoys: A list of hours of the year 0..8759.

    Return:
        A new Data Collection with filtered data.
    """
    # Keep only hours that exist in this collection's analysis period,
    # then delegate to the minute-of-year filter.
    period_hoys = self.header.analysis_period.hoys
    kept = [hoy for hoy in hoys if hoy in period_hoys]
    _moys = tuple(int(hoy * 60) for hoy in kept)
    return self.filter_by_moys(_moys)
Filter the Data Collection based on a list of hoys. Args: hoys: A List of hours of the year 0..8759 Return: A new Data Collection with filtered data
codesearchnet
def latent_to_dist(name, x, hparams, output_channels=None):
    """Map latent to the mean and log-scale of a Gaussian.

    Args:
        name: variable scope.
        x: 4-D Tensor of shape (NHWC).
        hparams: HParams. Keys used:
            latent_architecture - "single_conv", "glow_nn" or
                "glow_resnet", default "single_conv".
            latent_encoder_depth - int, depth of architecture, valid if
                latent_architecture is "glow_nn" or "glow_resnet".
            latent_pre_output_channels - default 512, valid only when
                latent_architecture is "glow_nn".
            latent_encoder_width - default 512, maximum width of the
                network.
        output_channels: int, number of output channels of the mean (and
            std). If not provided, set to the output channels of x.

    Returns:
        dist: instance of tfp.distributions.Normal.

    Raises:
        ValueError: If architecture not in ["single_conv", "glow_nn",
            "glow_resnet"].
    """
    architecture = hparams.get('latent_architecture', 'single_conv')
    depth = hparams.get('latent_encoder_depth', 1)
    pre_output_channels = hparams.get('latent_pre_output_channels', 512)
    width = hparams.get('latent_encoder_width', 512)
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        x_shape = common_layers.shape_list(x)
        if output_channels is None:
            output_channels = x_shape[-1]
        if architecture == 'single_conv':
            return single_conv_dist('single_conv', x, output_channels)
        if architecture == 'glow_nn':
            mean_log_scale = x
            for layer in range(1, depth + 1):
                # FIX: this expression was truncated in the source; the
                # width doubles toward pre_output_channels per layer, as in
                # the reference implementation — confirm against upstream.
                mid_channels = pre_output_channels // 2 ** (depth - layer)
                mean_log_scale = conv_block('glow_nn_%d' % layer,
                                            mean_log_scale,
                                            mid_channels=mid_channels)
            mean_log_scale = conv('glow_nn_zeros', mean_log_scale,
                                  filter_size=[3, 3], stride=[1, 1],
                                  output_channels=2 * output_channels,
                                  apply_actnorm=False, conv_init='zeros')
        elif architecture == 'glow_resnet':
            h = x
            for layer in range(depth):
                h3 = conv_stack('latent_resnet_%d' % layer, h,
                                mid_channels=width,
                                output_channels=x_shape[-1],
                                dropout=hparams.coupling_dropout)
                h += h3
            mean_log_scale = conv('glow_res_final', h, conv_init='zeros',
                                  output_channels=2 * output_channels,
                                  apply_actnorm=False)
        else:
            raise ValueError('expected architecture to be single_conv or '
                             'glow_nn got %s' % architecture)
        # FIX: slice syntax was garbled ("[(:, ...)]"); even channels hold
        # the mean, odd channels the log-scale.
        mean = mean_log_scale[:, :, :, 0::2]
        log_scale = mean_log_scale[:, :, :, 1::2]
        return tfp.distributions.Normal(mean, tf.exp(log_scale))
Map latent to the mean and log-scale of a Gaussian. Args: name: variable scope. x: 4-D Tensor of shape (NHWC) hparams: HParams. latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet", default = single_conv latent_encoder_depth - int, depth of architecture, valid if latent_architecture is "glow_nn" or "glow_resnet". latent_pre_output_channels - 512, valid only when latent_architecture is "glow_nn". latent_encoder_width - 512, maximum width of the network output_channels: int, number of output channels of the mean (and std). if not provided, set it to be the output channels of x. Returns: dist: instance of tfp.distributions.Normal Raises: ValueError: If architecture not in ["single_conv", "glow_nn"]
codesearchnet
def register_date_conversion_handler(date_specifier_patterns):
    """Decorator for registering handlers that convert text dates to dates.

    Args:
        date_specifier_patterns (str): the date specifier (in regex
            pattern format) for which the handler is registered.
    """
    def _decorator(func):
        global DATE_SPECIFIERS_CONVERSION_HANDLERS
        # Look up the compiled regex for this specifier and register the
        # handler under it.
        regex = DATE_SPECIFIERS_REGEXES[date_specifier_patterns]
        DATE_SPECIFIERS_CONVERSION_HANDLERS[regex] = func
        return func

    return _decorator
Decorator for registering handlers that convert text dates to dates. Args: date_specifier_patterns (str): the date specifier (in regex pattern format) for which the handler is registered
juraj-google-style
def ParseFileObject(self, parser_mediator, file_object):
    """Parses a MacOS keychain file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): a file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    try:
        file_header = self._ReadFileHeader(file_object)
    except (ValueError, errors.ParseError):
        raise errors.UnableToParseFile('Unable to parse file header.')

    tables = self._ReadTablesArray(
        file_object, file_header.tables_array_offset)

    # Application-password records first, then internet-password records.
    for record_type, parse_record in (
            (self._RECORD_TYPE_APPLICATION_PASSWORD,
             self._ParseApplicationPasswordRecord),
            (self._RECORD_TYPE_INTERNET_PASSWORD,
             self._ParseInternetPasswordRecord)):
        table = tables.get(record_type, None)
        if table:
            for record in table.records:
                parse_record(parser_mediator, record)
Parses a MacOS keychain file-like object. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): a file-like object. Raises: UnableToParseFile: when the file cannot be parsed.
codesearchnet
def make_acro(past, prefix, s):
    """Create a three letter acronym from the input string s.

    Tries a sequence of increasingly permissive strategies until one
    produces an acronym not already present in `past`.

    Args:
        past: A set object, for storing acronyms that have already been
            created.
        prefix: A prefix added to the acronym before storing in the set.
        s: The string to create the acronym from.

    Returns:
        str: the acronym (without the prefix).

    Raises:
        Exception: if no strategy produced an unused acronym.
    """

    def _make_acro(s, t=0):
        """Attempt one acronym of s; strategy varies with trial number t."""
        v = ['a', 'e', 'i', 'o', 'u', 'y']
        c = [chr(x) for x in range(ord('a'), ord('z') + 1) if chr(x) not in v]

        s = re.sub(r'\W+', '', s.lower())

        vx = [x for x in s if x in v]  # vowels, in order
        cx = [x for x in s if x in c]  # consonants, in order

        # FIX: s has already been lowercased above, so the original check
        # s.startswith('Mc') could never match; compare lowercase instead.
        if s.startswith('mc'):
            if t < 1:
                return 'Mc' + v[0]
            if t < 2:
                return 'Mc' + c[0]
        elif s[0] in v:  # starts with a vowel
            if t < 1:
                return vx[0] + cx[0] + cx[1]
            if t < 2:
                return vx[0] + vx[1] + cx[0]
        elif s[0] in c and s[1] in c:  # starts with two consonants
            if t < 1:
                return cx[0] + cx[1] + vx[0]
            if t < 2:
                return cx[0] + cx[1] + cx[2]

        # Generic fallbacks, loosening with each trial.
        if t < 3:
            return cx[0] + vx[0] + cx[1]
        if t < 4:
            return cx[0] + cx[1] + cx[2]
        if t < 5:
            return cx[0] + vx[0] + vx[1]
        if t < 6:
            return cx[0] + cx[1] + cx[-1]
        if t < 7:
            return s[0:3]
        if t < 8:
            return s[1:4]
        if t < 9:
            return s[2:5]
        if t < 10:
            return s[3:6]

        return None

    # six_xrange replaced with the built-in range (identical in py3).
    for t in range(11):
        try:
            a = _make_acro(s, t)
            if a is not None:
                if prefix:
                    aps = prefix + a
                else:
                    aps = a
                if aps not in past:
                    past.add(aps)
                    return a
        except IndexError:
            pass

    raise Exception('Could not get acronym.')
Create a three letter acronym from the input string s. Args: past: A set object, for storing acronyms that have already been created prefix: A prefix added to the acronym before storing in the set s: The string to create the acronym from.
juraj-google-style
def _extract_id_token(id_token):
    """Extract the JSON payload from a JWT.

    Does the extraction w/o checking the signature.

    Args:
        id_token: string or bytestring, OAuth 2.0 id_token.

    Returns:
        object, The deserialized JSON payload.

    Raises:
        VerifyJwtTokenError: if the token does not have three segments.
    """
    # isinstance instead of an exact type() comparison so bytes
    # subclasses are also split with the bytes separator.
    if isinstance(id_token, bytes):
        segments = id_token.split(b'.')
    else:
        segments = id_token.split(u'.')

    if len(segments) != 3:
        raise VerifyJwtTokenError(
            'Wrong number of segments in token: {0}'.format(id_token))

    return json.loads(
        _helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1])))
Extract the JSON payload from a JWT. Does the extraction w/o checking the signature. Args: id_token: string or bytestring, OAuth 2.0 id_token. Returns: object, The deserialized JSON payload.
juraj-google-style
def call_and_grads(fn: TransitionOperator, args: Union[(Tuple[Any], Any)]) -> Tuple[(tf.Tensor, TensorNest, TensorNest)]:
    """Calls `fn` and returns the gradients with respect to `fn`'s first output.

    Args:
        fn: A `TransitionOperator`.
        args: Arguments to `fn`.

    Returns:
        ret: First output of `fn`.
        extra: Second output of `fn`.
        grads: Gradients of `ret` with respect to `args`.
    """
    with tf.GradientTape() as tape:
        # args may be plain tensors (not Variables); watch them explicitly
        # so the tape records operations on them.
        tape.watch(args)
        (ret, extra) = call_fn(fn, args)
    grads = tape.gradient(ret, args)
    return (ret, extra, grads)
Calls `fn` and returns the gradients with respect to `fn`'s first output. Args: fn: A `TransitionOperator`. args: Arguments to `fn` Returns: ret: First output of `fn`. extra: Second output of `fn`. grads: Gradients of `ret` with respect to `args`.
codesearchnet
def increment_max_models(self, increment: int):
    """Increments the number of models this _ModelManager is able to hold.

    If it is never called, no limit is imposed.

    Args:
        increment: the amount by which we are incrementing the number of
            models.
    """
    # An unset limit counts as zero before the first increment.
    current = self._max_models if self._max_models is not None else 0
    self._max_models = current + increment
Increments the number of models that this instance of a _ModelManager is able to hold. If it is never called, no limit is imposed. Args: increment: the amount by which we are incrementing the number of models.
github-repos
def GetTaskPendingMerge(self, current_task):
    """Retrieves the first task that is pending merge or has a higher priority.

    This function will check if there is a task with a higher merge
    priority than the current_task being merged. If so, that task with
    the higher priority is returned.

    Args:
        current_task (Task): current task being merged or None if no such
            task.

    Returns:
        Task: the next task to merge or None if there is no task pending
            merge or with a higher priority.
    """
    # Peek without the lock first; only pop under the lock below.
    next_task = self._tasks_pending_merge.PeekTask()
    if (not next_task):
        return None

    # NOTE(review): a larger merge_priority value is treated here as
    # "do not preempt the current merge" — confirm the priority ordering.
    if (current_task and (next_task.merge_priority > current_task.merge_priority)):
        return None

    with self._lock:
        next_task = self._tasks_pending_merge.PopTask()
        self._tasks_merging[next_task.identifier] = next_task
    return next_task
Retrieves the first task that is pending merge or has a higher priority. This function will check if there is a task with a higher merge priority than the current_task being merged. If so, that task with the higher priority is returned. Args: current_task (Task): current task being merged or None if no such task. Returns: Task: the next task to merge or None if there is no task pending merge or with a higher priority.
codesearchnet
def whole_subnet_maker(ip_addr, cidr):
    """Return a whole subnet value from an IP address and CIDR pair.

    Args:
        ip_addr: Unicast or Multicast IP address or subnet in the
            following format 192.168.1.1, 239.1.1.1.
        cidr: CIDR value of 0 to 32.

    Returns:
        The corrected whole subnet as a dotted-quad string.

    Raises:
        ValueError: if ip_addr is not a valid unicast/multicast IPv4
            address, or cidr is not in 0..32.
    """
    if ucast_ip(ip_addr, False) == False and mcast_ip(ip_addr, False) == False:
        LOGGER.critical('Function whole_subnet_maker ip_addr {item}'.format(item=ip_addr))
        raise ValueError("Not a good ipv4 address")

    if not cidr_check(cidr, False):
        LOGGER.critical('Function whole_subnet_maker cidr {item}'.format(item=cidr))
        raise ValueError("Not a good CIDR value should be 0 to 32")

    def subnet_corrector(octet, cidr):
        """Snap a single octet down to the start of its CIDR block."""
        cidr_int = int(cidr)
        octet_int = int(octet)
        # Pick the mask octet the CIDR falls into (from the module-level
        # __mask_conversion table).
        if cidr_int >= 24:
            cidr_int = __mask_conversion[cidr_int]["OCT4"]
        elif cidr_int >= 16:
            cidr_int = __mask_conversion[cidr_int]["OCT3"]
        elif cidr_int >= 8:
            cidr_int = __mask_conversion[cidr_int]["OCT2"]
        elif cidr_int >= 1:
            cidr_int = __mask_conversion[cidr_int]["OCT1"]
        cidr_count = 0
        cidr_v = 256 - cidr_int
        cidr_2 = 256 - cidr_int
        # Walk block boundaries of size cidr_v; remember the start of the
        # window that contains the octet.
        while cidr_count < 300:
            if octet_int >= cidr_count and octet_int <= cidr_2:
                cidr_int = cidr_count
            cidr_count = cidr_2
            cidr_2 = cidr_2 + cidr_v
        return str(cidr_int)

    ip_addr_split = ip_addr.split(".")
    # Zero out host bits depending on which octet the CIDR lands in.
    if int(cidr) >= 24:
        octet = subnet_corrector(ip_addr_split[3], cidr)
        completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + ip_addr_split[2] + "." + octet
        return completed
    elif int(cidr) >= 16:
        octet = subnet_corrector(ip_addr_split[2], cidr)
        completed = ip_addr_split[0] + "." + ip_addr_split[1] + "." + octet + ".0"
        return completed
    elif int(cidr) >= 8:
        octet = subnet_corrector(ip_addr_split[1], cidr)
        completed = ip_addr_split[0] + "." + octet + ".0.0"
        return completed
    elif int(cidr) >= 1:
        octet = subnet_corrector(ip_addr_split[0], cidr)
        completed = octet + ".0.0.0"
        return completed
    else:
        return "0.0.0.0"
Function to return a whole subnet value from a IP address and CIDR pair Args: ip_addr: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1 cidr: CIDR value of 0 to 32 Returns: returns the corrected whole subnet
juraj-google-style
def __init__(self, control_handler, data_plane_handler, state, provision_info):
    """Initialize a WorkerHandler.

    Args:
        control_handler: control channel handler for this worker.
        data_plane_handler (data_plane.DataChannel): data plane handler.
        state: state handler.
        provision_info: provisioning information for the worker.
    """
    self.control_handler = control_handler
    self.data_plane_handler = data_plane_handler
    self.state = state
    self.provision_info = provision_info

    # Assign a process-wide unique worker id under the class-level lock.
    with WorkerHandler._lock:
        WorkerHandler._worker_id_counter += 1
        self.worker_id = 'worker_%s' % WorkerHandler._worker_id_counter
Initialize a WorkerHandler. Args: control_handler: data_plane_handler (data_plane.DataChannel): state: provision_info:
github-repos
def dumps(collection: BioCCollection, pretty_print: bool = True) -> str:
    """Serialize ``collection`` to a BioC formatted ``str``.

    Args:
        collection: the BioC collection.
        pretty_print: enables formatted XML.

    Returns:
        a BioC formatted ``str``.
    """
    tree = etree.ElementTree(BioCXMLEncoder().encode(collection))
    raw = etree.tostring(
        tree,
        pretty_print=pretty_print,
        encoding=collection.encoding,
        standalone=collection.standalone,
    )
    return raw.decode(collection.encoding)
Serialize ``collection`` to a BioC formatted ``str``. Args: collection: the BioC collection pretty_print: enables formatted XML Returns: a BioC formatted ``str``
juraj-google-style
def parse(self):
    """Parse command line arguments and options.

    Returns:
        Dictionary containing all given command line arguments and
        options.
    """
    (options, args) = self.parser.parse_args()
    # Store the parsed values on this instance before collapsing them.
    self._set_attributes(args, options)
    return self._create_dictionary()
Parse command line arguments and options. Returns: Dictionary containing all given command line arguments and options.
codesearchnet
def GetStorageMediaImageTypeIndicators(cls, path_spec, resolver_context=None):
    """Determines if a file contains a supported storage media image types.

    Args:
        path_spec (PathSpec): path specification.
        resolver_context (Optional[Context]): resolver context, where None
            represents the built-in context which is not multi process
            safe.

    Returns:
        list[str]: supported format type indicators.
    """
    # Lazily build and cache the specification store and remainder list;
    # they are shared class-wide across calls.
    if (cls._storage_media_image_remainder_list is None or
            cls._storage_media_image_store is None):
        specification_store, remainder_list = cls._GetSpecificationStore(
            definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE)
        cls._storage_media_image_remainder_list = remainder_list
        cls._storage_media_image_store = specification_store

    # The signature scanner is also built once from the cached store.
    if cls._storage_media_image_scanner is None:
        cls._storage_media_image_scanner = cls._GetSignatureScanner(
            cls._storage_media_image_store)

    return cls._GetTypeIndicators(
        cls._storage_media_image_scanner, cls._storage_media_image_store,
        cls._storage_media_image_remainder_list, path_spec,
        resolver_context=resolver_context)
Determines if a file contains a supported storage media image types. Args: path_spec (PathSpec): path specification. resolver_context (Optional[Context]): resolver context, where None represents the built-in context which is not multi process safe. Returns: list[str]: supported format type indicators.
juraj-google-style
def expect_true(condition, msg, extras=None):
    """Expects an expression evaluates to True.

    If the expectation is not met, the test is marked as fail after its
    execution finishes.

    Args:
        condition: The expression that is evaluated.
        msg: A string explaining the details in case of failure.
        extras: An optional field for extra information to be included in
            test result.
    """
    try:
        asserts.assert_true(condition, msg, extras)
    except signals.TestSignal as e:
        # Record instead of raising so the test keeps executing; the
        # failure is reported when the test finishes.
        logging.exception('Expected a `True` value, got `False`.')
        recorder.add_error(e)
Expects an expression evaluates to True. If the expectation is not met, the test is marked as fail after its execution finishes. Args: condition: The expression that is evaluated. msg: A string explaining the details in case of failure. extras: An optional field for extra information to be included in test result.
juraj-google-style
def value_to_message(self, value):
    """Convert a value instance to a message.

    Used by serializers to convert Python user types to underlying
    messages for transmission.

    Args:
        value: A value of type self.type.

    Returns:
        An instance of type self.message_type.

    Raises:
        EncodeError: if value is not an instance of self.type.
    """
    if isinstance(value, self.type):
        return value
    raise EncodeError('Expected type %s, got %s: %r' %
                      (self.type.__name__, type(value).__name__, value))
Convert a value instance to a message. Used by serializers to convert Python user types to underlying messages for transmission. Args: value: A value of type self.type. Returns: An instance of type self.message_type.
juraj-google-style
def class_label_top(body_output, targets, model_hparams, vocab_size):
    """Transform inputs from model space to target space.

    Average over inner dims and a linear layer to logits.

    Args:
        body_output: A Tensor with shape [batch, ?, ?, body_output_size].
        targets: unused.
        model_hparams: HParams, model hyperparameters.
        vocab_size: int, vocabulary size.

    Returns:
        a Tensor with shape [batch_size, 1, 1, 1, vocab_size].
    """
    del targets  # unused
    scope_name = 'class_label_modality_%d_%d' % (
        vocab_size, model_hparams.hidden_size)
    with tf.variable_scope(scope_name):
        pooled = tf.reduce_mean(body_output, axis=[1, 2], keepdims=True)
        logits = tf.layers.dense(pooled, vocab_size)
        return tf.expand_dims(logits, 3)
Transform inputs from model space to target space. Average over inner dims and a linear layer to logits. Args: body_output: A Tensor with shape [batch, ?, ?, body_output_size]. targets: unused. model_hparams: HParams, model hyperparameters. vocab_size: int, vocabulary size. Returns: a Tensor with shape [batch_size, 1, 1, 1, vocab_size]
codesearchnet
def _prepare(f, xs_dtypes, xs_shapes):
    """Return a function that executes 'f'.

    In TF 2.x, this is the same as `f`. In TF 1.x, returns a Python
    function that executes the graph defined by `f` in a Session.

    Args:
        f: the function.
        xs_dtypes: dtypes of f's arguments.
        xs_shapes: shapes of f's arguments.

    Returns:
        A callable with the same signature as `f`.
    """
    if context.executing_eagerly():

        def decorated_eager(*xs_data):
            return f(*map(ops.convert_to_tensor, xs_data))
        return decorated_eager
    # Graph mode: build the graph once from placeholders, then run it in
    # the default session on each call.
    xs = [array_ops.placeholder(x_dtype, shape=x_shape) for x_dtype, x_shape in zip(xs_dtypes, xs_shapes)]
    y = f(*xs)
    sess = ops.get_default_session()

    def decorated_graph(*xs_data):
        xs_data = [_to_numpy(a) for a in xs_data]
        return sess.run(y, feed_dict=dict(zip(xs, xs_data)))
    return decorated_graph
Return a function that executes 'f'. In TF 2.x, this is the same as `f`. In TF 1.x, returns a Python function that executes the graph defined by `f` in a Session. Args: f: the function. xs_dtypes: dtypes of f's arguments. xs_shapes: shapes of f's arguments. Returns:
github-repos
def _AddVolume(self, volume):
    """Adds a volume.

    Args:
        volume (Volume): a volume.

    Raises:
        KeyError: if volume is already set for the corresponding volume
            identifier.
    """
    identifier = volume.identifier
    if identifier in self._volumes:
        raise KeyError(
            'Volume object already set for volume identifier: {0:s}'.format(
                identifier))

    self._volumes[identifier] = volume
    self._volume_identifiers.append(identifier)
Adds a volume. Args: volume (Volume): a volume. Raises: KeyError: if volume is already set for the corresponding volume identifier.
juraj-google-style
def get_wigner_seitz_cell(self) -> List[List[np.ndarray]]:
    """Returns the Wigner-Seitz cell for the given lattice.

    Returns:
        A list of list of coordinates. Each element in the list is a
        "facet" of the boundary of the Wigner-Seitz cell. For instance, a
        list of four coordinates will represent a square facet.
    """
    from scipy.spatial import Voronoi

    vec1, vec2, vec3 = self._matrix[0], self._matrix[1], self._matrix[2]
    # 3x3x3 grid of lattice points around the origin; the origin itself
    # is the 14th point generated (index 13).
    list_k_points = [
        i * vec1 + j * vec2 + k * vec3
        for i, j, k in itertools.product((-1, 0, 1), repeat=3)
    ]

    tess = Voronoi(list_k_points)
    to_return = []
    for ridge, vertex_ids in tess.ridge_dict.items():
        # Keep only the ridges that bound the central point's cell.
        if 13 in ridge:
            to_return.append([tess.vertices[i] for i in vertex_ids])
    return to_return
Returns the Wigner-Seitz cell for the given lattice. Returns: A list of list of coordinates. Each element in the list is a "facet" of the boundary of the Wigner Seitz cell. For instance, a list of four coordinates will represent a square facet.
codesearchnet
def profile_args(_args):
    """Return args for v1, v2, or v3 structure.

    v3 nests args under app.optional / app.required plus default; v2 uses
    app plus default; v1 is already flat.

    Args:
        _args (dict): The args section from the profile.

    Returns:
        dict: A collapsed version of the args dict.
    """
    app = _args.get('app', {})
    if app.get('optional') is not None or app.get('required') is not None:
        # v3: merge optional, required, then defaults (later wins).
        collapsed = {}
        collapsed.update(app.get('optional', {}))
        collapsed.update(app.get('required', {}))
        collapsed.update(_args.get('default', {}))
        return collapsed
    if _args.get('app') is not None and _args.get('default') is not None:
        # v2: app args overlaid with defaults.
        collapsed = {}
        collapsed.update(app)
        collapsed.update(_args.get('default', {}))
        return collapsed
    # v1: already collapsed.
    return _args
Return args for v1, v2, or v3 structure. Args: _args (dict): The args section from the profile. Returns: dict: A collapsed version of the args dict.
codesearchnet
def get_template(template_file='', **kwargs):
    """Get the Jinja2 template and renders with dict _kwargs_.

    Args:
        template_file (str): name of the template file.
        kwargs: Keywords to use for rendering the Jinja2 template.

    Returns:
        String of rendered JSON template.
    """
    template = get_template_object(template_file)
    LOG.info('Rendering template %s', template.filename)
    # Debug-log each substitution before rendering.
    for key, value in kwargs.items():
        LOG.debug('%s => %s', key, value)
    rendered_json = template.render(**kwargs)
    LOG.debug('Rendered JSON:\n%s', rendered_json)
    return rendered_json
Get the Jinja2 template and renders with dict _kwargs_. Args: template_file (str): name of the template file kwargs: Keywords to use for rendering the Jinja2 template. Returns: String of rendered JSON template.
juraj-google-style
def determine_framework(model: str, framework: Optional[str]=None) -> str:
    """Determines the framework to use for the export.

    The priority is in the following order:
    1. User input via `framework`.
    2. If a local checkpoint is provided, use the same framework as the
       checkpoint.
    3. Available framework in environment, with priority given to PyTorch.

    Args:
        model (`str`): The name of the model to export.
        framework (`str`, *optional*, defaults to `None`): The framework
            to use for the export. See above for priority if none
            provided.

    Returns:
        The framework to use for the export.

    Raises:
        FileNotFoundError: if a local checkpoint directory contains
            neither a PyTorch nor a TensorFlow weights file.
        OSError: if neither PyTorch nor TensorFlow is installed.
    """
    if framework is not None:
        return framework
    framework_map = {'pt': 'PyTorch', 'tf': 'TensorFlow'}
    exporter_map = {'pt': 'torch', 'tf': 'tf2onnx'}
    if os.path.isdir(model):
        # Local checkpoint: infer the framework from the weights file
        # present in the directory.
        if os.path.isfile(os.path.join(model, WEIGHTS_NAME)):
            framework = 'pt'
        elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)):
            framework = 'tf'
        else:
            raise FileNotFoundError(f'Cannot determine framework from given checkpoint location. There should be a {WEIGHTS_NAME} for PyTorch or {TF2_WEIGHTS_NAME} for TensorFlow.')
        logger.info(f'Local {framework_map[framework]} model found.')
    elif is_torch_available():
        framework = 'pt'
    elif is_tf_available():
        framework = 'tf'
    else:
        raise OSError('Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.')
    logger.info(f'Framework not requested. Using {exporter_map[framework]} to export to ONNX.')
    return framework
Determines the framework to use for the export. The priority is in the following order: 1. User input via `framework`. 2. If local checkpoint is provided, use the same framework as the checkpoint. 3. Available framework in environment, with priority given to PyTorch Args: model (`str`): The name of the model to export. framework (`str`, *optional*, defaults to `None`): The framework to use for the export. See above for priority if none provided. Returns: The framework to use for the export.
github-repos
def _get_single_set(self, num_objects, num_features):
    """Generate one input sequence and output label.

    Each sequence of objects has a feature that consists of the feature
    vector for that object plus the encoding for its ID, the reference
    vector ID and the n-th value relative ID for a total feature size of:
    `num_objects` * 3 + `num_features`.

    Args:
        num_objects: int. number of objects in the sequence.
        num_features: int. feature size of each object.

    Returns:
        1. np.ndarray (`num_objects`, (`num_features` + 3 * `num_objects`)).
        2. np.ndarray (1,). Output object reference label.
    """
    data = np.random.uniform(-1, 1, size=(num_objects, num_features))
    distances = spdistance.squareform(spdistance.pdist(data))
    # Per row, object ids sorted by distance from that row's object.
    distance_idx = np.argsort(distances)
    nth = np.random.randint(0, num_objects)
    # FIX: slice syntax was garbled in the source ("[(:, nth)]");
    # restored to column selection — the nth-closest object of each row.
    nth_furthest = distance_idx[:, nth]
    reference = np.random.randint(0, num_objects)
    labels = nth_furthest[reference]
    object_ids = np.identity(num_objects)
    nth_matrix = np.zeros((num_objects, num_objects))
    nth_matrix[:, nth] = 1
    reference_object = np.zeros((num_objects, num_objects))
    reference_object[:, reference] = 1
    inputs = np.concatenate(
        [data, object_ids, reference_object, nth_matrix], axis=-1)
    inputs = np.random.permutation(inputs)
    labels = np.expand_dims(labels, axis=0)
    return (inputs.astype(np.float32), labels.astype(np.float32))
Generate one input sequence and output label. Each sequences of objects has a feature that consists of the feature vector for that object plus the encoding for its ID, the reference vector ID and the n-th value relative ID for a total feature size of: `num_objects` * 3 + `num_features` Args: num_objects: int. number of objects in the sequence. num_features: int. feature size of each object. Returns: 1. np.ndarray (`num_objects`, (`num_features` + 3 * `num_objects`)). 2. np.ndarray (1,). Output object reference label.
codesearchnet
def _get_table_names(statement):
    """Returns table names found in the query.

    NOTE. This routine would use the sqlparse parse tree, but vnames
    don't parse very well.

    Args:
        statement (sqlparse.sql.Statement): parsed by sqlparse sql
            statement.

    Returns:
        list of str
    """
    tokens = statement.to_unicode().split()
    tables = set()
    for position, token in enumerate(tokens):
        lowered = token.lower()
        # The table name is the token right after FROM or any *JOIN.
        if lowered == 'from' or lowered.endswith('join'):
            tables.add(tokens[position + 1].rstrip(';'))
    return list(tables)
Returns table names found in the query. NOTE. This routine would use the sqlparse parse tree, but vnames don't parse very well. Args: statement (sqlparse.sql.Statement): parsed by sqlparse sql statement. Returns: list of str
juraj-google-style
def passthrough_context_definition(context_params):
    """Create a context definition from a pre-existing context.

    This can be useful in testing contexts where you may want to create a
    context manually and then pass it into a one-off PipelineDefinition.

    Args:
        context_params (ExecutionContext): The context that will be
            provided to the pipeline.

    Returns:
        PipelineContextDefinition: The passthrough context definition.
    """
    check.inst_param(context_params, 'context', ExecutionContext)

    def _return_context(*_args):
        return context_params

    context_definition = PipelineContextDefinition(context_fn=_return_context)
    return {DEFAULT_CONTEXT_NAME: context_definition}
Create a context definition from a pre-existing context. This can be useful in testing contexts where you may want to create a context manually and then pass it into a one-off PipelineDefinition Args: context (ExecutionContext): The context that will provided to the pipeline. Returns: PipelineContextDefinition: The passthrough context definition.
juraj-google-style
def unapprove(self, **kwargs):
    """Unapprove the merge request.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabMRApprovalError: If the unapproval failed.
    """
    path = '%s/%s/unapprove' % (self.manager.path, self.get_id())
    server_data = self.manager.gitlab.http_post(path, post_data={}, **kwargs)
    self._update_attrs(server_data)
Unapprove the merge request. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabMRApprovalError: If the unapproval failed
codesearchnet
def serialCmdPwdAuth(self, password_str):
    """Password step of set commands.

    This method is normally called within another serial command, so it
    does not issue a termination string. Any default password is set in
    the caller parameter list, never here.

    Args:
        password_str (str): Required password.

    Returns:
        bool: True on completion and ACK.
    """
    result = False
    try:
        # Python 2 only: str.decode('hex') / binascii.hexlify on byte
        # strings build the raw request frame.
        req_start = (('0150310228' + binascii.hexlify(password_str)) + '2903')
        req_crc = self.calc_crc16(req_start[2:].decode('hex'))
        req_str = (req_start + req_crc)
        self.m_serial_port.write(req_str.decode('hex'))
        # A single ACK byte (0x06) means the password was accepted.
        if (self.m_serial_port.getResponse(self.getContext()).encode('hex') == '06'):
            ekm_log((('Password accepted (' + self.getContext()) + ')'))
            result = True
        else:
            ekm_log((('Password call failure no 06(' + self.getContext()) + ')'))
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit; consider narrowing to Exception.
        ekm_log((('Password call failure by exception(' + self.getContext()) + ')'))
        ekm_log(traceback.format_exc(sys.exc_info()))
    return result
Password step of set commands This method is normally called within another serial command, so it does not issue a termination string. Any default password is set in the caller parameter list, never here. Args: password_str (str): Required password. Returns: bool: True on completion and ACK.
codesearchnet
def log(msg, level=0):
    """Log a message to the console, with optional level parameter.

    Args:
        msg (str): message to send to console
        level (int): log level; 0 for info, 1 for error (default = 0)
    """
    RED = '\x1b[91m'
    RESET = '\x1b[0m'
    config = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'stdout': {
                'format': '[%(levelname)s]: %(asctime)s - %(message)s',
                'datefmt': '%x %X',
            },
            'stderr': {
                # Error output is wrapped in ANSI red.
                'format': RED + '[%(levelname)s]: %(asctime)s - %(message)s' + RESET,
                'datefmt': '%x %X',
            },
        },
        'handlers': {
            'stdout': {'class': 'logging.StreamHandler', 'level': 'DEBUG',
                       'formatter': 'stdout'},
            'stderr': {'class': 'logging.StreamHandler', 'level': 'ERROR',
                       'formatter': 'stderr'},
        },
        'loggers': {
            'info': {'handlers': ['stdout'], 'level': 'INFO', 'propagate': True},
            'error': {'handlers': ['stderr'], 'level': 'ERROR', 'propagate': False},
        },
    }
    # Re-applied on every call, as in the original implementation.
    dictConfig(config)
    if level == 0:
        logger_name, log_level = 'info', logging.INFO
    else:
        logger_name, log_level = 'error', logging.ERROR
    logging.getLogger(logger_name).log(log_level, msg)
Logs a message to the console, with optional level paramater Args: - msg (str): message to send to console - level (int): log level; 0 for info, 1 for error (default = 0)
codesearchnet
def day(self, value=None):
    """Corresponds to IDD Field `day`.

    Args:
        value (int): value for IDD Field `day`.
            value >= 1, value <= 31.
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type int for field `day`'.format(value))
        if value < 1:
            raise ValueError('value need to be greater or equal 1 for field `day`')
        if value > 31:
            raise ValueError('value need to be smaller 31 for field `day`')
    # None is stored as-is to represent a missing value.
    self._day = value
Corresponds to IDD Field `day` Args: value (int): value for IDD Field `day` value >= 1 value <= 31 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
codesearchnet
def update_profiles(adapter):
    """For all cases having a profile path, update the profile string for the samples.

    Args:
        adapter (MongoAdapter): Adapter to mongodb
    """
    for case_obj in adapter.cases():
        if not case_obj.get('profile_path'):
            continue
        profiles = get_profiles(adapter, case_obj['profile_path'])
        individuals = deepcopy(case_obj['individuals'])
        for individual in individuals:
            try:
                individual['profile'] = profiles[individual['ind_id']]
            except KeyError:
                # A sample in the case is missing from the profiled VCF.
                LOG.warning(
                    f"sample IDs in vcf does not match for case {case_obj['case_id']}")
        updated_case = deepcopy(case_obj)
        updated_case['individuals'] = individuals
        adapter.add_case(updated_case, update=True)
For all cases having profile_path, update the profile string for the samples Args: adapter (MongoAdapter): Adapter to mongodb
codesearchnet
def foreach_model(self, fn):
    """Apply the given function to each model replica in each worker.

    Returns:
        List of results from applying the function.
    """
    per_worker = ray.get(
        [worker.foreach_model.remote(fn) for worker in self.workers])
    # Flatten the per-worker result lists into a single list.
    flattened = []
    for worker_results in per_worker:
        flattened.extend(worker_results)
    return flattened
Apply the given function to each model replica in each worker. Returns: List of results from applying the function.
codesearchnet
def find_bucket(self, bucketing_id, parent_id, traffic_allocations):
    """Determine entity based on bucket value and traffic allocations.

    Args:
        bucketing_id: ID to be used for bucketing the user.
        parent_id: ID representing group or experiment.
        traffic_allocations: Traffic allocations representing traffic
            allotted to experiments or variations.

    Returns:
        Entity ID which may represent experiment or variation, or None
        if the bucket value falls outside every allocation range.
    """
    bucketing_key = BUCKETING_ID_TEMPLATE.format(
        bucketing_id=bucketing_id, parent_id=parent_id)
    bucket_value = self._generate_bucket_value(bucketing_key)
    self.config.logger.debug(
        'Assigned bucket %s to user with bucketing ID "%s".'
        % (bucket_value, bucketing_id))
    # Allocations partition the bucket space by end-of-range markers; the
    # first range containing the bucket value wins.
    for allocation in traffic_allocations:
        if bucket_value < allocation.get('endOfRange'):
            return allocation.get('entityId')
    return None
Determine entity based on bucket value and traffic allocations. Args: bucketing_id: ID to be used for bucketing the user. parent_id: ID representing group or experiment. traffic_allocations: Traffic allocations representing traffic allotted to experiments or variations. Returns: Entity ID which may represent experiment or variation.
codesearchnet
def _make_ctx_options(ctx_options, config_cls=ContextOptions):
    """Helper to construct a ContextOptions object from keyword arguments.

    Args:
        ctx_options: A dict of keyword arguments.
        config_cls: Optional Configuration class to use, default ContextOptions.

    Returns:
        A Configuration object, or None if ctx_options is empty.
    """
    if not ctx_options:
        return None
    # Rename deprecated option keys to their canonical names in place.
    for key in list(ctx_options):
        canonical = _OPTION_TRANSLATIONS.get(key)
        if not canonical:
            continue
        if canonical in ctx_options:
            raise ValueError('Cannot specify %s and %s at the same time'
                             % (key, canonical))
        ctx_options[canonical] = ctx_options.pop(key)
    return config_cls(**ctx_options)
Helper to construct a ContextOptions object from keyword arguments. Args: ctx_options: A dict of keyword arguments. config_cls: Optional Configuration class to use, default ContextOptions. Note that either 'options' or 'config' can be used to pass another Configuration object, but not both. If another Configuration object is given it provides default values. Returns: A Configuration object, or None if ctx_options is empty.
juraj-google-style
def random_tril_matrix(shape, dtype, force_well_conditioned=False, remove_upper=True):
    """[batch] lower triangular matrix.

    Args:
        shape: `TensorShape` or Python `list`. Shape of the returned matrix.
        dtype: `TensorFlow` `dtype` or Python dtype.
        force_well_conditioned: Python `bool`. If `True`, returned matrix will
            have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues
            are unit normal random variables.
        remove_upper: Python `bool`. If `True`, zero out the strictly upper
            triangle. If `False`, the lower triangle of the returned matrix
            will have the desired properties, but the strictly upper triangle
            is left untouched.

    Returns:
        `Tensor` with desired shape and dtype.
    """
    with ops.name_scope('random_tril_matrix'):
        tril = random_normal(shape, dtype=dtype)
        if remove_upper:
            # Keep only the lower triangle (num_lower=-1 keeps all sub-diagonals,
            # num_upper=0 drops everything above the main diagonal).
            tril = array_ops.matrix_band_part(tril, -1, 0)
        if force_well_conditioned:
            # For a triangular matrix the eigenvalues are the diagonal entries;
            # drawing them with modulus in [1, sqrt(2)) keeps the matrix away
            # from singular.
            maxval = ops.convert_to_tensor(np.sqrt(2.0), dtype=dtype.real_dtype)
            diag = random_sign_uniform(shape[:-1], dtype=dtype, minval=1.0, maxval=maxval)
            tril = array_ops.matrix_set_diag(tril, diag)
        return tril
[batch] lower triangular matrix. Args: shape: `TensorShape` or Python `list`. Shape of the returned matrix. dtype: `TensorFlow` `dtype` or Python dtype force_well_conditioned: Python `bool`. If `True`, returned matrix will have eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit normal random variables. remove_upper: Python `bool`. If `True`, zero out the strictly upper triangle. If `False`, the lower triangle of returned matrix will have desired properties, but will not have the strictly upper triangle zero'd out. Returns: `Tensor` with desired shape and dtype.
github-repos
def disconnect_container_from_network(self, container, net_id, force=False):
    """Disconnect a container from a network.

    Args:
        container (str): container ID or name to be disconnected from
            the network
        net_id (str): network ID
        force (bool): Force the container to disconnect from a network.
            Default: ``False``
    """
    payload = {'Container': container}
    if force:
        # The Force flag only exists from API version 1.22 onwards.
        if version_lt(self._version, '1.22'):
            raise InvalidVersion(
                'Forced disconnect was introduced in API 1.22')
        payload['Force'] = force
    url = self._url('/networks/{0}/disconnect', net_id)
    self._raise_for_status(self._post_json(url, data=payload))
Disconnect a container from a network. Args: container (str): container ID or name to be disconnected from the network net_id (str): network ID force (bool): Force the container to disconnect from a network. Default: ``False``
codesearchnet
def list_vms(access_token, subscription_id, resource_group):
    """List VMs in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        resource_group (str): Azure resource group name.

    Returns:
        HTTP response. JSON body of a list of VM model views.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourceGroups/' + resource_group
        + '/providers/Microsoft.Compute/virtualMachines'
        + '?api-version=' + COMP_API)
    return do_get(endpoint, access_token)
List VMs in a resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. resource_group (str): Azure resource group name. Returns: HTTP response. JSON body of a list of VM model views.
codesearchnet
def create_jlink(self, args):
    """Creates an instance of a J-Link from the given arguments.

    Args:
        self (Command): the ``Command`` instance
        args (Namespace): arguments to construct the ``JLink`` instance from

    Returns:
        An instance of a ``JLink``.
    """
    jlink = pylink.JLink()
    jlink.open(args.serial_no, args.ip_addr)
    # Target interface defaults to JTAG unless SWD is explicitly requested.
    tif = getattr(args, 'tif', None)
    if tif is not None:
        if tif.lower() == 'swd':
            jlink.set_tif(pylink.JLinkInterfaces.SWD)
        else:
            jlink.set_tif(pylink.JLinkInterfaces.JTAG)
    device = getattr(args, 'device', None)
    if device is not None:
        jlink.connect(device)
    return jlink
Creates an instance of a J-Link from the given arguments. Args: self (Command): the ``Command`` instance args (Namespace): arguments to construct the ``JLink`` instance from Returns: An instance of a ``JLink``.
codesearchnet
def is_duplicated(self, item):
    """Check whether the item has been in the cache.

    If the item has not been seen before, hash it and put it into the
    cache, otherwise report it as duplicated.  When the cache size exceeds
    capacity, the earliest entries are discarded (FIFO).

    Args:
        item (object): The item to be checked and stored in cache. It must
            be immutable or a list/dict.

    Returns:
        bool: Whether the item has been in cache.
    """
    # Derive a hashable key for unhashable containers.  Note that lists are
    # reduced to a frozenset, so order and multiplicity are ignored.
    if isinstance(item, dict):
        key = json.dumps(item, sort_keys=True)
    elif isinstance(item, list):
        key = frozenset(item)
    else:
        key = item
    if key in self._cache:
        return True
    # Evict the oldest entry once the cache is at capacity.
    if self.cache_capacity > 0 and len(self._cache) >= self.cache_capacity:
        self._cache.popitem(False)
    self._cache[key] = 1
    return False
Check whether the item has been in the cache If the item has not been seen before, then hash it and put it into the cache, otherwise indicates the item is duplicated. When the cache size exceeds capacity, discard the earliest items in the cache. Args: item (object): The item to be checked and stored in cache. It must be immutable or a list/dict. Returns: bool: Whether the item has been in cache.
codesearchnet
def avg(vals, count=None):
    """Return the average value.

    Args:
        vals: List of numbers to calculate average from.
        count: Int of total count that vals was part of; defaults to
            ``len(vals)``.

    Returns:
        Float average value throughout a count.
    """
    # The previous version shadowed the builtin `sum` with a manual loop.
    if count is None:
        count = len(vals)
    return float(sum(vals)) / count
Returns the average value Args: vals: List of numbers to calculate average from. count: Int of total count that vals was part of. Returns: Float average value throughout a count.
juraj-google-style
def list_to_file(orig_list, file_name, file_location):
    """Export a list to a text file, one item per line.

    Args:
        orig_list: The list you want exported
        file_name: The name of the exported file
        file_location: The location of the file, derive from the os module

    Returns:
        The file name.
    """
    import os

    path = os.path.join(file_location, file_name)
    # 'a' preserves the original append semantics; the context manager
    # guarantees the handle is closed even if a write fails (the previous
    # version leaked the handle on error).
    with open(path, 'a') as handle:
        for item in orig_list:
            handle.write('%s\n' % (item,))
    return file_name
Function to export a list to a text file Args: orig_list: The list you want exported file_name: The name of the exported file file_location: The location of the file, derive from the os module Returns: returns the filename info
juraj-google-style
def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams):
    """Latent prediction and loss.

    Args:
        latents_pred: Tensor of shape [..., depth].
        latents_discrete_hot: Tensor of shape [..., vocab_size].
        vocab_size: an int representing the vocab size.
        hparams: HParams.

    Returns:
        sample: Tensor of shape [...], a sample from a multinomial distribution.
        loss: Tensor of shape [...], the softmax cross-entropy.
    """
    with tf.variable_scope("latent_logits"):
        latents_logits = tf.layers.dense(latents_pred, vocab_size,
                                         name="logits_dense")
        if hparams.logit_normalization:
            # Rescale logits so their mean square is ~1; the 1e-8 guards
            # against division by zero.
            latents_logits *= tf.rsqrt(1e-8 +
                                       tf.reduce_mean(tf.square(latents_logits)))
        loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=latents_discrete_hot, logits=latents_logits)
        sample = multinomial_sample(latents_logits, vocab_size,
                                    hparams.sampling_method,
                                    hparams.sampling_temp)
        return sample, loss
Latent prediction and loss. Args: latents_pred: Tensor of shape [..., depth]. latents_discrete_hot: Tensor of shape [..., vocab_size]. vocab_size: an int representing the vocab size. hparams: HParams. Returns: sample: Tensor of shape [...], a sample from a multinomial distribution. loss: Tensor of shape [...], the softmax cross-entropy.
juraj-google-style
def push(self, x):
    """Pushes a new value, maintains the sorted list, and manages the window.

    Args:
        x: The new value to be pushed.
    """
    # NaN values are tracked in the queue but excluded from the sorted view.
    if not math.isnan(x):
        self._sorted_items.add(x)
    # In sliding mode a full window evicts its oldest element first.
    if self._window_mode == WindowMode.SLIDING and len(self._queue) >= self._window_size:
        evicted = self.pop()
        if not math.isnan(evicted):
            self._sorted_items.discard(evicted)
    super().push(x)
Pushes a new value, maintains the sorted list, and manages the window. Args: x: The new value to be pushed.
github-repos
def safe_url(self, url, errors='strict'):
    """URL encode value for safe HTTP request.

    Args:
        url (string): The string to URL Encode.
        errors (string): Error handling scheme passed to the string
            coercion helper.

    Returns:
        (string): The urlencoded string, or None if `url` was None.
    """
    if url is None:
        return url
    # '~' is kept literal; everything else outside the unreserved set is
    # percent-encoded.
    return quote(self.s(url, errors=errors), safe='~')
URL encode value for safe HTTP request. Args: url (string): The string to URL Encode. Returns: (string): The urlencoded string.
codesearchnet
def _submit_request(self, url, params=None, data=None, headers=None, method='GET'):
    """Submits the given request, and handles the errors appropriately.

    Args:
        url (str): the request to send.
        params (dict): params to be passed along to get/post
        data (bytes): the data to include in the request.
        headers (dict): the headers to include in the request.
        method (str): the method to use for the request, "POST" or "GET".

    Returns:
        tuple of (int, str): The response status code and the json parsed
        body, or the error message.

    Raises:
        `CliException`: If any issues occur with the URL.
    """
    if headers is None:
        headers = {}
    if self._auth_header is not None:
        # Mutates the caller-supplied dict, matching historical behavior.
        headers['Authorization'] = self._auth_header
    try:
        if method == 'POST':
            result = requests.post(url, params=params, data=data, headers=headers)
        elif method == 'GET':
            result = requests.get(url, params=params, data=data, headers=headers)
        result.raise_for_status()
        return (result.status_code, result.json())
    except requests.exceptions.HTTPError as err:
        return (err.response.status_code, err.response.reason)
    except (RemoteDisconnected,
            requests.exceptions.MissingSchema,
            requests.exceptions.InvalidURL) as err:
        raise CliException(err)
    except requests.exceptions.ConnectionError:
        raise CliException(
            'Unable to connect to "{}": make sure URL is correct'.format(
                self._base_url))
Submits the given request, and handles the errors appropriately. Args: url (str): the request to send. params (dict): params to be passed along to get/post data (bytes): the data to include in the request. headers (dict): the headers to include in the request. method (str): the method to use for the request, "POST" or "GET". Returns: tuple of (int, str): The response status code and the json parsed body, or the error message. Raises: `CliException`: If any issues occur with the URL.
codesearchnet
def getprops(self, prop_names):
    """Get multiple properties of the device.

    This is a convenience wrapper for `adb shell getprop`. Use this to
    reduce the number of adb calls when getting multiple properties.

    Args:
        prop_names: list of strings, the names of the properties to get.

    Returns:
        A dict containing name-value pairs of the properties requested, if
        they exist.
    """
    results = {}
    for attempt in range(DEFAULT_GETPROPS_ATTEMPTS):
        raw_output = self.shell(['getprop'], timeout=DEFAULT_GETPROP_TIMEOUT_SEC)
        parsed = self._parse_getprop_output(raw_output)
        if parsed:
            results = {name: parsed[name]
                       for name in prop_names if name in parsed}
            break
        # Empty output: retry after a short pause, except on the final attempt.
        if attempt < DEFAULT_GETPROPS_ATTEMPTS - 1:
            time.sleep(DEFAULT_GETPROPS_RETRY_SLEEP_SEC)
    return results
Get multiple properties of the device. This is a convenience wrapper for `adb shell getprop`. Use this to reduce the number of adb calls when getting multiple properties. Args: prop_names: list of strings, the names of the properties to get. Returns: A dict containing name-value pairs of the properties requested, if they exist.
github-repos
def set_child_node(self, name, node):
    """Add one child node to this node.

    Args:
        name (str): Name of the child.
        node (TreeMapNode): Node to add.

    Warning:
        No test is done to see whether or not a node was already attached
        with that name. If this is the case, the new node takes the place of
        the old one that is now unreachable. See :meth:`set_unique_child_node`.
    """
    assert isinstance(node, TreeMapNode)
    self._nodes[name] = node
    # Keep the back-pointer consistent with the new parent.
    node.set_parent(self)
Add one child node to this node. Args: name (str): Name of the child. node (TreeMapNode): Node to add. Warning: No test is done to see whether or not a node was already attached with that name. If this is the case, the new node takes the place of the old one that is now unreachable. See :meth:`set_unique_child_node`.
juraj-google-style
def _PrintSessionsOverview(self, storage_reader):
    """Prints a sessions overview.

    Args:
        storage_reader (StorageReader): storage reader.
    """
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, title='Sessions')
    for session in storage_reader.GetSessions():
        start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)
        # Render the hex identifier as a canonical UUID string.
        identifier = '{0!s}'.format(uuid.UUID(hex=session.identifier))
        table_view.AddRow([identifier, start_time])
    table_view.Write(self._output_writer)
Prints a sessions overview. Args: storage_reader (StorageReader): storage reader.
juraj-google-style
def getJsonFromApi(view, request):
    """Return json from querying Web Api.

    Args:
        view: django view function.
        request: http request object got from django.

    Returns:
        json format dictionary
    """
    response = view(request)
    return json.loads(response.content.decode('utf-8'))
Return json from querying Web Api Args: view: django view function. request: http request object got from django. Returns: json format dictionary
juraj-google-style
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
    """Calculate cross entropy loss while ignoring padding.

    Args:
        logits: Tensor of size [batch_size, length_logits, vocab_size]
        labels: Tensor of size [batch_size, length_labels]
        smoothing: Label smoothing constant, used to determine the on and off
            values
        vocab_size: int size of the vocabulary

    Returns:
        Returns a float32 tensor with shape
        [batch_size, max(length_logits, length_labels)]
    """
    with tf.name_scope("loss", [logits, labels]):
        logits, labels = _pad_tensors_to_same_length(logits, labels)
        with tf.name_scope("smoothing_cross_entropy", [logits, labels]):
            # Distribute `smoothing` probability mass uniformly over the
            # non-target classes.
            confidence = 1.0 - smoothing
            low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
            soft_targets = tf.one_hot(
                tf.cast(labels, tf.int32),
                depth=vocab_size,
                on_value=confidence,
                off_value=low_confidence)
            xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=logits, labels=soft_targets)
            # Subtract the minimum achievable cross entropy (the entropy of
            # the smoothed target distribution) so a perfect prediction
            # scores zero; 1e-20 guards the log against smoothing == 0.
            normalizing_constant = -(
                confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
                low_confidence * tf.log(low_confidence + 1e-20))
            xentropy -= normalizing_constant
        # Padding positions (label id 0) are masked out of the loss.
        weights = tf.to_float(tf.not_equal(labels, 0))
        return xentropy * weights, weights
Calculate cross entropy loss while ignoring padding. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch_size, length_labels] smoothing: Label smoothing constant, used to determine the on and off values vocab_size: int size of the vocabulary Returns: Returns a float32 tensor with shape [batch_size, max(length_logits, length_labels)]
juraj-google-style
def _order_pases(self, passes):
    """Topologically sort optimization passes.

    This ensures that the resulting passes are run in order respecting
    before/after constraints.

    Args:
        passes (iterable): An iterable of pass names that should be included
            in the optimization passes run.
    """
    included = set(passes)
    dependencies = {}
    for name in included:
        _, run_before, run_after = self._known_passes[name]
        # Every included pass gets a (possibly empty) dependency set.
        deps = dependencies.setdefault(name, set())
        deps.update(run_after)
        # "before other" is recorded as other depending on this pass, but
        # only for passes actually selected for this run.
        for successor in run_before:
            if successor in included:
                dependencies.setdefault(successor, set()).add(name)
    return toposort_flatten(dependencies)
Topologically sort optimization passes. This ensures that the resulting passes are run in order respecting before/after constraints. Args: passes (iterable): An iterable of pass names that should be included in the optimization passes run.
juraj-google-style
def __security_definitions_descriptor(self, issuers):
    """Create a descriptor for the security definitions.

    Args:
        issuers: dict, mapping issuer names to Issuer tuples

    Returns:
        The dict representing the security definitions descriptor.
    """
    if not issuers:
        # NOTE(review): the URL literal was truncated in this copy of the
        # source; restored to the standard Google ID token issuer/JWKS pair
        # used by Cloud Endpoints — confirm against upstream.
        return {
            _DEFAULT_SECURITY_DEFINITION: {
                'authorizationUrl': '',
                'flow': 'implicit',
                'type': 'oauth2',
                'x-google-issuer': 'https://accounts.google.com',
                'x-google-jwks_uri': 'https://www.googleapis.com/oauth2/v3/certs',
            }
        }
    result = {}
    for issuer_key, issuer_value in issuers.items():
        result[issuer_key] = {
            'authorizationUrl': '',
            'flow': 'implicit',
            'type': 'oauth2',
            'x-google-issuer': issuer_value.issuer,
        }
        # jwks_uri is optional; omit the extension when it is not provided.
        if issuer_value.jwks_uri:
            result[issuer_key]['x-google-jwks_uri'] = issuer_value.jwks_uri
    return result
Create a descriptor for the security definitions. Args: issuers: dict, mapping issuer names to Issuer tuples Returns: The dict representing the security definitions descriptor.
codesearchnet
def tool_cancellation(self) -> str | None: if not self.part.function_response: return None if self.part.function_response.name != 'tool_cancellation': return None if not self.part.function_response.response: return None return self.part.function_response.response.get('function_call_id', None)
Returns an id of a function call to be cancelled. If the part is not a tool cancellation request, returns None. Returns: The id of the function call to be cancelled or None if this part is not a tool cancellation from the model.
github-repos
def decode_field(self, field, value):
    """Decode a JSON value to a python value.

    Args:
        field: A ProtoRPC field instance.
        value: A serialized JSON value.

    Returns:
        A Python value compatible with field.
    """
    if isinstance(field, messages.BytesField):
        try:
            # urlsafe base64 requires the input length to be a multiple of 4;
            # re-pad with '=' before decoding.
            padded_value = self.__pad_value(str(value), 4, '=')
            return base64.urlsafe_b64decode(padded_value)
        # Fix: the old `except (...), err` form is Python-2-only syntax;
        # `as err` works on both Python 2.6+ and Python 3.
        except (TypeError, UnicodeEncodeError) as err:
            raise messages.DecodeError('Base64 decoding error: %s' % err)
    return super(EndpointsProtoJson, self).decode_field(field, value)
Decode a JSON value to a python value. Args: field: A ProtoRPC field instance. value: A serialized JSON value. Returns: A Python value compatible with field.
juraj-google-style
def _CheckLogicalLines(self, llines, list_of_expected):
    """Check that the given LogicalLines match expectations.

    Args:
        llines: list of LogicalLine
        list_of_expected: list of (depth, values) pairs. Non-semantic tokens
            are filtered out from the expected values.
    """
    actual = [
        (lline.depth,
         [tok.value for tok in lline.tokens
          if tok.name not in pytree_utils.NONSEMANTIC_TOKENS])
        for lline in llines
    ]
    self.assertEqual(list_of_expected, actual)
Check that the given LogicalLines match expectations. Args: llines: list of LogicalLine list_of_expected: list of (depth, values) pairs. Non-semantic tokens are filtered out from the expected values.
github-repos
def as_text_with_reasoning(content: ProcessorContentTypes, *, strict: bool=False) -> tuple[str, str]:
    """Returns a tuple of the final and reasoning text representing content.

    The returned tuple contains two elements:
    - The first element (index 0) is the main text extracted from `content`.
    - The second element (index 1) is the reasoning/thought text associated
      with `content`.

    Args:
        content: The content to process. This can be of various types as
            defined by `ProcessorContentTypes`.
        strict: If True, unsupported content types will raise a ValueError.
            Otherwise, they will be ignored.

    Returns:
        A tuple containing two strings: (text, reasoning).
    """
    texts: list[str] = []
    thoughts: list[str] = []
    for mime, part in ProcessorContent(content).items():
        if is_text(mime):
            # Parts flagged as "thought" go to the reasoning bucket.
            bucket = thoughts if part.part.thought else texts
            bucket.append(part.text)
        elif strict:
            raise ValueError(f'Unsupported content type {mime}.')
    return (''.join(texts), ''.join(thoughts))
Returns a tuple of the final and reasoning text representing content. The returned tuple contains two elements: - The first element (index 0) is a string representing the main text extracted from the input `content`. - The second element (index 1) is a string representing the reasoning or thoughts associated with the input `content`. Args: content: The content to process. This can be of various types as defined by `ProcessorContentTypes`. strict: If True, unsupported content types will raise a ValueError. Otherwise, they will be ignored. Returns: A tuple containing two strings: (text, reasoning).
github-repos
def _operation_status_message(self):
    """Returns the most relevant status string and last updated date string.

    This string is meant for display only.

    Returns:
        A printable status string and date string.
    """
    metadata = self._op['metadata']
    if self._op['done']:
        timestamp = metadata['endTime']
        msg = self._op['error']['message'] if 'error' in self._op else 'Success'
    else:
        events = metadata.get('events')
        if events:
            # Still running: report the most recent event.
            msg = events[-1]['description']
            timestamp = events[-1]['startTime']
        else:
            msg = 'Pending'
            timestamp = metadata['createTime']
    return (msg, google_base.parse_rfc3339_utc_string(timestamp))
Returns the most relevant status string and last updated date string. This string is meant for display only. Returns: A printable status string and date string.
codesearchnet
def Main(url):
    """Entry Point.

    Args:
        url: PDF url.
    """
    web_scrape = WebScraping()
    # Enable PDF extraction for the scraper.
    web_scrape.readable_web_pdf = WebPDFReading()
    document = web_scrape.scrape(url)

    auto_abstractor = AutoAbstractor()
    auto_abstractor.tokenizable_doc = MeCabTokenizer()
    result_dict = auto_abstractor.summarize(document, TopNRankAbstractor())
    for sentence in result_dict["summarize_result"]:
        print(sentence)
Entry Point. Args: url: PDF url.
juraj-google-style
def get_all_plugin_assets(graph=None):
    """Retrieve all PluginAssets stored in the graph collection.

    Args:
        graph: Optionally, the graph to get assets from. If unspecified, the
            default graph is used.

    Returns:
        A list with all PluginAsset instances in the graph.

    Raises:
        ValueError: if we unexpectedly find a collection with the wrong
            number of PluginAssets.
    """
    if graph is None:
        graph = ops.get_default_graph()
    assets = []
    for name in graph.get_collection(_PLUGIN_ASSET_PREFIX):
        # Each named sub-collection must hold exactly one asset.
        collection = graph.get_collection(_PLUGIN_ASSET_PREFIX + name)
        if len(collection) != 1:
            raise ValueError('Collection for %s had %d items, expected 1'
                             % (name, len(collection)))
        assets.append(collection[0])
    return assets
Retrieve all PluginAssets stored in the graph collection. Args: graph: Optionally, the graph to get assets from. If unspecified, the default graph is used. Returns: A list with all PluginAsset instances in the graph. Raises: ValueError: if we unexpectedly find a collection with the wrong number of PluginAssets.
github-repos
def match_validator(expression):
    """Return a validator function that checks values against `expression`.

    Args:
        expression: if string then this will be converted to a regular
            expression using ``re.compile``. Can also be any object that has
            a ``match()`` method (like compiled re expressions or custom
            matching objects/classes).

    Returns:
        A one-argument validator callable that raises ``ValidationError``
        when the value does not match.
    """
    if isinstance(expression, str):
        compiled = re.compile(expression)
    elif hasattr(expression, 'match'):
        compiled = expression
    else:
        # Fix: grammatical error in the original message
        # ("nor a string nor...").
        raise TypeError(
            'Provided match is neither a string nor has a match method '
            '(like re expressions)')

    def validator(value):
        if not compiled.match(value):
            raise ValidationError('{} does not match pattern: {}'.format(
                value,
                compiled.pattern if hasattr(compiled, 'pattern') else compiled))

    return validator
Return validator function that will check if matches given expression. Args: match: if string then this will be converted to regular expression using ``re.compile``. Can be also any object that has ``match()`` method like already compiled regular regular expression or custom matching object/class.
codesearchnet
def _GetSpecificationStore(cls, format_category):
    """Retrieves the specification store for specified format category.

    Args:
        format_category (str): format category.

    Returns:
        tuple[FormatSpecificationStore, list[AnalyzerHelper]]: a format
            specification store and remaining analyzer helpers that do not
            have a format specification.
    """
    store = specification.FormatSpecificationStore()
    remainder_list = []
    for helper in cls._analyzer_helpers.values():
        if not helper.IsEnabled():
            continue
        if format_category not in helper.format_categories:
            continue
        format_specification = helper.GetFormatSpecification()
        if format_specification is None:
            # No signature available: the helper must be run unconditionally.
            remainder_list.append(helper)
        else:
            store.AddSpecification(format_specification)
    return store, remainder_list
Retrieves the specification store for specified format category. Args: format_category (str): format category. Returns: tuple[FormatSpecificationStore, list[AnalyzerHelper]]: a format specification store and remaining analyzer helpers that do not have a format specification.
juraj-google-style
def Relay(self, inventory):
    """Wrap the inventory in an InvPayload and send it to the remote node.

    Args:
        inventory: an inventory object exposing ``InventoryType`` and
            ``Hash``.

    Returns:
        bool: always True.
    """
    payload = InvPayload(type=inventory.InventoryType,
                         hashes=[inventory.Hash.ToBytes()])
    self.SendSerializedMessage(Message('inv', payload))
    return True
Wrap the inventory in an InvPayload object and send it over the wire to the remote node. Args: inventory: Returns: bool: Always True.
codesearchnet
def users_setPresence(self, *, presence: str, **kwargs) -> SlackResponse:
    """Manually sets user presence.

    Args:
        presence (str): Either 'auto' or 'away'.
    """
    kwargs['presence'] = presence
    return self.api_call('users.setPresence', json=kwargs)
Manually sets user presence. Args: presence (str): Either 'auto' or 'away'.
codesearchnet
def _sample(self, nmr_samples, thinning=1, return_output=True):
    """Sample the given number of samples with the given thinning.

    If ``return_output`` we will return the samples, log likelihoods and
    log priors. If not, we will advance the state of the sampler without
    storing the samples.

    Args:
        nmr_samples (int): the number of iterations to advance the sampler
        thinning (int): the thinning to apply
        return_output (boolean): if we should return the output

    Returns:
        None or tuple: if ``return_output`` is True three ndarrays as
            (samples, log_likelihoods, log_priors)
    """
    kernel_data = self._get_kernel_data(nmr_samples, thinning, return_output)
    compute = self._get_compute_func(nmr_samples, thinning, return_output)
    # Local reduction is only safe when every CL environment is a GPU.
    all_gpu = all(env.is_gpu for env in self._cl_runtime_info.cl_environments)
    compute.evaluate(kernel_data, self._nmr_problems,
                     use_local_reduction=all_gpu,
                     cl_runtime_info=self._cl_runtime_info)
    # Each returned sample consumed `thinning` raw iterations.
    self._sampling_index += nmr_samples * thinning
    if not return_output:
        return None
    return (kernel_data['samples'].get_data(),
            kernel_data['log_likelihoods'].get_data(),
            kernel_data['log_priors'].get_data())
Sample the given number of samples with the given thinning. If ``return_output`` we will return the samples, log likelihoods and log priors. If not, we will advance the state of the sampler without returning storing the samples. Args: nmr_samples (int): the number of iterations to advance the sampler thinning (int): the thinning to apply return_output (boolean): if we should return the output Returns: None or tuple: if ``return_output`` is True three ndarrays as (samples, log_likelihoods, log_priors)
codesearchnet
def fit1d(samples, e, remove_zeros=False, **kw):
    """Fits a 1D distribution with splines.

    Args:
        samples: Array of samples from a probability distribution.
        e: Array of edges that define the events in the probability
            distribution. For example, e[0] < x <= e[1] is the range of
            values associated with the first event.
        remove_zeros: If True, fit only on bins with non-zero mass; otherwise
            zero bins are replaced with machine epsilon.
        **kw: Arguments that are passed on to spline_base1d.

    Returns:
        (distribution, hist, knots): the model's probability estimate for the
        events defined by e, the normalized histogram, and the knot sequence
        used for the spline basis.
    """
    samples = samples[~np.isnan(samples)]
    length = len(e) - 1
    hist, _ = np.histogramdd(samples, (e,))
    hist = hist / sum(hist)
    basis, knots = spline_base1d(length, marginal=hist, **kw)
    non_zero = hist > 0
    model = linear_model.BayesianRidge()
    # Fix: the original text contained the invalid slice `hist[(:, np.newaxis)]`
    # (garbled from `hist[:, np.newaxis]`), which is a syntax error.
    if remove_zeros:
        model.fit(basis[non_zero, :], hist[:, np.newaxis][non_zero, :])
    else:
        # Avoid exact zeros so the regression does not degenerate.
        hist[~non_zero] = np.finfo(float).eps
        model.fit(basis, hist[:, np.newaxis])
    return model.predict(basis), hist, knots
Fits a 1D distribution with splines. Input: samples: Array Array of samples from a probability distribution e: Array Edges that define the events in the probability distribution. For example, e[0] < x <= e[1] is the range of values that are associated with the first event. **kw: Arguments that are passed on to spline_base1d. Returns: distribution: Array An array that gives an estimate of probability for events defined by e. knots: Array Sequence of knots that were used for the spline basis
codesearchnet
def _ConvertMessageDescriptor(self, desc_proto, package=None, file_desc=None, scope=None, syntax=None):
    """Adds the proto to the pool in the specified package.

    Args:
        desc_proto: The descriptor_pb2.DescriptorProto protobuf message.
        package: The package the proto should be located in.
        file_desc: The file containing this message.
        scope: Dict mapping short and full symbols to message and enum types.
        syntax: string indicating syntax of the file ("proto2" or "proto3")

    Returns:
        The added descriptor.
    """
    # Fully qualified name: <package>.<name> when a package is given.
    if package:
        desc_name = '.'.join((package, desc_proto.name))
    else:
        desc_name = desc_proto.name
    if (file_desc is None):
        file_name = None
    else:
        file_name = file_desc.name
    if (scope is None):
        scope = {}
    # Convert nested messages and enums first so they can be referenced
    # from this descriptor's fields.
    nested = [self._ConvertMessageDescriptor(nested, desc_name, file_desc, scope, syntax) for nested in desc_proto.nested_type]
    enums = [self._ConvertEnumDescriptor(enum, desc_name, file_desc, None, scope) for enum in desc_proto.enum_type]
    fields = [self._MakeFieldDescriptor(field, desc_name, index) for (index, field) in enumerate(desc_proto.field)]
    extensions = [self._MakeFieldDescriptor(extension, desc_name, index, is_extension=True) for (index, extension) in enumerate(desc_proto.extension)]
    oneofs = [descriptor.OneofDescriptor(desc.name, '.'.join((desc_name, desc.name)), index, None, [], desc.options) for (index, desc) in enumerate(desc_proto.oneof_decl)]
    extension_ranges = [(r.start, r.end) for r in desc_proto.extension_range]
    # A message is extendable iff it declares at least one extension range.
    if extension_ranges:
        is_extendable = True
    else:
        is_extendable = False
    desc = descriptor.Descriptor(name=desc_proto.name, full_name=desc_name, filename=file_name, containing_type=None, fields=fields, oneofs=oneofs, nested_types=nested, enum_types=enums, extensions=extensions, options=_OptionsOrNone(desc_proto), is_extendable=is_extendable, extension_ranges=extension_ranges, file=file_desc, serialized_start=None, serialized_end=None, syntax=syntax)
    # Back-link the converted children to this descriptor.
    for nested in desc.nested_types:
        nested.containing_type = desc
    for enum in desc.enum_types:
        enum.containing_type = desc
    # Wire oneof member fields to their containing oneofs (both directions).
    for (field_index, field_desc) in enumerate(desc_proto.field):
        if field_desc.HasField('oneof_index'):
            oneof_index = field_desc.oneof_index
            oneofs[oneof_index].fields.append(fields[field_index])
            fields[field_index].containing_oneof = oneofs[oneof_index]
    # Register the descriptor under its dotted name for later symbol lookups.
    scope[_PrefixWithDot(desc_name)] = desc
    self._descriptors[desc_name] = desc
    return desc
Adds the proto to the pool in the specified package. Args: desc_proto: The descriptor_pb2.DescriptorProto protobuf message. package: The package the proto should be located in. file_desc: The file containing this message. scope: Dict mapping short and full symbols to message and enum types. syntax: string indicating syntax of the file ("proto2" or "proto3") Returns: The added descriptor.
codesearchnet
def asdate(self):
    """Return this datetime_tz as a date object.

    Returns:
        This datetime_tz as a date object.
    """
    return datetime.date(year=self.year, month=self.month, day=self.day)
Return this datetime_tz as a date object. Returns: This datetime_tz as a date object.
codesearchnet
def insert_varargs_and_kwargs(self, args: Iterable[str]):
    """Insert varargs and kwargs from args into the signature.

    Args:
        args: An iterable of passed arg names.

    Returns:
        A copy of this signature with the passed varargs and kwargs inserted.
    """
    varargs = []
    kwargs = []
    for arg_name in args:
        if self.has_param(arg_name):
            continue
        # Anonymous parameter names are treated as varargs; everything else
        # as keyword arguments.
        target = varargs if pytd_utils.ANON_PARAM.match(arg_name) else kwargs
        target.append(arg_name)
    new_names = (self.param_names
                 + tuple(sorted(varargs))
                 + tuple(sorted(kwargs)))
    return self._replace(param_names=new_names)
Insert varargs and kwargs from args into the signature. Args: args: An iterable of passed arg names. Returns: A copy of this signature with the passed varargs and kwargs inserted.
github-repos
def valid(self, name):
    """Ensure a variable name is valid.

    Note: Assumes variable names are ASCII, which isn't necessarily true in
    Python 3.

    Args:
        name: A proposed variable name.

    Returns:
        A valid version of the name.
    """
    # Strip everything outside [0-9a-zA-Z_].
    cleaned = re.sub('[^0-9a-zA-Z_]', '', name)
    # Identifiers cannot start with a digit; prefix with an underscore.
    if re.match('[0-9]', cleaned):
        cleaned = '_' + cleaned
    return cleaned
Ensure a variable name is valid. Note: Assumes variable names are ASCII, which isn't necessarily true in Python 3. Args: name: A proposed variable name. Returns: A valid version of the name.
juraj-google-style
def get_filetypes(self):
    """Return list of filetypes in your data.

    Returns:
        List[str]: List of filetypes
    """
    if self.is_requestable():
        return self._get_stringlist_from_commastring('file_types')
    # Non-requestable datasets derive file types from their resources.
    return [resource.get_file_type() for resource in self.get_resources()]
Return list of filetypes in your data Returns: List[str]: List of filetypes
codesearchnet
def read_eof(self, echo=None):
    """Read until the channel is closed.

    Args:
        echo(bool): Whether to write the read data to stdout.

    Returns:
        bytes: The read data.
    """
    chunks = []
    while True:
        try:
            chunks.append(self.read(1, echo))
        except EOFError:
            # Channel closed: everything gathered so far is the result.
            return b''.join(chunks)
Read until the channel is closed. Args: echo(bool): Whether to write the read data to stdout. Returns: bytes: The read data.
juraj-google-style
def stat(filename, retry_params=None, _account_id=None):
    """Get GCSFileStat of a Google Cloud storage file.

    Args:
        filename: A Google Cloud Storage filename of form '/bucket/filename'.
        retry_params: An api_utils.RetryParams for this call to GCS. If None,
            the default one is used.
        _account_id: Internal-use only.

    Returns:
        a GCSFileStat object containing info about this file.

    Raises:
        errors.AuthorizationError: if authorization failed.
        errors.NotFoundError: if an object that's expected to exist doesn't.
    """
    common.validate_file_path(filename)
    api = storage_api._get_storage_api(retry_params=retry_params,
                                       account_id=_account_id)
    status, headers, content = api.head_object(
        api_utils._quote_filename(filename))
    # Anything other than 200 is turned into the appropriate error.
    errors.check_status(status, [200], filename, resp_headers=headers,
                        body=content)
    return common.GCSFileStat(
        filename=filename,
        st_size=common.get_stored_content_length(headers),
        st_ctime=common.http_time_to_posix(headers.get('last-modified')),
        etag=headers.get('etag'),
        content_type=headers.get('content-type'),
        metadata=common.get_metadata(headers))
Get GCSFileStat of a Google Cloud storage file. Args: filename: A Google Cloud Storage filename of form '/bucket/filename'. retry_params: An api_utils.RetryParams for this call to GCS. If None, the default one is used. _account_id: Internal-use only. Returns: a GCSFileStat object containing info about this file. Raises: errors.AuthorizationError: if authorization failed. errors.NotFoundError: if an object that's expected to exist doesn't.
codesearchnet
def load_cobra_model(self, model):
    """Load a COBRApy Model object into the GEM-PRO project.

    Args:
        model (Model): COBRApy ``Model`` object
    """
    self.model = ModelPro(model)
    for gene in self.model.genes:
        if self.genes_dir:
            gene.root_dir = self.genes_dir
        gene.protein.pdb_file_type = self.pdb_file_type
    self.genes = self.model.genes

    log.info('{}: loaded model'.format(model.id))
    log.info('{}: number of reactions'.format(len(self.model.reactions)))
    log.info('{}: number of reactions linked to a gene'.format(
        ssbio.core.modelpro.true_num_reactions(self.model)))
    log.info('{}: number of genes (excluding spontaneous)'.format(
        ssbio.core.modelpro.true_num_genes(
            self.model, custom_spont_id=self.custom_spont_id)))
    log.info('{}: number of metabolites'.format(len(self.model.metabolites)))
    log.warning('IMPORTANT: All Gene objects have been transformed into '
                'GenePro objects, and will be for any new ones')
Load a COBRApy Model object into the GEM-PRO project. Args: model (Model): COBRApy ``Model`` object
codesearchnet
def match_all_args(ctx: 'context.Context', node: cfg.CFGNode, func: '_function_base.NativeFunction|_interpreter_function.InterpreterFunction', args: 'Args') -> 'tuple[Args, Sequence[tuple[Exception, str, _base.BaseValue]]]':
    """Call match_args multiple times to find all type errors.

    Each failed match is recorded, the offending argument is neutralized
    (deleted or replaced with an unsolvable/Any value), and matching is
    retried until it succeeds — so one call surfaces every argument error.

    Args:
      ctx: The abstract context.
      node: The current CFG node.
      func: An abstract function
      args: An Args object to match against func

    Returns:
      A tuple of (new_args, errors) where
        new_args = args with all incorrectly typed values set to Any
        errors = a list of [(type mismatch error, arg name, value)]

    Raises:
      Reraises any error that is not InvalidParameters
    """
    positional_names = func.get_positional_names()
    needs_checking = True
    errors = []
    # Loop until a match attempt raises no FailedFunctionCall; each pass
    # fixes up exactly the arguments implicated by the raised error.
    while needs_checking:
        try:
            func.match_args(node, args)
        except error_types.FailedFunctionCall as e:
            if isinstance(e, error_types.WrongKeywordArgs):
                # Record only the first extra keyword, but drop all of them.
                errors.append((e, e.extra_keywords[0], None))
                for i in e.extra_keywords:
                    args = args.delete_namedarg(i)
            elif isinstance(e, error_types.DuplicateKeyword):
                errors.append((e, e.duplicate, None))
                args = args.delete_namedarg(e.duplicate)
            elif isinstance(e, error_types.MissingParameter):
                # Fill the missing parameter with Any so matching can proceed.
                errors.append((e, e.missing_parameter, None))
                args = args.replace_namedarg(e.missing_parameter, ctx.new_unsolvable(node))
            elif isinstance(e, error_types.WrongArgTypes):
                arg_name = e.bad_call.bad_param.name
                # Find the offending (name, value) pair in the passed args.
                for name, value in e.bad_call.passed_args:
                    if name != arg_name:
                        continue
                    errors.append((e, name, value))
                    # Replace positionally when the name maps to a positional
                    # slot, otherwise by keyword.
                    try:
                        pos = positional_names.index(name)
                    except ValueError:
                        args = args.replace_namedarg(name, ctx.new_unsolvable(node))
                    else:
                        args = args.replace_posarg(pos, ctx.new_unsolvable(node))
                    break
                else:
                    raise AssertionError(f'Mismatched parameter {arg_name} not found in passed_args') from e
            else:
                # Not a recoverable parameter error; propagate to the caller.
                raise
        else:
            needs_checking = False
    return (args, errors)
Call match_args multiple times to find all type errors. Args: ctx: The abstract context. node: The current CFG node. func: An abstract function args: An Args object to match against func Returns: A tuple of (new_args, errors) where new_args = args with all incorrectly typed values set to Any errors = a list of [(type mismatch error, arg name, value)] Reraises any error that is not InvalidParameters
github-repos
def wait(self, timeout=None):
    """Block until every closure has finished, errored, or timed out.

    If `mark_failed` was called before or during `wait`, the error from the
    first invocation of `mark_failed` is raised.

    Args:
      timeout: A float specifying a timeout for the wait in seconds.

    Returns:
      True unless the given timeout expired, in which case it returns False.
    """
    with self._put_wait_lock, self._queue_lock:
        logging.info('Waiting for all global closures to be finished.')
        while True:
            if self._error:
                break
            work_remaining = (not self._queue.empty()
                              or self._inflight_closure_count > 0)
            if not work_remaining:
                break
            # wait() returns False only when the timeout elapsed.
            if not self._stop_waiting_condition.wait(timeout=timeout):
                return False
        self._raise_if_error()
        return True
Wait for all closures to be finished before returning. If `mark_failed` was called before or during `wait`, the error from the first invocation of `mark_failed` will be raised. Args: timeout: A float specifying a timeout for the wait in seconds. Returns: True unless the given timeout expired, in which case it returns False.
github-repos
def __init__(self, target_pixels=None, **kwargs):
    """BuilderConfig for DiabeticRetinopathyDetection.

    Args:
      target_pixels: If given, rescale the images so that the total number
        of pixels is roughly this value.
      **kwargs: keyword arguments forwarded to the parent BuilderConfig.
    """
    super(DiabeticRetinopathyDetectionConfig, self).__init__(**kwargs)
    # Stored privately; presumably read back via a property — confirm.
    self._target_pixels = target_pixels
BuilderConfig for DiabeticRetinopathyDetection. Args: target_pixels: If given, rescale the images so that the total number of pixels is roughly this value. **kwargs: keyword arguments forward to super.
juraj-google-style
def _unpack_sequence(self, state, n_before, n_after=-1):
    """Pops a tuple (or other iterable) and pushes it onto the VM's stack.

    Supports destructuring assignment with potentially a single list
    variable that slurps up the remaining elements:
    1. a, b, c = ...  # UNPACK_SEQUENCE
    2. a, *b, c = ...  # UNPACK_EX

    Args:
      state: The current VM state
      n_before: Number of elements before the list (n_elements for case 1)
      n_after: Number of elements after the list (-1 for case 1)

    Returns:
      The new state.
    """
    assert n_after >= -1
    state, seq = state.pop()
    options = []
    nontuple_seq = self.ctx.program.NewVariable()
    has_slurp = n_after > -1
    # Number of non-starred assignment targets.
    count = n_before + max(n_after, 0)
    nondeterministic_iterable = False
    for b in abstract_utils.expand_type_parameter_instances(seq.bindings):
        # Sets iterate in no stable order; flagged below if unpacked into
        # more than one target.
        if b.data.full_name in ('builtins.set', 'builtins.frozenset'):
            nondeterministic_iterable = True
        tup = self._get_literal_sequence(b.data)
        if tup is not None:
            if has_slurp and len(tup) >= count:
                # Known-length sequence with a starred target: split it into
                # (n_before, slurp, n_after) pieces.
                options.append(self._restructure_tuple(state, tup, n_before, n_after))
                continue
            elif len(tup) == count:
                options.append(tup)
                continue
            else:
                # Literal sequence of the wrong length: report it.
                self.ctx.errorlog.bad_unpacking(self.frames, len(tup), count)
        # Unknown-length (or wrong-length) binding: fall back to the
        # iterator protocol below.
        if b.IsVisible(state.node):
            nontuple_seq.PasteBinding(b, state.node)
    if nontuple_seq.bindings:
        state, itr = self._get_iter(state, nontuple_seq)
        state, itr_result = self._call(state, itr, '__next__', ())
    elif not options:
        # Nothing matched at all; every target becomes Any.
        itr_result = self.ctx.new_unsolvable(state.node)
    else:
        itr_result = None
    if itr_result:
        # Each target gets the iterator's element type; the starred target
        # (if any) gets a list of that element type.
        option = [itr_result for _ in range(count)]
        if has_slurp:
            slurp = self.ctx.convert.build_list_of_type(state.node, itr_result)
            option = option[:n_before] + [slurp] + option[n_before:]
        options.append(option)
    values = tuple((self.ctx.convert.build_content(value, discard_concrete_values=False) for value in zip(*options)))
    if len(values) > 1 and nondeterministic_iterable:
        self.ctx.errorlog.nondeterministic_unpacking(self.frames)
    # Push in reverse so the first target ends up on top of the stack.
    for value in reversed(values):
        if not value.bindings:
            value = self.ctx.convert.empty.to_variable(state.node)
        state = state.push(value)
    return state
Pops a tuple (or other iterable) and pushes it onto the VM's stack. Supports destructuring assignment with potentially a single list variable that slurps up the remaining elements: 1. a, b, c = ... # UNPACK_SEQUENCE 2. a, *b, c = ... # UNPACK_EX Args: state: The current VM state n_before: Number of elements before the list (n_elements for case 1) n_after: Number of elements after the list (-1 for case 1) Returns: The new state.
github-repos
def cancel(self):
    """Cancel a running workflow.

    Args:
        None

    Returns:
        None

    Raises:
        WorkflowError: if the workflow has no id (i.e. was never launched).
    """
    if not self.id:
        raise WorkflowError('Workflow is not running. Cannot cancel.')
    # Batch workflows are cancelled through a dedicated endpoint.
    canceler = (self.workflow.batch_workflow_cancel if self.batch_values
                else self.workflow.cancel)
    canceler(self.id)
Cancel a running workflow. Args: None Returns: None
juraj-google-style
def _start_app_and_connect(self):
    """Starts snippet apk on the device and connects to it.

    After prechecks, this launches the snippet apk with an adb cmd in a
    standing subprocess, checks the cmd response from the apk for protocol
    version, then sets up the socket connection over adb port-forwarding.

    Raises:
      ProtocolVersionError: if protocol info or port info cannot be
        retrieved from the snippet apk.
    """
    self._check_app_installed()
    self.disable_hidden_api_blacklist()
    persists_shell_cmd = self._get_persist_command()
    self.log.info('Launching snippet apk %s with protocol %d.%d',
                  self.package, _PROTOCOL_MAJOR_VERSION, _PROTOCOL_MINOR_VERSION)
    cmd = _LAUNCH_CMD % (persists_shell_cmd, self.package)
    start_time = time.time()
    self._proc = self._do_start_app(cmd)
    # First protocol line: "SNIPPET START, PROTOCOL <major> <minor>".
    # Only major version 1 is accepted here.
    line = self._read_protocol_line()
    match = re.match('^SNIPPET START, PROTOCOL ([0-9]+) ([0-9]+)$', line)
    if not match or match.group(1) != '1':
        raise ProtocolVersionError(self._ad, line)
    # Second protocol line: "SNIPPET SERVING, PORT <port>" — the
    # device-side TCP port the snippet server listens on.
    line = self._read_protocol_line()
    match = re.match('^SNIPPET SERVING, PORT ([0-9]+)$', line)
    if not match:
        raise ProtocolVersionError(self._ad, line)
    self.device_port = int(match.group(1))
    # Forward a free host port to the device port so the RPC client can
    # connect over adb.
    self.host_port = utils.get_available_host_port()
    self._adb.forward(
        ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
    self.connect()
    self.log.debug('Snippet %s started after %.1fs on host port %s',
                   self.package, time.time() - start_time, self.host_port)
Starts snippet apk on the device and connects to it. After prechecks, this launches the snippet apk with an adb cmd in a standing subprocess, checks the cmd response from the apk for protocol version, then sets up the socket connection over adb port-forwarding. Raises: ProtocolVersionError: if protocol info or port info cannot be retrieved from the snippet apk.
juraj-google-style
def _contains(self, item):
    """Whether the given item is this node or is contained (recursively)
    in one of its modules or packages.

    Args:
        item (Package/Module): a package or module.

    Returns:
        bool: True if self is item or item is in self's packages/modules.
    """
    if self is item:
        return True
    return (any(item in module for module in self.modules)
            or any(item in package for package in self.packages))
Whether given item is contained inside the node modules/packages. Args: item (Package/Module): a package or module. Returns: bool: True if self is item or item in self's packages/modules.
juraj-google-style
def _get_sorted_inputs(filename, delimiter="\n"):
    """Read input records and return them sorted by decreasing token length.

    Sorting puts inputs of similar length in the same batch, which enables
    early stopping for short sequences; the longest sequences come first so
    an OOM shows up in the very first batch.

    Args:
        filename: path to file with inputs, 1 per line.
        delimiter: str, delimits records in the file.

    Returns:
        (sorted_inputs, sorted_keys): the sorted record list and a dict
        mapping each original record index to its position in sorted_inputs.
    """
    tf.logging.info("Getting sorted inputs")
    with tf.gfile.Open(filename) as f:
        records = f.read().split(delimiter)
    inputs = [record.strip() for record in records]
    # Drop a trailing empty record produced by a final delimiter.
    if not inputs[-1]:
        inputs.pop()
    # Stable reverse sort by token count keeps ties in original order,
    # matching a stable ascending sort on the negated length.
    order = sorted(range(len(inputs)),
                   key=lambda i: len(inputs[i].split()),
                   reverse=True)
    sorted_inputs = [inputs[i] for i in order]
    sorted_keys = {original: position for position, original in enumerate(order)}
    return sorted_inputs, sorted_keys
Returning inputs sorted according to decreasing length. This causes inputs of similar lengths to be processed in the same batch, facilitating early stopping for short sequences. Longer sequences are sorted first so that if you're going to get OOMs, you'll see it in the first batch. Args: filename: path to file with inputs, 1 per line. delimiter: str, delimits records in the file. Returns: a sorted list of inputs
juraj-google-style
def mean_absolute_error(y_true, y_pred):
    """Computes the mean absolute error between labels and predictions.

    `loss = mean(abs(y_true - y_pred), axis=-1)`

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

    Returns:
      Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
    """
    predictions = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred)
    # Align label dtype with predictions before subtracting.
    labels = math_ops.cast(y_true, predictions.dtype)
    absolute_errors = math_ops.abs(predictions - labels)
    return backend.mean(absolute_errors, axis=-1)
Computes the mean absolute error between labels and predictions. `loss = mean(abs(y_true - y_pred), axis=-1)` Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`. y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`. Returns: Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
github-repos
def launch_batch_workflow(self, batch_workflow):
    """Launch a GBDX batch workflow.

    Args:
        batch_workflow (dict): Dictionary specifying batch workflow tasks.

    Returns:
        str: Batch Workflow id, or None if the launch request failed.
    """
    url = '%(base_url)s/batch_workflows' % {'base_url': self.base_url}
    try:
        response = self.gbdx_connection.post(url, json=batch_workflow)
        return response.json()['batch_workflow_id']
    except TypeError as e:
        # Best-effort: log the failure and implicitly return None.
        self.logger.debug('Batch Workflow not launched, reason: {0}'.format(e))
Launches GBDX batch workflow. Args: batch_workflow (dict): Dictionary specifying batch workflow tasks. Returns: Batch Workflow id (str).
juraj-google-style
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-08):
    """Tests if a particular fractional coord is within a fractional coord_list.

    Args:
        fcoord_list: List of fractional coords to test
        fcoord: A specific fractional coord to test.
        atol: Absolute tolerance. Defaults to 1e-8.

    Returns:
        True if coord is in the coord list.
    """
    matches = find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)
    return len(matches) > 0
Tests if a particular fractional coord is within a fractional coord_list. Args: fcoord_list: List of fractional coords to test fcoord: A specific fractional coord to test. atol: Absolute tolerance. Defaults to 1e-8. Returns: True if coord is in the coord list.
codesearchnet
def get_models(self, uniprot_acc):
    """Return all available SWISS-MODEL models for a UniProt accession.

    Args:
        uniprot_acc (str): UniProt ACC/ID

    Returns:
        dict: all available models for this UniProt entry, or None when
        the accession has no models (an error is logged in that case).
    """
    try:
        return self.all_models[uniprot_acc]
    except KeyError:
        log.error('{}: no SWISS-MODELs available'.format(uniprot_acc))
        return None
Return all available models for a UniProt accession number. Args: uniprot_acc (str): UniProt ACC/ID Returns: dict: All available models in SWISS-MODEL for this UniProt entry
codesearchnet
def unflat_take(items_list, unflat_index_list):
    r"""Return a nested subset of ``items_list``.

    Mirrors the nesting of ``unflat_index_list``: nested lists of indices
    are resolved recursively, while leaf index entries are resolved with
    ``take``.

    Args:
        items_list (list): flat list of items.
        unflat_index_list (list): nested list of indices.

    Example:
        unflat_take([1, 2, 3, 4, 5], [[0, 1], [2, 3], [0, 4]])
        -> [[1, 2], [3, 4], [1, 5]]
    """
    result = []
    for indices in unflat_index_list:
        if isinstance(indices, list):
            result.append(unflat_take(items_list, indices))
        else:
            result.append(take(items_list, indices))
    return result
r""" Returns nested subset of items_list Args: items_list (list): unflat_index_list (list): nested list of indices CommandLine: python -m utool.util_list --exec-unflat_take SeeAlso: ut.take Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> items_list = [1, 2, 3, 4, 5] >>> unflat_index_list = [[0, 1], [2, 3], [0, 4]] >>> result = unflat_take(items_list, unflat_index_list) >>> print(result) [[1, 2], [3, 4], [1, 5]]
codesearchnet