code
stringlengths
20
4.93k
docstring
stringlengths
33
1.27k
source
stringclasses
3 values
def read(self, bands=None, **kwargs):
    """Compute the dask-backed image into an in-memory numpy array.

    Args:
        bands (list, optional): band indices to read; bands are returned
            in the order specified. When ``None`` the whole array is read.
        **kwargs: accepted for interface compatibility; unused here.

    Returns:
        ndarray: a numpy array of image data.
    """
    selection = self if bands is None else self[bands, ...]
    # Force computation on the threaded scheduler.
    return selection.compute(scheduler=threaded_get)
Reads data from a dask array and returns the computed ndarray matching the given bands Args: bands (list): band indices to read from the image. Returns bands in the order specified in the list of bands. Returns: ndarray: a numpy array of image data
juraj-google-style
def get_ISBNs(self):
    """Get list of VALID ISBN strings.

    Cleans each ISBN from field ``020a`` and filters out those reported by
    ``get_invalid_ISBNs()``. If no valid ISBN remains, falls back to field
    ``901i``.

    Returns:
        list: List with *valid*, cleaned ISBN strings.
    """
    invalid_isbns = set(self.get_invalid_ISBNs())
    # Clean each candidate exactly once (the original called _clean_isbn
    # twice per ISBN: once for the filter test and once for the result).
    cleaned = (self._clean_isbn(isbn) for isbn in self['020a'])
    valid_isbns = [isbn for isbn in cleaned if isbn not in invalid_isbns]
    if valid_isbns:
        return valid_isbns
    return [self._clean_isbn(isbn) for isbn in self['901i']]
Get list of VALID ISBN. Returns: list: List with *valid* ISBN strings.
codesearchnet
def is_flat(outputs):
    """Checks if `outputs` is a flat structure.

    Flat values include None, a single object, and a sequence whose
    elements are not themselves sequences, mappings, or attrs classes.
    Mappings and attrs instances are never flat.

    Args:
        outputs: Output from `computation` inside `xla.compile`.

    Returns:
        bool: whether `outputs` is flat.
    """
    def _is_structure(value):
        return (isinstance(value, (collections_abc.Sequence,
                                   collections_abc.Mapping))
                or hasattr(value.__class__, '__attrs_attrs__'))

    if isinstance(outputs, collections_abc.Sequence):
        if any(_is_structure(element) for element in outputs):
            return False
    return not (isinstance(outputs, collections_abc.Mapping)
                or hasattr(outputs.__class__, '__attrs_attrs__'))
Checks if outputs is a flat structure. Following structures and values are considered flat: 1) None 2) A single object 3) A list or tuple of Tensors/Operations The only structures that this function understands are sequences, dictionaries and types defined using the attrs library. E.g. this means that if outputs contains a single user-defined Object, it is considered to be flat. Errors are raised later on if that Object cannot be converted to a Tensor. Args: outputs: Output from `computation` inside `xla.compile`. Returns: A boolean indicates whether outputs is flat.
github-repos
def cracked(self):
    """Attempts to crack the ciphertext using English letter frequency.

    Decrypts with every candidate offset, scores each attempt by entropy,
    and keeps the lowest-scoring plaintext; updates self.offset and
    self.message accordingly.

    Returns:
        str: the most likely plaintext message.
    """
    logging.info('Cracking message: {0}'.format(self.message))
    original_message = self.message
    entropy_values = {}
    attempt_cache = {}
    for candidate in range(25):
        # Restore the original ciphertext before each attempt.
        self.message = original_message
        self.offset = -candidate
        logging.debug('Attempting crack with offset: {0}'.format(self.offset))
        attempt = self.cipher()
        logging.debug('Attempting plaintext: {0}'.format(attempt))
        entropy_values[candidate] = self.calculate_entropy(attempt)
        attempt_cache[candidate] = attempt
    best = min(entropy_values, key=entropy_values.get)
    self.offset = -best
    cracked_text = attempt_cache[best]
    self.message = cracked_text
    logging.debug('Entropy scores: {0}'.format(entropy_values))
    logging.debug('Lowest entropy score: {0}'.format(str(entropy_values[best])))
    logging.debug('Most likely offset: {0}'.format(self.offset))
    logging.debug('Most likely message: {0}'.format(cracked_text))
    return cracked_text
Attempts to crack ciphertext using frequency of letters in English. Returns: String of most likely message.
codesearchnet
def from_task(cls, task):
    """Create a new target representing a task and its parameters.

    Args:
        task: Task instance to create the target for; expected to provide
            ``get_name()`` and ``get_param_string()``.

    Returns:
        A new target instance built from the task's name and parameters.
    """
    return cls(name=task.get_name(), params=task.get_param_string())
Create a new target representing a task and its parameters Args: task: Task instance to create target for; the task class has to inherit from :class:`ozelot.tasks.TaskBase`. Returns: ozelot.tasks.ORMTarget: a new target instance
juraj-google-style
def parse_case(config):
    """Parse case information from a config dict.

    Args:
        config (dict): case config with detailed information.

    Returns:
        dict: parsed case data.

    Raises:
        ConfigError: if 'owner' or 'family' is missing.
        ValueError: if a 'madeline' path is given but does not exist.
    """
    if 'owner' not in config:
        raise ConfigError('A case has to have a owner')
    if 'family' not in config:
        raise ConfigError("A case has to have a 'family'")

    individuals = parse_individuals(config['samples'])

    # All supported VCF categories; missing keys simply map to None.
    vcf_keys = ('vcf_snv', 'vcf_sv', 'vcf_str', 'vcf_cancer',
                'vcf_snv_research', 'vcf_sv_research', 'vcf_cancer_research')
    vcf_files = {key: config.get(key) for key in vcf_keys}

    case_data = {
        'owner': config['owner'],
        'collaborators': [config['owner']],
        'case_id': config['family'],
        'display_name': config.get('family_name', config['family']),
        'genome_build': config.get('human_genome_build'),
        'rank_model_version': config.get('rank_model_version'),
        'rank_score_threshold': config.get('rank_score_threshold', 0),
        'analysis_date': config['analysis_date'],
        'individuals': individuals,
        'vcf_files': vcf_files,
        'default_panels': config.get('default_gene_panels', []),
        'gene_panels': config.get('gene_panels', []),
        'assignee': config.get('assignee'),
        'peddy_ped': config.get('peddy_ped'),
        'peddy_sex': config.get('peddy_sex'),
        'peddy_check': config.get('peddy_check'),
        'delivery_report': config.get('delivery_report'),
        'multiqc': config.get('multiqc'),
        'track': config.get('track', 'rare'),
    }

    if 'madeline' in config:
        mad_path = Path(config['madeline'])
        if not mad_path.exists():
            raise ValueError('madeline path not found: {}'.format(mad_path))
        with mad_path.open('r') as in_handle:
            case_data['madeline_info'] = in_handle.read()

    # A cancer VCF forces the cancer track regardless of the configured one.
    if vcf_files['vcf_cancer'] or vcf_files['vcf_cancer_research']:
        case_data['track'] = 'cancer'

    return case_data
Parse case information from config or PED files. Args: config (dict): case config with detailed information Returns: dict: parsed case data
codesearchnet
def GetApprovalForObject(object_urn, token=None, username=''):
    """Looks for approvals for an object and returns an access token.

    Args:
        object_urn: URN of the object we want access to.
        token: The token to use to look up the ACLs.
        username: The user to get the approval for; when empty, taken from
            the token.

    Returns:
        An access_control.ACLToken granting access on success.

    Raises:
        access_control.UnauthorizedAccess: if no valid approval is found.
    """
    if token is None:
        raise access_control.UnauthorizedAccess('No token given, cannot authenticate.')
    if not username:
        username = token.username

    approvals_root_urn = aff4.ROOT_URN.Add('ACL').Add(object_urn.Path()).Add(username)
    approval_urns = list(aff4.FACTORY.ListChildren(approvals_root_urn))
    if not approval_urns:
        raise access_control.UnauthorizedAccess(
            ('No approval found for user %s' % utils.SmartStr(username)),
            subject=object_urn)

    approvals = aff4.FACTORY.MultiOpen(
        approval_urns, mode='r', aff4_type=Approval, age=aff4.ALL_TIMES,
        token=token)
    last_error = None
    for approval in approvals:
        try:
            test_token = access_control.ACLToken(
                username=username, reason=approval.Get(approval.Schema.REASON))
            approval.CheckAccess(test_token)
        except access_control.UnauthorizedAccess as e:
            # Remember the most recent failure so it can be surfaced.
            last_error = e
        else:
            return test_token

    if last_error:
        raise access_control.UnauthorizedAccess(last_error, subject=object_urn)
    raise access_control.UnauthorizedAccess(
        ("Couldn't open any of %d approvals for user %s"
         % (len(approval_urns), utils.SmartStr(username))),
        subject=object_urn)
Looks for approvals for an object and returns available valid tokens. Args: object_urn: Urn of the object we want access to. token: The token to use to lookup the ACLs. username: The user to get the approval for, if "" we get it from the token. Returns: A token for access to the object on success, otherwise raises. Raises: UnauthorizedAccess: If there are no valid approvals available.
codesearchnet
def get_bytes(obj):
    """Infers the input type and returns the leading signature bytes.

    Args:
        obj: path to a readable, a file object, bytes, bytearray or
            memoryview.

    Returns:
        The first signature bytes of the content as a bytearray-like slice.

    Raises:
        TypeError: if obj is not a supported type.
    """
    # File-like objects are read first; everything else passes through.
    try:
        obj = obj.read(_NUM_SIGNATURE_BYTES)
    except AttributeError:
        pass
    kind = type(obj)
    if kind in (bytearray, bytes):
        return signature(obj)
    if kind is str:
        # A string is treated as a filesystem path.
        return get_signature_bytes(obj)
    if kind is memoryview:
        return signature(obj).tolist()
    raise TypeError('Unsupported type as file input: %s' % kind)
Infers the input type and reads the first 262 bytes, returning a sliced bytearray. Args: obj: path to readable, file, bytes or bytearray. Returns: First 262 bytes of the file content as bytearray type. Raises: TypeError: if obj is not a supported type.
juraj-google-style
def image(title, desc, image_name, group=None, height=None):
    """Builds an image element dictionary.

    Args:
        title: The title to display.
        desc: A description of the image or plot.
        image_name: The filename of the image.
        group: (optional) Title of the lightbox group to join.
        height: (optional) Height of the image thumbnail to draw.

    Returns:
        dict: metadata specifying the element renders as an image.
    """
    element = {
        'Type': 'Image',
        'Title': title,
        'Description': desc,
        'Plot File': image_name,
    }
    # Optional keys are only added when truthy (matching original behavior).
    for key, value in (('Group', group), ('Height', height)):
        if value:
            element[key] = value
    return element
Builds an image element. Image elements are primarily created and then wrapped into an image gallery element. This is not required behavior, however, and its independent usage should be allowed depending on the behavior required. The Javascript will search for the `image_name` in the component's `imgs` directory when rendering. For example, all verification images are output to `vv_xxxx-xx-xx/verification/imgs` and then the verification case's output page will search for `image_name` within that directory. Args: title: The title to display desc: A description of the image or plot image_name: The filename of the image group: (optional) Title of lightbox group to join height: (optional) Height of image thumbnail to draw Returns: A dictionary with the metadata specifying that it is to be rendered as an image element
codesearchnet
def _set_optimizer(self, optimizer):
    """Sets self.optimizer.

    Sets self.optimizer to `optimizer`, potentially wrapping it with a
    LossScaleOptimizer when the dtype policy requires loss scaling.

    Args:
        optimizer: The optimizer(s) to assign to self.optimizer.

    Raises:
        ValueError: if a loss-scale policy is combined with multiple
            optimizers, or with an optimizer that is not an OptimizerV2.
    """
    if isinstance(optimizer, (list, tuple)):
        self.optimizer = [optimizers.get(opt) for opt in optimizer]
    else:
        self.optimizer = optimizers.get(optimizer)
    if isinstance(self._dtype_policy, policy.PolicyV1):
        loss_scale = self._dtype_policy.loss_scale
    elif self._dtype_policy.name == 'mixed_float16':
        loss_scale = 'dynamic'
    else:
        loss_scale = None
    if loss_scale is not None and (not isinstance(self.optimizer, loss_scale_optimizer.LossScaleOptimizer)):
        if isinstance(self.optimizer, list):
            # BUG FIX: the '%' format arguments must be a single tuple; the
            # original applied '%' to self._dtype_policy alone and then built
            # a 2-tuple, raising TypeError instead of this ValueError.
            raise ValueError('When a dtype policy with a loss scale is used, you can only pass a single optimizer. Using policy %s and got optimizers: %s' % (self._dtype_policy, self.optimizer))
        if not isinstance(self.optimizer, optimizer_v2.OptimizerV2):
            # Also fixes the 'dype' typo and grammar in the original message.
            raise ValueError('"optimizer" must be an instance of tf.keras.optimizers.Optimizer when a dtype policy with a loss scale is used, but got: %s. Using policy: %s' % (self.optimizer, self._dtype_policy))
        if loss_scale == 'dynamic':
            self.optimizer = loss_scale_optimizer.LossScaleOptimizer(self.optimizer)
        else:
            self.optimizer = loss_scale_optimizer.LossScaleOptimizerV1(self.optimizer, loss_scale)
Sets self.optimizer. Sets self.optimizer to `optimizer`, potentially wrapping it with a LossScaleOptimizer. Args: optimizer: The optimizer(s) to assign to self.optimizer.
github-repos
def impute(X, value=None, train=None, dropna=True, inplace=True):
    """Performs mean imputation on a pandas DataFrame.

    Args:
        X (pandas.DataFrame): the frame to impute.
        value: instead of computing the mean, use this as the value
            argument to fillna.
        train: an optional row mask with which to compute the mean.
        dropna (bool): whether to drop all-null columns.
        inplace (bool): whether to perform the imputation in place.

    Returns:
        pandas.DataFrame: the imputed DataFrame.

    Raises:
        ValueError: if both `train` and `value` are passed.
    """
    if value is not None and train is not None:
        raise ValueError("Cannot pass both train and value arguments")
    if value is None:
        fit_frame = X if train is None else X[train]
        value = fit_frame.mean()
    if dropna:
        null_columns = value.index[value.isnull()]
        if len(null_columns) > 0:
            logging.info('Dropping null columns: \n\t%s' % null_columns)
            if inplace:
                X.drop(null_columns, axis=1, inplace=True)
            else:
                X = X.drop(null_columns, axis=1)
    # fillna with inplace=True returns None, so keep the reference to X.
    filled = X.fillna(value.dropna(), inplace=inplace)
    return X if inplace else filled
Performs mean imputation on a pandas dataframe. Args: train: an optional training mask with which to compute the mean value: instead of computing the mean, use this as the value argument to fillna dropna: whether to drop all null columns inplace: whether to perform the imputation inplace Returns: the imputed DataFrame
juraj-google-style
def OpenFile(self, filepath):
    """open()-replacement that automatically handles zip archives.

    Assumes there is at most one '.zip' (or '.par') segment in the path.

    Args:
        filepath: the path to the file to open; may address a member inside
            a .zip/.par archive, e.g. 'bundle.zip/inner.txt'.

    Returns:
        An open file-like object (binary for archive members).
    """
    archive_type = None
    if '.zip/' in filepath:
        archive_type = '.zip'
    if '.par/' in filepath:
        # .par files are zip-format archives as well.
        archive_type = '.par'
    if archive_type is not None:
        path, member = filepath.split(archive_type)
        path += archive_type
        zip_file = zipfile.ZipFile(path)
        return zip_file.open(member.strip('/'))
    return open(filepath)
open()-replacement that automatically handles zip files. This assumes there is at most one .zip in the file path. Args: filepath: the path to the file to open. Returns: An open file-like object.
juraj-google-style
def FormatArtifacts(self, artifacts):
    """Formats artifacts as a multi-document YAML string.

    Args:
        artifacts (list[ArtifactDefinition]): artifact definitions.

    Returns:
        str: formatted string of artifact definitions.
    """
    definitions = [definition.AsDict() for definition in artifacts]
    return yaml.safe_dump_all(definitions)
Formats artifacts to desired output format. Args: artifacts (list[ArtifactDefinition]): artifact definitions. Returns: str: formatted string of artifact definition.
juraj-google-style
def runtime_deps(self):
    """Returns runtime dependencies of the package from setup.py.

    Dependencies are in RPM SPECFILE format - see dependency_to_rpm() for
    details - with names transformed according to the current distro.

    Returns:
        list: sorted runtime dependencies of the package.
    """
    # BUG FIX: copy first — the original appended 'setuptools' directly to
    # self.metadata['install_requires'], mutating shared metadata state as
    # a side effect of calling this accessor.
    install_requires = list(self.metadata['install_requires'])
    if self.metadata['entry_points'] and ('setuptools' not in install_requires):
        # entry_points need setuptools at runtime.
        install_requires.append('setuptools')
    return sorted(self.name_convert_deps_list(
        deps_from_pyp_format(install_requires, runtime=True)))
Returns list of runtime dependencies of the package specified in setup.py. Dependencies are in RPM SPECFILE format - see dependency_to_rpm() for details, but names are already transformed according to current distro. Returns: list of runtime dependencies of the package
codesearchnet
def _flush_range(self, buffer, start, end):
    """Flush a buffer to a byte range of the file.

    Meant to be used asynchronously, to provide parallel flushing of file
    parts when applicable. Blocks until preceding parts have extended the
    file up to `start`.

    Args:
        buffer (memoryview): Buffer content.
        start (int): Start of buffer position to flush.
        end (int): End of buffer position to flush.
    """
    with self._size_lock:
        if (not self._size_synched):
            # First flush only: synchronize the known object size once.
            self._size_synched = True
            try:
                self._size = self.raw._size
            except (ObjectNotFoundError, UnsupportedOperation):
                # Object does not exist yet, or size is unsupported.
                self._size = 0
    # Busy-wait until earlier parts are flushed; self._size is presumably
    # advanced by other workers' raw flushes — NOTE(review): confirm.
    while (start > self._size):
        sleep(self._FLUSH_WAIT)
    self._raw_flush(buffer, start, end)
Flush a buffer to a range of the file. Meant to be used asynchronously, used to provides parallel flushing of file parts when applicable. Args: buffer (memoryview): Buffer content. start (int): Start of buffer position to flush. end (int): End of buffer position to flush.
codesearchnet
def _apply_sparse(self, grad, var):
    """Add ops to apply sparse gradients to `var`.

    Args:
        grad: `IndexedSlices`, with no repeated indices.
        var: A `Variable` object.

    Raises:
        NotImplementedError: always; subclasses supporting sparse updates
            must override this method (or `_apply_sparse_duplicate_indices`).
    """
    raise NotImplementedError()
Add ops to apply sparse gradients to `var`. The IndexedSlices object passed to `grad` in this function is by default pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate indices (see its docstring for details). Optimizers which can tolerate or have correct special cases for duplicate sparse indices may override `_apply_sparse_duplicate_indices` instead of this function, avoiding that overhead. Args: grad: `IndexedSlices`, with no repeated indices. var: A `Variable` object. Returns: An `Operation`.
github-repos
def delete(self, uri):
    """Deletes a Fedora Object in the repository.

    Args:
        uri (str): URI of the Fedora Object.

    Returns:
        bool: True on success, False if the server answered with an
        HTTP error.
    """
    try:
        self.connect(uri, method='DELETE')
    except urllib.error.HTTPError:
        return False
    return True
Method deletes a Fedora Object in the repository Args: uri(str): URI of Fedora Object
juraj-google-style
def render(self, mode='human'):
    """Render the environment.

    Args:
        mode (str): the mode to render with:
            - human: render to the current display
            - rgb_array: return a numpy.ndarray with shape (x, y, 3) of
              RGB values for an x-by-y pixel image

    Returns:
        a numpy array if mode is 'rgb_array', None otherwise.

    Raises:
        NotImplementedError: for any unsupported mode.
    """
    if mode == 'rgb_array':
        return self.screen
    if mode == 'human':
        if self.viewer is None:
            # Deferred import keeps headless use of the env cheap.
            from ._image_viewer import ImageViewer
            if self.spec is None:
                caption = self._rom_path.split('/')[-1]
            else:
                caption = self.spec.id
            self.viewer = ImageViewer(
                caption=caption,
                height=SCREEN_HEIGHT,
                width=SCREEN_WIDTH,
            )
        self.viewer.show(self.screen)
        return None
    render_modes = [repr(x) for x in self.metadata['render.modes']]
    msg = 'valid render modes are: {}'.format(', '.join(render_modes))
    raise NotImplementedError(msg)
Render the environment. Args: mode (str): the mode to render with: - human: render to the current display - rgb_array: Return a numpy.ndarray with shape (x, y, 3), representing RGB values for an x-by-y pixel image Returns: a numpy array if mode is 'rgb_array', None otherwise
juraj-google-style
def _build_mask_ds(mask, mask_offset):
    """Build the mask dataset indicating which elements to skip.

    Args:
        mask: `tf.Tensor`, binary mask applied cyclically to all following
            elements.
        mask_offset: `tf.Tensor` integer, how far the mask is shifted for
            the first element.

    Returns:
        `tf.data.Dataset` returning False for examples to skip and True
        for examples to keep.
    """
    # Repeat the mask forever, then drop the first `mask_offset` entries.
    return tf.data.Dataset.from_tensor_slices(mask).repeat().skip(mask_offset)
Build the mask dataset to indicate which element to skip. Args: mask: `tf.Tensor`, binary mask to apply to all following elements. This mask should have a length 100. mask_offset: `tf.Tensor`, Integer specifying from how much the mask should be shifted for the first element. Returns: mask_ds: `tf.data.Dataset`, a dataset returning False for examples to skip and True for examples to keep.
codesearchnet
def receive_datagram(self, data, address):
    """Handles received UDP data and passes it to the app for processing.

    Args:
        data (str): The raw serialized packet data received.
        address (tuple): The (host, port) origin of the received packet.

    Returns:
        None on success; False when no app is registered or handling the
        message raised.
    """
    if not self.app:
        # BUG FIX: extra positional args to logger.debug are interpreted as
        # %-format arguments; the original message had no placeholders,
        # which makes the logging module report a formatting error.
        logger.debug('Packet received from %s: %s', address, data)
        return False
    try:
        response = self.app.handle_message(data, address)
    except Exception:
        # Same final message text as the original string concatenation,
        # but formatted lazily by the logging framework.
        logger.error('Error processing message from %s:%s', address, data)
        logger.error(traceback.format_exc())
        return False
    if response:
        self.send_datagram(response, address)
Executes when UDP data has been received and sends the packet data to our app to process the request. Args: data (str): The raw serialized packet data received. address (tuple): The address and port of the origin of the received packet. E.g. (address, port). Returns: None
codesearchnet
def find(self, key, dynamic_default_value=None, name=None):
    """Looks up `key` in the table and outputs the corresponding value.

    The table's default value is used when the key is absent.

    Args:
        key: Key to look up; must match the table's key_dtype.
        dynamic_default_value: Value to use when the key is missing; when
            None, `self._default_value` is used.
        name: A name for the operation (optional).

    Returns:
        A tensor of values with the same shape as `key`, of the table's
        value dtype.

    Raises:
        TypeError: when `key` does not match the table data types.
    """
    with tf.name_scope(name or '%s_lookup_table_find' % self._name):
        key = tf.convert_to_tensor(key, dtype=self._key_dtype, name='key')
        if dynamic_default_value is None:
            default = self._default_value
        else:
            default = tf.convert_to_tensor(
                dynamic_default_value, dtype=self._value_dtype,
                name='default_value')
        return gen_simple_hash_table_op.examples_simple_hash_table_find(
            self.resource_handle, key, default)
Looks up `key` in a table, outputs the corresponding value. The `default_value` is used if key not present in the table. Args: key: Key to look up. Must match the table's key_dtype. dynamic_default_value: The value to use if the key is missing in the table. If None (by default), the `table.default_value` will be used. name: A name for the operation (optional). Returns: A tensor containing the value in the same shape as `key` using the table's value type. Raises: TypeError: when `key` do not match the table data types.
github-repos
def _strip_debug_nodes(meta_graph_def: meta_graph_pb2.MetaGraphDef) -> None:
    """Removes debug nodes from the meta graph, mutating it in place.

    Assert and PrintV2 nodes become NoOps with all data inputs demoted to
    control inputs; CheckNumerics and Print nodes become Identity ops that
    keep only their first (pass-through) data input. Both the main graph
    and every function in the function library are rewritten.

    Args:
        meta_graph_def: The meta_graph that will be exported.
    """

    def erase_regular_node_attributes(node: node_def_pb2.NodeDef) -> None:
        # Drop every non-internal attr (internal attrs start with '_').
        attributes_to_remove = [attribute for attribute in node.attr.keys() if not attribute.startswith('_')]
        for attribute in attributes_to_remove:
            node.attr.pop(attribute)

    def prune_all_non_t_attributes(node: node_def_pb2.NodeDef) -> None:
        # Keep only the 'T' dtype attr, which Identity requires.
        if 'T' in node.attr:
            t_value = node.attr['T']
            node.ClearField('attr')
            node.attr['T'].CopyFrom(t_value)
        else:
            node.ClearField('attr')

    def is_control_input(name: str) -> bool:
        # Control inputs are prefixed with '^'. (Returns '' for an empty
        # name, which is falsy — only ever used in boolean context.)
        return name and name[0] == '^'

    def as_control_dep(name: str) -> str:
        # Turn a data input 'node:out' into a control dependency '^node'.
        return '^' + name.split(':')[0]

    def maybe_do_strip(node: node_def_pb2.NodeDef) -> None:
        if node.op == 'Assert' or node.op == 'PrintV2':
            # Neutralize the node but preserve execution ordering by
            # converting every data input into a control input.
            node.op = 'NoOp'
            erase_regular_node_attributes(node)
            new_inputs = []
            for inp in node.input:
                if not is_control_input(inp):
                    new_inputs.append(as_control_dep(inp))
                else:
                    new_inputs.append(inp)
            node.ClearField('input')
            node.input.extend(new_inputs)
        elif node.op == 'CheckNumerics' or node.op == 'Print':
            # The first input passes through the Identity; the remaining
            # inputs become control inputs.
            node.op = 'Identity'
            prune_all_non_t_attributes(node)
            for i in range(1, len(node.input)):
                if not is_control_input(node.input[i]):
                    node.input[i] = as_control_dep(node.input[i])

    for node in meta_graph_def.graph_def.node:
        maybe_do_strip(node)
    for func in meta_graph_def.graph_def.library.function:
        for node in func.node_def:
            maybe_do_strip(node)
An experimental function to remove debug nodes from the final graph. This function removes all Assert and CheckNumerics nodes from the meta_graph. It strips the operators in both the nodes and in all of the function defs, with the Assert ops being replaced by `NoOp`s and the CheckNumerics ops being transformed into `Identity` ops. In addition to this, it creates control inputs for the nodes that are not relevant for the op. For more information about control inputs please see go/how-tensors-flow#control-dependencies. Args: meta_graph_def: The meta_graph that will be exported.
github-repos
def getall(self):
    """Returns a dict of all VRFs in the running-config.

    Returns:
        dict: maps each VRF name found in the config to its attributes as
        returned by ``self.get``.
    """
    pattern = re.compile('(?<=^vrf definition\\s)(\\w+)', re.M)
    return {name: self.get(name) for name in pattern.findall(self.config)}
Returns a dict object of all VRFs in the running-config Returns: A dict object of VRF attributes
codesearchnet
def body(self, features):
    """Body of the GAN model.

    Args:
        features: a dictionary with the tensors; "targets" is aliased to
            "inputs" and "targets_raw" provides the real images.

    Returns:
        A pair (predictions, losses) where predictions is the generated
        image batch and losses is a dictionary of losses.
    """
    features["targets"] = features["inputs"]
    is_training = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
    real_images = tf.to_float(features["targets_raw"])
    # Latent noise in [-1, 1) feeding the generator.
    z = tf.random_uniform(
        [self.hparams.batch_size, self.hparams.bottleneck_bits],
        minval=-1, maxval=1, name="z")
    out_shape = common_layers.shape_list(real_images)[1:4]
    generated = self.generator(z, is_training, out_shape)
    losses = self.losses(real_images, generated)
    # Log the first generated sample to TensorBoard.
    summary_g_image = tf.reshape(
        generated[0, :], [1] + common_layers.shape_list(real_images)[1:])
    tf.summary.image("generated", summary_g_image, max_outputs=1)
    if is_training:
        # During training only the losses matter.
        return tf.zeros_like(real_images), losses
    return tf.reshape(generated, tf.shape(real_images)), losses
Body of the model. Args: features: a dictionary with the tensors. Returns: A pair (predictions, losses) where predictions is the generated image and losses is a dictionary of losses (that get added for the final loss).
juraj-google-style
def delete(self, alias_name, timeout=-1):
    """Revokes a certificate signed by the internal CA.

    Args:
        alias_name (str): Alias name of the certificate to revoke.
        timeout: Timeout in seconds. Waits for task completion by default;
            the timeout does not abort the operation in OneView, it only
            stops waiting for its completion.

    Returns:
        The result of the client's delete call.
    """
    uri = '{}/{}'.format(self.URI, alias_name)
    return self._client.delete(uri, timeout=timeout)
Revokes a certificate signed by the internal CA. If client certificate to be revoked is RabbitMQ_readonly, then the internal CA root certificate, RabbitMQ client certificate and RabbitMQ server certificate will be regenerated. This will invalidate the previous version of RabbitMQ client certificate and the RabbitMQ server will be restarted to read the latest certificates. Args: alias_name (str): Alias name. timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stop waiting for its completion.
codesearchnet
def PushEvent(self, event):
    """Pushes an event onto the heap.

    Args:
        event (EventObject): event.
    """
    macb_group_identifier, content_identifier = self._GetEventIdentifiers(event)
    # An absent MACB group identifier sorts as the empty string.
    entry = (macb_group_identifier or '', content_identifier, event)
    heapq.heappush(self._heap, entry)
Pushes an event onto the heap. Args: event (EventObject): event.
juraj-google-style
def export_to_dir(network, export_dir):
    """Exports a PyPSA network as CSV files to a debug directory.

    Args:
        network: pypsa.Network to export.
        export_dir (str): sub-directory in output/debug/grid/ where the CSV
            files of the PyPSA network are written.
    """
    package_path = ding0.__path__[0]
    target = os.path.join(package_path, 'output', 'debug', 'grid', export_dir)
    network.export_to_csv_folder(target)
Exports PyPSA network as CSV files to directory Args: network: pypsa.Network export_dir: str Sub-directory in output/debug/grid/ where csv Files of PyPSA network are exported to.
codesearchnet
def outer_id(self, value):
    """Setter for the outer_id property.

    Stores the value in the overrides dict; assigning the default value
    clears a previously stored override instead.

    Args:
        value (int): The property value.
    """
    stored = 'outerId' in self._values
    if stored and value == self._defaults['outerId']:
        # Reverting to the default: drop the explicit override.
        del self._values['outerId']
    else:
        self._values['outerId'] = value
The outer_id property setter. Args: value (int): The property value.
juraj-google-style
def api_info(self, headers=None):
    """Retrieves the information provided by the API root endpoint.

    Args:
        headers (dict): Optional headers to pass to the request.

    Returns:
        dict: Details of the HTTP API provided by the server.
    """
    return self.transport.forward_request(
        method='GET', path=self.api_prefix, headers=headers)
Retrieves information provided by the API root endpoint ``'/api/v1'``. Args: headers (dict): Optional headers to pass to the request. Returns: dict: Details of the HTTP API provided by the BigchainDB server.
juraj-google-style
def _ParseIndexTable(self, file_object):
    """Parses the index table of 4-byte cache addresses.

    Args:
        file_object (dfvfs.FileIO): a file-like object to parse.

    Raises:
        ParseError: if the index table cannot be read.
    """
    cache_address_map = self._GetDataTypeMap('uint32le')
    file_offset = file_object.get_offset()
    while True:
        cache_address_data = file_object.read(4)
        if len(cache_address_data) != 4:
            # Short read: end of the table.
            break
        try:
            value = self._ReadStructureFromByteStream(
                cache_address_data, file_offset, cache_address_map)
        except (ValueError, errors.ParseError) as exception:
            raise errors.ParseError(
                'Unable to map cache address at offset: 0x{0:08x} with error: {1!s}'.format(
                    file_offset, exception))
        if value:
            # Zero entries are unused slots and are skipped.
            self.index_table.append(CacheAddress(value))
        file_offset += 4
Parses the index table. Args: file_object (dfvfs.FileIO): a file-like object to parse. Raises: ParseError: if the index table cannot be read.
codesearchnet
def ToJsonString(self):
    """Converts Duration to JSON string format.

    Returns:
        str: the duration, e.g. "1s", "1.010s", "1.000000100s", "-3.100s".
        Contains 3, 6, or 9 fractional digits depending on the precision
        required to represent the exact Duration value.
    """
    # BUG FIX: the original source was corrupted — the floor-division
    # expressions normalizing nanos into seconds were truncated, leaving
    # unbalanced parentheses. Reconstructed per the protobuf well-known
    # Duration JSON mapping.
    if self.seconds < 0 or self.nanos < 0:
        # Render a single leading '-' with positive magnitudes.
        result = '-'
        seconds = -self.seconds + int((0 - self.nanos) // 1e9)
        nanos = (0 - self.nanos) % 1e9
    else:
        result = ''
        seconds = self.seconds + int(self.nanos // 1e9)
        nanos = self.nanos % 1e9
    result += '%d' % seconds
    if (nanos % 1e9) == 0:
        # No fractional part required.
        return result + 's'
    if (nanos % 1e6) == 0:
        # Millisecond precision suffices.
        return result + '.%03ds' % (nanos / 1e6)
    if (nanos % 1e3) == 0:
        # Microsecond precision suffices.
        return result + '.%06ds' % (nanos / 1e3)
    return result + '.%09ds' % nanos
Converts Duration to string format. Returns: A string converted from self. The string format will contain 3, 6, or 9 fractional digits depending on the precision required to represent the exact Duration value. For example: "1s", "1.010s", "1.000000100s", "-3.100s"
codesearchnet
def SetOption(self, section, option, value, overwrite=True):
    """Set the value of an option in the config file.

    Args:
        section: string, the section of the config file to modify.
        option: string, the option to set the value of.
        value: the value to set; stored as its string representation.
        overwrite: bool, True to overwrite an existing value.
    """
    config = self.config
    if config.has_option(section, option) and not overwrite:
        # Respect the existing value.
        return
    if not config.has_section(section):
        config.add_section(section)
    config.set(section, option, str(value))
Set the value of an option in the config file. Args: section: string, the section of the config file to check. option: string, the option to set the value of. value: string, the value to set the option. overwrite: bool, True to overwrite an existing value in the config file.
juraj-google-style
def simulate(self, action):
    """Step the batch of environments with the given actions.

    The results of the step can be read back from the observ/action/
    reward/done variables assigned here.

    Args:
        action: Tensor holding the batch of actions to apply.

    Returns:
        Operation grouping all state-variable assignments.
    """
    with tf.name_scope('environment/simulate'):
        if action.dtype in (tf.float16, tf.float32, tf.float64):
            action = tf.check_numerics(action, 'action')
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        # The py_func forwards to the (non-graph) batch environment.
        observ, reward, done = tf.py_func(
            lambda a: self._batch_env.step(a)[:3], [action],
            [observ_dtype, tf.float32, tf.bool], name='step')
        observ = tf.check_numerics(observ, 'observ')
        reward = tf.check_numerics(reward, 'reward')
        assigns = [
            self._observ.assign(observ),
            self._action.assign(action),
            self._reward.assign(reward),
            self._done.assign(done),
        ]
        return tf.group(*assigns)
Step the batch of environments. The results of the step can be accessed from the variables defined below. Args: action: Tensor holding the batch of actions to apply. Returns: Operation.
juraj-google-style
def last(series, order_by=None):
    """Returns the last value of a series.

    Args:
        series (pandas.Series): column to summarize.
        order_by: a pandas.Series or list of series (can be symbolic) to
            order the input series by before summarization.

    Returns:
        The final element of the (optionally reordered) series.
    """
    if order_by is not None:
        series = order_series_by(series, order_by)
    return series.iloc[-1]
Returns the last value of a series. Args: series (pandas.Series): column to summarize. Kwargs: order_by: a pandas.Series or list of series (can be symbolic) to order the input series by before summarization.
juraj-google-style
def plot(self, **plot_kwargs: Any) -> None:
    """Plots average ground state probability vs number of Cliffords.

    Args:
        **plot_kwargs: Arguments forwarded to matplotlib.pyplot.plot.
    """
    figure = plt.figure()
    plt.plot(self._num_cfds_seq, self._gnd_state_probs, 'ro-',
             figure=figure, **plot_kwargs)
    plt.xlabel(r"Number of Cliffords", figure=figure)
    plt.ylabel('Ground State Probability', figure=figure)
    figure.show()
Plots the average ground state probability vs the number of Cliffords in the RB study. Args: **plot_kwargs: Arguments to be passed to matplotlib.pyplot.plot.
juraj-google-style
def batch_slice(dist, params_event_ndims, params_overrides, slices):
    """Slices `dist` along its batch dimensions. Helper for tfd.Distribution.

    Args:
        dist: A `tfd.Distribution` instance.
        params_event_ndims: `dict` of `str->int` giving the number of
            dimensions of each parameter needed for a single event.
        params_overrides: `dict` of parameter overrides (e.g. from
            `Distribution.copy`).
        slices: A `slice`/`int`/`int` `Tensor`/`tf.newaxis` or tuple
            thereof (e.g. the argument of a `__getitem__` method).

    Returns:
        new_dist: A batch-sliced `tfd.Distribution`.
    """
    if not isinstance(slices, collections.Sequence):
        slices = (slices,)
    # Recover the original (unsliced) distribution plus the chain of slice
    # operations applied so far, then extend the chain in place.
    orig_dist, slice_overrides_seq = getattr(dist, PROVENANCE_ATTR, (dist, []))
    slice_overrides_seq += [(slices, params_overrides)]
    sliced = _apply_slice_sequence(orig_dist, params_event_ndims, slice_overrides_seq)
    # Record provenance so further slices always compose from the original.
    setattr(sliced, PROVENANCE_ATTR, (orig_dist, slice_overrides_seq))
    return sliced
Slices `dist` along its batch dimensions. Helper for tfd.Distribution. Args: dist: A `tfd.Distribution` instance. params_event_ndims: A `dict` of `str->int` indicating the number of dimensions of a given parameter required to parameterize a single event. params_overrides: A `dict` of parameter overrides. (e.g. from `Distribution.copy`). slices: A `slice` or `int` or `int` `Tensor` or `tf.newaxis` or `tuple` thereof. (e.g. the argument of a `__getitem__` method). Returns: new_dist: A batch-sliced `tfd.Distribution`.
codesearchnet
def sg_lookup(tensor, opt):
    r"""Looks up `tensor` rows in the embedding matrix.

    Args:
        tensor: A tensor of indices (automatically given by chain).
        opt:
            emb: A 2-D `Tensor`. An embedding matrix (mandatory).
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor` of embeddings.
    """
    assert opt.emb is not None, 'emb is mandatory.'
    return tf.nn.embedding_lookup(opt.emb, tensor, name=opt.name)
r"""Looks up the `tensor`, which is the embedding matrix. Args: tensor: A tensor ( automatically given by chain ) opt: emb: A 2-D `Tensor`. An embedding matrix. name: If provided, replace current tensor's name. Returns: A `Tensor`.
codesearchnet
def take_ownership(self, **kwargs):
    """Update the owner of a pipeline schedule.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo).

    Raises:
        GitlabAuthenticationError: If authentication is not correct.
        GitlabOwnershipError: If the request failed.
    """
    path = '{}/{}/take_ownership'.format(self.manager.path, self.get_id())
    server_data = self.manager.gitlab.http_post(path, **kwargs)
    self._update_attrs(server_data)
Update the owner of a pipeline schedule. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabOwnershipError: If the request failed
codesearchnet
def gene_by_alias(self, symbol, build='37'):
    """Return an iterable of hgnc genes matching a symbol.

    If the symbol is a primary symbol the cursor yields one result;
    otherwise it yields all hgnc genes having the symbol as an alias.

    Args:
        symbol (str): gene symbol or alias.
        build (str): genome build.

    Returns:
        res(pymongo.Cursor(dict))
    """
    primary_query = {'hgnc_symbol': symbol, 'build': build}
    res = self.hgnc_collection.find(primary_query)
    if res.count() == 0:
        # No primary-symbol match: fall back to alias lookup.
        res = self.hgnc_collection.find({'aliases': symbol, 'build': build})
    return res
Return a iterable with hgnc_genes. If the gene symbol is listed as primary the iterable will only have one result. If not the iterable will include all hgnc genes that have the symbol as an alias. Args: symbol(str) build(str) Returns: res(pymongo.Cursor(dict))
codesearchnet
def delete(self, model_name):
    """Delete a model.

    Args:
        model_name: the name of the model. It can be a model full name
            ("projects/[project_id]/models/[model_name]") or just
            [model_name].

    Raises:
        Exception: if the service response contains no operation name.
    """
    if model_name.startswith('projects/'):
        full_name = model_name
    else:
        full_name = 'projects/%s/models/%s' % (self._project_id, model_name)
    response = self._api.projects().models().delete(name=full_name).execute()
    if 'name' not in response:
        raise Exception('Invalid response from service. "name" is not found.')
    # Deleting is asynchronous; block until the operation finishes.
    _util.wait_for_long_running_operation(response['name'])
Delete a model. Args: model_name: the name of the model. It can be a model full name ("projects/[project_id]/models/[model_name]") or just [model_name].
juraj-google-style
def _slice_ragged_row_dimension(rt_input, row_key):
    """Slice the outer (row) dimension of `rt_input` by the given slice.

    Args:
        rt_input: The `RaggedTensor` to slice.
        row_key: The `slice` object that should be used to slice `rt_input`.

    Returns:
        A `RaggedTensor` containing the indicated slice of `rt_input`.
    """
    if row_key.start is None and row_key.stop is None and (row_key.step is None):
        # Full slice [:]: nothing to do.
        return rt_input
    # Start/limit offsets of each selected row inside rt_input.values.
    new_starts = rt_input.row_splits[:-1][row_key]
    new_limits = rt_input.row_splits[1:][row_key]
    zero_pad = array_ops.zeros([1], rt_input.row_splits.dtype)
    if row_key.step is None or row_key.step == 1:
        # Contiguous selection: the chosen rows remain adjacent in `values`,
        # so the new splits are [first start, limits...]. The zero_pad term
        # contributes a [0] only when no rows were selected.
        new_splits = array_ops.concat([zero_pad[array_ops.size(new_starts):], new_starts[:1], new_limits], axis=0)
        values_start = new_splits[0]
        values_limit = new_splits[-1]
        # Re-base the splits to the trimmed values slice.
        return ragged_tensor.RaggedTensor.from_row_splits(rt_input.values[values_start:values_limit], new_splits - values_start, validate=False)
    else:
        # Strided selection: rows are no longer contiguous in `values`, so
        # gather the per-row value ranges individually.
        return _build_ragged_tensor_from_value_ranges(new_starts, new_limits, 1, rt_input.values)
Slice the outer dimension of `rt_input` according to the given `slice`. Args: rt_input: The `RaggedTensor` to slice. row_key: The `slice` object that should be used to slice `rt_input`. Returns: A `RaggedTensor` containing the indicated slice of `rt_input`.
github-repos
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
    """Checks for a C-style cast by looking for the pattern.

    Args:
        filename: The name of the current file.
        clean_lines: A CleansedLines instance containing the file.
        linenum: The number of the line to check.
        cast_type: The string for the C++ cast to recommend. This is either
            reinterpret_cast, static_cast, or const_cast, depending.
        pattern: The regular expression used to find C-style casts.
        error: The function to call with any errors found.

    Returns:
        True if an error was emitted. False otherwise.
    """
    line = clean_lines.elided[linenum]
    match = Search(pattern, line)
    if (not match):
        return False
    # Text on the line before the matched type name.
    context = line[0:(match.start(1) - 1)]
    # sizeof/alignof/alignas and macro-like ALL_CAPS identifiers take
    # parenthesized type arguments that merely look like casts.
    if Match('.*\\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\\s*$', context):
        return False
    # Expand the context with up to four previous lines to detect a
    # still-open multi-line macro invocation.
    if (linenum > 0):
        for i in xrange((linenum - 1), max(0, (linenum - 5)), (- 1)):
            context = (clean_lines.elided[i] + context)
    if Match('.*\\b[_A-Z][_A-Z0-9]*\\s*\\((?:\\([^()]*\\)|[^()])*$', context):
        return False
    # operator++/operator-- declarations are not casts.
    if (context.endswith(' operator++') or context.endswith(' operator--')):
        return False
    remainder = line[match.end(0):]
    # A function declarator (not a cast) is followed by ';', 'const',
    # 'throw', override specifiers, or similar tokens.
    if Match('^\\s*(?:;|const\\b|throw\\b|final\\b|override\\b|[=>{),]|->)', remainder):
        return False
    error(filename, linenum, 'readability/casting', 4,
          ('Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))))
    return True
Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise.
codesearchnet
def in_labelset(xmrs, nodeids, label=None):
    """Test if all nodeids share a label.

    Args:
        xmrs: the Xmrs object to query.
        nodeids: iterable of nodeids.
        label (str, optional): the label that all nodeids must share; when
            omitted, the label of the first nodeid is used.

    Returns:
        bool: `True` if all nodeids share a label, otherwise `False`.
    """
    target = set(nodeids)
    if label is None:
        # Default to the label of an arbitrary member of the set.
        first_nodeid = next(iter(target))
        label = xmrs.ep(first_nodeid).label
    labeled = set(xmrs._vars[label]['refs']['LBL'])
    return target <= labeled
Test if all nodeids share a label. Args: nodeids: iterable of nodeids label (str, optional): the label that all nodeids must share Returns: bool: `True` if all nodeids share a label, otherwise `False`
juraj-google-style
def to_schema(self, entry: dict, parents: dict = None) -> list:
    """Convert a Discovery API Document schema to a BigQuery schema.

    Recursively crawls the discovery document reference tree to build the
    schema, capping recursive ``$ref`` chains at ``self.recursion_depth``.

    Args:
        entry: A discovery document schema definition (its properties dict).
        parents: Tracks recursion depth per referenced schema; used
            internally by the recursion and normally omitted by callers.

    Returns:
        A list of BigQuery schema field dicts.
    """
    # Bug fix: the original used a mutable default (parents: dict={}),
    # which persists depth counts across separate top-level calls.
    if parents is None:
        parents = {}
    bigquery_schema = []
    for key, value in entry.items():
        if not isinstance(value, dict):
            continue
        if '$ref' in value:
            # Nested record via reference; recursion bounded per schema name.
            parents.setdefault(value['$ref'], 0)
            if parents[value['$ref']] < self.recursion_depth:
                parents[value['$ref']] += 1
                bigquery_schema.append({
                    'name': key,
                    'type': 'RECORD',
                    'mode': 'NULLABLE',
                    'fields': self.to_schema(
                        self.api_document['schemas'][value['$ref']]['properties'],
                        parents),
                })
                parents[value['$ref']] -= 1
        elif 'items' in value:
            # Repeated field (array); items may themselves be refs or objects.
            if '$ref' in value['items']:
                parents.setdefault(value['items']['$ref'], 0)
                if parents[value['items']['$ref']] < self.recursion_depth:
                    parents[value['items']['$ref']] += 1
                    bigquery_schema.append({
                        'name': key,
                        'type': 'RECORD',
                        'mode': 'REPEATED',
                        'fields': self.to_schema(
                            self.api_document['schemas'][value['items']['$ref']]['properties'],
                            parents),
                    })
                    parents[value['items']['$ref']] -= 1
            elif value['items']['type'] == 'object':
                bigquery_schema.append({
                    'name': key,
                    'type': 'RECORD',
                    'mode': 'NULLABLE',
                    'fields': self.to_schema(value['items'], parents),
                })
            else:
                # Scalar array; surface any enum values in the description.
                bigquery_schema.append({
                    'description': ','.join(value['items'].get('enum', []))[:DESCRIPTION_LENGTH],
                    'name': key,
                    'type': self.to_type(value['items']),
                    'mode': 'REPEATED',
                })
        else:
            bigquery_schema.append({
                'description': ','.join(value.get('enum', []))[:DESCRIPTION_LENGTH],
                'name': key,
                'type': self.to_type(value),
                'mode': 'NULLABLE',
            })
    return bigquery_schema
Convert a Discovery API Document schema to a BigQuery schema. Recursively crawls the discovery document reference tree to build schema. Leverages recursion depth passed in constructor to stop if necessary. Args: entry: a discovery document schema definition. parents: used to track recursion depth for a specific schema branch Returns: A BigQuery schema object.
github-repos
def _update_hasher(hasher, data, types=True):
    """Fold `data` into `hasher` as a canonical byte representation.

    Args:
        hasher (HASH): Instance of a hashlib algorithm.
        data (object): Ordered data with structure.
        types (bool): Include type prefixes in the hash.
    """
    if isinstance(data, (tuple, list, zip)):
        needs_iteration = True
    else:
        # Registered extension hooks decide whether custom types iterate.
        needs_iteration = any((check(data) for check in _HASHABLE_EXTENSIONS.iterable_checks))
    if needs_iteration:
        # Delimiters make nesting unambiguous in the byte stream.
        SEP = b'_,_'
        ITER_PREFIX = b'_[_'
        ITER_SUFFIX = b'_]_'
        iter_ = iter(data)
        hasher.update(ITER_PREFIX)
        try:
            for item in iter_:
                (prefix, hashable) = _convert_to_hashable(item, types)
                binary_data = ((prefix + hashable) + SEP)
                hasher.update(binary_data)
        except TypeError:
            # The current item was not directly convertible: recurse into it,
            # then keep consuming iter_ (it resumes where the loop stopped).
            _update_hasher(hasher, item, types)
            for item in iter_:
                _update_hasher(hasher, item, types)
            hasher.update(SEP)
        hasher.update(ITER_SUFFIX)
    else:
        (prefix, hashable) = _convert_to_hashable(data, types)
        binary_data = (prefix + hashable)
        hasher.update(binary_data)
Converts `data` into a byte representation and calls update on the hasher `hashlib.HASH` algorithm. Args: hasher (HASH): instance of a hashlib algorithm data (object): ordered data with structure types (bool): include type prefixes in the hash Example: >>> hasher = hashlib.sha512() >>> data = [1, 2, ['a', 2, 'c']] >>> _update_hasher(hasher, data) >>> print(hasher.hexdigest()[0:8]) e2c67675 2ba8d82b
codesearchnet
def _get_syslog_format(event_type):
    """Build a JSON logging format string for syslog messages.

    Renders the ``syslog_format.json`` template with the event type and the
    configured instance name.

    Args:
        event_type (str): Event type name.

    Returns:
        str: A JSON-encoded logging format string.
    """
    syslog_format_template = get_template('syslog_format.json')
    fmt = syslog_format_template.render(event_type=event_type, host=dbconfig.get('instance_name', default='local'))
    # Round-trip through json to validate and normalise the rendered template.
    return json.dumps(json.loads(fmt))
Take an event type argument and return a python logging format In order to properly format the syslog messages to current standard, load the template and perform necessary replacements and return the string. Args: event_type (str): Event type name Returns: `str`
codesearchnet
def _check_status(cls, response_json):
    """Raise a typed exception for known error statuses in a response.

    Statuses without a mapping below fall through silently.

    Args:
        response_json (dict): Parsed JSON body containing 'status' and 'msg'.

    Returns:
        None

    Raises:
        BadRequestException: status 400.
        PermissionDeniedException: status 403.
        FileNotFoundException: status 404.
        UnavailableForLegalReasonsException: status 451.
        BandwidthUsageExceeded: status 509.
        ServerErrorException: any other status >= 500.
    """
    status = response_json['status']
    msg = response_json['msg']
    if (status == 400):
        raise BadRequestException(msg)
    elif (status == 403):
        raise PermissionDeniedException(msg)
    elif (status == 404):
        raise FileNotFoundException(msg)
    elif (status == 451):
        raise UnavailableForLegalReasonsException(msg)
    elif (status == 509):
        raise BandwidthUsageExceeded(msg)
    elif (status >= 500):
        raise ServerErrorException(msg)
Check the status of the incoming response and raise a typed exception for known error status codes (400, 403, 404, 451, 509, and other statuses >= 500); other statuses fall through silently. Args: response_json (dict): results of the response of the GET request. Returns: None
codesearchnet
def _subscribe(tensor, side_effects, control_cache):
    """Subscribe a single tensor to a list of side_effects.

    Reuses an existing subscription identity op when the tensor has already
    been subscribed (or is itself the result of a previous `subscribe()`),
    appending the given side effects to the existing ones.

    Args:
        tensor: The `tf.Tensor` to be subscribed.
        side_effects: List of side_effect functions, see subscribe for details.
        control_cache: `_ControlOutputCache` helper to get control_outputs
            faster.

    Returns:
        The modified replacement to the passed in tensor which triggers the
        side effects, or the given tensor if it cannot be subscribed.
    """
    if not tensor.dtype.is_numpy_compatible:
        # e.g. resource/variant dtypes cannot flow through an Identity op.
        logging.debug('Tensor {} has an un-supported {} type and cannot be subscribed.'.format(tensor.name, tensor.dtype))
        return tensor
    if _is_subscribed_identity(tensor):
        return _subscribe_extend(tensor, side_effects)
    # Look for an existing subscription identity op among the consumers so
    # we extend it instead of nesting a second subscription.
    name_scope = tensor.op.name + '/subscription/Identity'
    consumers = tensor.consumers()
    matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
    assert len(matching_ops) <= 1, 'Op {} must only have one subscription op connected to it'.format(tensor.op.name)
    if len(matching_ops) == 1:
        candidate_tensor = matching_ops[0].outputs[0]
        if _is_subscribed_identity(candidate_tensor):
            return _subscribe_extend(candidate_tensor, side_effects)
    return _subscribe_new(tensor, side_effects, control_cache)
Helper method that subscribes a single tensor to a list of side_effects. This method will check if the given tensor has already been subscribed or if it's a tensor returned by a previous call to `subscribe()` and, if so, will reuse the existing identity op, appending the given side effects to the list of existing ones. Args: tensor: The `tf.Tensor` to be subscribed. side_effects: List of side_effect functions, see subscribe for details. control_cache: `_ControlOutputCache` helper to get control_outputs faster. Returns: The modified replacement to the passed in tensor which triggers the side effects or the given tensor, if it was already been subscribed.
github-repos
def load_model(
        self, the_metamodel, filename, is_main_model, encoding='utf-8',
        add_to_local_models=True):
    """Load a single model, reusing the global cache when possible.

    Args:
        the_metamodel: The metamodel used to load the model.
        filename: The model file to be loaded (if not cached).
        is_main_model: Whether this is the main model being loaded.
        encoding (str): Encoding used to read the model file.
        add_to_local_models (bool): Also register the model in the local
            model repository.

    Returns:
        The loaded (or cached) model.
    """
    if not self.local_models.has_model(filename):
        if self.all_models.has_model(filename):
            # Already loaded globally; just reuse it.
            new_model = self.all_models.filename_to_model[filename]
        else:
            # The callback lets referenced models resolve through this repo.
            new_model = the_metamodel.internal_model_from_file(
                filename,
                pre_ref_resolution_callback=lambda
                other_model: self.pre_ref_resolution_callback(other_model),
                is_main_model=is_main_model, encoding=encoding)
            self.all_models.filename_to_model[filename] = new_model
        if add_to_local_models:
            self.local_models.filename_to_model[filename] = new_model
    assert self.all_models.has_model(filename)
    return self.all_models.filename_to_model[filename]
load a single model Args: the_metamodel: the metamodel used to load the model filename: the model to be loaded (if not cached) Returns: the loaded/cached model
juraj-google-style
def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):
    """Saves optimizer weights of an optimizer to a HDF5 group.

    Args:
        hdf5_group: HDF5 group.
        optimizer: optimizer instance.
    """
    symbolic_weights = getattr(optimizer, 'weights')
    if symbolic_weights:
        weights_group = hdf5_group.create_group('optimizer_weights')
        weight_names = [str(w.name).encode('utf8') for w in symbolic_weights]
        save_attributes_to_hdf5_group(weights_group, 'weight_names', weight_names)
        weight_values = backend.batch_get_value(symbolic_weights)
        for name, val in zip(weight_names, weight_values):
            param_dset = weights_group.create_dataset(name, val.shape, dtype=val.dtype)
            if not val.shape:
                # Scalar dataset: must be written via the () index.
                param_dset[()] = val
            else:
                param_dset[:] = val
Saves optimizer weights of a optimizer to a HDF5 group. Args: hdf5_group: HDF5 group. optimizer: optimizer instance.
github-repos
def link_asset_content_key(access_token, asset_id, encryptionkey_id, ams_redirected_rest_endpoint):
    """Link a Media Service Asset to a Content Key.

    Args:
        access_token (str): A valid Azure authentication token.
        asset_id (str): A Media Service Asset ID.
        encryptionkey_id (str): A Media Service Encryption Key ID.
        ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint.

    Returns:
        HTTP response with a JSON body.
    """
    path = '/Assets'
    full_path = ''.join([path, "('", asset_id, "')", "/$links/ContentKeys"])
    # The $links path segment must be URL-encoded in its entirety.
    full_path_encoded = urllib.parse.quote(full_path, safe='')
    # NOTE(review): the POST endpoint uses the module-level ams_rest_endpoint,
    # not the redirected endpoint parameter -- confirm this is intentional.
    endpoint = ''.join([ams_rest_endpoint, full_path_encoded])
    uri = ''.join([ams_redirected_rest_endpoint, 'ContentKeys', "('", encryptionkey_id, "')"])
    body = '{"uri": "' + uri + '"}'
    return do_ams_post(endpoint, full_path_encoded, body, access_token)
Link Media Service Asset and Content Key. Args: access_token (str): A valid Azure authentication token. asset_id (str): A Media Service Asset ID. encryption_id (str): A Media Service Encryption ID. ams_redirected_rest_endpoint (str): A Media Service Redirected Endpoint. Returns: HTTP response. JSON body.
juraj-google-style
def _run_in_hypothesis(self, mma, w_string, index):
    """Run w_string in the hypothesis automaton for `index` steps.

    Returns the access string of the state reached after consuming the
    first `index` symbols of w_string.

    Args:
        mma (DFA): The hypothesis automaton.
        w_string (str): The examined string to be consumed.
        index (int): The index value for selecting the prefix of w.

    Returns:
        str: The access string of the reached state.
    """
    state = mma[0]  # start state
    for i in range(index):
        # Follow the arc whose input label matches the current symbol.
        for arc in state:
            if mma.isyms.find(arc.ilabel) == w_string[i]:
                state = mma[arc.nextstate]
                s_index = arc.nextstate
    # NOTE(review): assumes index >= 1; s_index is unbound for index == 0.
    access_string = self.observation_table.sm_vector[s_index]
    logging.debug(
        'Access string for %d: %s - %d ', index, access_string, s_index)
    return access_string
Run the string in the hypothesis automaton for index steps and then return the access string for the state reached concatenated with the rest of the string w. Args: mma (DFA): The hypothesis automaton w_string (str): The examined string to be consumed index (int): The index value for selecting the prefix of w Return: str: The access string
juraj-google-style
def projection(self, variables):
    """Create a new constraint that is the projection onto a subset of variables.

    Args:
        variables (iterable): Subset of the constraint's variables.

    Returns:
        Constraint: A new constraint over the given subset of variables.

    Raises:
        ValueError: If `variables` is not a subset of the constraint's
            variables.
    """
    variables = set(variables)
    if (not variables.issubset(self.variables)):
        raise ValueError('Cannot project to variables not in the constraint.')
    # Positions of the kept variables, in the constraint's original order.
    idxs = [i for (i, v) in enumerate(self.variables) if (v in variables)]
    # Distinct feasible configurations restricted to the kept positions.
    configurations = frozenset((tuple((config[i] for i in idxs)) for config in self.configurations))
    variables = tuple((self.variables[i] for i in idxs))
    return self.from_configurations(configurations, variables, self.vartype)
Create a new constraint that is the projection onto a subset of the variables. Args: variables (iterable): Subset of the constraint's variables. Returns: :obj:`.Constraint`: A new constraint over a subset of the variables. Examples: >>> import dwavebinarycsp ... >>> const = dwavebinarycsp.Constraint.from_configurations([(0, 0), (0, 1)], ... ['a', 'b'], ... dwavebinarycsp.BINARY) >>> proj = const.projection(['a']) >>> proj.variables ['a'] >>> proj.configurations {(0,)}
codesearchnet
def top_and_tail(a):
    """Strip the NaNs from the top and tail (only) of a well log.

    Interior NaNs are preserved; only leading and trailing NaN runs are
    removed.

    Args:
        a (ndarray): An array.

    Returns:
        ndarray: The top-and-tailed array (empty if the input is all-NaN).
    """
    valid_idx = np.flatnonzero(~np.isnan(a))
    if valid_idx.size == 0:
        return np.array([])
    return a[valid_idx[0]:valid_idx[-1] + 1]
Remove the NaNs from the top and tail (only) of a well log. Args: a (ndarray): An array. Returns: ndarray: The top and tailed array.
juraj-google-style
def restore_op(self, filename_tensor, saveable, preferred_shard):
    """Create ops to restore 'saveable'.

    This is intended to be overridden by subclasses that want to generate
    different Ops.

    Args:
        filename_tensor: String Tensor.
        saveable: A BaseSaverBuilder.SaveableObject object.
        preferred_shard: Int. Shard to open first when loading a sharded
            file (unused by this implementation).

    Returns:
        A list of Tensors resulting from reading 'saveable' from 'filename'.
    """
    tensors = []
    for spec in saveable.specs:
        # restore_v2 returns one tensor per requested name; unwrap it.
        tensors.append(io_ops.restore_v2(filename_tensor, [spec.name], [spec.slice_spec], [spec.dtype])[0])
    return tensors
Create ops to restore 'saveable'. This is intended to be overridden by subclasses that want to generate different Ops. Args: filename_tensor: String Tensor. saveable: A BaseSaverBuilder.SaveableObject object. preferred_shard: Int. Shard to open first when loading a sharded file. Returns: A list of Tensors resulting from reading 'saveable' from 'filename'.
github-repos
def transform(self, X, y=None):
    """Transform documents to padded id matrices plus ELMo embeddings.

    Uses the vocabularies learned by `fit`.

    Args:
        X: Iterable of tokenized documents.
        y: Iterable of label sequences (optional).

    Returns:
        features: [word id matrix, char id matrix, ELMo embeddings]; a
        (features, one-hot label matrix) tuple when `y` is given.
    """
    word_ids = [self._word_vocab.doc2id(doc) for doc in X]
    word_ids = pad_sequences(word_ids, padding='post')
    char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
    char_ids = pad_nested_sequences(char_ids)
    character_ids = batch_to_ids(X)
    # Use the second ELMo representation layer; detach to a numpy array.
    elmo_embeddings = self._elmo(character_ids)['elmo_representations'][1]
    elmo_embeddings = elmo_embeddings.detach().numpy()
    features = [word_ids, char_ids, elmo_embeddings]
    if y is not None:
        y = [self._label_vocab.doc2id(doc) for doc in y]
        y = pad_sequences(y, padding='post')
        y = to_categorical(y, self.label_size).astype(int)
        # Keep a 3D (batch, seq, label) shape even for a single document.
        y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)
        return features, y
    else:
        return features
Transform documents to document ids. Uses the vocabulary learned by fit. Args: X : iterable an iterable which yields either str, unicode or file objects. y : iterabl, label strings. Returns: features: document id matrix. y: label id matrix.
juraj-google-style
def add_to_screen(self, screen_width, screen):
    """Add the pattern to a screen. Also fills self.widgets.

    Args:
        screen_width (int): the width of the screen
        screen (lcdprod.Screen): the screen to fill.
    """
    for lineno, fields in enumerate(self.line_fields):
        for left, field in self.compute_positions(screen_width, fields):
            logger.debug(
                "Adding field %s to screen %s at x=%d->%d, y=%d",
                field, screen.ref, left, left + field.width - 1, 1 + lineno,
            )
            # Screen rows are 1-based, hence the 1 + lineno offset.
            self.widgets[field] = field.add_to_screen(screen, left, 1 + lineno)
            self.register_hooks(field)
Add the pattern to a screen. Also fills self.widgets. Args: screen_width (int): the width of the screen screen (lcdprod.Screen): the screen to fill.
juraj-google-style
def _ParseInfo2Record(
    self, parser_mediator, file_object, record_offset, record_size):
    """Parses an INFO-2 (Recycle Bin) record and produces a deletion event.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.
        record_offset (int): record offset.
        record_size (int): record size.

    Raises:
        ParseError: if the record cannot be read.
    """
    record_data = self._ReadData(file_object, record_offset, record_size)
    record_map = self._GetDataTypeMap('recycler_info2_file_entry')
    try:
        record = self._ReadStructureFromByteStream(
            record_data, record_offset, record_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to map record data at offset: 0x{0:08x} with error: '
            '{1!s}').format(record_offset, exception))
    codepage = parser_mediator.codepage or 'ascii'
    # The short (codepage-encoded) original filename is NUL-terminated.
    ascii_filename = record.original_filename.split(b'\x00')[0]
    try:
        ascii_filename = ascii_filename.decode(codepage)
    except UnicodeDecodeError:
        ascii_filename = ascii_filename.decode(codepage, errors='replace')
        parser_mediator.ProduceExtractionWarning(
            'unable to decode original filename.')
    unicode_filename = None
    if record_size > 280:
        # Records longer than 280 bytes carry a UTF-16-LE long filename.
        record_offset += 280
        utf16_string_map = self._GetDataTypeMap(
            'recycler_info2_file_entry_utf16le_string')
        try:
            unicode_filename = self._ReadStructureFromByteStream(
                record_data[280:], record_offset, utf16_string_map)
        except (ValueError, errors.ParseError) as exception:
            raise errors.ParseError((
                'Unable to map record data at offset: 0x{0:08x} with error: '
                '{1!s}').format(record_offset, exception))
        unicode_filename = unicode_filename.rstrip('\x00')
    if record.deletion_time == 0:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    else:
        date_time = dfdatetime_filetime.Filetime(
            timestamp=record.deletion_time)
    event_data = WinRecycleBinEventData()
    event_data.drive_number = record.drive_number
    # Prefer the long Unicode filename when present.
    event_data.original_filename = unicode_filename or ascii_filename
    event_data.file_size = record.original_file_size
    event_data.offset = record_offset
    event_data.record_index = record.index
    if ascii_filename != unicode_filename:
        event_data.short_filename = ascii_filename
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_DELETED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
Parses an INFO-2 record. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. file_object (dfvfs.FileIO): file-like object. record_offset (int): record offset. record_size (int): record size. Raises: ParseError: if the record cannot be read.
juraj-google-style
def merge_styles(inline_style, new_styles, classes, remove_unset_properties=False):
    """Merge new styles with an element's inline style.

    The order of new_styles matters: later entries override earlier ones.
    The old inline style is applied last and therefore always overrides
    the new styles. The inline style must be valid.

    Args:
        inline_style (str): the old inline style of the element, if any.
        new_styles: a list of new styles; each element is a list of
            (property, value) tuples.
        classes: a list of pseudo-classes mapping one-to-one onto
            new_styles; important!
        remove_unset_properties (bool): remove CSS properties whose value
            is 'unset'.

    Returns:
        str: the final style.
    """
    styles = OrderedDict([('', OrderedDict())])
    for pc in set(classes):
        styles[pc] = OrderedDict()
    for (i, style) in enumerate(new_styles):
        for (k, v) in style:
            styles[classes[i]][k] = v
    if inline_style:
        # Inline style last, so it overrides everything merged above.
        for (k, v) in csstext_to_pairs(inline_style):
            styles[''][k] = v
    normal_styles = []
    pseudo_styles = []
    for (pseudoclass, kv) in styles.items():
        if remove_unset_properties:
            kv = OrderedDict(((k, v) for (k, v) in kv.items() if (not (v.lower() == 'unset'))))
        if (not kv):
            continue
        if pseudoclass:
            pseudo_styles.append(('%s{%s}' % (pseudoclass, '; '.join((('%s:%s' % (k, v)) for (k, v) in kv.items())))))
        else:
            normal_styles.append('; '.join((('%s:%s' % (k, v)) for (k, v) in kv.items())))
    if pseudo_styles:
        # With pseudo-classes present, the normal block is wrapped in braces.
        all_styles = (([('{%s}' % ''.join(normal_styles))] + pseudo_styles) if normal_styles else pseudo_styles)
    else:
        all_styles = normal_styles
    return ' '.join(all_styles).strip()
This will merge all new styles where the order is important The last one will override the first When that is done it will apply old inline style again The old inline style is always important and override all new ones. The inline style must be valid. Args: inline_style(str): the old inline style of the element if there is one new_styles: a list of new styles, each element should be a list of tuple classes: a list of classes which maps new_styles, important! remove_unset_properties(bool): Allow us to remove certain CSS properties with rules that set their value to 'unset' Returns: str: the final style
codesearchnet
def create_run(cmd, project, exp, grp):
    """Create a new 'run' in the database.

    Opens a new database session, inserts and commits a new run in it,
    then returns both the run and the session for further use.

    Args:
        cmd: The command that has been executed.
        project: The project this run belongs to.
        exp: The experiment this run belongs to.
        grp: The run group (uuid) this run belongs to.

    Returns:
        Tuple of (the inserted run, the session it was created in).
    """
    # Imported locally to avoid a module-level dependency cycle.
    from benchbuild.utils import schema as s
    session = s.Session()
    run = s.Run(command=str(cmd), project_name=project.name, project_group=project.group, experiment_name=exp, run_group=str(grp), experiment_group=project.experiment.id)
    session.add(run)
    session.commit()
    return (run, session)
Create a new 'run' in the database. This creates a new transaction in the database and creates a new run in this transaction. Afterwards we return both the transaction as well as the run itself. The user is responsible for committing it when the time comes. Args: cmd: The command that has been executed. prj: The project this run belongs to. exp: The experiment this run belongs to. grp: The run_group (uuid) we blong to. Returns: The inserted tuple representing the run and the session opened with the new run. Don't forget to commit it at some point.
codesearchnet
def mount(self, app=None):
    """Mount all registered routes onto a bottle.py application instance.

    Args:
        app (instance): A `bottle.Bottle()` application instance.

    Returns:
        The Router instance (for chaining purposes).
    """
    for route in self._routes:
        route.register_app(app)
    return self
Mounts all registered routes to a bottle.py application instance. Args: app (instance): A `bottle.Bottle()` application instance. Returns: The Router instance (for chaining purposes).
juraj-google-style
def __init__(self, text: str, sn: "DataNode"):
    """Extend the superclass method.

    Args:
        text: Text to be parsed (passed through to the superclass).
        sn: Schema node from which the path starts.
    """
    super().__init__(text)
    self.schema_node = sn
Extend the superclass method. Args: sn: Schema node from which the path starts.
juraj-google-style
def plot_chmap(cube, kidid, ax=None, **kwargs):
    """Plot an intensity map for a single KID channel.

    Args:
        cube (xarray.DataArray): Cube which the spectrum information is
            included in.
        kidid (int): Kidid selecting the channel to plot.
        ax (matplotlib.axes): Axis the figure is plotted on; defaults to
            the current axis.
        kwargs (optional): Plot options passed to ax.pcolormesh().

    Returns:
        The QuadMesh created by ax.pcolormesh().

    Raises:
        KeyError: If the kidid does not exist in the cube.
    """
    if ax is None:
        ax = plt.gca()
    index = np.where(cube.kidid == kidid)[0]
    if len(index) == 0:
        raise KeyError('Such a kidid does not exist.')
    index = int(index)
    # Fixed: the original had invalid slice syntax `cube[(:, :, index)]`
    # and an unterminated title string literal.
    im = ax.pcolormesh(cube.x, cube.y, cube[:, :, index].T, **kwargs)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title('intensity map ch #{}'.format(kidid))
    return im
Plot an intensity map. Args: cube (xarray.DataArray): Cube in which the spectrum information is included. kidid (int): Kidid. ax (matplotlib.axes): Axis the figure is plotted on. kwargs (optional): Plot options passed to ax.pcolormesh().
codesearchnet
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
    """Add ops to apply sparse gradients to `handle`, with repeated indices.

    Duplicate indices and their gradients are summed first, so the default
    `_resource_apply_sparse` only sees unique indices. Optimizers that
    handle duplicates natively may override this to avoid the summation.

    Args:
        grad: a `Tensor` representing the gradient for the affected indices.
        handle: a `Tensor` of dtype `resource` which points to the variable
            to be updated.
        indices: a `Tensor` of integral type representing the indices for
            which the gradient is nonzero. Indices may be repeated.

    Returns:
        An `Operation` which updates the value of the variable.
    """
    summed_grad, unique_indices = _deduplicate_indexed_slices(values=grad, indices=indices)
    return self._resource_apply_sparse(summed_grad, handle, unique_indices)
Add ops to apply sparse gradients to `handle`, with repeated indices. Optimizers which override this method must deal with repeated indices. See the docstring of `_apply_sparse_duplicate_indices` for details. By default the correct behavior, to sum non-unique indices and their associated gradients, is enforced by first pre-processing `grad` and `indices` and passing them on to `_resource_apply_sparse`. Optimizers which deal correctly with duplicate indices may instead override this method to avoid the overhead of summing. Args: grad: a `Tensor` representing the gradient for the affected indices. handle: a `Tensor` of dtype `resource` which points to the variable to be updated. indices: a `Tensor` of integral type representing the indices for which the gradient is nonzero. Indices may be repeated. Returns: An `Operation` which updates the value of the variable.
github-repos
def calculate_embedding(self, batch_image_bytes):
    """Get the Inception embeddings for a batch of JPEG images.

    Args:
        batch_image_bytes: Raw JPEG bytes, as if returned from
            [ff.read() for ff in file_list].

    Returns:
        The Inception embeddings (bottleneck layer output).
    """
    return self.tf_session.run(
        self.embedding, feed_dict={self.input_jpeg: batch_image_bytes})
Get the embeddings for a given JPEG image. Args: batch_image_bytes: As if returned from [ff.read() for ff in file_list]. Returns: The Inception embeddings (bottleneck layer output)
juraj-google-style
def _convert_concrete_functions_to_saved_model(self, output_dir):
    """Save the registered concrete functions in the SavedModel format.

    Best-effort: returns (None, None, None) whenever lowering through
    SavedModel is disabled, unsupported for the tracked object, or fails.

    Args:
        output_dir: The output directory to save the SavedModel.

    Returns:
        Tuple of (frozen GraphDef, input tensors, output tensors), or
        (None, None, None) when the SavedModel path is not taken.

    Raises:
        ValueError: If no ConcreteFunction was specified.
    """
    if len(self._funcs) == 0:
        raise ValueError('No ConcreteFunction is specified.')
    if not self.experimental_lower_to_saved_model:
        return (None, None, None)
    # Plain (concrete) functions without a trackable root cannot be saved.
    if not self._trackable_obj or isinstance(self._trackable_obj, (_function.ConcreteFunction, _def_function.Function)):
        return (None, None, None)
    signatures = {}
    signature_keys = []
    try:
        if len(self._funcs) == 1:
            # A single function becomes the default serving signature.
            signatures[_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = self._funcs[0]
            signature_keys = [_signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        else:
            for func in self._funcs:
                signatures[func.graph.name] = func
                signature_keys.append(func.graph.name)
        _save.save(self._trackable_obj, output_dir, signatures=signatures, options=_save_options.SaveOptions(save_debug_info=True))
    except Exception:
        # Saving is best-effort; fall back to the non-SavedModel path.
        return (None, None, None)
    self.saved_model_dir = output_dir
    self._saved_model_tags = set([_tag_constants.SERVING])
    self._saved_model_exported_names = signature_keys
    self._parse_saved_model_args(always_enable_saved_model_import=True)
    if self.saved_model_dir:
        graph_def, input_tensors, output_tensors = self._load_saved_model(self.saved_model_dir, self._saved_model_tags)
        self._trackable_obj = _load(self.saved_model_dir, self._saved_model_tags)
        return (graph_def, input_tensors, output_tensors)
    return (None, None, None)
Save concrete functions to the SavedModel format. Args: output_dir: The output directory to save the SavedModel. Returns: graph_def: The frozen GraphDef. input_tensors: List of input tensors. output_tensors: List of output tensors.
github-repos
def series_with_permutation(self, other):
    """Compute the series product with another channel permutation circuit.

    Args:
        other (CPermutation): The permutation composed with this one.

    Returns:
        Circuit: The composite permutation circuit (could also be the
        identity circuit for n channels).
    """
    composed = tuple(self.permutation[channel] for channel in other.permutation)
    return CPermutation.create(composed)
Compute the series product with another channel permutation circuit Args: other (CPermutation): Returns: Circuit: The composite permutation circuit (could also be the identity circuit for n channels)
codesearchnet
def sheets_clear(config, auth, sheet_url_or_name, sheet_tab, sheet_range):
    """Clear a sheet tab in the specified range.

    Args:
        config: see starthinker/util/configuration.py
        auth: user or service credentials to use.
        sheet_url_or_name: one of: URL, document title, or id.
        sheet_tab: name of the tab to clear.
        sheet_range: A1 notation, or blank for the whole tab.

    Raises:
        ValueError: If the sheet does not exist.

    No Return
    """
    if config.verbose:
        print('SHEETS CLEAR', sheet_url_or_name, sheet_tab, sheet_range)
    sheet_id = sheets_id(config, auth, sheet_url_or_name)
    if sheet_id:
        API_Sheets(config, auth).spreadsheets().values().clear(
            spreadsheetId=sheet_id,
            range=sheets_tab_range(sheet_tab, sheet_range),
            body={}
        ).execute()
    else:
        # Bug fix: the original format string had two placeholders for three
        # arguments, which raised a TypeError instead of the intended error.
        raise ValueError('Sheet does not exist for %s %s: %s' % (config, auth, sheet_url_or_name))
Clear a sheet in the specified range. Args: config - see starthinker/util/configuration.py auth - user or service url_or_name - one of: URL, document title, or id sheet_tab - name of tab to get id for sheet_range - A1 notation or blank if whole sheet No Return
github-repos
def site_specific_nn_occupation(self):
    """Count occupied nearest-neighbour sites, classified by site type.

    Args:
        None

    Returns:
        (Dict(Str:Int)): Dictionary of nearest-neighbour occupied site
        numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }.
    """
    # Seed every label present among the neighbours with a zero count.
    occupation = {label: 0 for label in {site.label for site in self.p_neighbours}}
    for neighbour in self.p_neighbours:
        if neighbour.is_occupied:
            occupation[neighbour.label] += 1
    return occupation
Returns the number of occupied nearest neighbour sites, classified by site type. Args: None Returns: (Dict(Str:Int)): Dictionary of nearest-neighbour occupied site numbers, classified by site label, e.g. { 'A' : 2, 'B' : 1 }.
juraj-google-style
def _parse_method_block_line(self, instrumentation_block, line):
    """Parses one line of an instrumentation method block.

    Args:
        instrumentation_block: _InstrumentationBlock, the current
            instrumentation method block.
        line: string, the raw instrumentation output line to parse.

    Returns:
        The next instrumentation block, which should be used to continue
        parsing instrumentation output.
    """
    if line.startswith(_InstrumentationStructurePrefixes.STATUS):
        instrumentation_block.set_key(_InstrumentationStructurePrefixes.STATUS, line)
        return instrumentation_block
    elif line.startswith(_InstrumentationStructurePrefixes.STATUS_CODE):
        # A status code terminates the current method block.
        instrumentation_block.set_status_code(line)
        return self._transition_instrumentation_block(instrumentation_block)
    elif line.startswith(_InstrumentationStructurePrefixes.RESULT):
        # A result line closes this block and starts the RESULT-state block.
        instrumentation_block.set_key(_InstrumentationStructurePrefixes.RESULT, line)
        return self._parse_result_line(self._transition_instrumentation_block(instrumentation_block, new_state=_InstrumentationBlockStates.RESULT), line)
    else:
        # Continuation of the previous key's value.
        instrumentation_block.add_value(line)
        return instrumentation_block
Parses the instrumentation method block's line. Args: instrumentation_block: _InstrumentationBlock, the current instrumentation method block. line: string, the raw instrumentation output line to parse. Returns: The next instrumentation block, which should be used to continue parsing instrumentation output.
github-repos
def pie(self, key='wall_time', minfract=0.05, ax=None, **kwargs):
    """Plot a pie chart for this timer.

    Args:
        key: Keyword used to extract data from the timer.
        minfract: Don't show sections whose relative weight is less than
            minfract.
        ax: matplotlib Axes or None if a new figure should be created.

    Returns:
        matplotlib figure.
    """
    (ax, fig, plt) = get_ax_fig_plt(ax=ax)
    ax.axis('equal')  # keep the pie circular
    (labels, vals) = self.names_and_values(key, minfract=minfract)
    ax.pie(vals, explode=None, labels=labels, autopct='%1.1f%%', shadow=True)
    return fig
Plot pie chart for this timer. Args: key: Keyword used to extract data from the timer. minfract: Don't show sections whose relative weight is less that minfract. ax: matplotlib :class:`Axes` or None if a new figure should be created. Returns: `matplotlib` figure
codesearchnet
def _process_event(self, event):
    """Process a received filesystem event.

    Only applies normalization for events referencing a new or changed
    file, and only if the file is not itself the result of a previous
    normalization (BATCH_EXTENSION output).

    Args:
        event: Event to process.
    """
    if (not event.is_directory and not event.src_path.endswith(BATCH_EXTENSION)):
        self._logger.info('Detected file change: %s', event.src_path)
        self._batch.process_file(event.src_path)
Process received events. Process events received, applying normalization for those events referencing a new or changed file and only if it's not the result of a previous normalization. Args: event: Event to process.
juraj-google-style
def get_nac_eigendisplacements_along_dir(self, direction):
    """Return the nac_eigendisplacements for the given direction.

    The direction need not be a versor; it is normalised before comparison.

    Args:
        direction: the direction as a list of 3 elements.

    Returns:
        The eigendisplacements as a numpy array of complex numbers with
        shape (3*len(structure), len(structure), 3). None if the direction
        is not present or nac_eigendisplacements has not been calculated.
    """
    norm = np.linalg.norm(direction)
    versor = [component / norm for component in direction]
    for stored_direction, eigendisplacements in self.nac_eigendisplacements:
        if np.allclose(versor, stored_direction):
            return eigendisplacements
    return None
Returns the nac_eigendisplacements for the given direction (not necessarily a versor). None if the direction is not present or nac_eigendisplacements has not been calculated. Args: direction: the direction as a list of 3 elements Returns: the eigendisplacements as a numpy array of complex numbers with shape (3*len(structure), len(structure), 3). None if not found.
juraj-google-style
def csv_to_num_matrix(csv_file_path):
    """Load a CSV file consisting only of numbers into a matrix of floats.

    Args:
        csv_file_path: Full path to a valid CSV file
            (e.g. c:/ladybug/test.csv).

    Returns:
        A list of lists of floats, one inner list per CSV row.
    """
    with open(csv_file_path) as csv_data_file:
        return [[float(cell) for cell in line.split(',')] for line in csv_data_file]
Load a CSV file consisting only of numbers into a Python matrix of floats. Args: csv_file_path: Full path to a valid CSV file (e.g. c:/ladybug/test.csv)
juraj-google-style
def get_storage(request):
    """Gets a Credentials storage object for the current request.

    Uses the storage model configured in the oauth2 settings when present,
    otherwise falls back to session-backed dictionary storage.

    Args:
        request: Reference to the current request object.

    Returns:
        An :class:`oauth2.client.Storage` object.
    """
    storage_model = oauth2_settings.storage_model
    user_property = oauth2_settings.storage_model_user_property
    credentials_property = oauth2_settings.storage_model_credentials_property
    if storage_model:
        # Import the configured model class from its dotted path.
        (module_name, class_name) = storage_model.rsplit('.', 1)
        module = importlib.import_module(module_name)
        storage_model_class = getattr(module, class_name)
        return storage.DjangoORMStorage(storage_model_class, user_property, request.user, credentials_property)
    else:
        return dictionary_storage.DictionaryStorage(request.session, key=_CREDENTIALS_KEY)
Gets a Credentials storage object provided by the Django OAuth2 Helper object. Args: request: Reference to the current request object. Returns: An :class:`oauth2.client.Storage` object.
codesearchnet
def bitwise_left_shift(x, y):
    """Shift the bits of an integer to the left.

    Bits are shifted to the left by appending `y` 0s at the right of `x`,
    which is equivalent to multiplying `x` by `2**y`.

    Args:
        x: Input integer tensor.
        y: Input integer tensor.

    Returns:
        Result tensor.
    """
    if any_symbolic_tensors((x, y)):
        # Defer to the symbolic op when either input is symbolic.
        return BitwiseLeftShift().symbolic_call(x, y)
    return backend.numpy.bitwise_left_shift(x, y)
Shift the bits of an integer to the left. Bits are shifted to the left by appending `y` 0s at the right of `x`. Since the internal representation of numbers is in binary format, this operation is equivalent to multiplying `x` by `2**y`. Args: x: Input integer tensor. y: Input integer tensor. Returns: Result tensor.
github-repos
def showAddColumnDialog(self, triggered):
    """Display the dialog to add a column to the model.

    This method is also a slot.

    Args:
        triggered (bool): If the corresponding button was activated, the
            dialog will be created and shown.
    """
    if triggered:
        dialog = AddAttributesDialog(self)
        dialog.accepted.connect(self.addColumn)
        # Re-sync the toolbar button state if the user cancels.
        dialog.rejected.connect(self.uncheckButton)
        dialog.show()
Display the dialog to add a column to the model. This method is also a slot. Args: triggered (bool): If the corresponding button was activated, the dialog will be created and shown.
juraj-google-style
async def populate_jsone_context(chain, parent_link, decision_link, tasks_for):
    """Populate the json-e context to rebuild ``parent_link``'s task definition.

    This defines the context that `.taskcluster.yml` expects to be rendered
    with, branching on the cot_product and the tasks_for reason.

    Args:
        chain (ChainOfTrust): the chain of trust to add to.
        parent_link (LinkOfTrust): the parent link to test.
        decision_link (LinkOfTrust): the parent link's decision task link.
        tasks_for (str): the reason the parent link was created
            (cron, hg-push, action, github-*).

    Raises:
        CoTError, KeyError, ValueError: on failure.

    Returns:
        dict: the json-e context.
    """
    task_ids = {'default': parent_link.task_id, 'decision': decision_link.task_id}
    source_url = get_source_url(decision_link)
    project = get_and_check_project(chain.context.config['valid_vcs_rules'], source_url)
    log.debug('task_ids: {}'.format(task_ids))
    # Base context shared by all products/reasons; as_slugid resolves the
    # known task ids used inside .taskcluster.yml.
    jsone_context = {'now': parent_link.task['created'], 'as_slugid': (lambda x: task_ids.get(x, task_ids['default'])), 'tasks_for': tasks_for, 'repository': {'url': get_repo(decision_link.task, decision_link.context.config['source_env_prefix']), 'project': project}, 'ownTaskId': parent_link.task_id, 'taskId': None}
    if (chain.context.config['cot_product'] in ('mobile', 'application-services')):
        # Git-based products: context depends on the GitHub event type.
        if (tasks_for == 'github-release'):
            jsone_context.update((await _get_additional_github_releases_jsone_context(decision_link)))
        elif (tasks_for == 'cron'):
            jsone_context.update(_get_additional_git_cron_jsone_context(decision_link))
        elif (tasks_for == 'github-pull-request'):
            jsone_context.update((await _get_additional_github_pull_request_jsone_context(decision_link)))
        elif (tasks_for == 'github-push'):
            jsone_context.update((await _get_additional_github_push_jsone_context(decision_link)))
        else:
            raise CoTError('Unknown tasks_for "{}" for cot_product "mobile"!'.format(tasks_for))
    else:
        # Mercurial-based products additionally need the repo's SCM level.
        jsone_context['repository']['level'] = (await get_scm_level(chain.context, project))
        if (tasks_for == 'action'):
            jsone_context.update((await _get_additional_hg_action_jsone_context(parent_link, decision_link)))
        elif (tasks_for == 'hg-push'):
            jsone_context.update((await _get_additional_hg_push_jsone_context(parent_link, decision_link)))
        elif (tasks_for == 'cron'):
            jsone_context.update((await _get_additional_hg_cron_jsone_context(parent_link, decision_link)))
        else:
            raise CoTError('Unknown tasks_for {}!'.format(tasks_for))
    log.debug('{} json-e context:'.format(parent_link.name))
    log.debug(pprint.pformat(jsone_context))
    return jsone_context
Populate the json-e context to rebuild ``parent_link``'s task definition. This defines the context that `.taskcluster.yml` expects to be rendered with. See comments at the top of that file for details. Args: chain (ChainOfTrust): the chain of trust to add to. parent_link (LinkOfTrust): the parent link to test. decision_link (LinkOfTrust): the parent link's decision task link. tasks_for (str): the reason the parent link was created (cron, hg-push, action) Raises: CoTError, KeyError, ValueError: on failure. Returns: dict: the json-e context.
codesearchnet
def add_state_sensors(self, agent_name, sensors):
    """Adds a sensor (or sensors) to a particular agent.

    This only works if the world you are running also includes that
    particular sensor on the agent.

    Args:
        agent_name (str): The name of the agent to add the sensor to.
        sensors (HolodeckSensor or list of HolodeckSensor): Sensors to add
            to the agent.
    """
    if isinstance(sensors, list):
        # Recurse per-sensor so a single code path does the allocation.
        for sensor in sensors:
            self.add_state_sensors(agent_name, sensor)
    else:
        if (agent_name not in self._sensor_map):
            self._sensor_map[agent_name] = dict()
        # Allocate a shared-memory buffer keyed by agent and sensor name.
        self._sensor_map[agent_name][sensors] = self._client.malloc(((agent_name + '_') + Sensors.name(sensors)), Sensors.shape(sensors), Sensors.dtype(sensors))
Adds a sensor to a particular agent. This only works if the world you are running also includes that particular sensor on the agent. Args: agent_name (str): The name of the agent to add the sensor to. sensors (:obj:`HolodeckSensor` or list of :obj:`HolodeckSensor`): Sensors to add to the agent. Should be objects that inherit from :obj:`HolodeckSensor`.
codesearchnet
def __init__(self, server):
    """Initialize WorkerThread instance.

    Args:
        server (cheroot.server.HTTPServer): web server object receiving
            this request.
    """
    self.ready = False
    self.server = server
    # Totals accumulated from completed connections; while a connection is
    # being served its live counters are added on top (see self.stats).
    self.requests_seen = 0
    self.bytes_read = 0
    self.bytes_written = 0
    self.start_time = None
    self.work_time = 0
    # Each stat is a lambda so values are computed lazily on read; when
    # start_time is None the thread is idle and only totals are reported.
    self.stats = {
        'Requests': lambda s: self.requests_seen + (
            self.start_time is None and trueyzero or self.conn.requests_seen
        ),
        'Bytes Read': lambda s: self.bytes_read + (
            self.start_time is None and trueyzero or self.conn.rfile.bytes_read
        ),
        'Bytes Written': lambda s: self.bytes_written + (
            self.start_time is None and trueyzero or self.conn.wfile.bytes_written
        ),
        'Work Time': lambda s: self.work_time + (
            self.start_time is None and trueyzero or time.time() - self.start_time
        ),
        'Read Throughput': lambda s: s['Bytes Read'](s) / (
            s['Work Time'](s) or 1e-6
        ),
        'Write Throughput': lambda s: s['Bytes Written'](s) / (
            s['Work Time'](s) or 1e-6
        ),
    }
    threading.Thread.__init__(self)
Initialize WorkerThread instance. Args: server (cheroot.server.HTTPServer): web server object receiving this request
juraj-google-style
def wait_for_registration(self, processor_type):
    """Wait for a particular processor type to register, or for cancellation.

    Cancellation aborts only this particular wait, not all waiters.

    Args:
        processor_type (ProcessorType): The family and version of the
            transaction processor.

    Returns:
        None

    Raises:
        WaitCancelledException: If the wait was cancelled.
    """
    with self._condition:
        # Wake on registration of the type, or on cancellation.
        self._condition.wait_for((lambda : ((processor_type in self) or self._cancelled_event.is_set())))
        if self._cancelled_event.is_set():
            raise WaitCancelledException()
Waits for a particular processor type to register or until is_cancelled is True. is_cancelled cannot be part of this class since we aren't cancelling all waiting for a processor_type, but just this particular wait. Args: processor_type (ProcessorType): The family, and version of the transaction processor. Returns: None
codesearchnet
def set_member_roles(self, guild_id: int, member_id: int, roles: List[int]):
    """Overwrite a guild member's roles with exactly the given role ids.

    The passed list replaces *all* of the member's current roles, so it must
    be the complete desired set — not just additions or removals. See the
    ``add_member_roles`` and ``remove_member_roles`` methods for incremental
    changes.

    Args:
        guild_id: Snowflake id of the guild.
        member_id: Snowflake id of the member.
        roles: Complete list of role snowflake ids to set.
    """
    endpoint = f'guilds/{guild_id}/members/{member_id}'
    payload = {'roles': roles}
    # Discord returns 204 No Content on a successful member PATCH.
    self._query(endpoint, 'PATCH', payload, expected_status=204)
Set the member's roles This method takes a list of **role ids** that you want the user to have. This method will **overwrite** all of the user's current roles with the roles in the passed list of roles. When calling this method, be sure that the list of roles that you're setting for this user is complete, not just the roles that you want to add or remove. For assistance in just adding or just removing roles, see the ``add_member_roles`` and ``remove_member_roles`` methods. Args: guild_id: snowflake id of the guild member_id: snowflake id of the member roles: list of snowflake ids of roles to set
codesearchnet
def _finalize_job(cls, mapreduce_spec, mapreduce_state):
    """Finalize job execution.

    Invokes the done callback and saves the mapreduce state in a
    transaction, then schedules necessary clean ups. Idempotent: a stale
    (no longer active) job is detected inside the transaction and dropped.

    Args:
      mapreduce_spec: an instance of MapreduceSpec
      mapreduce_state: an instance of MapreduceState
    """
    config = util.create_datastore_write_config(mapreduce_spec)
    queue_name = util.get_queue_name(mapreduce_spec.params.get(model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE))
    done_callback = mapreduce_spec.params.get(model.MapreduceSpec.PARAM_DONE_CALLBACK)
    done_task = None
    if done_callback:
        # Build (but do not yet enqueue) the user's completion-notification task.
        done_task = taskqueue.Task(url=done_callback, headers=util._get_task_headers(mapreduce_spec.mapreduce_id, util.CALLBACK_MR_ID_TASK_HEADER), method=mapreduce_spec.params.get('done_callback_method', 'POST'))

    @db.transactional(retries=5)
    def _put_state():
        'Persist final state and enqueue the done task atomically.'
        # Re-read inside the transaction to guard against spurious task
        # re-execution after the job already finished.
        fresh_state = model.MapreduceState.get_by_job_id(mapreduce_spec.mapreduce_id)
        if (not fresh_state.active):
            logging.warning('Job %s is not active. Looks like spurious task execution. Dropping task.', mapreduce_spec.mapreduce_id)
            return
        mapreduce_state.put(config=config)
        # The enqueue hook may claim the task; only add it ourselves if not.
        if (done_task and (not _run_task_hook(mapreduce_spec.get_hooks(), 'enqueue_done_task', done_task, queue_name))):
            done_task.add(queue_name, transactional=True)
    _put_state()
    logging.info("Final result for job '%s' is '%s'", mapreduce_spec.mapreduce_id, mapreduce_state.result_status)
    cls._clean_up_mr(mapreduce_spec)
Finalize job execution. Invokes done callback and save mapreduce state in a transaction, and schedule necessary clean ups. This method is idempotent. Args: mapreduce_spec: an instance of MapreduceSpec mapreduce_state: an instance of MapreduceState
codesearchnet
def absolute_name(self, depth=0):
    """Return the absolute dotted name of this node.

    Joins package names from the root down to this node, truncated to the
    requested depth.

    Args:
        depth (int): Maximum depth to keep; values < 1 mean "full depth".

    Returns:
        str: Absolute name of the node (up to the given depth).
    """
    node = self
    node_depth = self.depth
    if depth < 1:
        depth = node_depth
    # Climb towards the root until we are within the requested depth.
    while node_depth > depth and node.package is not None:
        node = node.package
        node_depth -= 1
    # Collect names leaf-to-root, then flip to root-to-leaf order.
    parts = []
    while node is not None:
        parts.append(node.name)
        node = node.package
    parts.reverse()
    return '.'.join(parts)
Return the absolute name of the node. Concatenate names from root to self within depth. Args: depth (int): maximum depth to go to. Returns: str: absolute name of the node (until given depth is reached).
codesearchnet
def build(cls: Type[AN], node: ast.stmt) -> List[AN]:
    """Check whether ``node`` is an act node; recurse into context managers.

    Recognised act forms, in priority order: a ``result = ...`` assignment,
    a ``pytest.raises`` block, a ``unittest`` ``assertRaises`` block, and a
    line explicitly marked with the ``# act`` comment. ``with`` blocks are
    searched recursively for act nodes in their bodies.

    Args:
        node: Statement to inspect.

    Returns:
        List of all act nodes found (possibly empty).
    """
    if node_is_result_assignment(node):
        return [cls(node, ActNodeType.result_assignment)]
    if node_is_pytest_raises(node):
        return [cls(node, ActNodeType.pytest_raises)]
    if node_is_unittest_raises(node):
        return [cls(node, ActNodeType.unittest_raises)]
    token = node.first_token
    # Fix: the marker-string literal was truncated in the original source —
    # the explicit act marker is a trailing "# act" comment on the line.
    if token.line.strip().endswith('  # act'):
        return [cls(node, ActNodeType.marked_act)]
    if isinstance(node, ast.With):
        # Context managers can wrap the act: search their bodies.
        return cls.build_body(node.body)
    return []
Starting at this ``node``, check if it's an act node. If it's a context manager, recurse into child nodes. Returns: List of all act nodes found.
codesearchnet
def _set_read_only_resource_inputs_attr(op: ops.Operation, branch_graphs):
    """Sets the list of resource inputs which are read-only.

    An input index is recorded only if *every* branch graph treats that
    resource input as read-only; the result is consumed by
    AutomaticControlDependencies.

    Args:
      op: The Operation (e.g. a cond/case op) whose attr to set.
      branch_graphs: List of branch FuncGraphs.
    """
    # Start from "all inputs read-only" and intersect with each branch.
    read_only_indices = set(range(len(op.inputs)))
    for branch_graph in branch_graphs:
        # Once empty, no further branch can add indices back — stop early.
        if not read_only_indices:
            break
        branch_read_only_indices = acd.get_read_only_resource_input_indices_graph(branch_graph)
        read_only_indices = read_only_indices.intersection(branch_read_only_indices)
    ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, sorted(read_only_indices))
Sets the list of resource inputs which are read-only. This is used by AutomaticControlDependencies. Args: op: While Operation. branch_graphs: List of branch FuncGraphs.
github-repos
def create_task(*args, **kwargs) -> asyncio.Task:
    """Create a task on the context TaskGroup, if one is available.

    Falls back to ``asyncio.create_task`` when no context TaskGroup exists;
    in that case the task is kept in a module-level set so it is not
    garbage-collected before completing.

    Args:
        *args: Positional arguments for task creation.
        **kwargs: Keyword arguments for task creation.

    Returns:
        The created asyncio task.
    """
    group = task_group()
    if group is not None:
        return group.create_task(*args, **kwargs)
    task = asyncio.create_task(*args, **kwargs)
    # Hold a strong reference until the task finishes, then drop it.
    _without_context_background_tasks.add(task)
    task.add_done_callback(_without_context_background_tasks.discard)
    return task
Creates a task that uses the context TaskGroup. If no context is available then `asyncio.create_task` will be used. Args: *args: Positional arguments to pass to `asyncio.create_task`. **kwargs: Keyword arguments to pass to `asyncio.create_task`. Returns: An asyncio task.
github-repos
def divide_to_patches(image: Union[np.array, 'torch.Tensor'], patch_size: int) -> list[Union[np.array, 'torch.Tensor']]:
    """Divide a channels-first image into square patches, row-major order.

    Args:
        image (`Union[np.array, "torch.Tensor"]`): The input image (C, H, W).
        patch_size (`int`): Side length of each patch.

    Returns:
        list: Patches of `image`; edge patches may be smaller than
        `patch_size` when the dimensions are not evenly divisible.
    """
    height, width = get_image_size(image, channel_dim=ChannelDimension.FIRST)
    # Walk top-left corners in row-major order and slice each patch out.
    return [
        image[:, top:top + patch_size, left:left + patch_size]
        for top in range(0, height, patch_size)
        for left in range(0, width, patch_size)
    ]
Divides an image into patches of a specified size. Args: image (`Union[np.array, "torch.Tensor"]`): The input image. patch_size (`int`): The size of each patch. Returns: list: A list of Union[np.array, "torch.Tensor"] representing the patches.
github-repos
def _AddProvidesEdges(self, rdf_artifact): for attribute in rdf_artifact.provides: self._AddEdge(rdf_artifact.name, attribute)
Add an edge for every attribute the given artifact provides. This method adds a directed edge from the artifact node to every attribute this artifact provides. Args: rdf_artifact: The artifact object.
codesearchnet
def cos(cls, x: 'TensorFluent') -> 'TensorFluent':
    """Return a TensorFluent wrapping the elementwise cosine.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent for ``cos(x)`` with float32 dtype.
    """
    op, dtype = tf.cos, tf.float32
    return cls._unary_op(x, op, dtype)
Returns a TensorFluent for the cos function. Args: x: The input fluent. Returns: A TensorFluent wrapping the cos function.
codesearchnet
def find_sorted_task_dependencies(task, task_name, task_id):
    """Find the taskIds of the chain-of-trust dependencies of a task.

    Dependencies come from ``extra.chainOfTrust.inputs`` and
    ``payload.upstreamArtifacts``; they are sorted by name then taskId, and
    the parent (or decision) task is always prepended.

    Args:
        task (dict): The task definition to inspect.
        task_name (str): Name of the task, for logging and naming children.
        task_id (str): The taskId of the task.

    Returns:
        list: Tuples pairing dependent task ``name`` with ``taskId``.
    """
    log.info("find_sorted_task_dependencies {} {}".format(task_name, task_id))
    cot_inputs = task['extra'].get('chainOfTrust', {}).get('inputs', {})
    dependencies = [
        _craft_dependency_tuple(task_name, task_type, dep_task_id)
        for task_type, dep_task_id in cot_inputs.items()
    ]
    for artifact_dict in task.get('payload', {}).get('upstreamArtifacts', []):
        dependencies.append(
            _craft_dependency_tuple(task_name, artifact_dict['taskType'], artifact_dict['taskId'])
        )
    dependencies = _sort_dependencies_by_name_then_task_id(dependencies)
    # The parent (or, failing that, decision) task always comes first.
    parent_task_id = get_parent_task_id(task) or get_decision_task_id(task)
    dependencies.insert(0, _craft_dependency_tuple(task_name, 'parent', parent_task_id))
    log.info('found dependencies: {}'.format(dependencies))
    return dependencies
Find the taskIds of the chain of trust dependencies of a given task. Args: task (dict): the task definition to inspect. task_name (str): the name of the task, for logging and naming children. task_id (str): the taskId of the task. Returns: list: tuples associating dependent task ``name`` to dependent task ``taskId``.
juraj-google-style
def resolve_class(classref):
    """Resolve the input class reference to a Python class.

    ``None`` and classes pass through unchanged; a dotted-path string
    (e.g. "foo.bar.MyClass") is imported and returned.

    Args:
        classref: A fully-qualified Python path to a class, a class, or None.

    Returns:
        The resolved class (or None).

    Raises:
        ValueError: If `classref` is neither None, a class, nor a string.
    """
    if classref is None:
        return None
    if isinstance(classref, six.class_types):
        return classref
    if isinstance(classref, six.string_types):
        return import_class(classref)
    raise ValueError("Unable to resolve class for '%s'" % classref)
Attempt to return a Python class for the input class reference. If `classref` is a class or None, return it. If `classref` is a python classpath (e.g., "foo.bar.MyClass") import the class and return it. Args: classref: A fully-qualified Python path to class, or a Python class. Returns: A class.
juraj-google-style
def handle_one_of(schema, field, validator, parent_schema):
    """Post-process a JSON schema for ``marshmallow.validate.OneOf``.

    Sets the JSON Schema ``enum`` property to the validator's allowed
    choices (and ``enumNames`` to their labels) when choices exist.

    Args:
        schema (dict): The generated JSON schema to post-process.
        field (fields.Field): The field that generated the schema.
        validator (marshmallow.validate.OneOf): The attached validator.
        parent_schema (marshmallow.Schema): Schema the field belongs to.

    Returns:
        dict: The (possibly augmented) JSON schema.
    """
    if not validator.choices:
        return schema
    schema['enum'] = list(validator.choices)
    schema['enumNames'] = list(validator.labels)
    return schema
Adds the validation logic for ``marshmallow.validate.OneOf`` by setting the JSONSchema `enum` property to the allowed choices in the validator. Args: schema (dict): The original JSON schema we generated. This is what we want to post-process. field (fields.Field): The field that generated the original schema and who this post-processor belongs to. validator (marshmallow.validate.OneOf): The validator attached to the passed in field. parent_schema (marshmallow.Schema): The Schema instance that the field belongs to. Returns: dict: A, possibly, new JSON Schema that has been post processed and altered.
codesearchnet
def get_card(self, card_id, **query_params):
    """Get a Card for a given card id.

    Args:
        card_id: Identifier of the card to fetch.
        **query_params: Extra query parameters. NOTE(review): currently
            ignored by this implementation — confirm whether they should be
            forwarded to ``fetch_json``.

    Returns:
        Card: The card with the given card_id.
    """
    uri_path = self.base_uri + '/cards/' + card_id
    card_json = self.fetch_json(uri_path=uri_path)
    return self.create_card(card_json)
Get a Card for a given card id. Returns a Card object. Returns: Card: The card with the given card_id
codesearchnet
def report_list(config, auth):
    """Lists all the DBM report configurations for the current credentials.

    Args:
      config: Project configuration passed through to the API client.
      auth: (string) Either user or service.

    Yields:
      JSON dict for each query returned by the DBM queries().list() call.
    """
    # iterate=True makes the API wrapper page through all results.
    for query in API_DBM(config, auth, iterate=True).queries().list().execute():
        yield query
Lists all the DBM report configurations for the current credentials. Args: * auth: (string) Either user or service. Returns: * Iterator of JSONs.
github-repos
def check_data_type(self):
    """Check that the transformer's type matches the column metadata type.

    Raises:
        ValueError: If the metadata type neither equals ``self.type`` nor is
            contained in it (``self.type`` may be a composite).
    """
    metadata_type = self.column_metadata.get('type')
    types_disagree = self.type != metadata_type and metadata_type not in self.type
    if types_disagree:
        raise ValueError("Types of transformer don't match")
Check the type of the transformer and column match. Args: column_metadata(dict): Metadata of the column. Raises a ValueError if the types don't match
juraj-google-style
def deserialize(self, stamp_token, serialized_proto):
    """Deserialize the input proto and reset the ensemble from it.

    Args:
      stamp_token: int64 scalar Tensor to denote the stamp of the resource.
      serialized_proto: string scalar Tensor of the serialized proto.

    Returns:
      Operation (for dependencies).
    """
    # Delegates to the generated op against this object's resource handle.
    return gen_boosted_trees_ops.boosted_trees_deserialize_ensemble(self.resource_handle, stamp_token, serialized_proto)
Deserialize the input proto and resets the ensemble from it. Args: stamp_token: int64 scalar Tensor to denote the stamp of the resource. serialized_proto: string scalar Tensor of the serialized proto. Returns: Operation (for dependencies).
github-repos
def from_pb(cls, pb):
    """Instantiate the object from a protocol buffer.

    Args:
        pb: Protocol buffer message to build the object from. A reference
            to it is saved on the object as ``_pb``.

    Returns:
        The instance built by ``cls._from_pb``.
    """
    instance = cls._from_pb(pb)
    # Keep the source protobuf accessible on the instance.
    instance._pb = pb
    return instance
Instantiate the object from a protocol buffer. Args: pb (protobuf) Save a reference to the protocol buffer on the object.
codesearchnet
def get_regularization_losses(scope=None):
    """Get the list of regularization losses.

    Args:
        scope: Optional scope name for filtering the losses to return.

    Returns:
        A list of regularization losses as Tensors.
    """
    collection_key = ops.GraphKeys.REGULARIZATION_LOSSES
    return ops.get_collection(collection_key, scope)
Gets the list of regularization losses. Args: scope: An optional scope name for filtering the losses to return. Returns: A list of regularization losses as Tensors.
github-repos
def view_page(name=None):
    """Serve a wiki page (bottle view).

    POST: write the submitted content to ``<name>.rst``, add it to the repo
    and commit, then fall through to rendering. GET: render the page's
    reStructuredText to HTML. With no name, serve ``index.rst`` if present,
    otherwise the meta index.

    Keyword Arguments:
        name (str): Name of the rest file (without the .rst extension);
            optional, see above for the fallback behavior.

    Returns:
        A bottle response object.
    """
    if request.method == 'POST':
        if name is None:
            if len(request.forms.filename) > 0:
                name = request.forms.filename
        if name is not None:
            filename = '{0}.rst'.format(name)
            # Fix: use a context manager so the handle is closed even if the
            # write raises (the original leaked the handle on error).
            with open(filename, 'w') as file_handle:
                file_handle.write(request.forms.content.encode('utf-8'))
            add_file_to_repo(filename)
            commit(filename)
    response.set_header('Cache-control', 'no-cache')
    response.set_header('Pragma', 'no-cache')
    if name is None:
        # Case-insensitive search for an index.rst in the current directory.
        index_files = glob.glob('./[Ii][Nn][Dd][Ee][Xx].rst')
        if len(index_files) == 0:
            return view_meta_index()
        name = index_files[0][2:-4]
    files = glob.glob('{0}.rst'.format(name))
    if len(files) > 0:
        # Fix: context manager here as well (read side).
        with open(files[0], 'r') as file_handle:
            html_body = publish_parts(
                file_handle.read(),
                writer=AttowikiWriter(),
                settings=None,
                settings_overrides=None)['html_body']
        history = commit_history('{0}.rst'.format(name))
        return template('page',
                        type='view',
                        name=name,
                        extended_name=None,
                        is_repo=check_repo(),
                        history=history,
                        gitref=None,
                        content=html_body)
    return static_file(name, '')
Serve a page name. .. note:: this is a bottle view * if the view is called with the POST method, write the new page content to the file, commit the modification and then display the html rendering of the restructured text file * if the view is called with the GET method, directly display the html rendering of the restructured text file Keyword Arguments: :name: (str) -- name of the rest file (without the .rst extension) OPTIONAL if no filename is given, first try to find a "index.rst" file in the directory and serve it. If not found, serve the meta page __index__ Returns: bottle response object
codesearchnet