| code | docstring | source |
|---|---|---|
def text(self, value):
    """Set the text value and record the edit time.

    Args:
        value (str): New text content.
    """
    self._text = value
    # Naive UTC timestamp, matching the convention of the other timestamps.
    self.timestamps.edited = datetime.datetime.utcnow()
    self.touch(True)
def is_outlier(df, item_id, segment_id, price):
    """Verify whether an item's price is an outlier compared to other
    occurrences of the same item within its segment.

    Args:
        df: DataFrame indexed by (segment_id, item_id) with 'mean' and
            'std' columns.
        item_id: idPlanilhaItens.
        segment_id: idSegmento.
        price: VlUnitarioAprovado.

    Returns:
        bool: True if the price is a gaussian outlier; False when the
        (segment, item) pair is unknown or the price is within range.
    """
    key = (segment_id, item_id)
    if key not in df.index:
        return False
    # Single row lookup instead of two separate .loc accesses.
    row = df.loc[key]
    return gaussian_outlier.is_outlier(
        x=price, mean=row['mean'], standard_deviation=row['std']
    )
def reindex(self, kdims=None, force=False):
    """Reorder key dimensions on a DynamicMap.

    Creates a new object with a reordered set of key dimensions.
    Dropping dimensions is not allowed on a DynamicMap.

    Args:
        kdims: Dimension(s) to reindex the mapping with; a single
            dimension may be given instead of a list. Defaults to none.
        force: Not applicable to a DynamicMap.

    Returns:
        Reindexed DynamicMap.

    Raises:
        ValueError: If the requested kdims would drop existing dimensions.
    """
    # Use None sentinel instead of a mutable-default-argument list.
    if kdims is None:
        kdims = []
    elif not isinstance(kdims, list):
        kdims = [kdims]
    kdims = [self.get_dimension(kd, strict=True) for kd in kdims]
    dropped = [kd for kd in self.kdims if kd not in kdims]
    if dropped:
        raise ValueError("DynamicMap does not allow dropping dimensions, "
                         "reindex may only be used to reorder dimensions.")
    return super(DynamicMap, self).reindex(kdims, force)
def _run_eager_benchmark(self, iterable, iters, warmup):
    """Benchmark the iterable in eager mode.

    Runs the iterable `iters` times, timing only the fetch of the first
    element from a fresh iterator in each run.

    Args:
        iterable: The tf op or tf.data Dataset to benchmark.
        iters: Number of times to repeat the timing.
        warmup: If true, performs an untimed warmup run first each iteration.

    Returns:
        A float, the median time over `iters` runs.

    Raises:
        RuntimeError: When executed in graph mode.
    """
    if not context.executing_eagerly():
        raise RuntimeError('Eager mode benchmarking is not supported in graph mode.')
    deltas = []
    for _ in range(iters):
        if warmup:
            warm_iter = iter(iterable)
            next(warm_iter)
        timed_iter = iter(iterable)
        begin = time.time()
        next(timed_iter)
        deltas.append(time.time() - begin)
    return np.median(deltas)
def key_validation_check(tweet_keys_list, superset_keys, minset_keys):
    """Validate the keys present in a Tweet.

    Args:
        tweet_keys_list (list): the keys present in a tweet.
        superset_keys (set): the set of all possible keys for a tweet.
        minset_keys (set): the set of minimal keys expected in a tweet.

    Returns:
        int: 0 if no errors.

    Raises:
        UnexpectedFormatError: On any mismatch of keys.
    """
    tweet_keys = set(tweet_keys_list)
    # Compute the missing keys directly instead of via an intersection check.
    missing_keys = minset_keys - tweet_keys
    if missing_keys:
        raise UnexpectedFormatError("keys ({}) missing from Tweet (Public API data is not supported)"
                                    .format(missing_keys))
    unexpected_keys = tweet_keys - superset_keys
    if unexpected_keys:
        raise UnexpectedFormatError("Unexpected keys ({}) are in this Tweet"
                                    .format(unexpected_keys))
    return 0
def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool=False):
    """Build a threshold-based matcher.

    Args:
        thresholds (`list[float]`):
            Sorted positive thresholds stratifying predictions into levels;
            extended internally with -inf/+inf sentinels.
        labels (`list[int]`):
            A value per level, each one of {-1, 0, 1} meaning
            {ignore, negative class, positive class}.
        allow_low_quality_matches (`bool`, *optional*, defaults to `False`):
            If `True`, produce additional matches for predictions whose best
            match quality falls below the highest threshold.
    """
    # Copy so the caller's list is not mutated by the sentinel insertion.
    thresholds = list(thresholds)
    if thresholds[0] < 0:
        raise ValueError('Thresholds should be positive')
    thresholds = [-float('inf')] + thresholds + [float('inf')]
    if any(low > high for low, high in zip(thresholds, thresholds[1:])):
        raise ValueError('Thresholds should be sorted.')
    if any(label not in (-1, 0, 1) for label in labels):
        raise ValueError('All labels should be either -1, 0 or 1')
    if len(labels) != len(thresholds) - 1:
        raise ValueError('Number of labels should be equal to number of thresholds - 1')
    self.thresholds = thresholds
    self.labels = labels
    self.allow_low_quality_matches = allow_low_quality_matches
def signature_type(self):
    """Return the signature type used in this MAR.

    NOTE(review): only the first signature determines the result — the loop
    returns on its first iteration.

    Returns:
        One of None, 'unknown', 'sha1', or 'sha384'.
    """
    if not self.mardata.signatures:
        return None
    for sig in self.mardata.signatures.sigs:
        if sig.algorithm_id == 1:
            return 'sha1'
        if sig.algorithm_id == 2:
            return 'sha384'
        return 'unknown'
def resize(self, image: np.ndarray, size: Dict[str, int], size_divisor: int=0, resample: PILImageResampling=PILImageResampling.BILINEAR, data_format=None, input_data_format: Optional[Union[str, ChannelDimension]]=None, **kwargs) -> np.ndarray:
    """Resize `image` to `size`.

    `size` may specify either an exact `(height, width)` or a
    `shortest_edge`/`longest_edge` pair; in the latter case the smaller
    edge of the image is matched to `shortest_edge`.

    Args:
        image (`np.ndarray`): Image to resize.
        size (`Dict[str, int]`): The requested output size.
        size_divisor (`int`, *optional*, defaults to 0):
            If given, the output size will be divisible by this number.
        resample (`PILImageResampling`, *optional*, defaults to `BILINEAR`):
            Resampling filter to use.
        data_format (`ChannelDimension` or `str`, *optional*):
            Channel dimension format of the output; defaults to the input's.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            Channel dimension format of the input; inferred if not given.
    """
    max_size = kwargs.pop('max_size', None)
    size = get_size_dict(size, max_size=max_size, default_to_square=False)
    if 'shortest_edge' in size and 'longest_edge' in size:
        target, max_size = (size['shortest_edge'], size['longest_edge'])
    elif 'height' in size and 'width' in size:
        target = (size['height'], size['width'])
        max_size = None
    else:
        raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got {size.keys()}.")
    output_size = get_mask2former_resize_output_image_size(image=image, size=target, max_size=max_size, size_divisor=size_divisor, default_to_square=False, input_data_format=input_data_format)
    return resize(image, size=output_size, resample=resample, data_format=data_format, input_data_format=input_data_format, **kwargs)
def GetAPFSVolumeByPathSpec(self, path_spec):
    """Retrieves an APFS volume for a path specification.

    Args:
        path_spec (PathSpec): path specification.

    Returns:
        pyfsapfs.volume: an APFS volume, or None if the path specification
        carries no volume index.
    """
    index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec)
    if index is None:
        return None
    return self._fsapfs_container.get_volume(index)
def transform(self, new_frame):
    """Express this orbit in another reference frame.

    Walks the chain of intermediate frames returned by `steps`, applying
    each direct rotation/offset, or the inverse of the reverse
    transformation when only that one is defined.

    Args:
        new_frame (str): Name of the target frame.

    Returns:
        numpy.ndarray: The orbit expressed in `new_frame`.

    Raises:
        NotImplementedError: When no transformation (direct or reverse) is
            defined between two consecutive frames of the chain.
    """
    orbit = self.orbit
    for _from, _to in self.__class__.steps(new_frame):
        from_obj = _from(self.date, orbit)
        direct = '_to_%s' % _to
        if hasattr(from_obj, direct):
            rotation, offset = getattr(from_obj, direct)()
        else:
            to_obj = _to(self.date, orbit)
            inverse = '_to_%s' % _from
            if not hasattr(to_obj, inverse):
                raise NotImplementedError('Unknown transformation {} to {}'.format(_from, _to))
            # Only the reverse transformation exists: invert it.
            rotation, offset = getattr(to_obj, inverse)()
            rotation = rotation.T
            offset = -offset
        if getattr(_from, '_rotation_before_translation', False):
            orbit = offset + rotation @ orbit
        else:
            orbit = rotation @ (offset + orbit)
    return orbit
def get_summed_cohp_by_label_list(self, label_list, divisor=1):
    """Return a COHP object holding the sum of the COHPs for the given
    labels, divided by `divisor`.

    Args:
        label_list: labels of the COHPs to include in the sum.
        divisor: float/int the summed COHP/ICOHP is divided by.

    Returns:
        Cohp: the summed (and divided) COHP.
    """
    first = self.get_cohp_by_label(label_list[0])
    summed_cohp = first.cohp.copy()
    summed_icohp = first.icohp.copy()
    for label in label_list[1:]:
        other = self.get_cohp_by_label(label)
        summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up], other.cohp[Spin.up]], axis=0)
        if Spin.down in summed_cohp:
            summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down], other.cohp[Spin.down]], axis=0)
        summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up], other.icohp[Spin.up]], axis=0)
        if Spin.down in summed_icohp:
            summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down], other.icohp[Spin.down]], axis=0)
    divided_cohp = {Spin.up: np.divide(summed_cohp[Spin.up], divisor)}
    divided_icohp = {Spin.up: np.divide(summed_icohp[Spin.up], divisor)}
    # NOTE(review): the down-spin ICOHP division is guarded by the COHP dict,
    # mirroring the original behavior.
    if Spin.down in summed_cohp:
        divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
        divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)
    return Cohp(efermi=first.efermi, energies=first.energies, cohp=divided_cohp,
                are_coops=first.are_coops, icohp=divided_icohp)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding, expected):
    """Verifies the output values of the quantized convolution function.

    Args:
        tensor_in_sizes: Input tensor dimensions in
            [batch, input_rows, input_cols, input_depth].
        filter_in_sizes: Filter tensor dimensions in
            [kernel_rows, kernel_cols, input_depth, output_depth].
        stride: Stride.
        padding: Padding type.
        expected: An array containing the expected operation outputs.
    """
    input_count = int(np.prod(tensor_in_sizes))
    filter_count = int(np.prod(filter_in_sizes))
    # Fill inputs with 1..N, wrapped into uint8.
    x1 = np.arange(1, input_count + 1).astype(np.uint8).reshape(tensor_in_sizes)
    x1_min, x1_max = (0.0, 255.0)
    x2 = np.arange(1, filter_count + 1).astype(np.uint8).reshape(filter_in_sizes)
    x2_min, x2_max = (0.0, 255.0)
    with self.cached_session(use_gpu=False):
        t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)
        t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)
        conv = nn_ops.quantized_conv2d(t1, t2, out_type=dtypes.qint32, strides=[1, stride, stride, 1], padding=padding, min_input=x1_min, max_input=x1_max, min_filter=x2_min, max_filter=x2_max)
        quantized_output, output_min, output_max = self.evaluate(conv)
        float_output = self._QuantizedOutputToFloat(quantized_output, output_min, output_max)
        self.assertArrayNear(expected, float_output.flatten(), 1.0)
        self.assertEqual(quantized_output.shape, conv[0].get_shape())
def __eq__(self, other: Any) -> bool:
    """Operator ==.

    NOTE: `MissingValue(value_spec)` and `utils.MissingValue` compare equal,
    but `MissingValue(value_spec1)` and `MissingValue(value_spec2)` do not —
    the relation is not transitive. In practice users always compare against
    `schema.MISSING_VALUE` (`utils.MissingValue`), so `__hash__` matches
    `utils.MissingValue`.

    Args:
        other: the value to compare against.

    Returns:
        True if the other value is a general MissingValue or a MissingValue
        with the same value spec.
    """
    if self is other:
        return True
    if not isinstance(other, MissingValue):
        return MISSING_VALUE == other
    return self._value_spec == other.value_spec
def get_classes(tensors):
    """Gets classes for a structure of tensors.

    Args:
        tensors: the tensor structure to get classes for.

    Returns:
        A structure matching `tensors`, containing `tf.sparse.SparseTensor`
        at sparse positions and `tf.Tensor` otherwise.
    """
    def _class_of(tensor):
        if isinstance(tensor, sparse_tensor.SparseTensor):
            return sparse_tensor.SparseTensor
        return tensor_lib.Tensor
    flat_classes = [_class_of(t) for t in nest.flatten(tensors)]
    return nest.pack_sequence_as(tensors, flat_classes)
def StatFS(self, path=None):
    """Call os.statvfs for a given path. OS X and Linux only.

    Note that a statvfs call for an unavailable network filesystem
    (e.g. NFS with no network) will block.

    Args:
        path: a Unicode string containing the path, or None to use
            the value in self.path.

    Returns:
        posix.statvfs_result object.

    Raises:
        RuntimeError: if called on Windows.
    """
    if platform.system() == 'Windows':
        raise RuntimeError('os.statvfs not available on Windows')
    # Falsy path (None or '') falls back to self.path.
    local_path = client_utils.CanonicalPathToLocalPath(path or self.path)
    return os.statvfs(local_path)
def __init__(self, input_dim: int, hidden_dim: int, output_dim: int, num_layers: int=3):
    """A classic Multi Layer Perceptron (MLP).

    Args:
        input_dim (`int`): The input dimensions.
        hidden_dim (`int`): The hidden dimensions.
        output_dim (`int`): The output dimensions.
        num_layers (int, *optional*, defaults to 3): The number of layers.
    """
    super().__init__()
    # dims[i] -> dims[i+1] gives the (in, out) pair for layer i.
    dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
    blocks = []
    for i in range(num_layers):
        activation = nn.ReLU() if i < num_layers - 1 else nn.Identity()
        blocks.append(PredictionBlock(dims[i], dims[i + 1], activation=activation))
    self.layers = nn.Sequential(*blocks)
def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """Read the data encoding the SignatureVerify request payload and
    decode it into its constituent parts.

    Args:
        input_stream (stream): A data stream containing encoded object
            data, supporting a read method; usually a BytearrayStream
            object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.

    Raises:
        ValueError: Raised if the data attribute is missing from the
            encoded payload.
    """
    super(SignatureVerifyRequestPayload, self).read(input_stream, kmip_version=kmip_version)
    local_stream = utils.BytearrayStream(input_stream.read(self.length))
    # (tag, attribute, factory) for each optional field, in encoding order.
    # Replaces eight copy-pasted read stanzas with one data-driven loop.
    optional_fields = [
        (enums.Tags.UNIQUE_IDENTIFIER, '_unique_identifier',
         lambda: primitives.TextString(tag=enums.Tags.UNIQUE_IDENTIFIER)),
        (enums.Tags.CRYPTOGRAPHIC_PARAMETERS, '_cryptographic_parameters',
         attributes.CryptographicParameters),
        (enums.Tags.DATA, '_data',
         lambda: primitives.ByteString(tag=enums.Tags.DATA)),
        (enums.Tags.DIGESTED_DATA, '_digested_data',
         lambda: primitives.ByteString(tag=enums.Tags.DIGESTED_DATA)),
        (enums.Tags.SIGNATURE_DATA, '_signature_data',
         lambda: primitives.ByteString(tag=enums.Tags.SIGNATURE_DATA)),
        (enums.Tags.CORRELATION_VALUE, '_correlation_value',
         lambda: primitives.ByteString(tag=enums.Tags.CORRELATION_VALUE)),
        (enums.Tags.INIT_INDICATOR, '_init_indicator',
         lambda: primitives.Boolean(tag=enums.Tags.INIT_INDICATOR)),
        (enums.Tags.FINAL_INDICATOR, '_final_indicator',
         lambda: primitives.Boolean(tag=enums.Tags.FINAL_INDICATOR)),
    ]
    for tag, attribute, factory in optional_fields:
        if self.is_tag_next(tag, local_stream):
            field = factory()
            field.read(local_stream, kmip_version=kmip_version)
            setattr(self, attribute, field)
    self.is_oversized(local_stream)
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    Reads `size` bytes, or all remaining data when no size is given.

    Args:
        size (Optional[int]): number of bytes to read; None reads all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the file-like object has not been opened.
        OSError: if the read failed.
    """
    if self._is_open:
        return self._fsapfs_file_entry.read(size=size)
    raise IOError('Not opened.')
def loads(s, single=False):
    """Deserialize MRX string representations.

    Args:
        s (str): a MRX string.
        single (bool): if `True`, only return the first Xmrs object.

    Returns:
        A generator of Xmrs objects (unless *single* is `True`, in which
        case a single Xmrs object).
    """
    corpus = etree.fromstring(s)
    if single:
        return _deserialize_mrs(next(corpus))
    return (_deserialize_mrs(mrs_elem) for mrs_elem in corpus)
def _safe_mean(losses, num_present):
    """Computes a safe mean of the losses.

    Args:
        losses: `Tensor` whose elements contain individual loss measurements.
        num_present: The number of measurable elements in `losses`.

    Returns:
        A scalar mean of `losses`; zero when `num_present` is zero
        (div_no_nan semantics).
    """
    total = math_ops.reduce_sum(losses)
    return math_ops.div_no_nan(total, num_present, name='value')
def _connect_nodes(self, first, second):
    """Connects nodes to signify that control flows from first to second.

    Args:
        first: Union[Set[Node, ...], Node]
        second: Node
    """
    if not isinstance(first, Node):
        # A collection of source nodes: connect each one individually.
        for source in first:
            self._connect_nodes(source, second)
        return
    first.next.add(second)
    second.prev.add(first)
    self.forward_edges.add((first, second))
def as_dict(self, voigt=False):
    """Serializes the tensor object.

    Args:
        voigt (bool): whether to store entries in voigt notation. Defaults
            to False, as information may be lost in conversion.

    Returns:
        dict: serialized tensor representation.
    """
    source = self.voigt if voigt else self
    serialized = {
        '@module': self.__class__.__module__,
        '@class': self.__class__.__name__,
        'input_array': source.tolist(),
    }
    if voigt:
        serialized['voigt'] = voigt
    return serialized
def _parse_trunk_groups(self, config):
    """Scans the specified config and parses the trunk group values.

    Args:
        config (str): The interface configuration block.

    Returns:
        dict: {'trunk_groups': [...]} suitable for merging into the
        resource dict.
    """
    groups = re.findall(r'switchport trunk group ([^\s]+)', config, re.M)
    return dict(trunk_groups=groups)
def publish_traceback(debug_server_urls, graph, feed_dict, fetches, old_graph_version):
    """Publish traceback and source code if graph version is new.

    `graph.version` is compared with `old_graph_version`; if newer, the
    graph traceback and associated source code are sent to the debug
    server(s) at the given gRPC URLs.

    Args:
        debug_server_urls: A gRPC debug server URL (`str`) or a `list` of them.
        graph: A Python `tf.Graph` object.
        feed_dict: Feed dictionary given to the `Session.run()` call.
        fetches: Fetches from the `Session.run()` call.
        old_graph_version: Old graph version to compare to.

    Returns:
        The new graph version (`int`) if it was newer, else `old_graph_version`.
    """
    from tensorflow.python.debug.lib import source_remote
    # Early-out when nothing changed: no traceback is published.
    if graph.version <= old_graph_version:
        return old_graph_version
    run_key = common.get_run_key(feed_dict, fetches)
    source_remote.send_graph_tracebacks(debug_server_urls, run_key, traceback.extract_stack(), graph, send_source=True)
    return graph.version
def parse_xml_to_obj(self, xml_file, check_version=True, check_root=True, encoding=None):
    """Creates a STIX binding object from the supplied xml file.

    Args:
        xml_file: A filename/path or file-like object representing a STIX
            instance document.
        check_version: Inspect the version before parsing.
        check_root: Inspect the root element before parsing.
        encoding: The character encoding of the input `xml_file`.

    Raises:
        .UnknownVersionError: If `check_version` is ``True`` and `xml_file`
            does not contain STIX version information.
        .UnsupportedVersionError: If `xml_file` contains an unsupported
            STIX version.
        .UnsupportedRootElement: If `check_root` is ``True`` and `xml_file`
            contains an invalid root element.
    """
    root = get_etree_root(xml_file, encoding=encoding)
    if check_root:
        self._check_root_tag(root)
    if check_version:
        self._check_version(root)
    binding_cls = self.get_entity_class(root.tag)._binding_class
    binding_obj = binding_cls.factory()
    binding_obj.build(root)
    return binding_obj
def __init__(self, element_type, dimensions, layout=None):
    """Creates a new XLA Shape.

    Args:
        element_type: element type from xla_data_pb2.
        dimensions: sequence of dimension sizes (integers), or sequence of
            Shapes when element_type is TUPLE.
        layout: optional minor_to_major sequence for layout; defaults to
            major-to-minor order.

    Raises:
        ValueError: if element_type is TUPLE but dimensions are not Shapes.
    """
    self.message = xla_data_pb2.ShapeProto()
    self.message.element_type = element_type
    if element_type == xla_data_pb2.TUPLE:
        if any(not isinstance(subshape, Shape) for subshape in dimensions):
            raise ValueError('XLA tuple requires sequence of Shape objects as dimensions')
        self._tuple_shapes = tuple(dimensions)
        for component_shape in self._tuple_shapes:
            self.message.tuple_shapes.add().CopyFrom(component_shape.message)
    else:
        self.message.dimensions.extend(dimensions)
        if layout is None:
            # Default major-to-minor layout.
            layout = list(reversed(range(len(dimensions))))
        self.message.layout.minor_to_major.extend(layout)
def parse_GSE(filepath):
    """Parse GSE SOFT file.

    Args:
        filepath (:obj:`str`): Path to GSE SOFT file.

    Returns:
        :obj:`GEOparse.GSE`: A GSE object.

    Raises:
        Exception: If more than one SERIES entry is found.
    """
    gpls = {}
    gsms = {}
    metadata = {}
    database = None
    gse_name = None
    series_seen = 0
    with utils.smart_open(filepath) as soft:
        groupper = groupby(soft, lambda line: line.startswith("^"))
        for is_new_entry, group in groupper:
            if not is_new_entry:
                # Data lines are consumed by the matching entry branch below.
                continue
            entry_type, entry_name = __parse_entry(next(group))
            logger.debug("%s: %s" % (entry_type.upper(), entry_name))
            if entry_type == "SERIES":
                gse_name = entry_name
                series_seen += 1
                if series_seen > 1:
                    raise Exception(
                        "GSE file should contain only one series entry!")
                is_data, data_group = next(groupper)
                assert not is_data, ("The key is not False, probably there is an "
                                     "error in the SOFT file")
                metadata = parse_metadata(data_group)
            elif entry_type == "SAMPLE":
                _, data_group = next(groupper)
                gsms[entry_name] = parse_GSM(data_group, entry_name)
            elif entry_type == "PLATFORM":
                _, data_group = next(groupper)
                gpls[entry_name] = parse_GPL(data_group, entry_name)
            elif entry_type == "DATABASE":
                _, data_group = next(groupper)
                database = GEODatabase(name=entry_name,
                                       metadata=parse_metadata(data_group))
            else:
                logger.error("Cannot recognize type %s" % entry_type)
    return GSE(name=gse_name,
               metadata=metadata,
               gpls=gpls,
               gsms=gsms,
               database=database)
def __init__(self, *args, **kwargs):
    """Constructor.

    Args:
        name (str): Name of the periodical.
        sub_trees (list): List of other trees.
        sub_publications (list): List of sub-publication UUIDs.
        aleph_id (str): ID used in aleph.
        issn (str): ISSN given to the periodical.
        is_public (bool): Is the tree public?

    Raises:
        ValueError: In case that `name` is not set, or `sub_trees` or
            `sub_publications` is not list/tuple.
    """
    # BUG FIX: `super(self.__class__, self)` recurses infinitely when this
    # class is subclassed; the zero-argument form resolves correctly.
    super().__init__(*args, **kwargs)
    if not self.name.strip():
        raise ValueError(".name property must be set!")
    if type(self.sub_trees) not in [list, tuple]:
        raise ValueError(".sub_trees property must contain list/tuple!")
    if type(self.sub_publications) not in [list, tuple]:
        # BUG FIX: message previously named .sub_trees for this check.
        raise ValueError(".sub_publications property must contain list/tuple!")
    if not self.path:
        self.path = self.name
    for sub_tree in self.sub_trees:
        sub_tree.path = os.path.join(self.path, sub_tree.name)
def __init__(self, contents, out=None, prompt=None):
    """Constructor.

    Args:
        contents: The entire contents of the text lines to page.
        out: The output stream; log.out (effectively) if None.
        prompt: The page break prompt; a default prompt is used if None.
    """
    self._contents = contents
    self._out = out or sys.stdout
    self._search_pattern = None
    self._search_direction = None
    self.prev_pos, self.prev_nxt = self.PREV_POS_NXT_REPRINT
    self._attr = console_attr.GetConsoleAttr()
    self._width, self._height = self._attr.GetTermSize()
    if not prompt:
        bold = self._attr.GetFontCode(bold=True)
        normal = self._attr.GetFontCode()
        prompt = '{bold}--({{percent}}%)--{normal}'.format(bold=bold, normal=normal)
    self._clear = '\r{0}\r'.format(' ' * (self._attr.DisplayWidth(prompt) - 6))
    self._prompt = prompt
    # Pre-split contents into terminal-width display lines.
    self._lines = []
    for line in contents.splitlines():
        self._lines += self._attr.SplitLine(line, self._width)
def get_cache_index_key(resource):
    """Return a usable cache lookup key for an already initialized resource.

    Args:
        resource (APIResource|tuple): APIResource instance, or a 3-length
            tuple key previously returned from this function.

    Returns:
        tuple: (APIResource subclass, attribute name, attribute value).

    Raises:
        TypeError: If resource is not an APIResource instance or an
            acceptable 3-length tuple cache key.
    """
    if isinstance(resource, APIResource):
        attr, attr_value = next(iter(resource.get_cache_index_keys().items()))
        key = (type(resource), attr, attr_value)
    else:
        key = tuple(resource)
    if len(key) != 3:
        raise TypeError('Cache key must be tuple of (class, key, value), got `{!r}` instead'.format(key))
    if not issubclass(key[0], APIResource):
        raise TypeError('First value of cache key must be a subclass of APIResource, got `{!r}` instead'.format(key[0]))
    return key
def roots_in_unit_interval(coeffs):
    r"""Compute roots of a polynomial in the unit interval.

    Args:
        coeffs (numpy.ndarray): A 1D array (size ``d + 1``) of coefficients
            in monomial / power basis.

    Returns:
        numpy.ndarray: ``N``-array of real values in
        :math:`\left[0, 1\right]`.
    """
    candidate_roots = polynomial.polyroots(coeffs)
    # Keep roots whose real part lies (with wiggle room) inside [0, 1].
    in_interval = candidate_roots[
        (_UNIT_INTERVAL_WIGGLE_START < candidate_roots.real)
        & (candidate_roots.real < _UNIT_INTERVAL_WIGGLE_END)
    ]
    # Discard roots with a non-negligible imaginary component.
    is_real = np.abs(in_interval.imag) < _IMAGINARY_WIGGLE
    return in_interval[is_real].real
def chrome_tracing_dump(self, filename=None):
    """Return profiling events viewable as a chrome://tracing timeline.

    Dump the result as JSON (via `filename` or json.dump), then load it at
    chrome://tracing in Chrome. Enable "Flow events" in "View Options".

    Args:
        filename: If provided, the timeline is dumped to that file.

    Returns:
        If filename is not provided, a list of profiling event dicts;
        otherwise None.
    """
    all_events = []
    for component_id_hex, component_events in self.profile_table().items():
        # Only worker and driver components appear on the timeline.
        if component_events[0]['component_type'] not in ('worker', 'driver'):
            continue
        for event in component_events:
            extra = event['extra_data']
            trace_event = {
                'cat': event['event_type'],
                'name': extra.get('name', event['event_type']),
                'pid': event['node_ip_address'],
                'tid': event['component_type'] + ':' + event['component_id'],
                'ts': self._seconds_to_microseconds(event['start_time']),
                'dur': self._seconds_to_microseconds(event['end_time'] - event['start_time']),
                'ph': 'X',
                'cname': extra.get('cname', self._default_color_mapping[event['event_type']]),
                'args': extra,
            }
            all_events.append(trace_event)
    if filename is None:
        return all_events
    with open(filename, 'w') as outfile:
        json.dump(all_events, outfile)
def require(builder_name):
    """Find an advertised autobuilder and return it.

    Searches installed distributions for an entry point with group
    'iotile.autobuild' and name equal to `builder_name`, returning the
    first match.

    Args:
        builder_name (string): The name of the builder to find.

    Returns:
        callable: the autobuilder function found in the search.

    Raises:
        BuildError: If no matching autobuild function can be found.
    """
    registry = ComponentRegistry()
    extensions = registry.load_extensions('iotile.autobuild', name_filter=builder_name)
    for _name, autobuild_func in extensions:
        # First match wins.
        return autobuild_func
    raise BuildError('Cannot find required autobuilder, make sure the distribution providing it is installed', name=builder_name)
def acl_required(permission, context):
    """Return a decorator checking a permission against an ACL context.

    The decorator wraps an aiohttp view: before calling it, the request is
    checked via get_permitted() against `permission` and `context`. If the
    user lacks the permission, HTTPForbidden is raised.

    Args:
        permission: The specific permission requested.
        context: Either a sequence of ACL tuples, or a callable returning
            such a sequence (see get_permission() for the tuple format).

    Returns:
        A decorator that enforces the permission and raises HTTPForbidden
        when the check fails.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args):
            request = args[-1]
            # BUG FIX: assigning to `context` here made it a local variable
            # of `wrapper`, causing UnboundLocalError on every call. Bind
            # the resolved value to a fresh name instead.
            ctx = context() if callable(context) else context
            if await get_permitted(request, permission, ctx):
                return await func(*args)
            raise web.HTTPForbidden()
        return wrapper
    return decorator
def gbest_idx(swarm):
    """gbest Neighbourhood topology function.

    Args:
        swarm: list: The list of particles.

    Returns:
        int: The index of the gbest particle.
    """
    best = 0
    # NOTE(review): the comparison function is built once from particle 0's
    # best_fitness, mirroring the original behavior.
    better_than = comparator(swarm[best].best_fitness)
    for idx, particle in enumerate(swarm):
        if better_than(particle.best_fitness, swarm[best].best_fitness):
            best = idx
    return best
def get_posts(self, num=None, tag=None, private=False):
    """Get posts added to the blog.

    Args:
        num (int): Optional; return at most this many posts (posts are
            assumed sorted by date, most recent first).
        tag (Tag): Optional; only return posts carrying this tag.
        private (bool): If False (default) private posts are excluded;
            if True they are included.
    """
    selected = self.posts
    if not private:
        selected = [post for post in selected if post.public]
    if tag:
        selected = [post for post in selected if tag in post.tags]
    return selected[:num] if num else selected
def _ReadConstantDataTypeDefinition(self, definitions_registry, definition_values, definition_name, is_member=False):
    """Reads a constant data type definition.

    Args:
        definitions_registry (DataTypeDefinitionsRegistry): data type
            definitions registry.
        definition_values (dict[str, object]): definition values.
        definition_name (str): name of the definition.
        is_member (Optional[bool]): True if the data type definition is a
            member data type definition.

    Returns:
        ConstantDataTypeDefinition: constant data type definition.

    Raises:
        DefinitionReaderError: if the definitions values are missing or if
            the format is incorrect.
    """
    if is_member:
        raise errors.DefinitionReaderError(definition_name, 'data type not supported as member')
    value = definition_values.get('value', None)
    if value is None:
        raise errors.DefinitionReaderError(definition_name, 'missing value')
    definition_object = self._ReadSemanticDataTypeDefinition(
        definitions_registry, definition_values, data_types.ConstantDefinition,
        definition_name, self._SUPPORTED_DEFINITION_VALUES_CONSTANT)
    definition_object.value = value
    return definition_object
def add_hgnc_id(self, genes):
    """Add the correct hgnc id to a set of genes with hgnc symbols, in place.

    Args:
        genes(list(dict)): dicts with at least an 'hgnc_symbol' key; each one
            that exists in scout is given an 'hgnc_id' entry.
    """
    # Map from gene symbol/alias to id information in the scout database.
    genes_by_alias = self.genes_by_alias()
    for gene in genes:
        id_info = genes_by_alias.get(gene['hgnc_symbol'])
        if (not id_info):
            LOG.warning('Gene %s does not exist in scout', gene['hgnc_symbol'])
            continue
        # NOTE(review): 'true' presumably holds the id when the symbol is a
        # current (non-alias) symbol -- confirm against genes_by_alias().
        gene['hgnc_id'] = id_info['true']
        if (not id_info['true']):
            if (len(id_info['ids']) > 1):
                LOG.warning('Gene %s has ambiguous value, please choose one hgnc id in result', gene['hgnc_symbol'])
            # No unambiguous id: fall back to a comma-separated candidate list.
            gene['hgnc_id'] = ','.join([str(hgnc_id) for hgnc_id in id_info['ids']])
Args:
genes(list(dict)): A set of genes with hgnc symbols only | codesearchnet |
def _make_mail(self, complete=True):
    """Assign the right values to all tokens of the email.

    Keyword Arguments:
        complete {bool} -- If True returns all mail parts
            (default: {True})

    Returns:
        dict -- Parsed email object
    """
    mail = {}
    for part_name in get_mail_keys(self.message, complete):
        log.debug('Getting header or part {!r}'.format(part_name))
        part_value = getattr(self, part_name)
        if part_value:
            mail[part_name] = part_value
    mail['has_defects'] = self.has_defects
    if self.has_defects:
        mail['defects'] = self.defects
        mail['defects_categories'] = list(self.defects_categories)
    return mail
Returns a parsed object
Keyword Arguments:
complete {bool} -- If True returns all mail parts
(default: {True})
Returns:
dict -- Parsed email object | codesearchnet |
def _check_approval_wrapper(self, grr_object, grr_function, *args, **kwargs):
    """Wraps a call to GRR functions, requesting approval and retrying.

    Args:
        grr_object: the GRR object to create the eventual approval on.
        grr_function: The GRR function requiring approval.
        *args: Positional arguments that are to be passed to `grr_function`.
        **kwargs: Keyword arguments that are to be passed to `grr_function`.

    Returns:
        The return value of grr_function(*args, **kwargs), or None when
        approval is needed but no approvers were configured.
    """
    approval_sent = False
    while True:
        try:
            return grr_function(*args, **kwargs)
        except grr_errors.AccessForbiddenError as exception:
            print('No valid approval found: {0!s}'.format(exception))
            # Approval already requested: poll until it is granted.
            if approval_sent:
                print('Approval not yet granted, waiting {0:d}s'.format(self._CHECK_APPROVAL_INTERVAL_SEC))
                time.sleep(self._CHECK_APPROVAL_INTERVAL_SEC)
                continue
            if (not self.approvers):
                message = 'GRR needs approval but no approvers specified (hint: use --approvers)'
                self.state.add_error(message, critical=True)
                return None
            # First AccessForbiddenError: file the approval request once.
            grr_object.CreateApproval(reason=self.reason, notified_users=self.approvers)
            approval_sent = True
            print('{0!s}: approval request sent to: {1!s} (reason: {2:s})'.format(grr_object, self.approvers, self.reason))
Args:
grr_object: the GRR object to create the eventual approval on.
grr_function: The GRR function requiring approval.
*args: Positional arguments that are to be passed to `grr_function`.
**kwargs: Keyword arguments that are to be passed to `grr_function`.
Returns:
The return value of the execution of grr_function(*args, **kwargs). | codesearchnet |
def smart_init_mapping(candidate_mapping, instance1, instance2):
    """Initialize mapping based on the concept mapping (smart initialization).

    Arguments:
        candidate_mapping: candidate node match list.
        instance1: instance triples of AMR 1.
        instance2: instance triples of AMR 2.

    Returns:
        list: initialized node mapping between the two AMRs (-1 = unmapped).
    """
    random.seed()
    taken = {}
    mapping = []
    unmatched = []
    # Phase 1: map nodes whose concept values agree.
    for idx, candidates in enumerate(candidate_mapping):
        if not candidates:
            mapping.append(-1)
            continue
        concept1 = instance1[idx][2]
        for node_index in candidates:
            if instance2[node_index][2] == concept1 and node_index not in taken:
                mapping.append(node_index)
                taken[node_index] = 1
                break
        if len(mapping) == idx:
            # Nothing appended for this node: remember it for phase 2.
            unmatched.append(idx)
            mapping.append(-1)
    # Phase 2: randomly assign remaining candidates that are still free.
    for idx in unmatched:
        pool = list(candidate_mapping[idx])
        while pool:
            rid = random.randint(0, len(pool) - 1)
            choice = pool[rid]
            if choice in taken:
                pool.pop(rid)
            else:
                taken[choice] = 1
                mapping[idx] = choice
                break
    return mapping
Arguments:
candidate_mapping: candidate node match list
instance1: instance triples of AMR 1
instance2: instance triples of AMR 2
Returns:
initialized node mapping between two AMRs | juraj-google-style |
def find_block_end(lines: List[str], start_index: int, indent: int) -> int:
    """Find the end of the class/func block starting at `start_index`.

    Args:
        lines (`List[str]`): the source code, represented by a list of lines.
        start_index (`int`): the starting index of the target block.
        indent (`int`): the indent (in spaces) of the block body.

    Returns:
        `int`: the index of the block's ending line plus 1 (i.e. exclusive).
    """
    indent_str = ' ' * indent
    end = start_index + 1
    # Consume every line that still belongs to the block body.
    while end < len(lines) and _should_continue(lines[end], indent_str):
        end += 1
    # Drop trailing (near-)empty lines from the block.
    while len(lines[end - 1]) <= 1:
        end -= 1
    return end
Args:
lines (`List[str]`):
The source code, represented by a list of lines.
start_index (`int`):
The starting index of the target class/func block.
indent (`int`):
The indent of the class/func body.
Returns:
`int`: The index of the block's ending line plus by 1 (i.e. exclusive). | github-repos |
def lookup_id(self, group):
    """Lookup GID for the given group.

    Args:
        group: Name of the group whose ID needs to be looked up.

    Returns:
        The gidNumber value of the matching group entry.

    Raises:
        ldap_tools.exceptions.NoGroupsFound: no groups returned by LDAP.
        ldap_tools.exceptions.TooManyResults: more than one group returned.
    """
    search_filter = ["(cn={})".format(group), "(objectclass=posixGroup)"]
    matches = self.client.search(search_filter, ['gidNumber'])
    if not matches:
        raise ldap_tools.exceptions.NoGroupsFound(
            'No Groups Returned by LDAP')
    if len(matches) > 1:
        raise ldap_tools.exceptions.TooManyResults(
            'Multiple groups found. Please narrow your search.')
    return matches[0].gidNumber.value
Args:
group: Name of group whose ID needs to be looked up
Returns:
A bytestring representation of the group ID (gid)
for the group specified
Raises:
ldap_tools.exceptions.NoGroupsFound:
No Groups were returned by LDAP
ldap_tools.exceptions.TooManyResults:
More than one group was returned by LDAP | juraj-google-style |
def from_file(cls, filename, constant_lattice=True, **kwargs):
    """Build a trajectory from an XDATCAR or vasprun.xml file.

    Args:
        filename (str): The filename to read from.
        constant_lattice (bool): Whether the lattice is fixed during the
            simulation (False for e.g. NPT MD runs).
        **kwargs: forwarded to ``from_structures``.

    Returns:
        (Trajectory)

    Raises:
        ValueError: if the filename matches neither supported format.
    """
    basename = os.path.basename(filename)
    if fnmatch(basename, '*XDATCAR*'):
        parsed = Xdatcar(filename)
    elif fnmatch(basename, 'vasprun*.xml*'):
        parsed = Vasprun(filename)
    else:
        raise ValueError('Unsupported file')
    return cls.from_structures(parsed.structures, constant_lattice=constant_lattice, **kwargs)
Args:
filename (str): The filename to read from.
constant_lattice (bool): Whether the lattice changes during the simulation, such as in an NPT MD
simulation. True results in
Returns:
(Trajectory) | codesearchnet |
def kld(d1, d2):
    """Return the Kullback-Leibler Divergence (KLD) between two distributions.

    Args:
        d1 (np.ndarray): The first distribution.
        d2 (np.ndarray): The second distribution.

    Returns:
        float: The KLD of ``d1`` from ``d2`` (base-2 logarithm, assuming
        scipy-style entropy(pk, qk, base) semantics -- confirm).
    """
    # Flatten both distributions so entropy() receives 1-D arrays.
    d1, d2 = flatten(d1), flatten(d2)
    return entropy(d1, d2, 2.0)
Args:
d1 (np.ndarray): The first distribution.
d2 (np.ndarray): The second distribution.
Returns:
float: The KLD of ``d1`` from ``d2``. | juraj-google-style |
def _get_elements(mol, label):
    """Get the elements of the atoms in the specified order.

    Args:
        mol: The molecule. OpenBabel OBMol object.
        label: The atom indices. List of integers.

    Returns:
        list of int: atomic numbers, one per index in `label`.
    """
    return [int(atom.GetAtomicNum()) for atom in map(mol.GetAtom, label)]
Args:
mol: The molecule. OpenBabel OBMol object.
label: The atom indices. List of integers.
Returns:
Elements. List of integers. | codesearchnet |
def _IsType(clean_lines, nesting_state, expr):
    """Check if expression looks like a type name, returns true if so.

    Args:
        clean_lines: A CleansedLines instance containing the file.
        nesting_state: A NestingState instance which maintains information about
            the current stack of nested blocks being parsed.
        expr: The expression to check.

    Returns:
        True, if token looks like a type.
    """
    # Keep only the last word of the expression; that is the token that could
    # name a type.
    last_word = Match('^.*(\\b\\S+)$', expr)
    if last_word:
        token = last_word.group(1)
    else:
        token = expr
    # Known (built-in/standard) type names match immediately.
    if _TYPES.match(token):
        return True
    # Otherwise, look for "typename/class/struct <token>" in an enclosing
    # template header.
    typename_pattern = (('\\b(?:typename|class|struct)\\s+' + re.escape(token)) + '\\b')
    block_index = (len(nesting_state.stack) - 1)
    while (block_index >= 0):
        # Template parameters do not escape a namespace; stop searching there.
        if isinstance(nesting_state.stack[block_index], _NamespaceInfo):
            return False
        # Search back from this block's starting line toward the start of the
        # enclosing block, looking for a 'template' keyword.
        last_line = nesting_state.stack[block_index].starting_linenum
        next_block_start = 0
        if (block_index > 0):
            next_block_start = nesting_state.stack[(block_index - 1)].starting_linenum
        first_line = last_line
        while (first_line >= next_block_start):
            if (clean_lines.elided[first_line].find('template') >= 0):
                break
            first_line -= 1
        if (first_line < next_block_start):
            # No template header for this block; try the enclosing block.
            block_index -= 1
            continue
        # Scan the template header for a declaration of this token.
        for i in xrange(first_line, (last_line + 1), 1):
            if Search(typename_pattern, clean_lines.elided[i]):
                return True
        block_index -= 1
    return False
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
expr: The expression to check.
Returns:
True, if token looks like a type. | codesearchnet |
def from_json(cls, json, image_config=None):
    """Create a model instance.

    Arguments:
        json (:py:class:`dict`): The parsed JSON data.
        image_config (:py:class:`dict`): The API image configuration data.

    Returns:
        :py:class:`BaseModel`: The model instance.
    """
    cls.image_config = image_config
    kwargs = {}
    # JSON_MAPPING maps attribute name -> JSON key (None means same name).
    for attr, key in cls.JSON_MAPPING.items():
        kwargs[attr] = json.get(attr if key is None else key)
    return cls(**kwargs)
Arguments:
json (:py:class:`dict`): The parsed JSON data.
image_config (:py:class:`dict`): The API image configuration
data.
Returns:
:py:class:`BaseModel`: The model instance. | codesearchnet |
def to_jdbc_url(self) -> str:
    """Convert options to a properly formatted JDBC URL.

    Returns:
        JDBC URL string configured with all options.
    """
    # NOTE(review): this line appears truncated by extraction (unterminated
    # f-string); the original presumably builds the base URL, e.g.
    # something like f'jdbc:postgresql:///{database}?' -- restore upstream.
    url = f'jdbc:postgresql:
    # Connector properties understood by the AlloyDB JDBC socket factory.
    properties = {'socketFactory': 'com.google.cloud.alloydb.SocketFactory', 'alloydbInstanceName': self.instance_name, 'alloydbIpType': self.ip_type}
    if self.enable_iam_auth:
        properties['alloydbEnableIAMAuth'] = 'true'
    if self.target_principal:
        properties['alloydbTargetPrincipal'] = self.target_principal
    if self.delegates:
        properties['alloydbDelegates'] = ','.join(self.delegates)
    if self.admin_service_endpoint:
        properties['alloydbAdminServiceEndpoint'] = self.admin_service_endpoint
    if self.quota_project:
        properties['alloydbQuotaProject'] = self.quota_project
    if self.additional_properties:
        properties.update(self.additional_properties)
    # Serialize the properties as URL query parameters.
    property_string = '&'.join((f'{k}={v}' for k, v in properties.items()))
    return url + property_string
Returns:
JDBC URL string configured with all options. | github-repos |
def delete_direct(self, addresses):
    """Mark the given addresses as deleted in the context state.

    Called in the context manager's delete method to either mark an existing
    entry for deletion, or create a new future and immediately set it as
    deleted.

    Args:
        addresses (list of str): The unique full addresses.

    Raises:
        AuthorizationException: if an address may not be written.
    """
    with self._lock:
        for address in addresses:
            # Raises if this context is not authorized to write the address.
            self._validate_write(address)
            if (address in self._state):
                self._state[address].set_deleted()
            else:
                fut = _ContextFuture(address=address)
                self._state[address] = fut
                fut.set_deleted()
mark an entry for deletion , or create a new future and immediately
set it for deletion in the future.
Args:
address_list (list of str): The unique full addresses.
Raises:
AuthorizationException | codesearchnet |
def vgg13(pretrained=False, **kwargs):
    """VGG 13-layer model (configuration "B").

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet.
        **kwargs: forwarded to the VGG constructor.

    Returns:
        The constructed VGG model.
    """
    if pretrained:
        # Pretrained weights are loaded below; skip random initialization.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B']), **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['vgg13'])
        model.load_state_dict(state_dict)
    return model
return model | VGG 13-layer model (configuration "B")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet | juraj-google-style |
def max_sequence_length(self, dataset_split):
    """Determine the maximum sequence length given a dataset_split.

    Args:
        dataset_split: A problem.DatasetSplit.

    Returns:
        The maximum length that a sequence can be for this dataset_split.
    """
    max_lengths = {
        problem.DatasetSplit.TRAIN: 64,
        problem.DatasetSplit.EVAL: 128,
        problem.DatasetSplit.TEST: 128,
    }
    return max_lengths[dataset_split]
Args:
dataset_split: A problem.DatasetSplit.
Returns:
The maximum length that a sequence can be for this dataset_split. | juraj-google-style |
def summary_dict(self):
    """Get a dictionary that summarizes the stats of this test result.

    The summary provides the counts of how many tests fall into each
    category, like 'Passed', 'Failed' etc.

    Returns:
        A dictionary with the stats of this test result.
    """
    return {
        'Requested': len(self.requested),
        'Executed': len(self.executed),
        'Passed': len(self.passed),
        'Failed': len(self.failed),
        'Skipped': len(self.skipped),
        'Error': len(self.error),
    }
The summary provides the counts of how many tests fall into each
category, like 'Passed', 'Failed' etc.
Returns:
A dictionary with the stats of this test result. | github-repos |
def from_compatible_tensor_list(element_spec, tensor_list):
    """Returns an element constructed from the given spec and tensor list.

    Args:
        element_spec: A nested structure of `tf.TypeSpec` objects representing
            the element type specification.
        tensor_list: A list of tensors to use for constructing the value.

    Returns:
        An element constructed from the given spec and tensor list.

    Raises:
        ValueError: If the number of tensors needed to construct an element for
            the given spec does not match the given number of tensors.
    """
    # Delegate to the shared helper; each spec consumes its own slice of the
    # flat tensor list via its _from_compatible_tensor_list method.
    return _from_tensor_list_helper(lambda spec, value: spec._from_compatible_tensor_list(value), element_spec, tensor_list)
Args:
element_spec: A nested structure of `tf.TypeSpec` objects representing to
element type specification.
tensor_list: A list of tensors to use for constructing the value.
Returns:
An element constructed from the given spec and tensor list.
Raises:
ValueError: If the number of tensors needed to construct an element for
the given spec does not match the given number of tensors. | github-repos |
def sign(self, message):
    """Signs a message.

    Args:
        message: bytes, Message to be signed.

    Returns:
        string, The PKCS#1 v1.5 SHA-256 signature of the message.
    """
    # Normalize str input to UTF-8 bytes before signing.
    message = _helpers._to_bytes(message, encoding='utf-8')
    return rsa.pkcs1.sign(message, self._key, 'SHA-256')
Args:
message: bytes, Message to be signed.
Returns:
string, The signature of the message for the given key. | juraj-google-style |
def url(self, url, owner=None, **kwargs):
    """Create the URL TI object.

    Args:
        url: the URL indicator value.
        owner: presumably the owning organization/source for the indicator
            -- TODO confirm against the URL class.
        **kwargs: additional arguments forwarded to URL.

    Returns:
        URL: the created URL threat-intelligence object.
    """
    return URL(self.tcex, url, owner=owner, **kwargs)
Args:
owner:
url:
**kwargs:
Return: | juraj-google-style |
def get_out_of_order(list_of_numbers):
    """Returns elements that break the monotonically non-decreasing trend.

    This is used to find instances of global step values that are
    "out-of-order", which may trigger TensorBoard event discarding logic.

    Args:
        list_of_numbers: A list of numbers.

    Returns:
        A list of (previous, current) tuples for each adjacent pair in which
        the second element is lower than the first.
    """
    # Pair each element with its successor instead of manual index
    # bookkeeping over range(len(...)).
    return [(prev, cur)
            for prev, cur in zip(list_of_numbers, list_of_numbers[1:])
            if cur < prev]
This is used to find instances of global step values that are "out-of-order",
which may trigger TensorBoard event discarding logic.
Args:
list_of_numbers: A list of numbers.
Returns:
A list of tuples in which each tuple are two elements are adjacent, but the
second element is lower than the first. | codesearchnet |
def update_info(self, custom=None):
    """Update the figure's suptitle.

    Calls self.info_string() unless custom is provided.

    Args:
        custom: Overwrite the title with this string, unless None.
    """
    if custom is None:
        title = self.info_string()
    else:
        title = custom
    self.figure.suptitle(title)
Calls self.info_string() unless custom is provided.
Args:
custom: Overwrite it with this string, unless None. | juraj-google-style |
def extract_formats(config_handle):
    """Get application formats.

    See :class:`gogoutils.Formats` for available options.

    Args:
        config_handle (configparser.ConfigParser): Instance of configurations
            (any mapping works).

    Returns:
        dict: Formats in ``{$format_type: $format_pattern}``.
    """
    return dict(dict(config_handle).get('formats', {}))
See :class:`gogoutils.Formats` for available options.
Args:
config_handle (configparser.ConfigParser): Instance of configurations.
Returns:
dict: Formats in ``{$format_type: $format_pattern}``. | juraj-google-style |
def get_all_ad_units(inventory_service):
    """Download all ad units.

    Args:
        inventory_service: An instance of the InventoryService.

    Returns:
        A list containing all ad units, fetched page by page.
    """
    # Page through all ad units ordered by id.
    statement = (ad_manager.StatementBuilder(version='v201811')
                 .OrderBy('id', ascending=True))
    keep_iterating = True
    total_results = 0
    found_ad_units = []
    while keep_iterating:
        page = inventory_service.getAdUnitsByStatement(statement.ToStatement())
        if 'results' in page and len(page['results']):
            total_results = page['totalResultSetSize']
            found_ad_units.extend(page['results'])
            statement.offset += statement.limit
        # Stop once the offset has walked past the reported result count.
        keep_iterating = statement.offset < total_results
    return found_ad_units
Args:
inventory_service: An instance of the InventoryService.
Returns:
A list containing all ad units. | juraj-google-style |
def string_handle(self, name=None):
    """Returns a string-valued `tf.Tensor` that represents this iterator.

    Args:
        name: (Optional.) A name for the created operation.

    Returns:
        A scalar `tf.Tensor` of type `tf.string`.
    """
    if name is None:
        # Reuse the handle created at construction time.
        return self._string_handle
    else:
        return gen_dataset_ops.iterator_to_string_handle(self._iterator_resource, name=name)
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.string`. | github-repos |
def set(cls, values):
    """Set the elements from the data obtained from the REST API.

    Args:
        values (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys.
    """
    # Fill each output element's text from the corresponding key.
    for element, key in ((cls.mrc_out_el, 'mrc'),
                         (cls.oai_out_el, 'oai'),
                         (cls.dc_out_el, 'dc')):
        element.text = values.get(key, '')
    cls.filename = values.get('fn', 'fn')
    cls.values = values
Args:
values (dict): Dict with ``mrc``, ``oai``, ``dc`` and ``fn`` keys. | codesearchnet |
def getTextBlocks(page, images=False):
    """Return the text blocks on a page.

    Notes:
        Lines in a block are concatenated with line breaks.

    Args:
        page: the page to extract text blocks from.
        images: (bool) also return meta data of any images.
            Image data are never returned with this method.

    Returns:
        A list of the blocks. Each item contains the containing rectangle
        coordinates, text lines, block type and running block number.
    """
    # Ensure the page still belongs to a live document.
    CheckParent(page)
    dl = page.getDisplayList()
    flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
    if images:
        flags |= TEXT_PRESERVE_IMAGES
    tp = dl.getTextPage(flags)
    l = tp._extractTextBlocks_AsList()
    # Free the intermediate text page / display list promptly.
    del tp
    del dl
    return l
Notes:
Lines in a block are concatenated with line breaks.
Args:
images: (bool) also return meta data of any images.
Image data are never returned with this method.
Returns:
A list of the blocks. Each item contains the containing rectangle coordinates,
text lines, block type and running block number. | juraj-google-style |
def reset(self, indices, observations):
    """Resets trajectories at given indices and populates observations.

    Reset can either be called right at the beginning, when there are no
    time-steps, or to reset a currently active trajectory.

    If resetting a currently active trajectory then we save it in
    self._completed_trajectories.

    Args:
        indices: 1-D np.ndarray stating the indices to reset.
        observations: np.ndarray of shape (indices len, obs.shape) of
            observations.
    """
    assert isinstance(indices, np.ndarray)
    assert len(indices.shape) == 1
    assert isinstance(observations, np.ndarray)
    assert indices.shape[0] == observations.shape[0]
    for index, observation in zip(indices, observations):
        trajectory = self._trajectories[index]
        if not trajectory.is_active:
            # Fresh trajectory: just record the first observation.
            trajectory.add_time_step(observation=observation)
            continue
        # Active trajectory: archive it, then start a new one in its slot.
        self._complete_trajectory(trajectory, index)
        self._trajectories[index].add_time_step(observation=observation)
Reset can either be called right at the beginning, when there are no
time-steps, or to reset a currently active trajectory.
If resetting a currently active trajectory then we save it in
self._completed_trajectories.
Args:
indices: 1-D np.ndarray stating the indices to reset.
observations: np.ndarray of shape (indices len, obs.shape) of observations | juraj-google-style |
def authenticate(self, code: str) -> 'Preston':
    """Authenticates using the code from the EVE SSO.

    A new Preston object is returned; this object is not modified.
    The intended usage is:

        auth = preston.authenticate('some_code_here')

    Args:
        code: SSO authorization code.

    Returns:
        A new, authenticated Preston instance.

    Raises:
        Exception: if the token endpoint does not return HTTP 200.
    """
    headers = self._get_authorization_headers()
    data = {
        'grant_type': 'authorization_code',
        'code': code
    }
    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
    # NOTE(review): "repsonse" typo below is in a user-facing message.
    if not r.status_code == 200:
        raise Exception(f'Could not authenticate, got repsonse code {r.status_code}')
    # Clone the constructor kwargs, adding the fresh token data.
    new_kwargs = dict(self._kwargs)
    response_data = r.json()
    new_kwargs['access_token'] = response_data['access_token']
    new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
    new_kwargs['refresh_token'] = response_data['refresh_token']
    return Preston(**new_kwargs)
A new Preston object is returned; this object is not modified.
The intended usage is:
auth = preston.authenticate('some_code_here')
Args:
code: SSO code
Returns:
new Preston, authenticated | juraj-google-style |
def assign(self, institute, case, user, link):
    """Assign a user to a case.

    This function will create an Event to log that a person has been assigned
    to a case. The user is also added to the case's "assignees".

    Arguments:
        institute (dict): An institute.
        case (dict): A case.
        user (dict): A User object.
        link (str): The url to be used in the event.

    Returns:
        updated_case(dict)
    """
    LOG.info('Creating event for assigning {0} to {1}'.format(user['name'].encode('utf-8'), case['display_name']))
    self.create_event(institute=institute, case=case, user=user, link=link, category='case', verb='assign', subject=case['display_name'])
    LOG.info('Updating {0} to be assigned with {1}'.format(case['display_name'], user['name']))
    # $addToSet keeps assignees unique; return the post-update document.
    updated_case = self.case_collection.find_one_and_update({'_id': case['_id']}, {'$addToSet': {'assignees': user['_id']}}, return_document=pymongo.ReturnDocument.AFTER)
    return updated_case
return updated_case | Assign a user to a case.
This function will create an Event to log that a person has been assigned
to a case. Also the user will be added to case "assignees".
Arguments:
institute (dict): A institute
case (dict): A case
user (dict): A User object
link (str): The url to be used in the event
Returns:
updated_case(dict) | codesearchnet |
def groups_replies(self, *, channel: str, thread_ts: str, **kwargs) -> SlackResponse:
    """Retrieve a thread of messages posted to a private channel.

    Args:
        channel (str): The channel id. e.g. 'C1234567890'
        thread_ts (str): The timestamp of an existing message with 0 or more
            replies. e.g. '1234567890.123456'
    """
    self._validate_xoxp_token()
    params = dict(kwargs, channel=channel, thread_ts=thread_ts)
    return self.api_call('groups.replies', http_verb='GET', params=params)
Args:
channel (str): The channel id. e.g. 'C1234567890'
thread_ts (str): The timestamp of an existing message with 0 or more replies.
e.g. '1234567890.123456' | codesearchnet |
def __init__(self, default: typing.Optional[bool]=MISSING_VALUE, is_noneable: bool=False, frozen: bool=False):
    """Constructor.

    Args:
        default: Default value for the value spec.
        is_noneable: If True, None is acceptable.
        frozen: If True, values other than the default value are not
            acceptable.
    """
    # Fix the value type to bool; all other behavior comes from the base spec.
    super().__init__(bool, default, is_noneable=is_noneable, frozen=frozen)
Args:
default: Default value for the value spec.
is_noneable: If True, None is acceptable.
frozen: If True, values other than the default value is not accceptable. | github-repos |
def ListChildPathInfos(self, client_id, path_type, components,
                       timestamp=None):
    """Lists path info records that correspond to children of given path.

    Args:
        client_id: An identifier string for a client.
        path_type: A type of a path to retrieve path information for.
        components: A tuple of path components of a path to retrieve child path
            information for.
        timestamp: If set, lists only descendants that existed only at that
            timestamp.

    Returns:
        A list of `rdf_objects.PathInfo` instances sorted by path components.
    """
    # Children are simply descendants limited to depth 1.
    return self.ListDescendentPathInfos(
        client_id, path_type, components, max_depth=1, timestamp=timestamp)
Args:
client_id: An identifier string for a client.
path_type: A type of a path to retrieve path information for.
components: A tuple of path components of a path to retrieve child path
information for.
timestamp: If set, lists only descendants that existed only at that
timestamp.
Returns:
A list of `rdf_objects.PathInfo` instances sorted by path components. | juraj-google-style |
def users_setPhoto(self, *, image: Union[(str, IOBase)], **kwargs) -> SlackResponse:
    """Set the user profile photo.

    Args:
        image: Path of the image to upload, e.g. 'myimage.png', or an open
            file object.
    """
    self._validate_xoxp_token()
    return self.api_call('users.setPhoto', files={'image': image}, data=kwargs)
Args:
image (str): Supply the path of the image you'd like to upload.
e.g. 'myimage.png' | codesearchnet |
def __init__(self, name, segments):
    """Constructor.

    When constructing a track it's not guaranteed that the segments have
    their properties computed. Call the preprocess method over this class,
    or over each segment, to guarantee it.

    Args:
        name (:obj:`str`): track name.
        segments (:obj:`list` of :obj:`Segment`): stored sorted by the time
            of each segment's first point.
    """
    self.name = name
    self.meta = []

    def start_time(segment):
        return segment.points[0].time

    self.segments = sorted(segments, key=start_time)
When constructing a track it's not guaranteed that the segments
have their properties computed. Call preprocess method over this
class, or over each segment to guarantee it.
Args:
name (:obj:`str`)
segments(:obj:`list` of :obj:`Segment`) | juraj-google-style |
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Categorical crossentropy with integer targets.

    Args:
        target: An integer tensor.
        output: A tensor resulting from a softmax
            (unless `from_logits` is True, in which
            case `output` is expected to be the logits).
        from_logits: Boolean, whether `output` is the
            result of a softmax, or is a tensor of logits.
        axis: Int specifying the channels axis. `axis=-1` corresponds to data
            format `channels_last`, and `axis=1` corresponds to data format
            `channels_first`.

    Returns:
        Output tensor.

    Raises:
        ValueError: if `axis` is neither -1 nor one of the axes of `output`.
    """
    target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target)
    output = tensor_conversion.convert_to_tensor_v2_with_dispatch(output)
    # Prefer raw logits stashed on the tensor by a preceding activation layer.
    if hasattr(output, '_keras_logits'):
        output = output._keras_logits
        if from_logits:
            warnings.warn('"`sparse_categorical_crossentropy` received `from_logits=True`, but the `output` argument was produced by a sigmoid or softmax activation and thus does not represent logits. Was this intended?"')
        from_logits = True
    # If the output is directly a Softmax op, strip it and use its input.
    elif (not from_logits and (not isinstance(output, (ops.EagerTensor, variables_module.Variable))) and (output.op.type == 'Softmax')) and (not hasattr(output, '_keras_history')):
        assert len(output.op.inputs) == 1
        output = output.op.inputs[0]
        from_logits = True
    elif not from_logits:
        # Probabilities: clip away 0/1 and convert to log-probabilities.
        epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
        output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
        output = math_ops.log(output)
    if isinstance(output.shape, (tuple, list)):
        output_rank = len(output.shape)
    else:
        output_rank = output.shape.ndims
    if output_rank is not None:
        # Move the channels axis to the last position, as expected by the op.
        axis %= output_rank
        if axis != output_rank - 1:
            permutation = list(itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
            output = array_ops.transpose(output, perm=permutation)
    elif axis != -1:
        raise ValueError('Cannot compute sparse categorical crossentropy with `axis={}` on an output tensor with unknown rank'.format(axis))
    target = cast(target, 'int64')
    output_shape = array_ops.shape_v2(output)
    target_rank = target.shape.ndims
    # The op expects rank(target) == rank(output) - 1; flatten otherwise and
    # restore the shape afterwards.
    update_shape = target_rank is not None and output_rank is not None and (target_rank != output_rank - 1)
    if update_shape:
        target = flatten(target)
        output = array_ops.reshape(output, [-1, output_shape[-1]])
    if py_any((_is_symbolic_tensor(v) for v in [target, output])):
        # Symbolic tensors must be evaluated inside the Keras graph.
        with get_graph().as_default():
            res = nn.sparse_softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
    else:
        res = nn.sparse_softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
    if update_shape and output_rank >= 3:
        return array_ops.reshape(res, output_shape[:-1])
    else:
        return res
Args:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`. | github-repos |
def parse_results(lines):
    """Parses benchmark results from run_onednn_benchmarks.sh.

    Stores results in a global dict (`db`, keyed by (model, batch, onednn)),
    and records seen values in the global `batch_sizes` and `models` sets.

    Args:
        lines: Array of strings corresponding to each line of the output from
            run_onednn_benchmarks.sh

    Raises:
        RuntimeError: If the program reaches an unknown state.
    """
    idx = 0
    batch, onednn, model = (None, None, None)
    # Two-state machine: find a config/model line, then its timing line.
    state = State.FIND_CONFIG_OR_MODEL
    while idx < len(lines):
        if state is State.FIND_CONFIG_OR_MODEL:
            config = re.match("\\+ echo 'BATCH=(?P<batch>[\\d]+), ONEDNN=(?P<onednn>[\\d]+)", lines[idx])
            if config:
                batch = int(config.group('batch'))
                onednn = int(config.group('onednn'))
                batch_sizes.add(batch)
            else:
                model_re = re.search('tf-graphs\\/(?P<model>[\\w\\d_-]+).pb', lines[idx])
                assert model_re
                model = model_re.group('model')
                models.add(model)
                state = State.FIND_RUNNING_TIME
        elif state is State.FIND_RUNNING_TIME:
            match = re.search('no stats: (?P<avg>[\\d.]+)', lines[idx])
            state = State.FIND_CONFIG_OR_MODEL
            if match:
                avg = float(match.group('avg'))
                key = (model, batch, onednn)
                assert None not in key
                db[key] = avg
            else:
                # Not a timing line: re-examine the same line in the other
                # state (skip the idx increment below).
                continue
        else:
            raise RuntimeError('Reached the unreachable code.')
        idx = idx + 1
Stores results in a global dict.
Args:
lines: Array of strings corresponding to each line of the output from
run_onednn_benchmarks.sh
Raises:
RuntimeError: If the program reaches an unknown state. | github-repos |
class _IdentityBlock(tf.keras.Model):
    """_IdentityBlock is the block that has no conv layer at shortcut.

    Args:
        kernel_size: the kernel size of middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        data_format: data_format for the input ('channels_first' or
            'channels_last').
    """

    def __init__(self, kernel_size, filters, stage, block, data_format):
        super(_IdentityBlock, self).__init__(name='')
        filters1, filters2, filters3 = filters
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'
        # BatchNorm normalizes over the channel axis, whose position depends
        # on the data format.
        bn_axis = 1 if data_format == 'channels_first' else 3
        self.conv2a = layers.Conv2D(filters1, (1, 1), name=conv_name_base + '2a', data_format=data_format)
        self.bn2a = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')
        self.conv2b = layers.Conv2D(filters2, kernel_size, padding='same', data_format=data_format, name=conv_name_base + '2b')
        self.bn2b = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')
        self.conv2c = layers.Conv2D(filters3, (1, 1), name=conv_name_base + '2c', data_format=data_format)
        self.bn2c = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')

    def call(self, input_tensor, training=False):
        # Main path: conv -> BN -> relu, three times.
        x = self.conv2a(input_tensor)
        x = self.bn2a(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2b(x)
        x = self.bn2b(x, training=training)
        x = tf.nn.relu(x)
        x = self.conv2c(x)
        x = self.bn2c(x, training=training)
        # Identity shortcut: add the block input before the final relu.
        x += input_tensor
        return tf.nn.relu(x)
Args:
kernel_size: the kernel size of middle conv layer at main path
filters: list of integers, the filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
data_format: data_format for the input ('channels_first' or
'channels_last'). | github-repos |
def mode(self, **kwargs):
    """Returns a new QueryCompiler with modes calculated for each label along given axis.

    Args:
        **kwargs: 'axis' plus any arguments forwarded to pandas
            ``DataFrame.mode``.

    Returns:
        A new QueryCompiler with modes calculated.
    """
    axis = kwargs.get('axis', 0)
    def mode_builder(df, **kwargs):
        # Compute the partition's modes, then pad with empty rows/columns so
        # every partition keeps the original shape (mode can shrink a frame).
        result = df.mode(**kwargs)
        if ((not axis) and (len(df) != len(result))):
            append_values = pandas.DataFrame(columns=result.columns, index=range(len(result), len(df)))
            result = pandas.concat([result, append_values], ignore_index=True)
        elif (axis and (len(df.columns) != len(result.columns))):
            append_vals = pandas.DataFrame(columns=range(len(result.columns), len(df.columns)), index=result.index)
            result = pandas.concat([result, append_vals], axis=1)
        return pandas.DataFrame(result)
    func = self._prepare_method(mode_builder, **kwargs)
    new_data = self._map_across_full_axis(axis, func)
    # Mode results are positional, so the reduced axis gets a RangeIndex.
    new_index = (pandas.RangeIndex(len(self.index)) if (not axis) else self.index)
    new_columns = (self.columns if (not axis) else pandas.RangeIndex(len(self.columns)))
    new_dtypes = self._dtype_cache
    if (new_dtypes is not None):
        new_dtypes.index = new_columns
    # Drop the all-NaN padding introduced by mode_builder.
    return self.__constructor__(new_data, new_index, new_columns, new_dtypes).dropna(axis=axis, how='all')
Returns:
A new QueryCompiler with modes calculated. | codesearchnet |
def pack_tag(field_number, wire_type):
    """Encode a field number and wire type into a protobuf tag.

    Returns an unsigned 32-bit integer that encodes the field number and
    wire type information in standard protocol message wire format.

    Args:
        field_number: Expected to be an integer in the range [1, 1 << 29).
        wire_type: One of the WIRETYPE_* constants.

    Raises:
        errors.EncodeError: if the wire type is out of range.
    """
    if wire_type < 0 or wire_type > _WIRETYPE_MAX:
        raise errors.EncodeError('Unknown wire type: %d' % wire_type)
    return (field_number << TAG_TYPE_BITS) | wire_type
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants. | juraj-google-style |
def Process(self, parser_mediator, cookie_name, cookie_data, url, **kwargs):
    """Determine if this is the right plugin for this cookie.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        cookie_name (str): the name of the cookie value.
        cookie_data (bytes): the cookie data, as a byte sequence.
        url (str): the full URL or path where the cookie was set.

    Raises:
        errors.WrongPlugin: If the cookie name differs from the one
            supplied in COOKIE_NAME.
        ValueError: If cookie_name or cookie_data are not set.
    """
    if ((cookie_name is None) or (cookie_data is None)):
        raise ValueError('Cookie name or data are not set.')
    if (cookie_name != self.COOKIE_NAME):
        raise errors.WrongPlugin('Not the correct cookie plugin for: {0:s} [{1:s}]'.format(cookie_name, self.NAME))
    # Let the base class do bookkeeping, then extract the events.
    super(BaseCookiePlugin, self).Process(parser_mediator)
    self.GetEntries(parser_mediator, cookie_data=cookie_data, url=url)
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
cookie_name (str): the name of the cookie value.
cookie_data (bytes): the cookie data, as a byte sequence.
url (str): the full URL or path where the cookie was set.
Raises:
errors.WrongPlugin: If the cookie name differs from the one
supplied in COOKIE_NAME.
ValueError: If cookie_name or cookie_data are not set. | codesearchnet |
def remove_instance(self, instance):
    """Remove an instance from the MongoDB cache.

    Args:
        instance (AtlasServiceInstance.Instance): the instance to remove.

    Raises:
        ErrStorageMongoConnection: Error during MongoDB communication.
        ErrStorageRemoveInstance: Failed to remove the instance.
    """
    query = { "instance_id" : instance.instance_id, "binding_id" : { "$exists" : False } }
    try:
        result = self.broker.delete_one(query)
    except Exception as err:
        # A bare "except:" here would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception and chain the original cause.
        raise ErrStorageMongoConnection("Remove Instance") from err
    if result is not None and result.deleted_count == 1:
        instance.provisioned = False
    else:
        raise ErrStorageRemoveInstance(instance.instance_id)
Remove an object from the MongoDB storage for caching
Args:
instance (AtlasServiceInstance.Instance): instance
Raises:
ErrStorageMongoConnection: Error during MongoDB communication.
ErrStorageRemoveInstance: Failed to remove the instance. | juraj-google-style |
def __init__(self, filesystem, os_path_module=None):
    """Also exposes self.path (to fake os.path).

    Args:
        filesystem: FakeFilesystem used to provide file system information.
        os_path_module: (deprecated) Optional FakePathModule instance.
    """
    self.filesystem = filesystem
    self.sep = filesystem.path_separator
    self.altsep = filesystem.alternative_path_separator
    self.linesep = filesystem.line_separator()
    self._os_module = os
    if os_path_module is None:
        self.path = FakePathModule(self.filesystem, self)
    else:
        warnings.warn(FAKE_PATH_MODULE_DEPRECATION, DeprecationWarning,
                      stacklevel=2)
        self.path = os_path_module
    if IS_PY2:
        self.fdopen = self._fdopen_ver2
    else:
        self.fdopen = self._fdopen
    # Bug fix: both branches previously evaluated to '/dev/nul', a path that
    # exists on neither platform. Use the real null device name per OS
    # ('nul' on Windows, '/dev/null' on POSIX).
    self.__class__.devnull = ('nul' if filesystem.is_windows_fs
                              else '/dev/null')
Args:
filesystem: FakeFilesystem used to provide file system information
os_path_module: (deprecated) Optional FakePathModule instance | juraj-google-style |
def connect_to_websocket(self):
    """Make the connection to the Discord websocket.

    This method is not blocking; call it after initializing your Pycord
    object and then move on. To block on just maintaining the websocket
    connection, call ``keep_running``, which blocks until your application
    is interrupted.
    """
    self.logger.info('Making websocket connection')
    try:
        # Drop any previous connection before opening a new one.
        if hasattr(self, '_ws'):
            self._ws.close()
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    except:
        self.logger.debug('Couldn\'t terminate previous websocket connection')
    self._ws = websocket.WebSocketApp(
        self._get_websocket_address() + '?v=6&encoding=json',
        on_message=self._ws_on_message,
        on_error=self._ws_on_error,
        on_close=self._ws_on_close
    )
    self._ws.on_open = self._ws_on_open
    # Run the websocket loop on a background thread wrapper.
    self._ws_run_forever_wrapper = WebSocketRunForeverWrapper(self.logger, self._ws)
    self._ws_run_forever_wrapper.start()
This method is not blocking, so you'll probably want to call it after
initializating your Pycord object, and then move on with your code. When
you want to block on just maintaining the websocket connection, then call
``keep_running``, and it'll block until your application is interrupted.
Args:
None | juraj-google-style |
def run_inference(self, batch: Sequence[Sequence[OpenAIChatMessage]], model: _VLLMModelServer, inference_args: Optional[dict[str, Any]]=None) -> Iterable[PredictionResult]:
    """Runs inferences on a batch of text strings.

    Args:
        batch: A sequence of examples as OpenAI messages.
        model: A _VLLMModelServer for connecting to the spun up server.
        inference_args: Any additional arguments for an inference.

    Returns:
        An Iterable of type PredictionResult.
    """
    # Drive the async implementation to completion on a fresh event loop.
    return asyncio.run(self._async_run_inference(batch, model, inference_args))
Args:
batch: A sequence of examples as OpenAI messages.
model: A _VLLMModelServer for connecting to the spun up server.
inference_args: Any additional arguments for an inference.
Returns:
An Iterable of type PredictionResult. | github-repos |
def update_ports(self, ports, id_or_uri):
    """Updates the switch ports.

    Only ports under the management of OneView and those that are unlinked
    are supported for update.

    Note:
        This method is available for API version 300 or later.

    Args:
        ports: List of Switch Ports.
        id_or_uri: Can be either the switch id or the switch uri.

    Returns:
        dict: Switch
    """
    defaulted_ports = merge_default_values(ports, {'type': 'port'})
    update_uri = self._client.build_uri(id_or_uri) + "/update-ports"
    return self._client.update(uri=update_uri, resource=defaulted_ports)
supported for update.
Note:
This method is available for API version 300 or later.
Args:
ports: List of Switch Ports.
id_or_uri: Can be either the switch id or the switch uri.
Returns:
dict: Switch | juraj-google-style |
def major_complex(network, state):
    """Return the major complex of the network.

    Args:
        network (Network): The |Network| of interest.
        state (tuple[int]): The state of the network (a binary tuple).

    Returns:
        SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with
        maximal |big_phi|.
    """
    log.info('Calculating major complex...')
    all_complexes = complexes(network, state)
    if all_complexes:
        best = max(all_complexes)
    else:
        # No complexes: fall back to the null SIA of the empty subsystem.
        best = _null_sia(Subsystem(network, state, ()))
    log.info("Finished calculating major complex.")
    return best
return result | Return the major complex of the network.
Args:
network (Network): The |Network| of interest.
state (tuple[int]): The state of the network (a binary tuple).
Returns:
SystemIrreducibilityAnalysis: The |SIA| for the |Subsystem| with
maximal |big_phi|. | juraj-google-style |
def create_binary(self, key, value):
    """Create method of CRUD operation for binary data.

    Args:
        key (string): The variable to write to the DB.
        value (any): The data to write to the DB.

    Returns:
        (string): Result of DB write, or None when key/value is missing.
    """
    # Guard clause: nothing to write when either field is missing.
    if key is None or value is None:
        self.tcex.log.warning(u'The key or value field was None.')
        return None
    try:
        payload = json.dumps(base64.b64encode(bytes(value)).decode('utf-8'))
    except TypeError:
        # Python 3 str input needs an explicit encoding for bytes().
        payload = json.dumps(
            base64.b64encode(bytes(value, 'utf-8')).decode('utf-8'))
    return self.db.create(key.strip(), payload)
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write. | juraj-google-style |
def parse_ped(ped_stream, family_type='ped'):
    """Parse out minimal family information from a PED file.

    Args:
        ped_stream(iterable(str)): Lines of the pedigree file.
        family_type(str): Format of the pedigree information.

    Returns:
        family_id(str), samples(list[dict])
    """
    parser = FamilyParser(ped_stream, family_type=family_type)
    # Exactly one case per PED file is supported.
    if len(parser.families) != 1:
        raise PedigreeError('Only one case per ped file is allowed')
    family_id, family = next(iter(parser.families.items()))
    samples = []
    for ind_id, individual in family.individuals.items():
        samples.append({
            'sample_id': ind_id,
            'father': individual.father,
            'mother': individual.mother,
            'sex': SEX_MAP[individual.sex],
            'phenotype': PHENOTYPE_MAP[int(individual.phenotype)],
        })
    return family_id, samples
Args:
ped_stream(iterable(str))
family_type(str): Format of the pedigree information
Returns:
family_id(str), samples(list[dict]) | codesearchnet |
def with_start_after(self, after_namespace):
    """Return a copy of this NamespaceRange starting just past a namespace.

    Args:
        after_namespace: A namespace string.

    Returns:
        A NamespaceRange whose namespace_start is the lexicographically next
        namespace after the given namespace string.

    Raises:
        ValueError: if the NamespaceRange includes only a single namespace.
    """
    # Map to ordinal space, step forward one, and map back to a namespace.
    next_ordinal = _namespace_to_ord(after_namespace) + 1
    new_start = _ord_to_namespace(next_ordinal)
    return NamespaceRange(new_start, self.namespace_end, _app=self.app)
Args:
after_namespace: A namespace string.
Returns:
A NamespaceRange object whose namespace_start is the lexicographically next
namespace after the given namespace string.
Raises:
ValueError: if the NamespaceRange includes only a single namespace. | juraj-google-style |
def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples,
        where `n = len(cells)`. If False, the states are all concatenated
        along the column axis. This latter behavior will soon be deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the
        cells returns a state tuple but the flag `state_is_tuple` is
        `False`.
    """
    logging.warning('`tf.nn.rnn_cell.MultiRNNCell` is deprecated. This class is equivalent as `tf.keras.layers.StackedRNNCells`, and will be replaced by that in Tensorflow 2.0.')
    super(MultiRNNCell, self).__init__()
    if not cells:
        raise ValueError('Must specify at least one cell for MultiRNNCell.')
    if not nest.is_nested(cells):
        raise TypeError('cells must be a list or tuple, but saw: %s.' % cells)
    # Comparing object ids catches the same cell instance appearing twice,
    # which would (possibly unintentionally) share weights between layers.
    if len(set((id(cell) for cell in cells))) < len(cells):
        logging.log_first_n(logging.WARN, 'At least two cells provided to MultiRNNCell are the same object and will share weights.', 1)
    self._cells = cells
    for cell_number, cell in enumerate(self._cells):
        # Track trackable cells so checkpointing picks up their variables.
        if isinstance(cell, trackable.Trackable):
            self._track_trackable(cell, name='cell-%d' % (cell_number,))
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
        # Concatenated-state mode cannot represent nested per-cell states.
        if any((nest.is_nested(c.state_size) for c in self._cells)):
            raise ValueError('Some cells return tuples of states, but the flag state_is_tuple is not set. State sizes are: %s' % str([c.state_size for c in self._cells]))
Args:
cells: list of RNNCells that will be composed in this order.
state_is_tuple: If True, accepted and returned states are n-tuples, where
`n = len(cells)`. If False, the states are all concatenated along the
column axis. This latter behavior will soon be deprecated.
Raises:
ValueError: if cells is empty (not allowed), or at least one of the cells
returns a state tuple but the flag `state_is_tuple` is `False`. | github-repos |
def find_log_dir_and_names(program_name=None, log_dir=None):
    """Computes the directory and filename prefix for log files.

    Args:
        program_name: str|None, the filename part of the path to the running
            program without its extension (e.g. 'foobar' for
            '/usr/bin/foobar.py'). When omitted, it is derived from
            sys.argv[0] and prefixed with 'py_'.
        log_dir: str|None, the desired log directory.

    Returns:
        (log_dir, file_prefix, symlink_prefix)
    """
    if not program_name:
        # Derive from argv[0]: strip the extension, prefix with "py_".
        base = os.path.basename(sys.argv[0])
        program_name = 'py_%s' % os.path.splitext(base)[0]
    actual_log_dir = find_log_dir(log_dir=log_dir)
    try:
        username = getpass.getuser()
    except KeyError:
        # getuser can fail (e.g. no passwd entry); fall back to the uid.
        username = str(os.getuid()) if hasattr(os, 'getuid') else 'unknown'
    hostname = socket.gethostname()
    file_prefix = '%s.%s.%s.log' % (program_name, hostname, username)
    return actual_log_dir, file_prefix, program_name
Args:
program_name: str|None, the filename part of the path to the program that
is running without its extension. e.g: if your program is called
'usr/bin/foobar.py' this method should probably be called with
program_name='foobar' However, this is just a convention, you can
pass in any string you want, and it will be used as part of the
log filename. If you don't pass in anything, the default behavior
is as described in the example. In python standard logging mode,
the program_name will be prefixed with py_ if the program_name
argument is omitted.
log_dir: str|None, the desired log directory.
Returns:
(log_dir, file_prefix, symlink_prefix) | codesearchnet |
def __init__(self, size, dropout=None, lstmcell_args=None, named_tensors=None, scope='internal_lstm', summary_labels=()):
    """LSTM layer.

    Args:
        size: LSTM size.
        dropout: Dropout rate.
        lstmcell_args: Optional dict of extra kwargs for the LSTM cell.
        named_tensors: Named tensors passed through to the base layer.
        scope: TensorFlow variable scope name.
        summary_labels: Labels of summaries to record.
    """
    self.size = size
    self.dropout = dropout
    # BUG FIX: the default used to be a shared mutable dict ({}), so every
    # instance created without an explicit argument aliased the same object.
    self.lstmcell_args = lstmcell_args if lstmcell_args is not None else {}
    super(InternalLstm, self).__init__(named_tensors=named_tensors, scope=scope, summary_labels=summary_labels)
Args:
size: LSTM size.
dropout: Dropout rate. | juraj-google-style |
def from_api_repr(cls, api_repr):
    """Return a ``SchemaField`` object deserialized from a dictionary.

    Args:
        api_repr (Mapping[str, str]): The serialized representation
            of the SchemaField, such as what is output by
            :meth:`to_api_repr`.

    Returns:
        google.cloud.biquery.schema.SchemaField:
            The ``SchemaField`` object.
    """
    # Recurse into any nested record fields first.
    subfields = [cls.from_api_repr(child) for child in api_repr.get("fields", ())]
    return cls(
        field_type=api_repr["type"].upper(),
        fields=subfields,
        mode=api_repr.get("mode", "NULLABLE").upper(),
        description=api_repr.get("description"),
        name=api_repr["name"],
    )
Args:
api_repr (Mapping[str, str]): The serialized representation
of the SchemaField, such as what is output by
:meth:`to_api_repr`.
Returns:
google.cloud.biquery.schema.SchemaField:
The ``SchemaField`` object. | juraj-google-style |
def _on_cancelok(self, cancel_frame):
    """Called when the server acknowledges a consumer cancel request.

    Args:
        cancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from
            the server.
    """
    _log.info("Consumer canceled; returning all unprocessed messages to the queue")
    # delivery_tag=0 with multiple=True nacks every outstanding delivery on
    # this channel; requeue=True puts them back on the queue for redelivery.
    self._channel.basic_nack(delivery_tag=0, multiple=True, requeue=True)
Args:
cancel_frame (pika.spec.Basic.CancelOk): The cancelok frame from
the server. | juraj-google-style |
def __init__(self, dataset_fn, coordinator):
    """Makes an iterable from datasets created by the given function.

    Args:
      dataset_fn: A function that returns a `Dataset`.
      coordinator: a `ClusterCoordinator` object, used to create dataset
        resources.
    """
    def disallow_variable_creation(next_creator, **kwargs):
        raise ValueError('Creating variables in `dataset_fn` is not allowed.')
    if isinstance(dataset_fn, def_function.Function):
        # Already a tf.function: trace to a ConcreteFunction, forbidding
        # variable creation during tracing via the creator scope above.
        with variable_scope.variable_creator_scope(disallow_variable_creation):
            dataset_fn = dataset_fn.get_concrete_function()
    elif not isinstance(dataset_fn, tf_function.ConcreteFunction):
        # Plain Python callable: wrap in tf.function first, then trace.
        with variable_scope.variable_creator_scope(disallow_variable_creation):
            dataset_fn = def_function.function(dataset_fn).get_concrete_function()
    self._dataset_fn = dataset_fn
    self._coordinator = coordinator
    # Populated lazily once the dataset is actually built.
    self._element_spec = None
Args:
dataset_fn: A function that returns a `Dataset`.
coordinator: a `ClusterCoordinator` object, used to create dataset
resources. | github-repos |
def rewards_to_go(rewards, mask, gamma=0.99):
    r"""Computes rewards to go.

    Reward to go is the discounted reward yet to be collected going
    forward from each time-step, i.e.:

        r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})

    BUG FIX: a stray bare ``r`` statement (residue of the raw-docstring
    prefix) previously sat in the function body and raised NameError.

    Args:
        rewards: np.ndarray of shape (B, T) of rewards.
        mask: np.ndarray of shape (B, T) of mask for the rewards.
        gamma: float, discount factor.

    Returns:
        rewards to go, np.ndarray of shape (B, T).
    """
    B, T = rewards.shape
    masked_rewards = rewards * mask
    # Accumulate from the last time-step backwards, then flip into order.
    r2gs = [masked_rewards[:, -1]]
    for t in reversed(range(T - 1)):
        r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
    assert T == len(r2gs)
    return np.flip(np.stack(r2gs, axis=1), axis=1)
Reward to go is defined as follows, the discounted reward that we have to
yet collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mask for the rewards.
gamma: float, discount factor.
Returns:
rewards to go, np.ndarray of shape (B, T). | juraj-google-style |
def add_rec_new(self, k, val):
    """Recursively add a new value and its children to me, and assign a
    variable to it.

    Args:
        k (str): The name of the variable to assign.
        val (LispVal): The value to be added and assigned.

    Returns:
        LispVal: The added value, to allow chaining.
    """
    # Register val (and, recursively, its children) before binding the name.
    self.rec_new(val)
    self[k] = val
    return val
variable to it.
Args:
k (str): The name of the variable to assign.
val (LispVal): The value to be added and assigned.
Returns:
LispVal: The added value. | codesearchnet |
def run(self, resources):
    """Runs the flash step.

    Args:
        resources (dict): A dictionary containing the required resources
            that we needed access to in order to perform this step. Must
            contain a 'connection' entry with a jlink port.

    Raises:
        ArgumentError: if the connection port is not a jlink port.
    """
    hwman = resources['connection']
    port = hwman._port
    if not port.startswith('jlink'):
        # BUG FIX: the original referenced an undefined name `args` here
        # (args['port']), raising NameError instead of ArgumentError.
        raise ArgumentError('FlashBoardStep is currently only possible through jlink', invalid_port=port)
    debug = hwman.hwman.debug(self._debug_string)
    debug.flash(self._file)
Args:
resources (dict): A dictionary containing the required resources that
we needed access to in order to perform this step. | codesearchnet |
def get_text_features(self, input_ids: Optional[torch.Tensor]=None, attention_mask: Optional[torch.Tensor]=None, position_ids: Optional[torch.Tensor]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None) -> torch.FloatTensor:
    """Compute text embeddings for the given token ids.

    Returns:
        text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
        applying the projection layer to the pooled output of [`CLIPSegTextModel`].
    """
    # Fall back to the model config for any unset output options.
    config = self.config
    if output_attentions is None:
        output_attentions = config.output_attentions
    if output_hidden_states is None:
        output_hidden_states = config.output_hidden_states
    if return_dict is None:
        return_dict = config.use_return_dict
    text_outputs = self.text_model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    # Index 1 is the pooled output; project it into the shared space.
    pooled_output = text_outputs[1]
    return self.text_projection(pooled_output)
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
applying the projection layer to the pooled output of [`CLIPSegTextModel`].
Examples:
```python
>>> from transformers import AutoTokenizer, CLIPSegModel
>>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
``` | github-repos |
def _all_gather(self, input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor:
    """All-gather a dense tensor.

    Args:
      input_tensor: a dense tensor. It must have the same shape on all
        replicas.
      options: an optional tf.distribute.experimental.CommunicationOptions.
        If provided, it overrides the default options.

    Returns:
      The gathered tensor.
    """
    # A fresh instance key distinguishes this collective op instance from
    # other collectives running in the same group.
    instance_key = self._next_instance_key()
    options = self._options.merge(options)
    ordering_token = self._get_ordering_token()
    with ops.device(self._device):
        return collective_ops.all_gather_v2(input_tensor, self._group_size, self._group_key, instance_key, communication_hint=options.implementation.value, timeout=options.timeout_seconds, ordering_token=ordering_token)
Args:
input_tensor: a dense tensor. It must have the same shape on all replicas.
options: an optional tf.distribute.experimental.CommunicationOptions. If
provided, it overrides the default options.
Returns:
The reduced tensor. | github-repos |
def screenshot(path=None):
    """Capture the screen and save it as a png file.

    If path is None then the image will be placed in the current
    folder with the names: ``screenshot001.png, screenshot002.png, ...``

    Args:
        path (Optional[Text]): The file path to save the screenshot, or a
            writable binary file-like object.
    """
    if not _rootinitialized:
        raise TDLError('Initialize first with tdl.init')
    if isinstance(path, str):
        _lib.TCOD_sys_save_screenshot(_encodeString(path))
    elif path is None:
        # Pick the first unused screenshotNNN.png name in the cwd.
        filelist = _os.listdir('.')
        n = 1
        filename = 'screenshot%.3i.png' % n
        while filename in filelist:
            n += 1
            filename = 'screenshot%.3i.png' % n
        _lib.TCOD_sys_save_screenshot(_encodeString(filename))
    else:
        # path is a file-like object: render to a temp file, then copy the
        # bytes into it.
        # BUG FIX: the original used os.tempnam (removed in Python 3) and
        # then did `with tmpname as tmpfile:`, which treats the path string
        # itself as a context manager and raises TypeError.
        import tempfile
        fd, tmpname = tempfile.mkstemp(suffix='.png')
        _os.close(fd)
        try:
            _lib.TCOD_sys_save_screenshot(_encodeString(tmpname))
            with open(tmpname, 'rb') as tmpfile:
                path.write(tmpfile.read())
        finally:
            _os.remove(tmpname)
If path is None then the image will be placed in the current
folder with the names:
``screenshot001.png, screenshot002.png, ...``
Args:
path (Optional[Text]): The file path to save the screenshot. | codesearchnet |
def is_periodic_image(self, other, tolerance=1e-8, check_lattice=True):
    """Returns True if sites are periodic images of each other.

    Args:
        other (PeriodicSite): Other site
        tolerance (float): Tolerance to compare fractional coordinates
        check_lattice (bool): Whether to check if the two sites have the
            same lattice.

    Returns:
        bool: True if sites are periodic images of each other.
    """
    # Lattices must match (when requested) and species must be identical.
    lattices_ok = (not check_lattice) or self.lattice == other.lattice
    if not lattices_ok or self.species != other.species:
        return False
    # Fractional coordinates must coincide modulo lattice translations.
    delta = pbc_diff(self.frac_coords, other.frac_coords)
    return np.allclose(delta, [0, 0, 0], atol=tolerance)
Args:
other (PeriodicSite): Other site
tolerance (float): Tolerance to compare fractional coordinates
check_lattice (bool): Whether to check if the two sites have the
same lattice.
Returns:
bool: True if sites are periodic images of each other. | juraj-google-style |
def from_dir(dirpath: Path, feat_type: str) -> None:
    """Performs feature extraction from the WAV files in a directory.

    Args:
        dirpath: A `Path` to the directory where the WAV files reside.
        feat_type: The type of features that are being used.

    Raises:
        PersephoneException: if an empty WAV file is found, or if
            feat_type is not a recognized feature type.
    """
    logger.info("Extracting features from directory {}".format(dirpath))
    dirname = str(dirpath)

    def all_wavs_processed() -> bool:
        # True iff every *.wav already has a matching <prefix>.<feat_type>.npy
        # next to it, meaning extraction has nothing left to do.
        for fn in os.listdir(dirname):
            prefix, ext = os.path.splitext(fn)
            if ext == ".wav":
                if not os.path.exists(
                        os.path.join(dirname, "%s.%s.npy" % (prefix, feat_type))):
                    return False
        return True

    if all_wavs_processed():
        # Don't redo work that is already on disk.
        logger.info("All WAV files already preprocessed")
        return
    # Pitch features are extracted for the whole directory in a single
    # Kaldi pass before the per-file loop below.
    if feat_type == "pitch" or feat_type == "fbank_and_pitch":
        kaldi_pitch(dirname, dirname)
    for filename in os.listdir(dirname):
        logger.info("Preparing %s features for %s", feat_type, filename)
        path = os.path.join(dirname, filename)
        if path.endswith(".wav"):
            if empty_wav(path):
                raise PersephoneException("Can't extract features for {} since it is an empty WAV file. Remove it from the corpus.".format(path))
            if feat_type == "fbank":
                fbank(path)
            elif feat_type == "fbank_and_pitch":
                # Filterbank per file, then merge with the pitch features
                # produced by the directory-wide kaldi_pitch pass above.
                fbank(path)
                prefix = os.path.splitext(filename)[0]
                combine_fbank_and_pitch(dirname, prefix)
            elif feat_type == "pitch":
                # Already handled by the directory-wide kaldi_pitch call.
                pass
            elif feat_type == "mfcc13_d":
                mfcc(path)
            else:
                logger.warning("Feature type not found: %s", feat_type)
                raise PersephoneException("Feature type not found: %s" % feat_type)
Args:
dirpath: A `Path` to the directory where the WAV files reside.
feat_type: The type of features that are being used. | juraj-google-style |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.