| column | dtype | details |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

hexsha: 291831aa00a6766c53f2f4478976393b6e8a733d | size: 1,681 | ext: py | lang: Python
max_stars: dfapp/views.py @ dorican/django-dfapp (c70a1cc036cfe87a8075f76c39fdb437893351fe), licenses ["MIT"], count 2, events 2020-05-25T07:51:47.000Z to 2020-06-18T15:57:20.000Z
max_issues: dfapp/views.py @ dorican/django-dfapp (c70a1cc036cfe87a8075f76c39fdb437893351fe), licenses ["MIT"], count null, events null
max_forks: dfapp/views.py @ dorican/django-dfapp (c70a1cc036cfe87a8075f76c39fdb437893351fe), licenses ["MIT"], count null, events null

content:
from django.views.generic.edit import FormView
from django.views.generic import DetailView
from django.contrib.contenttypes.models import ContentType
from django.http import JsonResponse
from django.contrib.admin.sites import site
from django.contrib import admin
class CheckCTView(DetailView):
"""
The view that fetches the ContentType instance and passes the resolved model class to the next view.
"""
model = ContentType
form_class = None
def get(self, request, *args, **kwargs):
if request.is_ajax():
super().get(request, *args, **kwargs)
return RenderFormView(request=request, args=args, kwargs=kwargs, model=self.object.model_class(), form_class=self.form_class).post(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
class RenderFormView(FormView):
"""
The view that builds the frontend or admin (backend) form for the resolved model. Used for AJAX form updates.
"""
template_name = ''
response_class = JsonResponse
success_url = '/'
def get_form_class(self):
if 'admin' in self.request.META.get('HTTP_REFERER', ''):
self.form_class = (site._registry.get(self.model) or admin.ModelAdmin(self.model, site)).get_form(self.request)
return super().get_form_class()
def form_valid(self, form):
return self.form_invalid(form)
def render_to_response(self, context, **response_kwargs):
form = context['form']
return self.response_class(form.return_changed())
def get_prefix(self):
self.prefix = self.request.POST.get('prefix')
return super().get_prefix()
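
# Illustrative usage sketch (not part of the original dfapp/views.py; the route
# and URL name below are assumptions). CheckCTView looks up a ContentType by
# primary key and hands its model class to RenderFormView, which answers AJAX
# POSTs (carrying a form "prefix") with JSON built by the form's
# return_changed() method.
from django.urls import path

example_urlpatterns = [
    path("dfapp/form/<int:pk>/", CheckCTView.as_view(), name="dfapp-form"),
]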
avg_line_length: 35.020833 | max_line_length: 168 | alphanum_fraction: 0.69304

hexsha: 011f4fbc50b2b91713a1892d38d1dd747b6cdbec | size: 586 | ext: py | lang: Python
max_stars: src/opera/parser/tosca/v_1_3/policy_definition.py @ Legion2/xopera-opera (808f23cbac326b6d067e6ec531a0109ae02d0f5e), licenses ["Apache-2.0"], count null, events null
max_issues: src/opera/parser/tosca/v_1_3/policy_definition.py @ Legion2/xopera-opera (808f23cbac326b6d067e6ec531a0109ae02d0f5e), licenses ["Apache-2.0"], count null, events null
max_forks: src/opera/parser/tosca/v_1_3/policy_definition.py @ Legion2/xopera-opera (808f23cbac326b6d067e6ec531a0109ae02d0f5e), licenses ["Apache-2.0"], count null, events null

content:
from ..entity import Entity
from ..list import List
from ..map import Map
from ..reference import Reference, ReferenceXOR
from ..string import String
from ..void import Void
from .trigger_definition import TriggerDefinition
class PolicyDefinition(Entity):
ATTRS = dict(
type=Reference("policy_types"),
description=String,
metadata=Map(String),
properties=Map(Void),
targets=List(ReferenceXOR(("topology_template", "node_templates"), ("topology_template", "groups"))),
triggers=Map(TriggerDefinition),
)
REQUIRED = {"type"}
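
# Illustrative sketch (not from the xopera-opera repository): a TOSCA 1.3 policy
# definition of the shape this parser accepts. `type` is resolved against
# policy_types, `targets` may name node templates or groups, and each entry
# under `triggers` would be parsed as a TriggerDefinition. The policy, type and
# target names below are invented for the example.
EXAMPLE_POLICY_YAML = """
scale_out_policy:
  type: radon.policies.scaling.ScaleOut
  description: Scale out the web tier when it is overloaded.
  metadata:
    owner: ops-team
  properties:
    adjustment: 1
  targets: [ web_server ]
"""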
avg_line_length: 27.904762 | max_line_length: 109 | alphanum_fraction: 0.699659

hexsha: 854e53a1a703310f0da8cca56b6fb958c993dd37 | size: 156,522 | ext: py | lang: Python
max_stars: tensorflow/python/data/ops/dataset_ops.py @ 18867664853/tensorflow (f4a49de0041f7e3f3e8c5cf04fb1862cd0944fdc), licenses ["Apache-2.0"], count null, events null
max_issues: tensorflow/python/data/ops/dataset_ops.py @ 18867664853/tensorflow (f4a49de0041f7e3f3e8c5cf04fb1862cd0944fdc), licenses ["Apache-2.0"], count null, events null
max_forks: tensorflow/python/data/ops/dataset_ops.py @ 18867664853/tensorflow (f4a49de0041f7e3f3e8c5cf04fb1862cd0944fdc), licenses ["Apache-2.0"], count null, events null

content:
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import enum
import functools
import sys
import threading
import warnings
import weakref
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import optimization_options
from tensorflow.python.data.experimental.ops import stats_options
from tensorflow.python.data.experimental.ops import threading_options
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed as core_random_seed
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as tracking_base
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import deprecation
from tensorflow.python.util import function_utils
from tensorflow.python.util import lazy_loader
from tensorflow.python.util import nest as tf_nest
from tensorflow.python.util.tf_export import tf_export
# Loaded lazily due to a circular dependency (roughly
# tf.function->wrap_function->dataset->autograph->tf.function).
# TODO(b/133251390): Use a regular import.
wrap_function = lazy_loader.LazyLoader(
"wrap_function", globals(),
"tensorflow.python.eager.wrap_function")
# TODO(mdan): Create a public API for this.
autograph_ctx = lazy_loader.LazyLoader(
"autograph_ctx", globals(),
"tensorflow.python.autograph.core.ag_ctx")
autograph = lazy_loader.LazyLoader(
"autograph", globals(),
"tensorflow.python.autograph.impl.api")
ops.NotDifferentiable("ReduceDataset")
# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
class AutotuneAlgorithm(enum.Enum):
HILL_CLIMB = 0
GRADIENT_DESCENT = 1
@tf_export("data.Dataset", v1=[])
@six.add_metaclass(abc.ABCMeta)
class DatasetV2(tracking_base.Trackable, composite_tensor.CompositeTensor):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements and a "logical plan" of transformations that act on
those elements.
A dataset contains elements that each have the same (nested) structure and the
individual components of the structure can be of any type representable by
`tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`, `tf.SparseTensor`,
`tf.RaggedTensor`, or `tf.TensorArray`.
Example elements:
```python
# Integer element
a = 1
# Float element
b = 2.0
# Tuple element with 2 components
c = (1, 2)
# Dict element with 3 components
d = {"a": (2, 2), "b": 3}
# Element containing a dataset
e = tf.data.Dataset.from_tensors(10)
```
"""
def __init__(self, variant_tensor):
"""Creates a DatasetV2 object.
This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not
take anything in its constructor, whereas DatasetV2 expects subclasses to
create a variant_tensor and pass it in to the super() call.
Args:
variant_tensor: A DT_VARIANT tensor that represents the dataset.
"""
self._variant_tensor_attr = variant_tensor
weak_self = weakref.proxy(self)
self._variant_tracker = self._track_trackable(
_VariantTracker(
self._variant_tensor,
# _trace_variant_creation only works when executing eagerly, so we
# don't want to run it immediately. We also want the _VariantTracker
# to have a weak reference to the Dataset to avoid creating
# reference cycles and making work for the garbage collector.
lambda: weak_self._trace_variant_creation()()), # pylint: disable=unnecessary-lambda,protected-access
name="_variant_tracker")
self._graph_attr = ops.get_default_graph()
@property
def _variant_tensor(self):
return self._variant_tensor_attr
@_variant_tensor.setter
def _variant_tensor(self, _):
raise ValueError("The _variant_tensor property is read-only")
def _as_serialized_graph(self, allow_stateful=None):
"""Produces serialized graph representation of the dataset.
Args:
allow_stateful: If true, we allow stateful ops to be present in the graph
def. In that case, the state in these ops would be thrown away.
Returns:
A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
serialized graph.
"""
if compat.forward_compatible(2019, 9, 16) or allow_stateful:
return gen_dataset_ops.dataset_to_graph(self._variant_tensor,
allow_stateful=allow_stateful)
else:
return gen_dataset_ops.dataset_to_graph(self._variant_tensor)
def _trace_variant_creation(self):
"""Traces a function which outputs a variant `tf.Tensor` for this dataset.
Note that creating this function involves evaluating an op, and is currently
only supported when executing eagerly.
Returns:
A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
"""
variant = self._variant_tensor
if not isinstance(variant, ops.EagerTensor):
raise NotImplementedError(
"Can only export Datasets which were created executing eagerly. "
"Please file a feature request if this is important to you.")
with context.eager_mode(), ops.device("CPU"):
graph_def = graph_pb2.GraphDef().FromString(
self._as_serialized_graph().numpy()) # pylint: disable=protected-access
output_node_name = None
for node in graph_def.node:
if node.op == "_Retval":
if output_node_name is not None:
raise AssertionError(
"Found multiple return values from the dataset's graph, expected "
"only one.")
output_node_name, = node.input
if output_node_name is None:
raise AssertionError("Could not find the dataset's output node.")
# Add functions used in this Dataset to the function's graph, since they
# need to follow it around (and for example be added to a SavedModel which
# references the dataset).
variant_function = wrap_function.function_from_graph_def(
graph_def, inputs=[], outputs=output_node_name + ":0")
for used_function in self._functions():
used_function.function.add_to_graph(variant_function.graph)
return variant_function
@abc.abstractmethod
def _inputs(self):
"""Returns a list of the input datasets of the dataset."""
raise NotImplementedError("Dataset._inputs")
@property
def _graph(self):
return self._graph_attr
@_graph.setter
def _graph(self, _):
raise ValueError("The _graph property is read-only")
def _has_captured_ref(self):
"""Whether this dataset uses a function that captures ref variables.
Returns:
A boolean, which if true indicates that the dataset or one of its inputs
uses a function that captures ref variables.
"""
if context.executing_eagerly():
# RefVariables are not supported in eager mode
return False
def is_tensor_or_parent_ref(tensor):
if tensor.dtype._is_ref_dtype: # pylint: disable=protected-access
return True
# If the captured tensor is an eager tensor, we cannot trace its inputs.
if isinstance(tensor, ops._EagerTensorBase): # pylint: disable=protected-access
return False
return any([is_tensor_or_parent_ref(x) for x in tensor.op.inputs])
for fn in self._functions():
if any([is_tensor_or_parent_ref(t) for t in fn.function.captured_inputs]):
return True
return any(
[input_dataset._has_captured_ref() for input_dataset in self._inputs()]) # pylint: disable=protected-access
# TODO(jsimsa): Change this to be the transitive closure of functions used
# by this dataset and its inputs.
def _functions(self):
"""Returns a list of functions associated with this dataset.
Returns:
A list of `StructuredFunctionWrapper` objects.
"""
return []
def options(self):
"""Returns the options for this dataset and its inputs.
Returns:
A `tf.data.Options` object representing the dataset options.
"""
options = Options()
for input_dataset in self._inputs():
input_options = input_dataset.options()
if input_options is not None:
options = options.merge(input_options)
return options
def _apply_options(self):
"""Apply options, such as optimization configuration, to the dataset."""
dataset = self
options = self.options()
if options.experimental_threading is not None:
t_options = options.experimental_threading
if t_options.max_intra_op_parallelism is not None:
dataset = _MaxIntraOpParallelismDataset(
dataset, t_options.max_intra_op_parallelism)
if t_options.private_threadpool_size is not None:
dataset = _PrivateThreadPoolDataset(dataset,
t_options.private_threadpool_size)
# pylint: disable=protected-access
static_optimizations = options._static_optimizations()
static_optimization_configs = options._static_optimization_configs()
# pylint: enable=protected-access
if static_optimizations:
if self._has_captured_ref():
warnings.warn(
"tf.data static optimizations are not compatible with tf.Variable. "
"The following optimizations will be disabled: %s. To enable "
"optimizations, use resource variables instead by calling "
"`tf.enable_resource_variables()` at the start of the program." %
", ".join(static_optimizations))
else:
dataset = _OptimizeDataset(dataset, static_optimizations,
static_optimization_configs)
autotune = True
algorithm = AutotuneAlgorithm.HILL_CLIMB
cpu_budget = 0 # Indicates that all CPU cores should be used.
if options.experimental_optimization is not None:
if options.experimental_optimization.autotune is False: # pylint: disable=g-bool-id-comparison
autotune = False
if options.experimental_optimization.autotune_algorithm is not None:
algorithm = options.experimental_optimization.autotune_algorithm
if options.experimental_optimization.autotune_cpu_budget is not None:
cpu_budget = options.experimental_optimization.autotune_cpu_budget
if autotune:
dataset = _ModelDataset(dataset, algorithm, cpu_budget)
if options.experimental_stats and options.experimental_stats.aggregator: # pylint: disable=line-too-long
dataset = _SetStatsAggregatorDataset( # pylint: disable=protected-access
dataset, options.experimental_stats.aggregator,
options.experimental_stats.prefix,
options.experimental_stats.counter_prefix)
return dataset
def __iter__(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
The returned iterator implements the Python iterator protocol and therefore
can only be used in eager mode.
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If not inside of tf.function and not executing eagerly.
"""
if (context.executing_eagerly()
or ops.get_default_graph()._building_function): # pylint: disable=protected-access
return iterator_ops.IteratorV2(self)
else:
raise RuntimeError("__iter__() is only supported inside of tf.function "
"or when eager execution is enabled.")
@abc.abstractproperty
def element_spec(self):
"""The type specification of an element of this dataset.
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of this dataset and specifying the type of individual components.
"""
raise NotImplementedError("Dataset.element_spec")
def __repr__(self):
output_shapes = nest.map_structure(str, get_legacy_output_shapes(self))
output_shapes = str(output_shapes).replace("'", "")
output_types = nest.map_structure(repr, get_legacy_output_types(self))
output_types = str(output_types).replace("'", "")
return ("<%s shapes: %s, types: %s>" % (type(self).__name__, output_shapes,
output_types))
@property
def _flat_shapes(self):
"""Returns a list `tf.TensorShapes`s for the element tensor representation.
Returns:
A list `tf.TensorShapes`s for the element tensor representation.
"""
return structure.get_flat_tensor_shapes(self.element_spec)
@property
def _flat_types(self):
"""Returns a list `tf.DType`s for the element tensor representation.
Returns:
A list `tf.DType`s for the element tensor representation.
"""
return structure.get_flat_tensor_types(self.element_spec)
@property
def _flat_structure(self):
"""Helper for setting `output_shapes` and `output_types` attrs of an op.
Most dataset op constructors expect `output_shapes` and `output_types`
arguments that represent the flattened structure of an element. This helper
function generates these attrs as a keyword argument dictionary, allowing
`Dataset._variant_tensor` implementations to pass `**self._flat_structure`
to the op constructor.
Returns:
A dictionary of keyword arguments that can be passed to a dataset op
constructor.
"""
return {
"output_shapes": self._flat_shapes,
"output_types": self._flat_types,
}
@property
def _type_spec(self):
return DatasetSpec(self.element_spec)
@staticmethod
def from_tensors(tensors):
"""Creates a `Dataset` with a single element, comprising the given tensors.
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this
guide](https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A dataset element.
Returns:
Dataset: A `Dataset`.
"""
return TensorDataset(tensors)
@staticmethod
def from_tensor_slices(tensors):
"""Creates a `Dataset` whose elements are slices of the given tensors.
The given tensors are sliced along their first dimension. This operation
preserves the structure of the input tensors, removing the first dimension
of each tensor and using it as the dataset dimension. All input tensors
must have the same size in their first dimensions.
```python
# Slicing a 1D tensor produces scalar tensor elements.
Dataset.from_tensor_slices([1, 2, 3]) # ==> [ 1, 2, 3 ]
# Slicing a 2D tensor produces 1D tensor elements.
Dataset.from_tensor_slices([[1, 2], [3, 4], [5, 6]])
# ==> [ [1, 2], [3, 4], [5, 6] ]
# Slicing a tuple of 1D tensors produces tuple elements containing scalar
# tensors.
Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))
# ==> [ (1, 3, 5), (2, 4, 6) ]
# Dictionary structure is also preserved.
Dataset.from_tensor_slices({"a": [1, 2], "b": [3, 4], "c": [5, 6]})
# ==> [ {"a": 1, "b": 3, "c": 5}, {"a": 2, "b": 4, "c:" 6} ]
```
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
`tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If `tensors`
contains one or more large NumPy arrays, consider the alternative described
in [this guide](
https://tensorflow.org/guide/datasets#consuming_numpy_arrays).
Args:
tensors: A dataset element, with each component having the same size in
the first dimension.
Returns:
Dataset: A `Dataset`.
"""
return TensorSliceDataset(tensors)
class _GeneratorState(object):
"""Stores outstanding iterators created from a Python generator.
This class keeps track of potentially multiple iterators that may have
been created from a generator, e.g. in the case that the dataset is
repeated, or nested within a parallel computation.
"""
def __init__(self, generator):
self._generator = generator
self._lock = threading.Lock()
self._next_id = 0 # GUARDED_BY(self._lock)
self._args = {}
self._iterators = {}
def get_next_id(self, *args):
with self._lock:
ret = self._next_id
self._next_id += 1
self._args[ret] = args
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(ret, dtype=np.int64)
def get_iterator(self, iterator_id):
try:
return self._iterators[iterator_id]
except KeyError:
iterator = iter(self._generator(*self._args.pop(iterator_id)))
self._iterators[iterator_id] = iterator
return iterator
def iterator_completed(self, iterator_id):
del self._iterators[iterator_id]
@staticmethod
def from_generator(generator, output_types, output_shapes=None, args=None):
"""Creates a `Dataset` whose elements are generated by `generator`.
The `generator` argument must be a callable object that returns
an object that supports the `iter()` protocol (e.g. a generator function).
The elements generated by `generator` must be compatible with the given
`output_types` and (optional) `output_shapes` arguments.
For example:
```python
import itertools
tf.compat.v1.enable_eager_execution()
def gen():
for i in itertools.count(1):
yield (i, [1] * i)
ds = tf.data.Dataset.from_generator(
gen, (tf.int64, tf.int64), (tf.TensorShape([]), tf.TensorShape([None])))
for value in ds.take(2):
print(value)
# (1, array([1]))
# (2, array([1, 1]))
```
NOTE: The current implementation of `Dataset.from_generator()` uses
`tf.numpy_function` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
serialized in a `GraphDef`, and you should not use this method if you
need to serialize your model and restore it in a different environment.
NOTE: If `generator` depends on mutable global variables or other external
state, be aware that the runtime may invoke `generator` multiple times
(in order to support repeating the `Dataset`) and at any time
between the call to `Dataset.from_generator()` and the production of the
first element from the generator. Mutating global variables or external
state can cause undefined behavior, and we recommend that you explicitly
cache any external state in `generator` before calling
`Dataset.from_generator()`.
Args:
generator: A callable object that returns an object that supports the
`iter()` protocol. If `args` is not specified, `generator` must take no
arguments; otherwise it must take as many arguments as there are values
in `args`.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element yielded by `generator`.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element yielded by `generator`.
args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
and passed to `generator` as NumPy-array arguments.
Returns:
Dataset: A `Dataset`.
"""
if not callable(generator):
raise TypeError("`generator` must be callable.")
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if args is None:
args = ()
else:
args = tuple(ops.convert_n_to_tensor(args, name="args"))
flattened_types = [dtypes.as_dtype(dt) for dt in nest.flatten(output_types)]
flattened_shapes = nest.flatten(output_shapes)
generator_state = DatasetV2._GeneratorState(generator)
def get_iterator_id_fn(unused_dummy):
"""Creates a unique `iterator_id` for each pass over the dataset.
The returned `iterator_id` disambiguates between multiple concurrently
existing iterators.
Args:
unused_dummy: Ignored value.
Returns:
A `tf.int64` tensor whose value uniquely identifies an iterator in
`generator_state`.
"""
return script_ops.numpy_function(generator_state.get_next_id, args,
dtypes.int64)
def generator_next_fn(iterator_id_t):
"""Generates the next element from iterator with ID `iterator_id_t`.
We map this function across an infinite repetition of the
`iterator_id_t`, and raise `StopIteration` to terminate the iteration.
Args:
iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
iterator in `generator_state` from which to generate an element.
Returns:
The next element to generate from the iterator.
"""
def generator_py_func(iterator_id):
"""A `py_func` that will be called to invoke the iterator."""
# `next()` raises `StopIteration` when there are no more
# elements remaining to be generated.
values = next(generator_state.get_iterator(iterator_id))
# Use the same _convert function from the py_func() implementation to
# convert the returned values to arrays early, so that we can inspect
# their values.
try:
flattened_values = nest.flatten_up_to(output_types, values)
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that did not match the expected "
"structure. The expected structure was %s, but the yielded "
"element was %s." % (output_types, values)), sys.exc_info()[2])
ret_arrays = []
for ret, dtype in zip(flattened_values, flattened_types):
try:
ret_arrays.append(script_ops.FuncRegistry._convert( # pylint: disable=protected-access
ret, dtype=dtype.as_numpy_dtype))
except (TypeError, ValueError):
six.reraise(TypeError, TypeError(
"`generator` yielded an element that could not be converted to "
"the expected type. The expected type was %s, but the yielded "
"element was %s." % (dtype.name, ret)), sys.exc_info()[2])
# Additional type and shape checking to ensure that the components
# of the generated element match the `output_types` and `output_shapes`
# arguments.
for (ret_array, expected_dtype, expected_shape) in zip(
ret_arrays, flattened_types, flattened_shapes):
if ret_array.dtype != expected_dtype.as_numpy_dtype:
raise TypeError(
"`generator` yielded an element of type %s where an element "
"of type %s was expected." % (ret_array.dtype,
expected_dtype.as_numpy_dtype))
if not expected_shape.is_compatible_with(ret_array.shape):
raise ValueError(
"`generator` yielded an element of shape %s where an element "
"of shape %s was expected." % (ret_array.shape, expected_shape))
return ret_arrays
flat_values = script_ops.numpy_function(generator_py_func,
[iterator_id_t], flattened_types)
# The `py_func()` op drops the inferred shapes, so we add them back in
# here.
if output_shapes is not None:
for ret_t, shape in zip(flat_values, flattened_shapes):
ret_t.set_shape(shape)
return nest.pack_sequence_as(output_types, flat_values)
def finalize_fn(iterator_id_t):
"""Releases host-side state for the iterator with ID `iterator_id_t`."""
def finalize_py_func(iterator_id):
generator_state.iterator_completed(iterator_id)
# We return a dummy value so that the `finalize_fn` has a valid
# signature.
# NOTE(mrry): Explicitly create an array of `np.int64` because implicit
# casting in `py_func()` will create an array of `np.int32` on Windows,
# leading to a runtime error.
return np.array(0, dtype=np.int64)
return script_ops.numpy_function(finalize_py_func, [iterator_id_t],
dtypes.int64)
# This function associates each traversal of `generator` with a unique
# iterator ID.
def flat_map_fn(dummy_arg):
# The `get_iterator_id_fn` gets a unique ID for the current instance
# of the generator.
# The `generator_next_fn` gets the next element from the iterator with the
# given ID, and raises StopIteration when that iterator contains no
# more elements.
return _GeneratorDataset(dummy_arg, get_iterator_id_fn, generator_next_fn,
finalize_fn)
# A single-element dataset that, each time it is evaluated, contains a
# freshly-generated and unique (for the returned dataset) int64
# ID that will be used to identify the appropriate Python state, which
# is encapsulated in `generator_state`, and captured in
# `get_iterator_id_map_fn`.
dummy = 0
id_dataset = Dataset.from_tensors(dummy)
# A dataset that contains all of the elements generated by a
# single iterator created from `generator`, identified by the
# iterator ID contained in `id_dataset`. Lifting the iteration
# into a flat_map here enables multiple repetitions and/or nested
# versions of the returned dataset to be created, because it forces
# the generation of a new ID for each version.
return id_dataset.flat_map(flat_map_fn)
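# Illustrative sketch (not part of this file): `args` lets graph-level tensors
# parameterize the generator; they arrive as NumPy values. The generator and
# shapes below are invented for the example.
#
#   def gen(limit):
#     for i in range(limit):
#       yield i, [1] * i
#
#   ds = tf.data.Dataset.from_generator(
#       gen, (tf.int64, tf.int64),
#       (tf.TensorShape([]), tf.TensorShape([None])),
#       args=(5,))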
@staticmethod
def range(*args):
"""Creates a `Dataset` of a step-separated range of values.
For example:
```python
Dataset.range(5) == [0, 1, 2, 3, 4]
Dataset.range(2, 5) == [2, 3, 4]
Dataset.range(1, 5, 2) == [1, 3]
Dataset.range(1, 5, -2) == []
Dataset.range(5, 1) == []
Dataset.range(5, 1, -2) == [5, 3]
```
Args:
*args: follows the same semantics as python's xrange.
len(args) == 1 -> start = 0, stop = args[0], step = 1
len(args) == 2 -> start = args[0], stop = args[1], step = 1
len(args) == 3 -> start = args[0], stop = args[1], step = args[2]
Returns:
Dataset: A `RangeDataset`.
Raises:
ValueError: if len(args) == 0.
"""
return RangeDataset(*args)
@staticmethod
def zip(datasets):
"""Creates a `Dataset` by zipping together the given datasets.
This method has similar semantics to the built-in `zip()` function
in Python, with the main difference being that the `datasets`
argument can be an arbitrary nested structure of `Dataset` objects.
For example:
```python
a = Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
b = Dataset.range(4, 7) # ==> [ 4, 5, 6 ]
c = Dataset.range(7, 13).batch(2) # ==> [ [7, 8], [9, 10], [11, 12] ]
d = Dataset.range(13, 15) # ==> [ 13, 14 ]
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
Dataset.zip((a, b)) # ==> [ (1, 4), (2, 5), (3, 6) ]
Dataset.zip((b, a)) # ==> [ (4, 1), (5, 2), (6, 3) ]
# The `datasets` argument may contain an arbitrary number of
# datasets.
Dataset.zip((a, b, c)) # ==> [ (1, 4, [7, 8]),
# (2, 5, [9, 10]),
# (3, 6, [11, 12]) ]
# The number of elements in the resulting dataset is the same as
# the size of the smallest dataset in `datasets`.
Dataset.zip((a, d)) # ==> [ (1, 13), (2, 14) ]
```
Args:
datasets: A nested structure of datasets.
Returns:
Dataset: A `Dataset`.
"""
return ZipDataset(datasets)
def concatenate(self, dataset):
"""Creates a `Dataset` by concatenating the given dataset with this dataset.
```python
a = Dataset.range(1, 4) # ==> [ 1, 2, 3 ]
b = Dataset.range(4, 8) # ==> [ 4, 5, 6, 7 ]
# The input dataset and dataset to be concatenated should have the same
# nested structures and output types.
# c = Dataset.range(8, 14).batch(2) # ==> [ [8, 9], [10, 11], [12, 13] ]
# d = Dataset.from_tensor_slices([14.0, 15.0, 16.0])
# a.concatenate(c) and a.concatenate(d) would result in error.
a.concatenate(b) # ==> [ 1, 2, 3, 4, 5, 6, 7 ]
```
Args:
dataset: `Dataset` to be concatenated.
Returns:
Dataset: A `Dataset`.
"""
return ConcatenateDataset(self, dataset)
def prefetch(self, buffer_size):
"""Creates a `Dataset` that prefetches elements from this dataset.
Note: Like other `Dataset` methods, prefetch operates on the
elements of the input dataset. It has no concept of examples vs. batches.
`examples.prefetch(2)` will prefetch two elements (2 examples),
while `examples.batch(20).prefetch(2)` will prefetch 2 elements
(2 batches, of 20 examples each).
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
number of elements that will be buffered when prefetching.
Returns:
Dataset: A `Dataset`.
"""
return PrefetchDataset(self, buffer_size)
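# Illustrative sketch (not part of this file): prefetch usually comes last in a
# pipeline so the next element is prepared while the current one is consumed;
# the AUTOTUNE constant defined above lets tf.data pick the buffer size.
#
#   ds = tf.data.Dataset.range(100).batch(10)
#   ds = ds.prefetch(tf.data.experimental.AUTOTUNE)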
@staticmethod
def list_files(file_pattern, shuffle=None, seed=None):
"""A dataset of all files matching one or more glob patterns.
NOTE: The default behavior of this method is to return filenames in
a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
to get results in a deterministic order.
Example:
If we had the following files on our filesystem:
- /path/to/dir/a.txt
- /path/to/dir/b.py
- /path/to/dir/c.py
If we pass "/path/to/dir/*.py" as the pattern, the dataset
would produce:
- /path/to/dir/b.py
- /path/to/dir/c.py
Args:
file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
(scalar or vector), representing the filename glob (i.e. shell wildcard)
pattern(s) that will be matched.
shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
"""
with ops.name_scope("list_files"):
if shuffle is None:
shuffle = True
file_pattern = ops.convert_to_tensor(
file_pattern, dtype=dtypes.string, name="file_pattern")
matching_files = gen_io_ops.matching_files(file_pattern)
# Raise an exception if `file_pattern` does not match any files.
condition = math_ops.greater(array_ops.shape(matching_files)[0], 0,
name="match_not_empty")
message = math_ops.add(
"No files matched pattern: ",
string_ops.reduce_join(file_pattern, separator=", "), name="message")
assert_not_empty = control_flow_ops.Assert(
condition, [message], summarize=1, name="assert_not_empty")
with ops.control_dependencies([assert_not_empty]):
matching_files = array_ops.identity(matching_files)
dataset = Dataset.from_tensor_slices(matching_files)
if shuffle:
# NOTE(mrry): The shuffle buffer size must be greater than zero, but the
# list of files might be empty.
buffer_size = math_ops.maximum(
array_ops.shape(matching_files, out_type=dtypes.int64)[0], 1)
dataset = dataset.shuffle(buffer_size, seed=seed)
return dataset
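# Illustrative sketch (not part of this file; the glob below is made up): with
# shuffle=False the matched file names come back in a deterministic order, and
# the file dataset is commonly fed into interleave to read the files.
#
#   files = tf.data.Dataset.list_files("/path/to/dir/*.txt", shuffle=False)
#   lines = files.interleave(tf.data.TextLineDataset, cycle_length=2)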
def repeat(self, count=None):
"""Repeats this dataset `count` times.
NOTE: If this dataset is a function of global state (e.g. a random number
generator), then different repetitions may produce different elements.
Args:
count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
number of times the dataset should be repeated. The default behavior (if
`count` is `None` or `-1`) is for the dataset to be repeated indefinitely.
Returns:
Dataset: A `Dataset`.
"""
return RepeatDataset(self, count)
def enumerate(self, start=0):
"""Enumerates the elements of this dataset.
It is similar to python's `enumerate`.
For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
a = { 1, 2, 3 }
b = { (7, 8), (9, 10) }
# The nested structure of the `datasets` argument determines the
# structure of elements in the resulting dataset.
a.enumerate(start=5) == { (5, 1), (6, 2), (7, 3) }
b.enumerate() == { (0, (7, 8)), (1, (9, 10)) }
```
Args:
start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
enumeration.
Returns:
Dataset: A `Dataset`.
"""
max_value = np.iinfo(dtypes.int64.as_numpy_dtype).max
return Dataset.zip((Dataset.range(start, max_value), self))
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
This dataset fills a buffer with `buffer_size` elements, then randomly
samples elements from this buffer, replacing the selected elements with new
elements. For perfect shuffling, a buffer size greater than or equal to the
full size of the dataset is required.
For instance, if your dataset contains 10,000 elements but `buffer_size` is
set to 1,000, then `shuffle` will initially select a random element from
only the first 1,000 elements in the buffer. Once an element is selected,
its space in the buffer is replaced by the next (i.e. 1,001-st) element,
maintaining the 1,000 element buffer.
`reshuffle_each_iteration` controls whether the shuffle order should be
different for each epoch. In TF 1.X, the idiomatic way to create epochs
was through the `repeat` transformation:
```python
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=True)
d = d.repeat(2) # ==> [ 1, 0, 2, 1, 2, 0 ]
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=False)
d = d.repeat(2) # ==> [ 1, 0, 2, 1, 0, 2 ]
```
In TF 2.0, tf.data.Dataset objects are Python iterables which makes it
possible to also create epochs through Python iteration:
```python
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=True)
for elem in d:
# ==> [ 1, 0, 2 ]
for elem in d:
# ==> [ 1, 2, 0 ]
d = tf.data.Dataset.range(3)
d = d.shuffle(3, reshuffle_each_iteration=False)
for elem in d:
# ==> [ 1, 0, 2 ]
for elem in d:
# ==> [ 1, 0, 2 ]
```
Args:
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
Dataset: A `Dataset`.
"""
return ShuffleDataset(self, buffer_size, seed, reshuffle_each_iteration)
def cache(self, filename=""):
"""Caches the elements in this dataset.
Args:
filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
directory on the filesystem to use for caching elements in this Dataset.
If a filename is not provided, the dataset will be cached in memory.
Returns:
Dataset: A `Dataset`.
"""
return CacheDataset(self, filename)
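# Illustrative sketch (not part of this file; `expensive_fn` and the cache path
# are placeholders): with no filename the first pass over the dataset is cached
# in memory, with a filename the cache is written to disk and reused.
#
#   ds = tf.data.Dataset.range(5).map(expensive_fn).cache()
#   ds_on_disk = tf.data.Dataset.range(5).map(expensive_fn).cache("/tmp/my_cache")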
def take(self, count):
"""Creates a `Dataset` with at most `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be taken to form the new dataset.
If `count` is -1, or if `count` is greater than the size of this
dataset, the new dataset will contain all elements of this dataset.
Returns:
Dataset: A `Dataset`.
"""
return TakeDataset(self, count)
def skip(self, count):
"""Creates a `Dataset` that skips `count` elements from this dataset.
Args:
count: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements of this dataset that should be skipped to form the new dataset.
If `count` is greater than the size of this dataset, the new dataset
will contain no elements. If `count` is -1, skips the entire dataset.
Returns:
Dataset: A `Dataset`.
"""
return SkipDataset(self, count)
def shard(self, num_shards, index):
"""Creates a `Dataset` that includes only 1/`num_shards` of this dataset.
`shard` is a deterministic operator; the Dataset produced by
`A.shard(n, i)` will contain all elements of A whose index mod n = i.
```python
# Create a Dataset with 60 elements.
A = tf.data.Dataset.range(60) # ==> [0, 1, 2, 3, ..., 57, 58, 59]
# Create 3 Datasets, each with 20 elements from Dataset A.
B = A.shard(num_shards=3, index=0) # ==> [0, 3, 6, 9, ..., 51, 54, 57]
C = A.shard(num_shards=3, index=1) # ==> [1, 4, 7, 10, ..., 52, 55, 58]
D = A.shard(num_shards=3, index=2) # ==> [2, 5, 8, 11, ..., 53, 56, 59]
# There is no overlap between Datasets B, C and D.
```
This dataset operator is very useful when running distributed training, as
it allows each worker to read a unique subset.
When reading a single input file, you can skip elements as follows:
```python
d = tf.data.TFRecordDataset(input_file)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Important caveats:
- Be sure to shard before you use any randomizing operator (such as
shuffle).
- Generally it is best if the shard operator is used early in the dataset
pipeline. For example, when reading from a set of TFRecord files, shard
before converting the dataset to input samples. This avoids reading every
file on every worker. The following is an example of an efficient
sharding strategy within a complete pipeline:
```python
d = Dataset.list_files(pattern)
d = d.shard(num_workers, worker_index)
d = d.repeat(num_epochs)
d = d.shuffle(shuffle_buffer_size)
d = d.interleave(tf.data.TFRecordDataset,
cycle_length=num_readers, block_length=1)
d = d.map(parser_fn, num_parallel_calls=num_map_threads)
```
Args:
num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
shards operating in parallel.
index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
Returns:
Dataset: A `Dataset`.
Raises:
InvalidArgumentError: if `num_shards` or `index` are illegal values.
Note: error checking is done on a best-effort basis, and errors aren't
guaranteed to be caught upon dataset creation. (e.g. passing in a
placeholder tensor bypasses the early checking, and will instead result
in an error during a session.run call.)
"""
return ShardDataset(self, num_shards, index)
def batch(self, batch_size, drop_remainder=False):
"""Combines consecutive elements of this dataset into batches.
The components of the resulting element will have an additional outer
dimension, which will be `batch_size` (or `N % batch_size` for the last
element if `batch_size` does not divide the number of input elements `N`
evenly and `drop_remainder` is `False`). If your program depends on the
batches having the same outer dimension, you should set the `drop_remainder`
argument to `True` to prevent the smaller batch from being produced.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return BatchDataset(self, batch_size, drop_remainder)
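# Illustrative sketch (not part of this file): drop_remainder controls what
# happens to a final, smaller batch.
#
#   tf.data.Dataset.range(7).batch(3)                       # ==> [0 1 2], [3 4 5], [6]
#   tf.data.Dataset.range(7).batch(3, drop_remainder=True)  # ==> [0 1 2], [3 4 5]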
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
"""Combines consecutive elements of this dataset into padded batches.
This transformation combines multiple consecutive elements of the input
dataset into a single element.
Like `tf.data.Dataset.batch`, the components of the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
your program depends on the batches having the same outer dimension, you
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padded_shapes`. The `padded_shapes` argument
determines the resulting shape for each dimension of each component in an
output element:
* If the dimension is a constant (e.g. `tf.compat.v1.Dimension(37)`), the
component
will be padded out to that length in that dimension.
* If the dimension is unknown (e.g. `tf.compat.v1.Dimension(None)`), the
component
will be padded out to the maximum length of all elements in that
dimension.
See also `tf.data.experimental.dense_to_sparse_batch`, which combines
elements that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or `tf.int64` vector
tensor-like objects representing the shape to which the respective
component of each input element should be padded prior to batching. Any
unknown dimensions (e.g. `tf.compat.v1.Dimension(None)` in a
`tf.TensorShape` or `-1` in a tensor-like object) will be padded to the
maximum size of that dimension in each batch.
padding_values: (Optional.) A nested structure of scalar-shaped
`tf.Tensor`, representing the padding values to use for the respective
components. Defaults are `0` for numeric types and the empty string for
string types.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether the last batch should be dropped in the case it has fewer than
`batch_size` elements; the default behavior is not to drop the smaller
batch.
Returns:
Dataset: A `Dataset`.
"""
return PaddedBatchDataset(self, batch_size, padded_shapes, padding_values,
drop_remainder)
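# Illustrative sketch (not part of this file): variable-length elements padded
# to the longest element in each batch; padded_shapes=[None] leaves the
# dimension unknown so it is padded per batch.
#
#   ds = tf.data.Dataset.range(1, 5).map(lambda x: tf.fill([x], x))
#   ds = ds.padded_batch(2, padded_shapes=[None])
#   # ==> [[1, 0], [2, 2]], [[3, 3, 3, 0], [4, 4, 4, 4]]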
def map(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
This transformation applies `map_func` to each element of this dataset, and
returns a new dataset containing the transformed elements, in the same
order as they appeared in the input.
For example:
```python
a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
a.map(lambda x: x + 1) # ==> [ 2, 3, 4, 5, 6 ]
```
The input signature of `map_func` is determined by the structure of each
element in this dataset. For example:
```python
# NOTE: The following examples use `{ ... }` to represent the
# contents of a dataset.
# Each element is a `tf.Tensor` object.
a = { 1, 2, 3, 4, 5 }
# `map_func` takes a single argument of type `tf.Tensor` with the same
# shape and dtype.
result = a.map(lambda x: ...)
# Each element is a tuple containing two `tf.Tensor` objects.
b = { (1, "foo"), (2, "bar"), (3, "baz") }
# `map_func` takes two arguments of type `tf.Tensor`.
result = b.map(lambda x_int, y_str: ...)
# Each element is a dictionary mapping strings to `tf.Tensor` objects.
c = { {"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}, {"a": 3, "b": "baz"} }
# `map_func` takes a single argument of type `dict` with the same keys as
# the elements.
result = c.map(lambda d: ...)
```
The value or values returned by `map_func` determine the structure of each
element in the returned dataset.
```python
# `map_func` returns a scalar `tf.Tensor` of type `tf.float32`.
def f(...):
return tf.constant(37.0)
result = dataset.map(f)
result.output_classes == tf.Tensor
result.output_types == tf.float32
result.output_shapes == [] # scalar
# `map_func` returns two `tf.Tensor` objects.
def g(...):
return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
result = dataset.map(g)
result.output_classes == (tf.Tensor, tf.Tensor)
result.output_types == (tf.float32, tf.string)
result.output_shapes == ([], [3])
# Python primitives, lists, and NumPy arrays are implicitly converted to
# `tf.Tensor`.
def h(...):
return 37.0, ["Foo", "Bar", "Baz"], np.array([1.0, 2.0], dtype=np.float64)
result = dataset.map(h)
result.output_classes == (tf.Tensor, tf.Tensor, tf.Tensor)
result.output_types == (tf.float32, tf.string, tf.float64)
result.output_shapes == ([], [3], [2])
# `map_func` can return nested structures.
def i(...):
return {"a": 37.0, "b": [42, 16]}, "foo"
result.output_classes == ({"a": tf.Tensor, "b": tf.Tensor}, tf.Tensor)
result.output_types == ({"a": tf.float32, "b": tf.int32}, tf.string)
result.output_shapes == ({"a": [], "b": [2]}, [])
```
`map_func` can accept as arguments and return any type of dataset element.
Note that irrespective of the context in which `map_func` is defined (eager
vs. graph), tf.data traces the function and executes it as a graph. To use
Python code inside of the function you have two options:
1) Rely on AutoGraph to convert Python code into an equivalent graph
computation. The downside of this approach is that AutoGraph can convert
some but not all Python code.
2) Use `tf.py_function`, which allows you to write arbitrary Python code but
will generally result in worse performance than 1). For example:
```python
d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
# transform a string tensor to upper case string using a Python function
def upper_case_fn(t: tf.Tensor) -> str:
return t.numpy().decode('utf-8').upper()
d.map(lambda x: tf.py_function(func=upper_case_fn,
inp=[x], Tout=tf.string)) # ==> [ "HELLO", "WORLD" ]
```
Args:
map_func: A function mapping a dataset element to another dataset element.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number of elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return MapDataset(self, map_func, preserve_cardinality=True)
else:
return ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=True)
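# Illustrative sketch (not part of this file): passing the AUTOTUNE constant as
# num_parallel_calls lets the runtime choose the parallelism of the map stage.
#
#   ds = tf.data.Dataset.range(1, 6)
#   ds = ds.map(lambda x: x + 1,
#               num_parallel_calls=tf.data.experimental.AUTOTUNE)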
def flat_map(self, map_func):
"""Maps `map_func` across this dataset and flattens the result.
Use `flat_map` if you want to make sure that the order of your dataset
stays the same. For example, to flatten a dataset of batches into a
dataset of their elements:
```python
a = Dataset.from_tensor_slices([ [1, 2, 3], [4, 5, 6], [7, 8, 9] ])
a.flat_map(lambda x: Dataset.from_tensor_slices(x + 1)) # ==>
# [ 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
```
`tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
`flat_map` produces the same output as
`tf.data.Dataset.interleave(cycle_length=1)`
Args:
map_func: A function mapping a dataset element to a dataset.
Returns:
Dataset: A `Dataset`.
"""
return FlatMapDataset(self, map_func)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
"""Maps `map_func` across this dataset, and interleaves the results.
For example, you can use `Dataset.interleave()` to process many input files
concurrently:
```python
# Preprocess 4 files concurrently, and interleave blocks of 16 records from
# each file.
filenames = ["/var/data/file1.txt", "/var/data/file2.txt", ...]
dataset = (Dataset.from_tensor_slices(filenames)
.interleave(lambda x:
TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
cycle_length=4, block_length=16))
```
The `cycle_length` and `block_length` arguments control the order in which
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
consuming the next input element each time it reaches the end of an
iterator.
For example:
```python
a = Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
# NOTE: New lines indicate "block" boundaries.
a.interleave(lambda x: Dataset.from_tensors(x).repeat(6),
cycle_length=2, block_length=4) # ==> [1, 1, 1, 1,
# 2, 2, 2, 2,
# 1, 1,
# 2, 2,
# 3, 3, 3, 3,
# 4, 4, 4, 4,
# 3, 3,
# 4, 4,
# 5, 5, 5, 5,
# 5, 5]
```
NOTE: The order of elements yielded by this transformation is
deterministic, as long as `map_func` is a pure function. If
`map_func` contains any stateful operations, the order in which
that state is accessed is undefined.
Args:
map_func: A function mapping a dataset element to a dataset.
cycle_length: (Optional.) The number of input elements that will be
processed concurrently. If not specified, the value will be derived from
the number of available CPU cores. If the `num_parallel_calls` argument
is set to `tf.data.experimental.AUTOTUNE`, the `cycle_length` argument
also identifies the maximum degree of parallelism.
block_length: (Optional.) The number of consecutive elements to produce
from each input element before cycling to another input element.
num_parallel_calls: (Optional.) If specified, the implementation creates a
threadpool, which is used to fetch inputs from cycle elements
asynchronously and in parallel. The default behavior is to fetch inputs
from cycle elements synchronously with no parallelism. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return InterleaveDataset(self, map_func, cycle_length, block_length)
else:
return ParallelInterleaveDataset(self, map_func, cycle_length,
block_length, num_parallel_calls)
def filter(self, predicate):
"""Filters this dataset according to `predicate`.
```python
d = tf.data.Dataset.from_tensor_slices([1, 2, 3])
d = d.filter(lambda x: x < 3) # ==> [1, 2]
# `tf.math.equal(x, y)` is required for equality comparison
def filter_fn(x):
return tf.math.equal(x, 1)
d = d.filter(filter_fn) # ==> [1]
```
Args:
predicate: A function mapping a dataset element to a boolean.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate)
def apply(self, transformation_func):
"""Applies a transformation function to this dataset.
`apply` enables chaining of custom `Dataset` transformations, which are
represented as functions that take one `Dataset` argument and return a
transformed `Dataset`.
For example:
```
dataset = (dataset.map(lambda x: x ** 2)
.apply(group_by_window(key_func, reduce_func, window_size))
.map(lambda x: x ** 3))
```
Args:
transformation_func: A function that takes one `Dataset` argument and
returns a `Dataset`.
Returns:
Dataset: The `Dataset` returned by applying `transformation_func` to this
dataset.
"""
dataset = transformation_func(self)
if not isinstance(dataset, DatasetV2):
raise TypeError(
"`transformation_func` must return a Dataset. Got {}.".format(
dataset))
dataset._input_datasets = [self] # pylint: disable=protected-access
return dataset
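# Illustrative sketch (not part of this file; `double_values` is invented): a
# custom transformation is just a function from Dataset to Dataset, so apply()
# works with home-grown helpers as well as tf.data.experimental ones.
#
#   def double_values(ds):
#     return ds.map(lambda x: 2 * x)
#
#   dataset = tf.data.Dataset.range(5).apply(double_values)  # ==> [0, 2, 4, 6, 8]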
def window(self, size, shift=None, stride=1, drop_remainder=False):
"""Combines (nests of) input elements into a dataset of (nests of) windows.
A "window" is a finite dataset of flat elements of size `size` (or possibly
fewer if there are not enough input elements to fill the window and
`drop_remainder` evaluates to false).
The `stride` argument determines the stride of the input elements, and the
`shift` argument determines the shift of the window.
For example, letting {...} represent a Dataset:
- `tf.data.Dataset.range(7).window(2)` produces
`{{0, 1}, {2, 3}, {4, 5}, {6}}`
- `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces
`{{0, 1, 2}, {2, 3, 4}, {4, 5, 6}}`
- `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces
`{{0, 2, 4}, {1, 3, 5}, {2, 4, 6}}`
Note that when the `window` transformation is applied to a dataset of
nested elements, it produces a dataset of nested windows.
For example:
- `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)`
produces `{({0, 1}, {0, 1}), ({2, 3}, {2, 3})}`
- `tf.data.Dataset.from_tensor_slices({"a": range(4)}).window(2)`
produces `{{"a": {0, 1}}, {"a": {2, 3}}}`
Args:
size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
of the input dataset to combine into a window.
shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
forward shift of the sliding window in each iteration. Defaults to
`size`.
stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
stride of the input elements in the sliding window.
drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
whether a window should be dropped in case its size is smaller than
`size`.
Returns:
Dataset: A `Dataset` of (nests of) windows -- finite datasets of flat
elements created from the (nests of) input elements.
"""
if shift is None:
shift = size
return WindowDataset(self, size, shift, stride, drop_remainder)
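# Illustrative usage sketch: windows are themselves datasets, so `flat_map` is
# a common way to turn them back into batched tensors.
#
#   ds = tf.data.Dataset.range(7).window(3, shift=2, drop_remainder=True)
#   ds = ds.flat_map(lambda w: w.batch(3))  # ==> [0 1 2], [2 3 4], [4 5 6]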
def reduce(self, initial_state, reduce_func):
"""Reduces the input dataset to a single element.
The transformation calls `reduce_func` successively on every element of
the input dataset until the dataset is exhausted, aggregating information in
its internal state. The `initial_state` argument is used for the initial
state and the final state is returned as the result.
For example:
- `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1)`
produces `5`
- `tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y)`
produces `10`
Args:
initial_state: An element representing the initial state of the
transformation.
reduce_func: A function that maps `(old_state, input_element)` to
`new_state`. It must take two arguments and return a new state. The
structure of `new_state` must match the structure of
`initial_state`.
Returns:
A dataset element corresponding to the final state of the transformation.
"""
with ops.name_scope("initial_state"):
initial_state = structure.normalize_element(initial_state)
state_structure = structure.type_spec_from_value(initial_state)
# Iteratively rerun the reduce function until reaching a fixed point on
# `state_structure`.
need_to_rerun = True
while need_to_rerun:
wrapped_func = StructuredFunctionWrapper(
reduce_func,
"reduce()",
input_structure=(state_structure, self.element_spec),
add_to_graph=False)
# Extract and validate class information from the returned values.
output_classes = wrapped_func.output_classes
state_classes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
state_structure)
for new_state_class, state_class in zip(
nest.flatten(output_classes), nest.flatten(state_classes)):
if not issubclass(new_state_class, state_class):
raise TypeError(
"The element classes for the new state must match the initial "
"state. Expected %s; got %s." %
(state_classes, wrapped_func.output_classes))
# Extract and validate type information from the returned values.
output_types = wrapped_func.output_types
state_types = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
state_structure)
for new_state_type, state_type in zip(
nest.flatten(output_types), nest.flatten(state_types)):
if new_state_type != state_type:
raise TypeError(
"The element types for the new state must match the initial "
"state. Expected %s; got %s." %
(state_types, wrapped_func.output_types))
# Extract shape information from the returned values.
output_shapes = wrapped_func.output_shapes
state_shapes = nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
state_structure)
flat_state_shapes = nest.flatten(state_shapes)
flat_new_state_shapes = nest.flatten(output_shapes)
weakened_state_shapes = [
original.most_specific_compatible_shape(new)
for original, new in zip(flat_state_shapes, flat_new_state_shapes)
]
need_to_rerun = False
for original_shape, weakened_shape in zip(flat_state_shapes,
weakened_state_shapes):
if original_shape.ndims is not None and (
weakened_shape.ndims is None or
original_shape.as_list() != weakened_shape.as_list()):
need_to_rerun = True
break
if need_to_rerun:
# TODO(b/110122868): Support a "most specific compatible structure"
# method for combining structures, to avoid using legacy structures
# here.
state_structure = structure.convert_legacy_structure(
state_types,
nest.pack_sequence_as(state_shapes, weakened_state_shapes),
state_classes)
reduce_func = wrapped_func.function
reduce_func.add_to_graph(ops.get_default_graph())
# TODO(b/141256846): Apply options once optimizing stateful input pipelines
# in tf.functions is supported.
# dataset = self._apply_options()
dataset = self
# pylint: disable=protected-access
return structure.from_compatible_tensor_list(
state_structure,
gen_dataset_ops.reduce_dataset(
dataset._variant_tensor,
structure.to_tensor_list(state_structure, initial_state),
reduce_func.captured_inputs,
f=reduce_func,
output_shapes=structure.get_flat_tensor_shapes(state_structure),
output_types=structure.get_flat_tensor_types(state_structure)))
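# Illustrative usage sketch (assumes `numpy` is available as `np`): maintaining
# a (count, sum) pair as the reduction state.
#
#   ds = tf.data.Dataset.range(5)
#   count, total = ds.reduce(
#       (np.int64(0), np.int64(0)),
#       lambda state, x: (state[0] + 1, state[1] + x))  # ==> (5, 10)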
def unbatch(self):
"""Splits elements of a dataset into multiple elements.
For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
where `B` may vary for each input element, then for each element in the
dataset, the unbatched dataset will contain `B` consecutive elements
of shape `[a0, a1, ...]`.
```python
# NOTE: The following example uses `{ ... }` to represent the contents
# of a dataset.
ds = { ['a', 'b', 'c'], ['a', 'b'], ['a', 'b', 'c', 'd'] }
ds.unbatch() == {'a', 'b', 'c', 'a', 'b', 'a', 'b', 'c', 'd'}
```
Returns:
A `Dataset` containing the unbatched elements of this dataset.
"""
# NOTE(mrry): We must ensure that any non-tensor components in `dataset`
# are normalized to their dense tensor representation, so that the
# non-tensor oblivious unbatching logic will slice them appropriately.
# This leads to a somewhat inefficient re-encoding step for all non-tensor
# components.
#
# TODO(mrry): Consider optimizing this if it turns out to be a bottleneck.
def normalize(arg, *rest):
# pylint: disable=protected-access
if rest:
return structure.to_batched_tensor_list(self.element_spec,
(arg,) + rest)
else:
return structure.to_batched_tensor_list(self.element_spec, arg)
normalized_dataset = self.map(normalize)
# NOTE(mrry): Our `map()` has lost information about the structure of
# non-tensor components, so re-apply the structure of the original dataset.
restructured_dataset = _RestructuredDataset(normalized_dataset,
self.element_spec)
return _UnbatchDataset(restructured_dataset)
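# Illustrative usage sketch: unbatching a dataset of fixed-size rows back into
# individual scalars.
#
#   ds = tf.data.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])  # 2 elements
#   ds = ds.unbatch()  # ==> 1, 2, 3, 4, 5, 6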
def with_options(self, options):
"""Returns a new `tf.data.Dataset` with the given options set.
The options are "global" in the sense that they apply to the entire dataset.
If options are set multiple times, they are merged as long as different
options do not use different non-default values.
Args:
options: A `tf.data.Options` that identifies the options to use.
Returns:
Dataset: A `Dataset` with the given options.
Raises:
ValueError: when an option is set more than once to a non-default value
"""
return _OptionsDataset(self, options)
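# Illustrative usage sketch (written as a user would call it, via the public
# `tf.data.Options` alias): relaxing determinism for the whole pipeline.
#
#   options = tf.data.Options()
#   options.experimental_deterministic = False
#   ds = tf.data.Dataset.range(10).with_options(options)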
@tf_export(v1=["data.Dataset"])
class DatasetV1(DatasetV2):
"""Represents a potentially large set of elements.
A `Dataset` can be used to represent an input pipeline as a
collection of elements and a "logical plan" of transformations that act on
those elements.
"""
def __init__(self):
try:
variant_tensor = self._as_variant_tensor()
except AttributeError as e:
if "_as_variant_tensor" in str(e):
raise AttributeError("Please use _variant_tensor instead of "
"_as_variant_tensor() to obtain the variant "
"associated with a dataset")
raise AttributeError("{}: A likely cause of this error is that the super "
"call for this dataset is not the last line of the "
"__init__ method. The base class causes the "
"_as_variant_tensor call in its constructor and "
"if that uses attributes defined in the __init__ "
"method, those attrs need to be defined before the "
"super call.".format(e))
super(DatasetV1, self).__init__(variant_tensor)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a scalar `tf.Tensor` of `tf.variant` representing this dataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
"""
raise NotImplementedError("Dataset._as_variant_tensor")
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_one_shot_iterator(dataset)`.")
def make_one_shot_iterator(self):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not currently support re-initialization.
Returns:
An `Iterator` over the elements of this dataset.
"""
return self._make_one_shot_iterator()
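# Illustrative usage sketch (assumes TF 1.x graph mode, where this method is
# available):
#
#   dataset = tf.data.Dataset.range(3)
#   iterator = dataset.make_one_shot_iterator()
#   next_element = iterator.get_next()
#   with tf.compat.v1.Session() as sess:
#     print(sess.run(next_element))  # ==> 0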
def _make_one_shot_iterator(self): # pylint: disable=missing-docstring
if context.executing_eagerly():
return iterator_ops.IteratorV2(self)
_ensure_same_dataset_graph(self)
# Now that we create datasets at python object creation time, the capture
# by value _make_dataset() function would try to capture these variant
# tensor dataset inputs, which are marked as stateful ops and would throw
# an error if we try and capture them. We therefore traverse the graph
# to find all these ops and whitelist them so that the capturing
# logic, instead of throwing an error, recreates these ops, which is what
# happened before.
all_ds_ops = traverse.obtain_all_variant_tensor_ops(self)
graph_level_seed, op_level_seed = core_random_seed.get_seed(None)
# NOTE(mrry): We capture by value here to ensure that `_make_dataset()` is
# a 0-argument function.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=all_ds_ops)
def _make_dataset():
"""Factory function for a dataset."""
# NOTE(mrry): `Defun` does not capture the graph-level seed from the
# enclosing graph, so if a graph-level seed is present we set the local
# graph seed based on a combination of the graph- and op-level seeds.
if graph_level_seed is not None:
assert op_level_seed is not None
core_random_seed.set_random_seed(
(graph_level_seed + 87654321 * op_level_seed) % (2 ** 63 - 1))
dataset = self._apply_options()
return dataset._variant_tensor # pylint: disable=protected-access
try:
_make_dataset.add_to_graph(ops.get_default_graph())
except ValueError as err:
if "Cannot capture a stateful node" in str(err):
raise ValueError(
"Failed to create a one-shot iterator for a dataset. "
"`Dataset.make_one_shot_iterator()` does not support datasets that "
"capture stateful objects, such as a `Variable` or `LookupTable`. "
"In these cases, use `Dataset.make_initializable_iterator()`. "
"(Original error: %s)" % err)
else:
six.reraise(ValueError, err)
# pylint: disable=protected-access
return iterator_ops.Iterator(
gen_dataset_ops.one_shot_iterator(
dataset_factory=_make_dataset, **self._flat_structure), None,
get_legacy_output_types(self), get_legacy_output_shapes(self),
get_legacy_output_classes(self))
@deprecation.deprecated(
None, "Use `for ... in dataset:` to iterate over a dataset. If using "
"`tf.estimator`, return the `Dataset` object directly from your input "
"function. As a last resort, you can use "
"`tf.compat.v1.data.make_initializable_iterator(dataset)`.")
def make_initializable_iterator(self, shared_name=None):
"""Creates an `Iterator` for enumerating the elements of this dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = dataset.make_initializable_iterator()
# ...
sess.run(iterator.initializer)
```
Args:
shared_name: (Optional.) If non-empty, the returned iterator will be
shared under the given name across multiple sessions that share the same
devices (e.g. when using a remote server).
Returns:
An `Iterator` over the elements of this dataset.
Raises:
RuntimeError: If eager execution is enabled.
"""
return self._make_initializable_iterator(shared_name)
def _make_initializable_iterator(self, shared_name=None): # pylint: disable=missing-docstring
if context.executing_eagerly():
raise RuntimeError(
"dataset.make_initializable_iterator is not supported when eager "
"execution is enabled. Use `for element in dataset` instead.")
_ensure_same_dataset_graph(self)
dataset = self._apply_options()
if shared_name is None:
shared_name = ""
iterator_resource = gen_dataset_ops.iterator_v2(
container="", shared_name=shared_name, **self._flat_structure)
with ops.colocate_with(iterator_resource):
initializer = gen_dataset_ops.make_iterator(
dataset._variant_tensor, # pylint: disable=protected-access
iterator_resource)
# pylint: disable=protected-access
return iterator_ops.Iterator(
iterator_resource, initializer, get_legacy_output_types(dataset),
get_legacy_output_shapes(dataset), get_legacy_output_classes(dataset))
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_classes(dataset)`.")
def output_classes(self):
"""Returns the class of each component of an element of this dataset.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_shapes(dataset)`.")
def output_shapes(self):
"""Returns the shape of each component of an element of this dataset.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self.element_spec)
@property
@deprecation.deprecated(
None, "Use `tf.compat.v1.data.get_output_types(dataset)`.")
def output_types(self):
"""Returns the type of each component of an element of this dataset.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self.element_spec)
@property
def element_spec(self):
# TODO(b/110122868): Remove this override once all `Dataset` instances
# implement `element_structure`.
return structure.convert_legacy_structure(
self.output_types, self.output_shapes, self.output_classes)
@staticmethod
@functools.wraps(DatasetV2.from_tensors)
def from_tensors(tensors):
return DatasetV1Adapter(DatasetV2.from_tensors(tensors))
@staticmethod
@functools.wraps(DatasetV2.from_tensor_slices)
def from_tensor_slices(tensors):
return DatasetV1Adapter(DatasetV2.from_tensor_slices(tensors))
@staticmethod
@deprecation.deprecated(None, "Use `tf.data.Dataset.from_tensor_slices()`.")
def from_sparse_tensor_slices(sparse_tensor):
"""Splits each rank-N `tf.SparseTensor` in this dataset row-wise.
Args:
sparse_tensor: A `tf.SparseTensor`.
Returns:
Dataset: A `Dataset` of rank-(N-1) sparse tensors.
"""
return DatasetV1Adapter(SparseTensorSliceDataset(sparse_tensor))
@staticmethod
@functools.wraps(DatasetV2.from_generator)
def from_generator(generator, output_types, output_shapes=None, args=None):
return DatasetV1Adapter(DatasetV2.from_generator(
generator, output_types, output_shapes, args))
@staticmethod
@functools.wraps(DatasetV2.range)
def range(*args):
return DatasetV1Adapter(DatasetV2.range(*args))
@staticmethod
@functools.wraps(DatasetV2.zip)
def zip(datasets):
return DatasetV1Adapter(DatasetV2.zip(datasets))
@functools.wraps(DatasetV2.concatenate)
def concatenate(self, dataset):
return DatasetV1Adapter(super(DatasetV1, self).concatenate(dataset))
@functools.wraps(DatasetV2.prefetch)
def prefetch(self, buffer_size):
return DatasetV1Adapter(super(DatasetV1, self).prefetch(buffer_size))
@staticmethod
@functools.wraps(DatasetV2.list_files)
def list_files(file_pattern, shuffle=None, seed=None):
return DatasetV1Adapter(DatasetV2.list_files(file_pattern, shuffle, seed))
@functools.wraps(DatasetV2.repeat)
def repeat(self, count=None):
return DatasetV1Adapter(super(DatasetV1, self).repeat(count))
@functools.wraps(DatasetV2.shuffle)
def shuffle(self, buffer_size, seed=None, reshuffle_each_iteration=None):
return DatasetV1Adapter(super(DatasetV1, self).shuffle(
buffer_size, seed, reshuffle_each_iteration))
@functools.wraps(DatasetV2.cache)
def cache(self, filename=""):
return DatasetV1Adapter(super(DatasetV1, self).cache(filename))
@functools.wraps(DatasetV2.take)
def take(self, count):
return DatasetV1Adapter(super(DatasetV1, self).take(count))
@functools.wraps(DatasetV2.skip)
def skip(self, count):
return DatasetV1Adapter(super(DatasetV1, self).skip(count))
@functools.wraps(DatasetV2.shard)
def shard(self, num_shards, index):
return DatasetV1Adapter(super(DatasetV1, self).shard(num_shards, index))
@functools.wraps(DatasetV2.batch)
def batch(self, batch_size, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).batch(
batch_size, drop_remainder))
@functools.wraps(DatasetV2.padded_batch)
def padded_batch(self,
batch_size,
padded_shapes,
padding_values=None,
drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).padded_batch(
batch_size, padded_shapes, padding_values, drop_remainder))
@functools.wraps(DatasetV2.map)
def map(self, map_func, num_parallel_calls=None):
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(self, map_func, preserve_cardinality=False))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self, map_func, num_parallel_calls, preserve_cardinality=False))
@deprecation.deprecated(None, "Use `tf.data.Dataset.map()`.")
def map_with_legacy_function(self, map_func, num_parallel_calls=None):
"""Maps `map_func` across the elements of this dataset.
NOTE: This is an escape hatch for existing uses of `map` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `map` as this method will be removed in V2.
Args:
map_func: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to
another nested structure of tensors.
num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
representing the number elements to process asynchronously in parallel.
If not specified, elements will be processed sequentially. If the value
`tf.data.experimental.AUTOTUNE` is used, then the number of parallel
calls is set dynamically based on available CPU.
Returns:
Dataset: A `Dataset`.
"""
if num_parallel_calls is None:
return DatasetV1Adapter(
MapDataset(
self,
map_func,
preserve_cardinality=False,
use_legacy_function=True))
else:
return DatasetV1Adapter(
ParallelMapDataset(
self,
map_func,
num_parallel_calls,
preserve_cardinality=False,
use_legacy_function=True))
@functools.wraps(DatasetV2.flat_map)
def flat_map(self, map_func):
return DatasetV1Adapter(super(DatasetV1, self).flat_map(map_func))
@functools.wraps(DatasetV2.interleave)
def interleave(self,
map_func,
cycle_length=AUTOTUNE,
block_length=1,
num_parallel_calls=None):
return DatasetV1Adapter(super(DatasetV1, self).interleave(
map_func, cycle_length, block_length, num_parallel_calls))
@functools.wraps(DatasetV2.filter)
def filter(self, predicate):
return DatasetV1Adapter(super(DatasetV1, self).filter(predicate))
@deprecation.deprecated(None, "Use `tf.data.Dataset.filter()`.")
def filter_with_legacy_function(self, predicate):
"""Filters this dataset according to `predicate`.
NOTE: This is an escape hatch for existing uses of `filter` that do not work
with V2 functions. New uses are strongly discouraged and existing uses
should migrate to `filter` as this method will be removed in V2.
Args:
predicate: A function mapping a nested structure of tensors (having shapes
and types defined by `self.output_shapes` and `self.output_types`) to a
scalar `tf.bool` tensor.
Returns:
Dataset: The `Dataset` containing the elements of this dataset for which
`predicate` is `True`.
"""
return FilterDataset(self, predicate, use_legacy_function=True)
@functools.wraps(DatasetV2.apply)
def apply(self, transformation_func):
return DatasetV1Adapter(super(DatasetV1, self).apply(transformation_func))
@functools.wraps(DatasetV2.window)
def window(self, size, shift=None, stride=1, drop_remainder=False):
return DatasetV1Adapter(super(DatasetV1, self).window(
size, shift, stride, drop_remainder))
@functools.wraps(DatasetV2.with_options)
def with_options(self, options):
return DatasetV1Adapter(super(DatasetV1, self).with_options(options))
if tf2.enabled():
Dataset = DatasetV2
else:
Dataset = DatasetV1
class DatasetV1Adapter(DatasetV1):
"""Wraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API."""
def __init__(self, dataset):
self._dataset = dataset
super(DatasetV1Adapter, self).__init__()
def _as_variant_tensor(self):
return self._dataset._variant_tensor # pylint: disable=protected-access
def _has_captured_ref(self):
return self._dataset._has_captured_ref() # pylint: disable=protected-access
def _inputs(self):
return self._dataset._inputs() # pylint: disable=protected-access
def _functions(self):
return self._dataset._functions() # pylint: disable=protected-access
def options(self):
return self._dataset.options()
@property
def element_spec(self):
return self._dataset.element_spec # pylint: disable=protected-access
def __iter__(self):
return iter(self._dataset)
def _ensure_same_dataset_graph(dataset):
"""Walks the dataset graph to ensure all datasets come from the same graph."""
current_graph = ops.get_default_graph()
bfs_q = Queue.Queue()
bfs_q.put(dataset) # pylint: disable=protected-access
visited = []
while not bfs_q.empty():
ds = bfs_q.get()
visited.append(ds)
ds_graph = ds._graph # pylint: disable=protected-access
if current_graph != ds_graph:
logging.warning("The graph (" + str(current_graph) + ") of the iterator "
"is different from the graph (" + str(ds_graph) + ") "
"the dataset: " + str(ds._variant_tensor) + " was " # pylint: disable=protected-access
"created in. If you are using the Estimator API, "
"make sure that no part of the dataset returned by the "
"`input_fn` function is defined outside the `input_fn` "
"function. Please ensure that all datasets in the "
"pipeline are created in the same graph as the iterator. "
"NOTE: This warning will become an error in future "
"versions of TensorFlow.")
for input_ds in ds._inputs(): # pylint: disable=protected-access
if input_ds not in visited:
bfs_q.put(input_ds)
@tf_export(v1=["data.make_one_shot_iterator"])
def make_one_shot_iterator(dataset):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
Note: The returned iterator will be initialized automatically.
A "one-shot" iterator does not support re-initialization.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A `tf.compat.v1.data.Iterator` over the elements of this dataset.
"""
try:
# Call the defined `_make_one_shot_iterator()` if there is one, because some
# datasets (e.g. for prefetching) override its behavior.
return dataset._make_one_shot_iterator() # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_one_shot_iterator() # pylint: disable=protected-access
@tf_export(v1=["data.make_initializable_iterator"])
def make_initializable_iterator(dataset, shared_name=None):
"""Creates a `tf.compat.v1.data.Iterator` for enumerating the elements of a dataset.
Note: The returned iterator will be in an uninitialized state,
and you must run the `iterator.initializer` operation before using it:
```python
dataset = ...
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
# ...
sess.run(iterator.initializer)
```
Args:
dataset: A `tf.data.Dataset`.
shared_name: (Optional.) If non-empty, the returned iterator will be shared
under the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
Returns:
A `tf.compat.v1.data.Iterator` over the elements of `dataset`.
Raises:
RuntimeError: If eager execution is enabled.
"""
try:
# Call the defined `_make_initializable_iterator()` if there is one, because
# some datasets (e.g. for prefetching) override its behavior.
return dataset._make_initializable_iterator(shared_name) # pylint: disable=protected-access
except AttributeError:
return DatasetV1Adapter(dataset)._make_initializable_iterator(shared_name) # pylint: disable=protected-access
@tf_export("data.experimental.get_structure")
def get_structure(dataset_or_iterator):
"""Returns the type specification of an element of a `Dataset` or `Iterator`.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.TypeSpec` objects matching the structure of an
element of `dataset_or_iterator` and specifying the type of individual
components.
Raises:
TypeError: If `dataset_or_iterator` is not a `Dataset` or `Iterator` object.
"""
try:
return dataset_or_iterator.element_spec # pylint: disable=protected-access
except AttributeError:
raise TypeError("`dataset_or_iterator` must be a Dataset or Iterator "
"object, but got %s." % type(dataset_or_iterator))
@tf_export(v1=["data.get_output_classes"])
def get_legacy_output_classes(dataset_or_iterator):
"""Returns the output classes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_classes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.IteratorV2`.
Returns:
A nested structure of Python `type` objects matching the structure of the
dataset / iterator elements and specifying the class of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_shapes"])
def get_legacy_output_shapes(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_shapes` property.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.TensorShape` objects matching the structure of
the dataset / iterator elements and specifying the shape of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
@tf_export(v1=["data.get_output_types"])
def get_legacy_output_types(dataset_or_iterator):
"""Returns the output shapes of a `Dataset` or `Iterator` elements.
This utility method replaces the deprecated-in-V2
`tf.compat.v1.Dataset.output_types` property.
Args:
dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.
Returns:
A nested structure of `tf.DType` objects matching the structure of the
dataset / iterator elements and specifying the type of the individual
components.
"""
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
get_structure(dataset_or_iterator))
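# Illustrative usage sketch: the three legacy getters applied to one dataset
# (exact reprs of the returned objects may vary).
#
#   ds = tf.data.Dataset.from_tensor_slices(([1.0, 2.0], [3, 4]))
#   tf.compat.v1.data.get_output_types(ds)    # ==> (tf.float32, tf.int32)
#   tf.compat.v1.data.get_output_shapes(ds)   # ==> (TensorShape([]), TensorShape([]))
#   tf.compat.v1.data.get_output_classes(ds)  # ==> (Tensor, Tensor)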
@tf_export("data.Options")
class Options(options_lib.OptionsBase):
"""Represents options for tf.data.Dataset.
An `Options` object can be, for instance, used to control which static
optimizations to apply or whether to use performance modeling to dynamically
tune the parallelism of operations such as `tf.data.Dataset.map` or
`tf.data.Dataset.interleave`.
"""
experimental_deterministic = options_lib.create_option(
name="experimental_deterministic",
ty=bool,
docstring=
"Whether the outputs need to be produced in deterministic order. If None,"
" defaults to True.")
experimental_distribute = options_lib.create_option(
name="experimental_distribute",
ty=distribute_options.DistributeOptions,
docstring=
"The distribution strategy options associated with the dataset. See "
"`tf.data.experimental.DistributeOptions` for more details.",
default_factory=distribute_options.DistributeOptions)
experimental_optimization = options_lib.create_option(
name="experimental_optimization",
ty=optimization_options.OptimizationOptions,
docstring=
"The optimization options associated with the dataset. See "
"`tf.data.experimental.OptimizationOptions` for more details.",
default_factory=optimization_options.OptimizationOptions)
experimental_slack = options_lib.create_option(
name="experimental_slack",
ty=bool,
docstring="Whether to introduce 'slack' in the last `prefetch` of the "
"input pipeline, if it exists. This may reduce CPU contention with "
"accelerator host-side activity at the start of a step. The slack "
"frequency is determined by the number of devices attached to this "
"input pipeline. If None, defaults to False.")
experimental_stats = options_lib.create_option(
name="experimental_stats",
ty=stats_options.StatsOptions,
docstring=
"The statistics options associated with the dataset. See "
"`tf.data.experimental.StatsOptions` for more details.",
default_factory=stats_options.StatsOptions)
experimental_threading = options_lib.create_option(
name="experimental_threading",
ty=threading_options.ThreadingOptions,
docstring=
"The threading options associated with the dataset. See "
"`tf.data.experimental.ThreadingOptions` for more details.",
default_factory=threading_options.ThreadingOptions)
experimental_allow_stateful = options_lib.create_option(
name="experimental_allow_stateful",
ty=bool,
docstring="By default, tf.data will refuse to serialize a dataset or "
"checkpoint its iterator if the dataset contains a stateful op as the "
"serialization / checkpointing won't be able to capture its state. "
"Users can -- at their own risk -- override this restriction by "
"explicitly specifying that they are fine throwing away the state "
"in these ops when they turn this option on.")
def _static_optimizations(self):
"""Produces the list of enabled static optimizations."""
result = []
result.extend(self.experimental_optimization._static_optimizations()) # pylint: disable=protected-access
if self.experimental_deterministic is False:
result.append("make_sloppy")
if self.experimental_stats and self.experimental_stats.latency_all_edges:
result.append("latency_all_edges")
if self.experimental_slack:
result.append("slack")
if (self.experimental_distribute and
self.experimental_distribute._make_stateless): # pylint: disable=protected-access
result.append("make_stateless")
return result
def _static_optimization_configs(self):
"""Produces the list of configurations for enabled static optimizations."""
result = []
if self.experimental_optimization:
result.extend(
self.experimental_optimization._static_optimization_configs()) # pylint: disable=protected-access
if self.experimental_slack:
num_devices = self.experimental_distribute.num_devices
if num_devices is None:
num_devices = 1
result.append("slack:slack_period:%d" % num_devices)
return result
def merge(self, options):
"""Merges itself with the given `tf.data.Options`.
The given `tf.data.Options` can be merged as long as there does not exist an
attribute that is set to different values in `self` and `options`.
Args:
options: a `tf.data.Options` to merge with
Raises:
ValueError: if the given `tf.data.Options` cannot be merged
Returns:
New `tf.data.Options()` object which is the result of merging self with
the input `tf.data.Options`.
"""
return options_lib.merge_options(self, options)
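# Illustrative usage sketch (via the public `tf.data.Options` alias): merging
# two option sets that touch different fields.
#
#   opts_a = tf.data.Options()
#   opts_a.experimental_deterministic = False
#   opts_b = tf.data.Options()
#   opts_b.experimental_slack = True
#   merged = opts_a.merge(opts_b)  # carries both non-default settings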
class DatasetSource(DatasetV2):
"""Abstract class representing a dataset with no inputs."""
def _inputs(self):
return []
class UnaryDataset(DatasetV2):
"""Abstract class representing a dataset with one input."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryDataset, self).__init__(variant_tensor)
def _inputs(self):
return [self._input_dataset]
class UnaryUnchangedStructureDataset(UnaryDataset):
"""Represents a unary dataset with the same input and output structure."""
def __init__(self, input_dataset, variant_tensor):
self._input_dataset = input_dataset
super(UnaryUnchangedStructureDataset, self).__init__(
input_dataset, variant_tensor)
@property
def element_spec(self):
return self._input_dataset.element_spec
class TensorDataset(DatasetSource):
"""A `Dataset` with a single element."""
def __init__(self, element):
"""See `Dataset.from_tensors()` for details."""
element = structure.normalize_element(element)
self._structure = structure.type_spec_from_value(element)
self._tensors = structure.to_tensor_list(self._structure, element)
variant_tensor = gen_dataset_ops.tensor_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(TensorDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class TensorSliceDataset(DatasetSource):
"""A `Dataset` of slices from a dataset element."""
def __init__(self, element):
"""See `Dataset.from_tensor_slices()` for details."""
element = structure.normalize_element(element)
batched_spec = structure.type_spec_from_value(element)
self._tensors = structure.to_batched_tensor_list(batched_spec, element)
self._structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), batched_spec) # pylint: disable=protected-access
batch_dim = tensor_shape.Dimension(tensor_shape.dimension_value(
self._tensors[0].get_shape()[0]))
for t in self._tensors[1:]:
batch_dim.assert_is_compatible_with(tensor_shape.Dimension(
tensor_shape.dimension_value(t.get_shape()[0])))
variant_tensor = gen_dataset_ops.tensor_slice_dataset(
self._tensors,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(TensorSliceDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class SparseTensorSliceDataset(DatasetSource):
"""A `Dataset` that splits a rank-N `tf.SparseTensor` into its rows."""
def __init__(self, sparse_tensor):
"""See `Dataset.from_sparse_tensor_slices()` for details."""
if not isinstance(sparse_tensor, sparse_tensor_lib.SparseTensor):
raise TypeError(
"`sparse_tensor` must be a `tf.SparseTensor` object. Was {}.".format(
sparse_tensor))
self._sparse_tensor = sparse_tensor
indices_shape = self._sparse_tensor.indices.get_shape()
shape_shape = self._sparse_tensor.dense_shape.get_shape()
rank = (indices_shape.dims[1] - 1).merge_with(shape_shape.dims[0] - 1)
self._structure = (tensor_spec.TensorSpec([None, rank], dtypes.int64),
tensor_spec.TensorSpec([None],
self._sparse_tensor.dtype),
tensor_spec.TensorSpec([rank], dtypes.int64))
variant_tensor = gen_dataset_ops.sparse_tensor_slice_dataset(
self._sparse_tensor.indices, self._sparse_tensor.values,
self._sparse_tensor.dense_shape)
super(SparseTensorSliceDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._structure
class _VariantDataset(DatasetV2):
"""A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, structure):
self._structure = structure
super(_VariantDataset, self).__init__(dataset_variant)
def _inputs(self):
return []
@property
def element_spec(self):
return self._structure
class _NestedVariant(composite_tensor.CompositeTensor):
def __init__(self, variant_tensor, element_spec, dataset_shape):
self._variant_tensor = variant_tensor
self._element_spec = element_spec
self._dataset_shape = dataset_shape
@property
def _type_spec(self):
return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
"""Constructs a dataset from the given variant and structure.
Args:
variant: A scalar `tf.variant` tensor representing a dataset.
structure: A `tf.data.experimental.Structure` object representing the
structure of each element in the dataset.
Returns:
A `tf.data.Dataset` instance.
"""
return _VariantDataset(variant, structure) # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset):
"""Returns a variant representing the given dataset.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset.
"""
return dataset._variant_tensor # pylint: disable=protected-access
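# Illustrative usage sketch: round-tripping a dataset through its variant
# tensor representation.
#
#   ds = tf.data.Dataset.range(3)
#   variant = tf.data.experimental.to_variant(ds)
#   restored = tf.data.experimental.from_variant(
#       variant, tf.data.experimental.get_structure(ds))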
@tf_export(
"data.DatasetSpec",
v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
class DatasetSpec(type_spec.BatchableTypeSpec):
"""Type specification for `tf.data.Dataset`.
See `tf.TypeSpec` for more information about TensorFlow type specifications.
>>> dataset = tf.data.Dataset.range(3)
>>> tf.data.DatasetSpec.from_value(dataset)
DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))
"""
__slots__ = ["_element_spec", "_dataset_shape"]
def __init__(self, element_spec, dataset_shape=()):
self._element_spec = element_spec
self._dataset_shape = tensor_shape.as_shape(dataset_shape)
@property
def value_type(self):
return _VariantDataset
def _serialize(self):
return (self._element_spec, self._dataset_shape)
@property
def _component_specs(self):
return tensor_spec.TensorSpec(self._dataset_shape, dtypes.variant)
def _to_components(self, value):
return value._variant_tensor # pylint: disable=protected-access
def _from_components(self, components):
# pylint: disable=protected-access
if self._dataset_shape.ndims == 0:
return _VariantDataset(components, self._element_spec)
else:
return _NestedVariant(components, self._element_spec, self._dataset_shape)
def _to_tensor_list(self, value):
return [
ops.convert_to_tensor(
tf_nest.map_structure(lambda x: x._variant_tensor, value)) # pylint: disable=protected-access
]
@staticmethod
def from_value(value):
"""Creates a `DatasetSpec` for the given `tf.data.Dataset` value."""
return DatasetSpec(value.element_spec) # pylint: disable=protected-access
def _batch(self, batch_size):
return DatasetSpec(
self._element_spec,
tensor_shape.TensorShape([batch_size]).concatenate(self._dataset_shape))
def _unbatch(self):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return DatasetSpec(self._element_spec, self._dataset_shape[1:])
def _to_batched_tensor_list(self, value):
if self._dataset_shape.ndims == 0:
raise ValueError("Unbatching a dataset is only supported for rank >= 1")
return self._to_tensor_list(value)
def _to_legacy_output_types(self):
return self
def _to_legacy_output_shapes(self):
return self
def _to_legacy_output_classes(self):
return self
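# Illustrative usage sketch (assumes TF 2.x, where datasets can be passed to a
# `tf.function` as traced arguments): using a `DatasetSpec` as an input
# signature.
#
#   ds = tf.data.Dataset.range(3)
#   spec = tf.data.DatasetSpec.from_value(ds)
#
#   @tf.function(input_signature=[spec])
#   def total(dataset):
#     return dataset.reduce(tf.constant(0, tf.int64), lambda x, y: x + y)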
class StructuredFunctionWrapper(object):
"""A function wrapper that supports structured arguments and return values."""
# pylint: disable=protected-access
def __init__(self,
func,
transformation_name,
dataset=None,
input_classes=None,
input_shapes=None,
input_types=None,
input_structure=None,
add_to_graph=True,
use_legacy_function=False,
defun_kwargs=None):
"""Creates a new `StructuredFunctionWrapper` for the given function.
Args:
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
input_structure: (Optional.) A `Structure` object. If given, this argument
defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
use_legacy_function: (Optional.) A boolean that determines whether the
function is created using `tensorflow.python.eager.function.defun`
(default behavior) or `tensorflow.python.framework.function.Defun`
(legacy behavior).
defun_kwargs: (Optional.) A dictionary mapping string argument names to
values. If supplied, will be passed to `function` as keyword arguments.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
`input_shapes`, and `input_types` is passed.
"""
if input_structure is None:
if dataset is None:
if input_classes is None or input_shapes is None or input_types is None:
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = structure.convert_legacy_structure(
input_types, input_shapes, input_classes)
else:
if not (input_classes is None and input_shapes is None and
input_types is None):
raise ValueError("Either `dataset`, `input_structure` or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = dataset.element_spec
else:
if not (dataset is None and input_classes is None and input_shapes is None
and input_types is None):
raise ValueError("Either `dataset`, `input_structure`, or all of "
"`input_classes`, `input_shapes`, and `input_types` "
"must be specified.")
self._input_structure = input_structure
self._func = func
if defun_kwargs is None:
defun_kwargs = {}
readable_transformation_name = transformation_name.replace(
".", "_")[:-2] if len(transformation_name) > 2 else ""
func_name = "_".join(
[readable_transformation_name,
function_utils.get_func_name(func)])
ag_ctx = autograph_ctx.control_status_ctx()
def _warn_if_collections(transformation_name):
"""Prints a warning if the given graph uses common graph collections.
NOTE(mrry): Currently a warning is only generated for resources. Any
variables created will be automatically hoisted out to the outermost scope
using `init_scope()`. Some collections (such as for control-flow contexts)
are benign and should not generate a warning.
Args:
transformation_name: A human-readable name for the transformation.
"""
warnings.warn("Creating resources inside a function passed to %s "
"is not supported. Create each resource outside the "
"function, and capture it inside the function to use it." %
transformation_name, stacklevel=5)
def _wrapper_helper(*args):
"""Wrapper for passing nested structures to and from tf.data functions."""
nested_args = structure.from_compatible_tensor_list(
self._input_structure, args)
if not _should_unpack_args(nested_args):
nested_args = (nested_args,)
ret = autograph.tf_convert(func, ag_ctx)(*nested_args)
# If `func` returns a list of tensors, `nest.flatten()` and
# `ops.convert_to_tensor()` would conspire to attempt to stack
# those tensors into a single tensor, because the customized
# version of `nest.flatten()` does not recurse into lists. Since
# it is more likely that the list arose from returning the
# result of an operation (such as `tf.numpy_function()`) that returns a
# list of not-necessarily-stackable tensors, we treat the
# returned value as a `tuple` instead. A user wishing to pack
# the return value into a single tensor can use an explicit
# `tf.stack()` before returning.
if isinstance(ret, list):
ret = tuple(ret)
try:
self._output_structure = structure.type_spec_from_value(ret)
except (ValueError, TypeError):
six.reraise(
TypeError,
TypeError("Unsupported return value from function passed to "
"%s: %s." % (transformation_name, ret)),
sys.exc_info()[2])
return ret
if use_legacy_function:
func_name = func_name + "_" + str(ops.uid())
@function.Defun(
*structure.get_flat_tensor_types(self._input_structure),
func_name=func_name,
**defun_kwargs)
def wrapper_fn(*args):
ret = _wrapper_helper(*args)
# _warn_if_collections(transformation_name, ops.get_default_graph(), 0)
return structure.to_tensor_list(self._output_structure, ret)
self._function = wrapper_fn
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
else:
# Use the private method that will execute `wrapper_fn` but delay
# adding it to the graph in case (e.g.) we need to rerun the function.
self._function._create_definition_if_needed()
if resource_tracker.resources:
_warn_if_collections(transformation_name)
else:
defun_kwargs.update({"func_name": func_name})
# Note: _wrapper_helper will apply autograph based on context.
@eager_function.defun_with_attributes(
input_signature=structure.get_flat_tensor_specs(
self._input_structure),
autograph=False,
attributes=defun_kwargs)
def wrapper_fn(*args): # pylint: disable=missing-docstring
ret = _wrapper_helper(*args)
ret = structure.to_tensor_list(self._output_structure, ret)
return [ops.convert_to_tensor(t) for t in ret]
resource_tracker = tracking.ResourceTracker()
with tracking.resource_tracker_scope(resource_tracker):
self._function = (
wrapper_fn._get_concrete_function_internal_garbage_collected())
# TODO(jsimsa): Garbage collecting functions containing PyFunc nodes
# triggers use-after-free. Figure out why and stop excluding functions
# with PyFunc nodes from garbage collection.
for node in self._function.function_def.node_def:
if node.op in ("PyFunc", "PyFuncStateless", "EagerPyFunc"):
self._function._garbage_collector.release()
if add_to_graph:
self._function.add_to_graph(ops.get_default_graph())
if resource_tracker.resources:
_warn_if_collections(transformation_name)
outer_graph_seed = ops.get_default_graph().seed
if outer_graph_seed and self._function.graph.seed == outer_graph_seed:
if self._function.graph._seed_used:
warnings.warn(
"Seed %s from outer graph might be getting used by function %s, "
"if the random op has not been provided any seed. Explicitly set "
"the seed in the function if this is not the intended behavior."
%(outer_graph_seed, func_name), stacklevel=4)
# pylint: enable=protected-access
@property
def output_structure(self):
return self._output_structure
@property
def output_classes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_classes(), # pylint: disable=protected-access
self._output_structure)
@property
def output_shapes(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access
self._output_structure)
@property
def output_types(self):
return nest.map_structure(
lambda component_spec: component_spec._to_legacy_output_types(), # pylint: disable=protected-access
self._output_structure)
@property
def function(self):
return self._function
class _GeneratorDataset(DatasetSource):
"""A `Dataset` that generates elements by invoking a function."""
def __init__(self, init_args, init_func, next_func, finalize_func):
"""Constructs a `_GeneratorDataset`.
Args:
init_args: A nested structure representing the arguments to `init_func`.
init_func: A TensorFlow function that will be called on `init_args` each
time a C++ iterator over this dataset is constructed. Returns a nested
structure representing the "state" of the dataset.
next_func: A TensorFlow function that will be called on the result of
`init_func` to produce each element, and that raises `OutOfRangeError`
to terminate iteration.
finalize_func: A TensorFlow function that will be called on the result of
`init_func` immediately before a C++ iterator over this dataset is
destroyed. The return value is ignored.
"""
self._init_args = init_args
self._init_structure = structure.type_spec_from_value(init_args)
self._init_func = StructuredFunctionWrapper(
init_func,
self._transformation_name(),
input_structure=self._init_structure)
self._next_func = StructuredFunctionWrapper(
next_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
self._finalize_func = StructuredFunctionWrapper(
finalize_func,
self._transformation_name(),
input_structure=self._init_func.output_structure)
variant_tensor = gen_dataset_ops.generator_dataset(
structure.to_tensor_list(self._init_structure, self._init_args) +
self._init_func.function.captured_inputs,
self._next_func.function.captured_inputs,
self._finalize_func.function.captured_inputs,
init_func=self._init_func.function,
next_func=self._next_func.function,
finalize_func=self._finalize_func.function,
**self._flat_structure)
super(_GeneratorDataset, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._next_func.output_structure
def _transformation_name(self):
return "Dataset.from_generator()"
class ZipDataset(DatasetV2):
"""A `Dataset` that zips its inputs together."""
def __init__(self, datasets):
"""See `Dataset.zip()` for details."""
for ds in nest.flatten(datasets):
if not isinstance(ds, DatasetV2):
if isinstance(ds, list):
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects. Nested structures do not "
"support Python lists; please use a tuple instead.")
else:
message = ("The argument to `Dataset.zip()` must be a nested "
"structure of `Dataset` objects.")
raise TypeError(message)
self._datasets = datasets
self._structure = nest.pack_sequence_as(
self._datasets,
[ds.element_spec for ds in nest.flatten(self._datasets)])
variant_tensor = gen_dataset_ops.zip_dataset(
[ds._variant_tensor for ds in nest.flatten(self._datasets)],
**self._flat_structure)
super(ZipDataset, self).__init__(variant_tensor)
def _inputs(self):
return nest.flatten(self._datasets)
@property
def element_spec(self):
return self._structure
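# Illustrative usage sketch: zipping datasets element-wise, mirroring Python's
# built-in `zip()`.
#
#   a = tf.data.Dataset.range(1, 4)  # ==> 1, 2, 3
#   b = tf.data.Dataset.range(4, 7)  # ==> 4, 5, 6
#   tf.data.Dataset.zip((a, b))      # ==> (1, 4), (2, 5), (3, 6)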
class ConcatenateDataset(DatasetV2):
"""A `Dataset` that concatenates its input with given dataset."""
def __init__(self, input_dataset, dataset_to_concatenate):
"""See `Dataset.concatenate()` for details."""
self._input_dataset = input_dataset
self._dataset_to_concatenate = dataset_to_concatenate
output_types = get_legacy_output_types(input_dataset)
if output_types != get_legacy_output_types(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different types %s and %s" %
(output_types, get_legacy_output_types(dataset_to_concatenate)))
output_classes = get_legacy_output_classes(input_dataset)
if output_classes != get_legacy_output_classes(dataset_to_concatenate):
raise TypeError(
"Two datasets to concatenate have different classes %s and %s" %
(output_classes, get_legacy_output_classes(dataset_to_concatenate)))
input_shapes = get_legacy_output_shapes(self._input_dataset)
output_shapes = nest.pack_sequence_as(input_shapes, [
ts1.most_specific_compatible_shape(ts2)
for (ts1, ts2) in zip(
nest.flatten(input_shapes),
nest.flatten(get_legacy_output_shapes(
self._dataset_to_concatenate)))
])
self._structure = structure.convert_legacy_structure(
output_types, output_shapes, output_classes)
self._input_datasets = [input_dataset, dataset_to_concatenate]
# pylint: disable=protected-access
variant_tensor = gen_dataset_ops.concatenate_dataset(
input_dataset._variant_tensor, dataset_to_concatenate._variant_tensor,
**self._flat_structure)
# pylint: enable=protected-access
super(ConcatenateDataset, self).__init__(variant_tensor)
def _inputs(self):
return self._input_datasets
@property
def element_spec(self):
return self._structure
class RepeatDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that repeats its input several times."""
def __init__(self, input_dataset, count):
"""See `Dataset.repeat()` for details."""
self._input_dataset = input_dataset
if count is None:
self._count = constant_op.constant(-1, dtype=dtypes.int64, name="count")
else:
self._count = ops.convert_to_tensor(
count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.repeat_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(RepeatDataset, self).__init__(input_dataset, variant_tensor)
class RangeDataset(DatasetSource):
"""A `Dataset` of a step separated range of values."""
def __init__(self, *args):
"""See `Dataset.range()` for details."""
self._parse_args(*args)
self._structure = tensor_spec.TensorSpec([], dtypes.int64)
variant_tensor = gen_dataset_ops.range_dataset(
start=self._start,
stop=self._stop,
step=self._step,
**self._flat_structure)
super(RangeDataset, self).__init__(variant_tensor)
def _parse_args(self, *args):
"""Parse arguments according to the same rules as the `range()` builtin."""
if len(args) == 1:
self._start = self._build_tensor(0, "start")
self._stop = self._build_tensor(args[0], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 2:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(1, "step")
elif len(args) == 3:
self._start = self._build_tensor(args[0], "start")
self._stop = self._build_tensor(args[1], "stop")
self._step = self._build_tensor(args[2], "step")
else:
raise ValueError("Invalid arguments to RangeDataset: %s" % str(args))
def _build_tensor(self, int64_value, name):
return ops.convert_to_tensor(int64_value, dtype=dtypes.int64, name=name)
@property
def element_spec(self):
return self._structure
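# Illustrative usage sketch: the three argument forms accepted by
# `Dataset.range()`, following the `range()` builtin.
#
#   tf.data.Dataset.range(5)        # ==> 0, 1, 2, 3, 4
#   tf.data.Dataset.range(2, 5)     # ==> 2, 3, 4
#   tf.data.Dataset.range(1, 5, 2)  # ==> 1, 3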
class _MemoryCacheDeleter(object):
"""An object which cleans up an anonymous memory cache resource.
An alternative to defining a __del__ method on an object. Even if the parent
object is part of a reference cycle, the cycle will be collectable.
"""
def __init__(self, handle, device, deleter):
self._deleter = deleter
self._handle = handle
self._device = device
self._eager_mode = context.executing_eagerly()
def __del__(self):
with ops.device(self._device):
# Make sure the resource is deleted in the same mode as it was created in.
if self._eager_mode:
with context.eager_mode():
gen_dataset_ops.delete_memory_cache(
handle=self._handle, deleter=self._deleter)
else:
with context.graph_mode():
gen_dataset_ops.delete_memory_cache(
handle=self._handle, deleter=self._deleter)
class _MemoryCache(object):
"""Represents a memory cache resource."""
def __init__(self):
super(_MemoryCache, self).__init__()
self._device = context.context().device_name
self._handle, self._deleter = (gen_dataset_ops.anonymous_memory_cache())
self._resource_deleter = _MemoryCacheDeleter(
handle=self._handle, device=self._device, deleter=self._deleter)
@property
def handle(self):
return self._handle
class CacheDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that caches elements of its input."""
def __init__(self, input_dataset, filename):
"""See `Dataset.cache()` for details."""
self._input_dataset = input_dataset
self._filename = ops.convert_to_tensor(
filename, dtype=dtypes.string, name="filename")
if tf2.enabled() and (context.executing_eagerly() or
ops.get_default_graph()._building_function): # pylint: disable=protected-access
self._cache = _MemoryCache()
variant_tensor = gen_dataset_ops.cache_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
cache=self._cache.handle,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.cache_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
filename=self._filename,
**self._flat_structure)
super(CacheDataset, self).__init__(input_dataset, variant_tensor)
class _RandomSeedGeneratorDeleter(object):
"""An object which cleans up an anonymous random seed generator resource.
An alternative to defining a __del__ method on an object. Even if the parent
object is part of a reference cycle, the cycle will be collectable.
"""
def __init__(self, handle, device, deleter):
self._deleter = deleter
self._handle = handle
self._device = device
self._eager_mode = context.executing_eagerly()
def __del__(self):
with ops.device(self._device):
# Make sure the resource is deleted in the same mode as it was created in.
if self._eager_mode:
with context.eager_mode():
gen_dataset_ops.delete_random_seed_generator(
handle=self._handle, deleter=self._deleter)
else:
with context.graph_mode():
gen_dataset_ops.delete_random_seed_generator(
handle=self._handle, deleter=self._deleter)
class _RandomSeedGenerator(object):
"""Represents a random seed generator resource."""
def __init__(self, seed, seed2):
super(_RandomSeedGenerator, self).__init__()
self._device = context.context().device_name
self._handle, self._deleter = (
gen_dataset_ops.anonymous_random_seed_generator(seed=seed, seed2=seed2))
self._resource_deleter = _RandomSeedGeneratorDeleter(
handle=self._handle, device=self._device, deleter=self._deleter)
@property
def handle(self):
return self._handle
class ShuffleDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that randomly shuffles the elements of its input."""
def __init__(self,
input_dataset,
buffer_size,
seed=None,
reshuffle_each_iteration=None):
"""Randomly shuffles the elements of this dataset.
Args:
input_dataset: The input dataset.
buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
elements from this dataset from which the new dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
seed that will be used to create the distribution. See
`tf.compat.v1.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
Returns:
A `Dataset`.
Raises:
ValueError: if invalid arguments are provided.
"""
self._input_dataset = input_dataset
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
self._seed, self._seed2 = random_seed.get_seed(seed)
if reshuffle_each_iteration is None:
self._reshuffle_each_iteration = True
else:
self._reshuffle_each_iteration = reshuffle_each_iteration
if tf2.enabled() and self._reshuffle_each_iteration and (
context.executing_eagerly() or
ops.get_default_graph()._building_function): # pylint: disable=protected-access
self._seed_generator = _RandomSeedGenerator(self._seed, self._seed2)
variant_tensor = gen_dataset_ops.shuffle_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed_generator=self._seed_generator.handle,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.shuffle_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
seed=self._seed,
seed2=self._seed2,
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._flat_structure)
super(ShuffleDataset, self).__init__(input_dataset, variant_tensor)
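# Illustrative usage sketch (added for clarity; not part of the original file).
# ShuffleDataset backs the public `tf.data.Dataset.shuffle` transformation:
#
#   ds = tf.data.Dataset.range(100).shuffle(buffer_size=10, seed=42)
#
# Elements are drawn at random from a sliding buffer of `buffer_size` elements,
# so a buffer at least as large as the dataset gives a full uniform shuffle.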
class TakeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` containing the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.take()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.take_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(TakeDataset, self).__init__(input_dataset, variant_tensor)
class SkipDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` skipping the first `count` elements from its input."""
def __init__(self, input_dataset, count):
"""See `Dataset.skip()` for details."""
self._input_dataset = input_dataset
self._count = ops.convert_to_tensor(count, dtype=dtypes.int64, name="count")
variant_tensor = gen_dataset_ops.skip_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
count=self._count,
**self._flat_structure)
super(SkipDataset, self).__init__(input_dataset, variant_tensor)
class ShardDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` for sharding its input."""
def __init__(self, input_dataset, num_shards, index):
"""See `Dataset.shard()` for details."""
self._input_dataset = input_dataset
self._num_shards = ops.convert_to_tensor(
num_shards, dtype=dtypes.int64, name="num_shards")
self._index = ops.convert_to_tensor(index, dtype=dtypes.int64, name="index")
variant_tensor = gen_dataset_ops.shard_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
num_shards=self._num_shards,
index=self._index,
**self._flat_structure)
super(ShardDataset, self).__init__(input_dataset, variant_tensor)
class BatchDataset(UnaryDataset):
"""A `Dataset` that batches contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
constant_drop_remainder = tensor_util.constant_value(self._drop_remainder)
# pylint: disable=protected-access
if constant_drop_remainder:
# NOTE(mrry): `constant_drop_remainder` may be `None` (unknown statically)
# or `False` (explicitly retaining the remainder).
# pylint: disable=g-long-lambda
self._structure = nest.map_structure(
lambda component_spec: component_spec._batch(
tensor_util.constant_value(self._batch_size)),
input_dataset.element_spec)
else:
self._structure = nest.map_structure(
lambda component_spec: component_spec._batch(None),
input_dataset.element_spec)
variant_tensor = gen_dataset_ops.batch_dataset_v2(
input_dataset._variant_tensor,
batch_size=self._batch_size,
drop_remainder=self._drop_remainder,
**self._flat_structure)
super(BatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
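# Illustrative usage sketch (added for clarity; not part of the original file).
# BatchDataset backs the public `tf.data.Dataset.batch` transformation:
#
#   ds = tf.data.Dataset.range(10).batch(4)                        # batch shape: [None]
#   ds = tf.data.Dataset.range(10).batch(4, drop_remainder=True)   # batch shape: [4]
#
# With `drop_remainder=True` the batch dimension is statically known, which is
# what the `constant_drop_remainder` branch above propagates into the element spec.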
class _VariantTracker(tracking.CapturableResource):
"""Allows export of functions capturing a Dataset in SavedModels.
When saving a SavedModel, `tf.saved_model.save` traverses the object
graph. Since Datasets reference _VariantTracker objects, that traversal will
find a _VariantTracker for each Dataset and so know how to save and restore
functions which reference the Dataset's variant Tensor.
"""
def __init__(self, variant_tensor, resource_creator):
"""Record that `variant_tensor` is associated with `resource_creator`.
Args:
variant_tensor: The variant-dtype Tensor associated with the Dataset. This
Tensor will be a captured input to functions which use the Dataset, and
is used by saving code to identify the corresponding _VariantTracker.
resource_creator: A zero-argument function which creates a new
variant-dtype Tensor. This function will be included in SavedModels and
run to re-create the Dataset's variant Tensor on restore.
"""
super(_VariantTracker, self).__init__(device="CPU")
self._resource_handle = variant_tensor
self._create_resource = resource_creator
def _is_padded_shape_compatible_with(padded_shape, input_component_shape):
"""Returns `True` if `input_component_shape` can be padded to `padded_shape`.
Args:
padded_shape: A `tf.TensorShape`.
input_component_shape: A `tf.TensorShape`.
Returns:
`True` if `input_component_shape` can be padded to `padded_shape`, otherwise
`False`.
"""
if padded_shape.dims is None or input_component_shape.dims is None:
return True
if len(padded_shape.dims) != len(input_component_shape.dims):
return False
for padded_dim, input_dim in zip(
padded_shape.dims, input_component_shape.dims):
if (padded_dim.value is not None and input_dim.value is not None
and padded_dim.value < input_dim.value):
return False
return True
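# Illustrative examples (added for clarity; not part of the original file):
#
#   _is_padded_shape_compatible_with(tensor_shape.TensorShape([None, 4]),
#                                    tensor_shape.TensorShape([2, 3]))   # -> True
#   _is_padded_shape_compatible_with(tensor_shape.TensorShape([2, 2]),
#                                    tensor_shape.TensorShape([2, 3]))   # -> False
#
# Ranks must match, and every statically known padded dimension must be at
# least as large as the corresponding known input dimension.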
def _padded_shape_to_tensor(padded_shape, input_component_shape):
"""Converts `padded_shape` to a `tf.Tensor` representing that shape.
Args:
padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
be compatible.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.
Raises:
ValueError: If `padded_shape` is not a shape or not compatible with
`input_component_shape`.
TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
"""
try:
# Try to convert the `padded_shape` to a `tf.TensorShape`
padded_shape_as_shape = tensor_shape.as_shape(padded_shape)
# We will return the "canonical" tensor representation, which uses
# `-1` in place of `None`.
ret = ops.convert_to_tensor(
[dim if dim is not None else -1
for dim in padded_shape_as_shape.as_list()], dtype=dtypes.int64)
except (TypeError, ValueError):
# The argument was not trivially convertible to a
# `tf.TensorShape`, so fall back on the conversion to tensor
# machinery.
ret = ops.convert_to_tensor(padded_shape, preferred_dtype=dtypes.int64)
if ret.shape.dims is not None and len(ret.shape.dims) != 1:
six.reraise(ValueError, ValueError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but its "
"shape was %s." % (padded_shape, ret.shape)), sys.exc_info()[2])
if ret.dtype != dtypes.int64:
six.reraise(
TypeError,
TypeError(
"Padded shape %s must be a 1-D tensor of tf.int64 values, but "
"its element type was %s." % (padded_shape, ret.dtype.name)),
sys.exc_info()[2])
padded_shape_as_shape = tensor_util.constant_value_as_shape(ret)
if not _is_padded_shape_compatible_with(padded_shape_as_shape,
input_component_shape):
raise ValueError("The padded shape %s is not compatible with the "
"corresponding input component shape %s."
% (padded_shape_as_shape, input_component_shape))
return ret
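# Illustrative example (added for clarity; not part of the original file):
#
#   _padded_shape_to_tensor(tensor_shape.TensorShape([None, 3]),
#                           tensor_shape.TensorShape([1, 3]))
#   # -> an int64 tensor [-1, 3]; unknown dimensions become -1, meaning
#   #    "pad to the longest element in the batch" for that dimension.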
def _padding_value_to_tensor(value, output_type):
"""Converts the padding value to a tensor.
Args:
value: The padding value.
output_type: Its expected dtype.
Returns:
A scalar `Tensor`.
Raises:
ValueError: if the padding value is not a scalar.
TypeError: if the padding value's type does not match `output_type`.
"""
value = ops.convert_to_tensor(value, name="padding_value")
if not value.shape.is_compatible_with(tensor_shape.TensorShape([])):
raise ValueError("Padding value should be a scalar, but is not: %s" % value)
if value.dtype != output_type:
raise TypeError("Padding value tensor (%s) does not match output type: %s" %
(value, output_type))
return value
def _default_padding(input_dataset):
"""Returns default padding tensors in a structure matching `input_dataset`."""
def make_zero(t):
if t.base_dtype == dtypes.string:
return ""
elif t.base_dtype == dtypes.variant:
      error_msg = ("Unable to create padding for field of type 'variant': "
                   "components with base dtype {} are not "
                   "supported.".format(t.base_dtype))
raise TypeError(error_msg)
else:
return np.zeros_like(t.as_numpy_dtype())
return nest.map_structure(
make_zero, get_legacy_output_types(input_dataset))
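# Illustrative example (added for clarity; not part of the original file): for a
# dataset whose legacy output types are (tf.string, tf.int32), the defaults
# produced by `_default_padding` are ("", 0) -- empty strings for string
# components and zeros of the matching dtype for numeric components.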
class PaddedBatchDataset(UnaryDataset):
"""A `Dataset` that batches and pads contiguous elements from its input."""
def __init__(self, input_dataset, batch_size, padded_shapes, padding_values,
drop_remainder):
"""See `Dataset.batch()` for details."""
self._input_dataset = input_dataset
if sparse.any_sparse(get_legacy_output_classes(input_dataset)):
# TODO(b/63669786): support batching of sparse tensors
raise TypeError(
"Batching of padded sparse tensors is not currently supported")
self._input_dataset = input_dataset
self._batch_size = ops.convert_to_tensor(
batch_size, dtype=dtypes.int64, name="batch_size")
padding_values = (
padding_values
if padding_values is not None else _default_padding(input_dataset))
input_shapes = get_legacy_output_shapes(input_dataset)
flat_padded_shapes = nest.flatten_up_to(input_shapes, padded_shapes)
flat_padded_shapes_as_tensors = []
for input_component_shape, padded_shape in zip(
nest.flatten(input_shapes), flat_padded_shapes):
flat_padded_shapes_as_tensors.append(
_padded_shape_to_tensor(padded_shape, input_component_shape))
self._padded_shapes = nest.pack_sequence_as(input_shapes,
flat_padded_shapes_as_tensors)
self._padding_values = nest.map_structure_up_to(
input_shapes, _padding_value_to_tensor, padding_values,
get_legacy_output_types(input_dataset))
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
def _padded_shape_to_batch_shape(s):
return tensor_shape.TensorShape([
tensor_util.constant_value(self._batch_size)
if smart_cond.smart_constant_value(self._drop_remainder) else None
]).concatenate(tensor_util.constant_value_as_shape(s))
output_shapes = nest.map_structure(
_padded_shape_to_batch_shape, self._padded_shapes)
self._structure = structure.convert_legacy_structure(
get_legacy_output_types(self._input_dataset), output_shapes,
get_legacy_output_classes(self._input_dataset))
# pylint: disable=protected-access
# TODO(jsimsa): Switch to using v2 only any time after 6/30/2018.
if smart_cond.smart_constant_value(self._drop_remainder) is False:
variant_tensor = gen_dataset_ops.padded_batch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
output_shapes=structure.get_flat_tensor_shapes(self._structure))
else:
variant_tensor = gen_dataset_ops.padded_batch_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
batch_size=self._batch_size,
padded_shapes=[
ops.convert_to_tensor(s, dtype=dtypes.int64)
for s in nest.flatten(self._padded_shapes)
],
padding_values=nest.flatten(self._padding_values),
drop_remainder=self._drop_remainder,
output_shapes=structure.get_flat_tensor_shapes(self._structure))
super(PaddedBatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
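# Illustrative usage sketch (added for clarity; not part of the original file).
# PaddedBatchDataset backs the public `tf.data.Dataset.padded_batch` transformation:
#
#   ds = tf.data.Dataset.range(1, 4).map(lambda x: tf.fill([x], x))
#   ds = ds.padded_batch(batch_size=2, padded_shapes=[None])
#   # -> [[1, 0], [2, 2]], then [[3, 3, 3]]; shorter elements are padded with the
#   #    default padding value (0) up to the longest element in each batch.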
def _should_unpack_args(args):
"""Returns `True` if `args` should be `*args` when passed to a callable."""
return type(args) is tuple # pylint: disable=unidiomatic-typecheck
class MapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input."""
def __init__(self,
input_dataset,
map_func,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._preserve_cardinality = preserve_cardinality
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
variant_tensor = gen_dataset_ops.map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**self._flat_structure)
super(MapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class ParallelMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over elements in its input in parallel."""
def __init__(self,
input_dataset,
map_func,
num_parallel_calls,
use_inter_op_parallelism=True,
preserve_cardinality=False,
use_legacy_function=False):
"""See `Dataset.map()` for details."""
self._input_dataset = input_dataset
self._use_inter_op_parallelism = use_inter_op_parallelism
self._map_func = StructuredFunctionWrapper(
map_func,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int32, name="num_parallel_calls")
self._preserve_cardinality = preserve_cardinality
variant_tensor = gen_dataset_ops.parallel_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
num_parallel_calls=self._num_parallel_calls,
use_inter_op_parallelism=self._use_inter_op_parallelism,
preserve_cardinality=self._preserve_cardinality,
**self._flat_structure)
super(ParallelMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._map_func.output_structure
def _transformation_name(self):
return "Dataset.map()"
class FlatMapDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and flattens the result."""
def __init__(self, input_dataset, map_func):
"""See `Dataset.flat_map()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
variant_tensor = gen_dataset_ops.flat_map_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs,
f=self._map_func.function,
**self._flat_structure)
super(FlatMapDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.flat_map()"
class InterleaveDataset(UnaryDataset):
"""A `Dataset` that interleaves the result of transformed inputs."""
def __init__(self, input_dataset, map_func, cycle_length, block_length):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
variant_tensor = gen_dataset_ops.interleave_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
f=self._map_func.function,
**self._flat_structure)
super(InterleaveDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class ParallelInterleaveDataset(UnaryDataset):
"""A `Dataset` that maps a function over its input and interleaves the result."""
def __init__(self, input_dataset, map_func, cycle_length, block_length,
num_parallel_calls):
"""See `Dataset.interleave()` for details."""
self._input_dataset = input_dataset
self._map_func = StructuredFunctionWrapper(
map_func, self._transformation_name(), dataset=input_dataset)
if not isinstance(self._map_func.output_structure, DatasetSpec):
raise TypeError(
"`map_func` must return a `Dataset` object. Got {}".format(
type(self._map_func.output_structure)))
self._structure = self._map_func.output_structure._element_spec # pylint: disable=protected-access
self._cycle_length = ops.convert_to_tensor(
cycle_length, dtype=dtypes.int64, name="cycle_length")
self._block_length = ops.convert_to_tensor(
block_length, dtype=dtypes.int64, name="block_length")
self._num_parallel_calls = ops.convert_to_tensor(
num_parallel_calls, dtype=dtypes.int64, name="num_parallel_calls")
variant_tensor = gen_dataset_ops.parallel_interleave_dataset_v2(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._map_func.function.captured_inputs, # pylint: disable=protected-access
self._cycle_length,
self._block_length,
self._num_parallel_calls,
f=self._map_func.function,
**self._flat_structure)
super(ParallelInterleaveDataset, self).__init__(input_dataset,
variant_tensor)
def _functions(self):
return [self._map_func]
@property
def element_spec(self):
return self._structure
def _transformation_name(self):
return "Dataset.interleave()"
class FilterDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that filters its input according to a predicate function."""
def __init__(self, input_dataset, predicate, use_legacy_function=False):
"""See `Dataset.filter()` for details."""
self._input_dataset = input_dataset
wrapped_func = StructuredFunctionWrapper(
predicate,
self._transformation_name(),
dataset=input_dataset,
use_legacy_function=use_legacy_function)
if not wrapped_func.output_structure.is_compatible_with(
tensor_spec.TensorSpec([], dtypes.bool)):
error_msg = ("`predicate` return type must be convertible to a scalar "
"boolean tensor. Was {}.").format(
wrapped_func.output_structure)
raise ValueError(error_msg)
self._predicate = wrapped_func
variant_tensor = gen_dataset_ops.filter_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
other_arguments=self._predicate.function.captured_inputs,
predicate=self._predicate.function,
**self._flat_structure)
super(FilterDataset, self).__init__(input_dataset, variant_tensor)
def _functions(self):
return [self._predicate]
def _transformation_name(self):
return "Dataset.filter()"
class PrefetchDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that asynchronously prefetches its input."""
def __init__(self, input_dataset, buffer_size, slack_period=None):
"""See `Dataset.prefetch()` for details.
Args:
input_dataset: The input dataset.
buffer_size: See `Dataset.prefetch()` for details.
slack_period: (Optional.) An integer. If non-zero, determines the number
of GetNext calls before injecting slack into the execution. This may
reduce CPU contention at the start of a step. Note that a tensorflow
user should not have to set this manually; enable this behavior
automatically via `tf.data.Options.experimental_slack` instead. Defaults
to None.
"""
self._input_dataset = input_dataset
if buffer_size is None:
buffer_size = -1 # This is the sentinel for auto-tuning.
self._buffer_size = ops.convert_to_tensor(
buffer_size, dtype=dtypes.int64, name="buffer_size")
variant_tensor = gen_dataset_ops.prefetch_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
buffer_size=self._buffer_size,
slack_period=slack_period,
**self._flat_structure)
super(PrefetchDataset, self).__init__(input_dataset, variant_tensor)
class WindowDataset(UnaryDataset):
"""A dataset that creates window datasets from the input elements."""
def __init__(self, input_dataset, size, shift, stride, drop_remainder):
"""See `window_dataset()` for more details."""
self._input_dataset = input_dataset
self._size = ops.convert_to_tensor(size, dtype=dtypes.int64, name="size")
self._shift = ops.convert_to_tensor(shift, dtype=dtypes.int64, name="shift")
self._stride = ops.convert_to_tensor(
stride, dtype=dtypes.int64, name="stride")
self._drop_remainder = ops.convert_to_tensor(
drop_remainder, dtype=dtypes.bool, name="drop_remainder")
self._structure = nest.pack_sequence_as(
get_legacy_output_classes(input_dataset), [
DatasetSpec( # pylint: disable=g-complex-comprehension
structure.convert_legacy_structure(
output_type, output_shape, output_class))
for output_class, output_shape, output_type in zip(
nest.flatten(get_legacy_output_classes(input_dataset)),
nest.flatten(get_legacy_output_shapes(input_dataset)),
nest.flatten(get_legacy_output_types(input_dataset)))
])
variant_tensor = gen_dataset_ops.window_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._size,
self._shift,
self._stride,
self._drop_remainder,
**self._flat_structure)
super(WindowDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
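# Illustrative usage sketch (added for clarity; not part of the original file).
# WindowDataset backs the public `tf.data.Dataset.window` transformation:
#
#   ds = tf.data.Dataset.range(7).window(size=3, shift=1, drop_remainder=True)
#
# Each element of `ds` is itself a dataset of up to `size` consecutive elements,
# which is why the element spec above is built out of nested `DatasetSpec`s.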
class _OptionsDataset(UnaryUnchangedStructureDataset):
"""An identity `Dataset` that stores options."""
def __init__(self, input_dataset, options):
self._input_dataset = input_dataset
self._options = input_dataset.options()
if self._options:
self._options = self._options.merge(options)
else:
self._options = options
variant_tensor = input_dataset._variant_tensor # pylint: disable=protected-access
super(_OptionsDataset, self).__init__(input_dataset, variant_tensor)
def options(self):
return self._options
class _ModelDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and models performance."""
def __init__(self, input_dataset, algorithm, cpu_budget):
self._input_dataset = input_dataset
# TODO(jsimsa): This check is introduced for forward compatibility and can
# be removed after 7/24/2019. At that point, all servers are expected to
# recognize the `algorithm` attribute.
if algorithm != AutotuneAlgorithm.HILL_CLIMB:
variant_tensor = gen_dataset_ops.model_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
algorithm=algorithm,
cpu_budget=cpu_budget,
**self._flat_structure)
else:
variant_tensor = gen_dataset_ops.model_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
cpu_budget=cpu_budget,
**self._flat_structure)
super(_ModelDataset, self).__init__(input_dataset, variant_tensor)
class _OptimizeDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and applies optimizations."""
def __init__(self, input_dataset, optimizations, optimization_configs=None):
self._input_dataset = input_dataset
if optimizations is None:
optimizations = []
if optimization_configs is None:
optimization_configs = []
self._optimizations = ops.convert_to_tensor(
optimizations, dtype=dtypes.string, name="optimizations")
variant_tensor = gen_dataset_ops.optimize_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._optimizations,
optimization_configs=optimization_configs,
**self._flat_structure)
super(_OptimizeDataset, self).__init__(input_dataset, variant_tensor)
class _SetStatsAggregatorDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, and sets a stats aggregator."""
def __init__(self, input_dataset, aggregator, prefix, counter_prefix):
self._input_dataset = input_dataset
self._stats_aggregator = aggregator
self._prefix = prefix
self._counter_prefix = counter_prefix
variant_tensor = ged_ops.set_stats_aggregator_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._stats_aggregator._resource, # pylint: disable=protected-access
self._prefix,
self._counter_prefix,
**self._flat_structure)
super(_SetStatsAggregatorDataset, self).__init__(input_dataset,
variant_tensor)
class _MaxIntraOpParallelismDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, overriding intra-op parallelism."""
def __init__(self, input_dataset, max_intra_op_parallelism):
self._input_dataset = input_dataset
self._max_intra_op_parallelism = ops.convert_to_tensor(
max_intra_op_parallelism,
dtype=dtypes.int64,
name="max_intra_op_parallelism")
variant_tensor = ged_ops.max_intra_op_parallelism_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._max_intra_op_parallelism,
**self._flat_structure)
super(_MaxIntraOpParallelismDataset, self).__init__(input_dataset,
variant_tensor)
class _PrivateThreadPoolDataset(UnaryUnchangedStructureDataset):
"""A `Dataset` that acts as an identity, setting a private threadpool."""
def __init__(self, input_dataset, num_threads):
self._input_dataset = input_dataset
self._num_threads = ops.convert_to_tensor(
num_threads, dtype=dtypes.int64, name="num_threads")
variant_tensor = ged_ops.private_thread_pool_dataset(
input_dataset._variant_tensor, # pylint: disable=protected-access
self._num_threads,
**self._flat_structure)
super(_PrivateThreadPoolDataset, self).__init__(input_dataset,
variant_tensor)
class _RestructuredDataset(UnaryDataset):
"""An internal helper for changing the structure and shape of a dataset."""
def __init__(self, dataset, structure):
self._input_dataset = dataset
self._structure = structure
variant_tensor = self._input_dataset._variant_tensor # pylint: disable=protected-access
super(_RestructuredDataset, self).__init__(dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
class _UnbatchDataset(UnaryDataset):
"""A dataset that splits the elements of its input into multiple elements."""
def __init__(self, input_dataset):
"""See `unbatch()` for more details."""
flat_shapes = input_dataset._flat_shapes # pylint: disable=protected-access
if any(s.ndims == 0 for s in flat_shapes):
raise ValueError("Cannot unbatch an input with scalar components.")
known_batch_dim = tensor_shape.Dimension(None)
for s in flat_shapes:
try:
known_batch_dim = known_batch_dim.merge_with(s[0])
except ValueError:
raise ValueError("Cannot unbatch an input whose components have "
"different batch sizes.")
self._input_dataset = input_dataset
self._structure = nest.map_structure(
lambda component_spec: component_spec._unbatch(), # pylint: disable=protected-access
get_structure(input_dataset))
variant_tensor = ged_ops.unbatch_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._flat_structure)
super(_UnbatchDataset, self).__init__(input_dataset, variant_tensor)
@property
def element_spec(self):
return self._structure
| 39.525758
| 116
| 0.691845
|
7cdfae988ee44edc9d0ad77ebf895365e8efc116
| 184
|
py
|
Python
|
Semana_6/errores.py
|
eovallea3786/ProgramacionEAN20191
|
0f48a842195de29a3141d5012085e49d4b13a7d4
|
[
"MIT"
] | null | null | null |
Semana_6/errores.py
|
eovallea3786/ProgramacionEAN20191
|
0f48a842195de29a3141d5012085e49d4b13a7d4
|
[
"MIT"
] | null | null | null |
Semana_6/errores.py
|
eovallea3786/ProgramacionEAN20191
|
0f48a842195de29a3141d5012085e49d4b13a7d4
|
[
"MIT"
] | null | null | null |
numero = input('enter your number ')
try:
    print('your number {0} squared is {1}'.format(numero, float(numero) ** 2))
except ValueError:
    # float() raises ValueError for non-numeric input.
    print("'{0}' is not a number".format(numero))
| 20.444444
| 80
| 0.630435
|
b7be2e550e48ad551e8767466fd293832a83c9d3
| 8,569
|
py
|
Python
|
config/settings/common.py
|
AlexPylaev/mrpulse
|
4a4cdff3ed3d32f81f6c60264663b24b02738743
|
[
"MIT"
] | null | null | null |
config/settings/common.py
|
AlexPylaev/mrpulse
|
4a4cdff3ed3d32f81f6c60264663b24b02738743
|
[
"MIT"
] | null | null | null |
config/settings/common.py
|
AlexPylaev/mrpulse
|
4a4cdff3ed3d32f81f6c60264663b24b02738743
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Django settings for mrpulse project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (mrpulse/config/settings/common.py - 3 = mrpulse/)
APPS_DIR = ROOT_DIR.path('mrpulse')
env = environ.Env()
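# Illustrative note (not part of the original settings): `env` reads values from the
# process environment, as in `env.bool('DJANGO_DEBUG', False)` further down. A common
# alternative (not used in this project) to the hard-coded DATABASES block below would be
#
#   DATABASES = {'default': env.db('DATABASE_URL')}
#
# which parses a postgres://user:password@host:port/name URL instead of keeping the
# credentials inline.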
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
# custom users app
'mrpulse.users.apps.UsersConfig',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'mrpulse.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Alex Pylaev""", 'aip@yandex.ru'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'id1795_mrpulsedb',
'USER': 'id1795_mrpulsedbu',
'PASSWORD': 'tramparam2015',
'HOST': 'localhost',
'PORT': '',
}
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Moscow'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'ru-ru'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'mrpulse.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'mrpulse.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| 35.556017
| 98
| 0.612907
|
58fa010d7047602090531482ad2e85e9b8d397e4
| 9,469
|
py
|
Python
|
optuna_core/pruners/_successive_halving.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | 1
|
2020-10-09T02:35:25.000Z
|
2020-10-09T02:35:25.000Z
|
optuna_core/pruners/_successive_halving.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | null | null | null |
optuna_core/pruners/_successive_halving.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | null | null | null |
import math
from typing import List
from typing import Optional
from typing import Union
import optuna_core
from optuna_core.pruners._base import BasePruner
from optuna_core.trial._state import TrialState
class SuccessiveHalvingPruner(BasePruner):
"""Pruner using Asynchronous Successive Halving Algorithm.
`Successive Halving <https://arxiv.org/abs/1502.07943>`_ is a bandit-based algorithm to
identify the best one among multiple configurations. This class implements an asynchronous
version of Successive Halving. Please refer to the paper of
`Asynchronous Successive Halving <http://arxiv.org/abs/1810.05934>`_ for detailed descriptions.
    Note that this class does not take care of the parameter for the maximum
resource, referred to as :math:`R` in the paper. The maximum resource allocated to a trial is
typically limited inside the objective function (e.g., ``step`` number in `simple.py
<https://github.com/optuna/optuna/tree/c5777b3e/examples/pruning/simple.py#L31>`_,
``EPOCH`` number in `chainer_integration.py
<https://github.com/optuna/optuna/tree/c5777b3e/examples/pruning/chainer_integration.py#L65>`_).
.. seealso::
Please refer to :meth:`~optuna.trial.Trial.report`.
Example:
We minimize an objective function with ``SuccessiveHalvingPruner``.
.. testcode::
import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
import optuna
X, y = load_iris(return_X_y=True)
X_train, X_valid, y_train, y_valid = train_test_split(X, y)
classes = np.unique(y)
def objective(trial):
alpha = trial.suggest_uniform("alpha", 0.0, 1.0)
clf = SGDClassifier(alpha=alpha)
n_train_iter = 100
for step in range(n_train_iter):
clf.partial_fit(X_train, y_train, classes=classes)
intermediate_value = clf.score(X_valid, y_valid)
trial.report(intermediate_value, step)
if trial.should_prune():
raise optuna.TrialPruned()
return clf.score(X_valid, y_valid)
study = optuna.create_study(
direction="maximize", pruner=optuna.pruners.SuccessiveHalvingPruner()
)
study.optimize(objective, n_trials=20)
Args:
min_resource:
A parameter for specifying the minimum resource allocated to a trial
(in the `paper <http://arxiv.org/abs/1810.05934>`_ this parameter is
referred to as :math:`r`).
This parameter defaults to 'auto' where the value is determined based on a heuristic
that looks at the number of required steps for the first trial to complete.
A trial is never pruned until it executes
:math:`\\mathsf{min}\\_\\mathsf{resource} \\times
\\mathsf{reduction}\\_\\mathsf{factor}^{
\\mathsf{min}\\_\\mathsf{early}\\_\\mathsf{stopping}\\_\\mathsf{rate}}`
steps (i.e., the completion point of the first rung). When the trial completes
the first rung, it will be promoted to the next rung only
if the value of the trial is placed in the top
:math:`{1 \\over \\mathsf{reduction}\\_\\mathsf{factor}}` fraction of
            all the trials that have already reached that point (otherwise it will be pruned there).
            If the trial wins the competition, it runs until the next completion point (i.e.,
:math:`\\mathsf{min}\\_\\mathsf{resource} \\times
\\mathsf{reduction}\\_\\mathsf{factor}^{
(\\mathsf{min}\\_\\mathsf{early}\\_\\mathsf{stopping}\\_\\mathsf{rate}
+ \\mathsf{rung})}` steps)
and repeats the same procedure.
.. note::
If the step of the last intermediate value may change with each trial, please
manually specify the minimum possible step to ``min_resource``.
reduction_factor:
A parameter for specifying reduction factor of promotable trials
(in the `paper <http://arxiv.org/abs/1810.05934>`_ this parameter is
referred to as :math:`\\eta`). At the completion point of each rung,
about :math:`{1 \\over \\mathsf{reduction}\\_\\mathsf{factor}}`
trials will be promoted.
min_early_stopping_rate:
A parameter for specifying the minimum early-stopping rate
(in the `paper <http://arxiv.org/abs/1810.05934>`_ this parameter is
referred to as :math:`s`).
"""
def __init__(
self,
min_resource: Union[str, int] = "auto",
reduction_factor: int = 4,
min_early_stopping_rate: int = 0,
) -> None:
if isinstance(min_resource, str) and min_resource != "auto":
raise ValueError(
"The value of `min_resource` is {}, "
"but must be either `min_resource` >= 1 or 'auto'".format(min_resource)
)
if isinstance(min_resource, int) and min_resource < 1:
raise ValueError(
"The value of `min_resource` is {}, "
"but must be either `min_resource >= 1` or 'auto'".format(min_resource)
)
if reduction_factor < 2:
raise ValueError(
"The value of `reduction_factor` is {}, "
"but must be `reduction_factor >= 2`".format(reduction_factor)
)
if min_early_stopping_rate < 0:
raise ValueError(
"The value of `min_early_stopping_rate` is {}, "
"but must be `min_early_stopping_rate >= 0`".format(min_early_stopping_rate)
)
self._min_resource = None # type: Optional[int]
if isinstance(min_resource, int):
self._min_resource = min_resource
self._reduction_factor = reduction_factor
self._min_early_stopping_rate = min_early_stopping_rate
def prune(
self, study: "optuna_core.study.Study", trial: "optuna_core.trial.FrozenTrial"
) -> bool:
step = trial.last_step
if step is None:
return False
rung = _get_current_rung(trial)
value = trial.intermediate_values[step]
trials = None # type: Optional[List["optuna_core.trial.FrozenTrial"]]
while True:
if self._min_resource is None:
if trials is None:
trials = study.get_trials(deepcopy=False)
self._min_resource = _estimate_min_resource(trials)
if self._min_resource is None:
return False
assert self._min_resource is not None
rung_promotion_step = self._min_resource * (
self._reduction_factor ** (self._min_early_stopping_rate + rung)
)
if step < rung_promotion_step:
return False
if math.isnan(value):
return True
if trials is None:
trials = study.get_trials(deepcopy=False)
rung_key = _completed_rung_key(rung)
study._storage.set_trial_system_attr(trial._trial_id, rung_key, value)
if not _is_trial_promotable_to_next_rung(
value,
_get_competing_values(trials, value, rung_key),
self._reduction_factor,
study.direction,
):
return True
rung += 1
def _estimate_min_resource(trials: List["optuna_core.trial.FrozenTrial"]) -> Optional[int]:
n_steps = [
t.last_step for t in trials if t.state == TrialState.COMPLETE and t.last_step is not None
]
if not n_steps:
return None
# Get the maximum number of steps and divide it by 100.
last_step = max(n_steps)
return max(last_step // 100, 1)
def _get_current_rung(trial: "optuna_core.trial.FrozenTrial") -> int:
# The following loop takes `O(log step)` iterations.
rung = 0
while _completed_rung_key(rung) in trial.system_attrs:
rung += 1
return rung
def _completed_rung_key(rung: int) -> str:
return "completed_rung_{}".format(rung)
def _get_competing_values(
trials: List["optuna_core.trial.FrozenTrial"], value: float, rung_key: str
) -> List[float]:
competing_values = [t.system_attrs[rung_key] for t in trials if rung_key in t.system_attrs]
competing_values.append(value)
return competing_values
def _is_trial_promotable_to_next_rung(
value: float,
competing_values: List[float],
reduction_factor: int,
study_direction: "optuna_core.study.StudyDirection",
) -> bool:
promotable_idx = (len(competing_values) // reduction_factor) - 1
if promotable_idx == -1:
# Optuna does not support suspending or resuming ongoing trials. Therefore, for the first
        # `eta - 1` trials, this implementation instead promotes the trial only if its value
        # is the best one among the competing values (the largest when maximizing, the
        # smallest when minimizing).
promotable_idx = 0
competing_values.sort()
if study_direction == optuna_core.study.StudyDirection.MAXIMIZE:
return value >= competing_values[-(promotable_idx + 1)]
return value <= competing_values[promotable_idx]
| 38.028112
| 100
| 0.626782
|
7c6d81544d5af45d2ff38fd37128a30200bb6c17
| 9,336
|
py
|
Python
|
ch18/genetic.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | 2
|
2018-04-08T15:05:42.000Z
|
2018-04-08T15:06:30.000Z
|
ch18/genetic.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | null | null | null |
ch18/genetic.py
|
skypather/GeneticAlgorithmsWithPython
|
d6d99f59e6e2b77380cadf0a1158d2442298fe89
|
[
"Apache-2.0"
] | 1
|
2019-10-09T01:28:38.000Z
|
2019-10-09T01:28:38.000Z
|
# File: genetic.py
# from chapter 18 of _Genetic Algorithms with Python_
#
# Author: Clinton Sheppard <fluentcoder@gmail.com>
# Copyright (c) 2016 Clinton Sheppard
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import random
import statistics
import sys
import time
from bisect import bisect_left
from enum import Enum
from enum import IntEnum
from math import exp
def _generate_parent(length, geneSet, get_fitness):
genes = []
while len(genes) < length:
sampleSize = min(length - len(genes), len(geneSet))
genes.extend(random.sample(geneSet, sampleSize))
fitness = get_fitness(genes)
return Chromosome(genes, fitness, Strategies.Create)
def _mutate(parent, geneSet, get_fitness):
childGenes = parent.Genes[:]
index = random.randrange(0, len(parent.Genes))
newGene, alternate = random.sample(geneSet, 2)
childGenes[index] = alternate if newGene == childGenes[index] else newGene
fitness = get_fitness(childGenes)
return Chromosome(childGenes, fitness, Strategies.Mutate)
def _mutate_custom(parent, custom_mutate, get_fitness):
childGenes = parent.Genes[:]
custom_mutate(childGenes)
fitness = get_fitness(childGenes)
return Chromosome(childGenes, fitness, Strategies.Mutate)
def _crossover(parentGenes, index, parents, get_fitness, crossover, mutate,
generate_parent):
donorIndex = random.randrange(0, len(parents))
if donorIndex == index:
donorIndex = (donorIndex + 1) % len(parents)
childGenes = crossover(parentGenes, parents[donorIndex].Genes)
if childGenes is None:
# parent and donor are indistinguishable
parents[donorIndex] = generate_parent()
return mutate(parents[index])
fitness = get_fitness(childGenes)
return Chromosome(childGenes, fitness, Strategies.Crossover)
def get_best(get_fitness, targetLen, optimalFitness, geneSet, display,
custom_mutate=None, custom_create=None, maxAge=None,
poolSize=1, crossover=None, maxSeconds=None):
if custom_mutate is None:
def fnMutate(parent):
return _mutate(parent, geneSet, get_fitness)
else:
def fnMutate(parent):
return _mutate_custom(parent, custom_mutate, get_fitness)
if custom_create is None:
def fnGenerateParent():
return _generate_parent(targetLen, geneSet, get_fitness)
else:
def fnGenerateParent():
genes = custom_create()
return Chromosome(genes, get_fitness(genes), Strategies.Create)
strategyLookup = {
Strategies.Create: lambda p, i, o: fnGenerateParent(),
Strategies.Mutate: lambda p, i, o: fnMutate(p),
Strategies.Crossover: lambda p, i, o:
_crossover(p.Genes, i, o, get_fitness, crossover, fnMutate,
fnGenerateParent)
}
usedStrategies = [strategyLookup[Strategies.Mutate]]
if crossover is not None:
usedStrategies.append(strategyLookup[Strategies.Crossover])
def fnNewChild(parent, index, parents):
return random.choice(usedStrategies)(parent, index, parents)
else:
def fnNewChild(parent, index, parents):
return fnMutate(parent)
for timedOut, improvement in _get_improvement(fnNewChild,
fnGenerateParent, maxAge,
poolSize, maxSeconds):
if timedOut:
return improvement
display(improvement)
f = strategyLookup[improvement.Strategy]
usedStrategies.append(f)
if not optimalFitness > improvement.Fitness:
return improvement
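# Illustrative usage sketch (added for clarity; not part of the book's source).
# A minimal "guess the target string" call, with hypothetical fnGetFitness and
# fnDisplay helpers:
#
#   geneSet = "abcdefghijklmnopqrstuvwxyz "
#   target = "hello world"
#   def fnGetFitness(genes):
#       return sum(1 for expected, actual in zip(target, genes)
#                  if expected == actual)
#   def fnDisplay(candidate):
#       print("{} {}".format("".join(candidate.Genes), candidate.Fitness))
#   best = get_best(fnGetFitness, len(target), len(target), geneSet, fnDisplay)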
def _get_improvement(new_child, generate_parent, maxAge, poolSize,
maxSeconds):
startTime = time.time()
bestParent = generate_parent()
yield maxSeconds is not None and time.time() - \
startTime > maxSeconds, bestParent
parents = [bestParent]
historicalFitnesses = [bestParent.Fitness]
for _ in range(poolSize - 1):
parent = generate_parent()
if maxSeconds is not None and time.time() - startTime > maxSeconds:
yield True, parent
if parent.Fitness > bestParent.Fitness:
yield False, parent
bestParent = parent
historicalFitnesses.append(parent.Fitness)
parents.append(parent)
lastParentIndex = poolSize - 1
pindex = 1
while True:
if maxSeconds is not None and time.time() - startTime > maxSeconds:
yield True, bestParent
pindex = pindex - 1 if pindex > 0 else lastParentIndex
parent = parents[pindex]
child = new_child(parent, pindex, parents)
if parent.Fitness > child.Fitness:
if maxAge is None:
continue
parent.Age += 1
if maxAge > parent.Age:
continue
index = bisect_left(historicalFitnesses, child.Fitness, 0,
len(historicalFitnesses))
difference = len(historicalFitnesses) - index
proportionSimilar = difference / len(historicalFitnesses)
if random.random() < exp(-proportionSimilar):
parents[pindex] = child
continue
bestParent.Age = 0
parents[pindex] = bestParent
continue
if not child.Fitness > parent.Fitness:
# same fitness
child.Age = parent.Age + 1
parents[pindex] = child
continue
child.Age = 0
parents[pindex] = child
if child.Fitness > bestParent.Fitness:
bestParent = child
yield False, bestParent
historicalFitnesses.append(bestParent.Fitness)
def hill_climbing(optimizationFunction, is_improvement, is_optimal,
get_next_feature_value, display, initialFeatureValue):
best = optimizationFunction(initialFeatureValue)
stdout = sys.stdout
sys.stdout = None
while not is_optimal(best):
featureValue = get_next_feature_value(best)
child = optimizationFunction(featureValue)
if is_improvement(best, child):
best = child
sys.stdout = stdout
display(best, featureValue)
sys.stdout = None
sys.stdout = stdout
return best
def tournament(generate_parent, crossover, compete, display, sort_key,
numParents=10, max_generations=100):
pool = [[generate_parent(), [0, 0, 0]] for _ in
range(1 + numParents * numParents)]
best, bestScore = pool[0]
def getSortKey(x):
return sort_key(x[0], x[1][CompetitionResult.Win],
x[1][CompetitionResult.Tie],
x[1][CompetitionResult.Loss])
generation = 0
while generation < max_generations:
generation += 1
for i in range(0, len(pool)):
for j in range(0, len(pool)):
if i == j:
continue
playera, scorea = pool[i]
playerb, scoreb = pool[j]
result = compete(playera, playerb)
scorea[result] += 1
scoreb[2 - result] += 1
pool.sort(key=getSortKey, reverse=True)
if getSortKey(pool[0]) > getSortKey([best, bestScore]):
best, bestScore = pool[0]
display(best, bestScore[CompetitionResult.Win],
bestScore[CompetitionResult.Tie],
bestScore[CompetitionResult.Loss], generation)
parents = [pool[i][0] for i in range(numParents)]
pool = [[crossover(parents[i], parents[j]), [0, 0, 0]]
for i in range(len(parents))
for j in range(len(parents))
if i != j]
pool.extend([parent, [0, 0, 0]] for parent in parents)
pool.append([generate_parent(), [0, 0, 0]])
return best
class CompetitionResult(IntEnum):
Loss = 0,
Tie = 1,
Win = 2,
class Chromosome:
def __init__(self, genes, fitness, strategy):
self.Genes = genes
self.Fitness = fitness
self.Strategy = strategy
self.Age = 0
class Strategies(Enum):
Create = 0,
Mutate = 1,
Crossover = 2
class Benchmark:
@staticmethod
def run(function):
timings = []
stdout = sys.stdout
for i in range(100):
sys.stdout = None
startTime = time.time()
function()
seconds = time.time() - startTime
sys.stdout = stdout
timings.append(seconds)
mean = statistics.mean(timings)
if i < 10 or i % 10 == 9:
print("{} {:3.2f} {:3.2f}".format(
1 + i, mean,
statistics.stdev(timings, mean) if i > 1 else 0))
| 35.363636
| 78
| 0.617395
|
f242445b4b38e37d7443b6e6ed987fb3f97cff46
| 294
|
py
|
Python
|
Flask-System/create.py
|
mkousathanas/myrepo
|
63868918a83e1d0fc4979860d1cb7d8d3f3d9725
|
[
"MIT"
] | null | null | null |
Flask-System/create.py
|
mkousathanas/myrepo
|
63868918a83e1d0fc4979860d1cb7d8d3f3d9725
|
[
"MIT"
] | null | null | null |
Flask-System/create.py
|
mkousathanas/myrepo
|
63868918a83e1d0fc4979860d1cb7d8d3f3d9725
|
[
"MIT"
] | null | null | null |
import sqlite3
"""Για την έναρξη της σύνδεσης με τη βάση δεδομένων"""
conn = sqlite3.connect('database.db')
print "Πετυχημένη σύνδεση με τη βάση δεδομένων";
conn.execute('CREATE TABLE users (name TEXT, addr TEXT, city TEXT, pin TEXT)')
print "Ο πίνακας δημιουργήθηκε με επιτυχία";
conn.close()
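# Illustrative follow-up sketch (not part of the original script): rows would be
# added to the new table with parameter binding, e.g.
#
#   conn = sqlite3.connect('database.db')
#   conn.execute('INSERT INTO users (name, addr, city, pin) VALUES (?, ?, ?, ?)',
#                ('Maria', 'Main St 1', 'Athens', '11111'))
#   conn.commit()
#   conn.close()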
| 36.75
| 78
| 0.758503
|
ff3d8650e1952d8dd8920497e0c16ef34a47af41
| 5,117
|
py
|
Python
|
tests/algorithms/test_distributions.py
|
festusdrakon/PyDP
|
ae02ef587b8716f038b2a3c0c454682f6cc772b2
|
[
"Apache-2.0"
] | null | null | null |
tests/algorithms/test_distributions.py
|
festusdrakon/PyDP
|
ae02ef587b8716f038b2a3c0c454682f6cc772b2
|
[
"Apache-2.0"
] | null | null | null |
tests/algorithms/test_distributions.py
|
festusdrakon/PyDP
|
ae02ef587b8716f038b2a3c0c454682f6cc772b2
|
[
"Apache-2.0"
] | 1
|
2020-05-29T20:46:25.000Z
|
2020-05-29T20:46:25.000Z
|
import pytest
from pydp.distributions import (
LaplaceDistribution,
GaussianDistribution,
# GeometricDistribution,
)
import pydp as dp
import math
from typing import List
from itertools import accumulate
k_num_samples = 10000000
k_num_geometric_samples = 1000000
k_gaussian_samples = 1000000
k_one_over_log2 = 1.44269504089
def skew(samples: List[float], mu: float, sigma: float):
"""Unfortunately this is implemented in third_party/differential-privacy/cc/algorithms/distributions_test.cc
and we don't want to pull the test files in. I'm assuming it'll be moved to
third_party/differential-privacy/cc/algorithms/util.h If they (upstream) move it we can use it.
Until then this should suffice. #FIXME: when possible we can fix this.
"""
skew = list(
accumulate(samples, lambda lhs, rhs: lhs + (rhs - mu) * (rhs - mu) * (rhs - mu))
)[-1]
return skew / (len(samples) * sigma * sigma * sigma)
def kurtosis(samples: List[float], mu: float, var: float):
"""Unfortunately this is implemented in third_party/differential-privacy/cc/algorithms/distributions_test.cc
and we don't want to pull the test files in. I'm assuming it'll be moved to
third_party/differential-privacy/cc/algorithms/util.h If they (upstream) move it we can use it.
Until then this should suffice. #FIXME: when possible we can fix this.
"""
kurt = list(
accumulate(samples, lambda lhs, rhs: lhs + ((rhs - mu) * (rhs - mu)) ** 2)
)[-1]
n = len(samples)
kurt = (n + 1) * kurt / (n * var * var)
kurt -= 3 * (n - 1)
kurt *= (n - 1) / (n - 2) / (n - 3)
return kurt
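# In formula form (added for clarity), the helpers above compute:
#   skew(x; mu, sigma)   = sum_i (x_i - mu)**3 / (n * sigma**3)
#   kurtosis(x; mu, var) = ((n - 1) / ((n - 2) * (n - 3))) *
#                          ((n + 1) * sum_i (x_i - mu)**4 / (n * var**2) - 3 * (n - 1))
# i.e. a sample skewness and a bias-adjusted sample excess kurtosis estimator.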
# From what I understand @openmined/dp-research are going to look at validating correctness
# Until then we can use this to assert on floating point numbers.
# FIXME: When possible we should add 'correctness' tests.
expect_near = lambda expected, actual, tol: (
expected + tol >= actual and expected - tol <= actual
)
class TestLaplaceDistribution:
def test_diversity_getter(self):
sensitivity, epsilon = 1.0, 22.0
dist = LaplaceDistribution(epsilon=epsilon, sensitivity=sensitivity)
assert dist.get_diversity() == sensitivity / epsilon
def test_check_statistics_for_geo_unit_values(self):
ld = LaplaceDistribution(epsilon=1.0, sensitivity=1.0)
samples = [ld.sample(scale=1.0) for _ in range(k_num_geometric_samples)]
mean = dp.util.mean(samples)
var = dp.util.variance(samples)
assert expect_near(0.0, mean, 0.01)
assert expect_near(2.0, var, 0.1)
assert expect_near(0.0, skew(samples, mean, math.sqrt(var)), 0.1)
assert expect_near(3.0, kurtosis(samples, mean, var), 0.1)
class TestGaussianDistribution:
def test_standard_deviation_getter(self):
stddev = k_one_over_log2
dist = GaussianDistribution(stddev)
assert dist.stddev == stddev
class TestLaplaceDistributionDatatypes:
def test_LaplaceDistributionTypes(self):
ld = LaplaceDistribution(epsilon=1.0, sensitivity=1.0)
assert isinstance(ld, LaplaceDistribution)
sud = ld.get_uniform_double()
assert isinstance(sud, float)
lds = ld.sample()
lds1 = ld.sample(4.0)
assert isinstance(lds, float)
assert isinstance(lds1, float)
ldg = ld.get_diversity()
assert isinstance(ldg, float)
# TODO
# lcdf = ld.cdf(2.0, 0.5)
# assert isinstance(lcdf, float)
class TestGaussianDistributionDataTypes:
def test_GaussianDistributionTypes(self):
gd = GaussianDistribution(3)
assert isinstance(gd, GaussianDistribution)
gds = gd.sample()
gds1 = gd.sample(1.0)
assert isinstance(gds, float)
assert isinstance(gds1, float)
gdstd = gd.stddev
assert isinstance(gdstd, float)
class TestGeometricDistribution:
@pytest.mark.skip(reason="This test should pass, see comments")
def test_ratios(self):
"""
This test fails. It's a replica of
https://github.com/google/differential-privacy/blob/9923ad4ee1b84a7002085e50345fcc05f2b21bcb/cc/algorithms/distributions_test.cc#L208 and should pass.
"""
from collections import Counter
p = 1e-2
dist = GeometricDistribution(lambda_=-1.0 * math.log(1 - p))
samples = [dist.sample() for _ in range(k_num_geometric_samples)]
counts = list(Counter([s for s in samples if s < 51]).values())
ratios = [c_i / c_j for c_i, c_j in zip(counts[:-1], counts[1:])]
assert expect_near(p, dp.util.mean(ratios), p / 1e-2)
# TODO: port the following tests
#
# TEST(LaplaceDistributionTest, CheckStatisticsForUnitValues)
# TEST(LaplaceDistributionTest, CheckStatisticsForSpecificDistribution)
# TEST(LaplaceDistributionTest, CheckStatisticsForSpecificScaledDistribution)
# TEST(GaussDistributionTest, CheckStatisticsForUnitValues)
# TEST(GaussDistributionTest, CheckStatisticsForSpecificDistribution)
# TEST(GaussDistributionTest, CheckStatisticsForSpecificScaledDistribution)
#
| 36.55
| 159
| 0.689662
|
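The skew and kurtosis helpers in the test file above reduce a sample to its standardized third and fourth central moments. A small standalone sketch of the same skewness formula (independent of PyDP; the sample_skewness name and the random data are illustrative) shows that a roughly symmetric sample scores near zero:

import math
import random

def sample_skewness(samples, mu, sigma):
    # Sum of cubed deviations normalized by n * sigma^3, as in the helper above.
    total = sum((x - mu) ** 3 for x in samples)
    return total / (len(samples) * sigma ** 3)

random.seed(0)
xs = [random.gauss(0.0, 1.0) for _ in range(100000)]
mu = sum(xs) / len(xs)
var = sum((x - mu) ** 2 for x in xs) / len(xs)
print(sample_skewness(xs, mu, math.sqrt(var)))  # close to 0 for a symmetric sample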
b6d7e191e9f64aa3710bfe5c849abd1ca7ae0333
| 53,310
|
py
|
Python
|
tests/gclient_smoketest.py
|
miaosf/depot_tools
|
cb329f4a09b779497ab56ea2fe47c4aff31c35b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gclient_smoketest.py
|
miaosf/depot_tools
|
cb329f4a09b779497ab56ea2fe47c4aff31c35b6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/gclient_smoketest.py
|
miaosf/depot_tools
|
cb329f4a09b779497ab56ea2fe47c4aff31c35b6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Smoke tests for gclient.py.
Shell out 'gclient' and run basic conformance tests.
This test assumes GClientSmokeBase.URL_BASE is valid.
"""
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
from testing_support.fake_repos import join, write
from testing_support.fake_repos import FakeReposTestBase, FakeRepoTransitive
import gclient_utils
import subprocess2
GCLIENT_PATH = os.path.join(ROOT_DIR, 'gclient')
COVERAGE = False
class GClientSmokeBase(FakeReposTestBase):
def setUp(self):
super(GClientSmokeBase, self).setUp()
# Make sure it doesn't try to auto update when testing!
self.env = os.environ.copy()
self.env['DEPOT_TOOLS_UPDATE'] = '0'
def gclient(self, cmd, cwd=None):
if not cwd:
cwd = self.root_dir
if COVERAGE:
# Don't use the wrapper script.
cmd_base = ['coverage', 'run', '-a', GCLIENT_PATH + '.py']
else:
cmd_base = [GCLIENT_PATH]
cmd = cmd_base + cmd
process = subprocess.Popen(cmd, cwd=cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=sys.platform.startswith('win'))
(stdout, stderr) = process.communicate()
logging.debug("XXX: %s\n%s\nXXX" % (' '.join(cmd), stdout))
logging.debug("YYY: %s\n%s\nYYY" % (' '.join(cmd), stderr))
# pylint: disable=E1103
return (stdout.replace('\r\n', '\n'), stderr.replace('\r\n', '\n'),
process.returncode)
def untangle(self, stdout):
tasks = {}
remaining = []
for line in stdout.splitlines(False):
m = re.match(r'^(\d)+>(.*)$', line)
if not m:
remaining.append(line)
else:
self.assertEquals([], remaining)
tasks.setdefault(int(m.group(1)), []).append(m.group(2))
out = []
for key in sorted(tasks.iterkeys()):
out.extend(tasks[key])
out.extend(remaining)
return '\n'.join(out)
def parseGclient(self, cmd, items, expected_stderr='', untangle=False):
"""Parse gclient's output to make it easier to test.
If untangle is True, tries to sort out the output from parallel checkout."""
(stdout, stderr, returncode) = self.gclient(cmd)
if untangle:
stdout = self.untangle(stdout)
self.checkString(expected_stderr, stderr)
self.assertEquals(0, returncode)
return self.checkBlock(stdout, items)
def splitBlock(self, stdout):
"""Split gclient's output into logical execution blocks.
___ running 'foo' at '/bar'
(...)
___ running 'baz' at '/bar'
(...)
will result in 2 items of len((...).splitlines()) each.
"""
results = []
for line in stdout.splitlines(False):
# Intentionally skips empty lines.
if not line:
continue
if line.startswith('__'):
match = re.match(r'^________ ([a-z]+) \'(.*)\' in \'(.*)\'$', line)
if not match:
match = re.match(r'^_____ (.*) is missing, synching instead$', line)
if match:
# Blah, it's when a dependency is deleted, we should probably not
# output this message.
results.append([line])
elif (
not re.match(
r'_____ [^ ]+ : Attempting rebase onto [0-9a-f]+...',
line) and
not re.match(r'_____ [^ ]+ at [^ ]+', line)):
# The two regexp above are a bit too broad, they are necessary only
# for git checkouts.
self.fail(line)
else:
results.append([[match.group(1), match.group(2), match.group(3)]])
else:
if not results:
# TODO(maruel): gclient's git stdout is inconsistent.
# This should fail the test instead!!
pass
else:
results[-1].append(line)
return results
def checkBlock(self, stdout, items):
results = self.splitBlock(stdout)
for i in xrange(min(len(results), len(items))):
if isinstance(items[i], (list, tuple)):
verb = items[i][0]
path = items[i][1]
else:
verb = items[i]
path = self.root_dir
self.checkString(results[i][0][0], verb, (i, results[i][0][0], verb))
if sys.platform == 'win32':
# Make path lower case since casing can change randomly.
self.checkString(
results[i][0][2].lower(),
path.lower(),
(i, results[i][0][2].lower(), path.lower()))
else:
self.checkString(results[i][0][2], path, (i, results[i][0][2], path))
self.assertEquals(len(results), len(items), (stdout, items, len(results)))
return results
@staticmethod
def svnBlockCleanup(out):
"""Work around svn status difference between svn 1.5 and svn 1.6
I don't know why but on Windows they are reversed. So sorts the items."""
for i in xrange(len(out)):
if len(out[i]) < 2:
continue
out[i] = [out[i][0]] + sorted([x[1:].strip() for x in out[i][1:]])
return out
class GClientSmoke(GClientSmokeBase):
"""Doesn't require either svnserve nor git-daemon."""
@property
def svn_base(self):
return 'svn://random.server/svn/'
@property
def git_base(self):
return 'git://random.server/git/'
def testHelp(self):
"""testHelp: make sure no new command was added."""
result = self.gclient(['help'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2100,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testUnknown(self):
result = self.gclient(['foo'])
# Roughly, not too short, not too long.
self.assertTrue(1000 < len(result[0]) and len(result[0]) < 2100,
'Too much written to stdout: %d bytes' % len(result[0]))
self.assertEquals(0, len(result[1]))
self.assertEquals(0, result[2])
def testNotConfigured(self):
res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
self.check(res, self.gclient(['cleanup']))
self.check(res, self.gclient(['diff']))
self.check(res, self.gclient(['pack']))
self.check(res, self.gclient(['revert']))
self.check(res, self.gclient(['revinfo']))
self.check(res, self.gclient(['runhooks']))
self.check(res, self.gclient(['status']))
self.check(res, self.gclient(['sync']))
self.check(res, self.gclient(['update']))
def testConfig(self):
p = join(self.root_dir, '.gclient')
def test(cmd, expected):
if os.path.exists(p):
os.remove(p)
results = self.gclient(cmd)
self.check(('', '', 0), results)
self.checkString(expected, open(p, 'rU').read())
test(['config', self.svn_base + 'trunk/src/'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%strunk/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n') % self.svn_base)
test(['config', self.git_base + 'repo_1', '--name', 'src'],
('solutions = [\n'
' { "name" : "src",\n'
' "url" : "%srepo_1",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n') % self.git_base)
test(['config', 'foo', 'faa'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "faa",\n'
' },\n'
']\n')
test(['config', 'foo', '--deps', 'blah'],
'solutions = [\n'
' { "name" : "foo",\n'
' "url" : "foo",\n'
' "deps_file" : "blah",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n')
test(['config', '--spec', '["blah blah"]'], '["blah blah"]')
os.remove(p)
results = self.gclient(['config', 'foo', 'faa', 'fuu'])
err = ('Usage: gclient.py config [options] [url] [safesync url]\n\n'
'gclient.py: error: Inconsistent arguments. Use either --spec or one'
' or 2 args\n')
self.check(('', err, 2), results)
self.assertFalse(os.path.exists(join(self.root_dir, '.gclient')))
def testSolutionNone(self):
results = self.gclient(['config', '--spec',
'solutions=[{"name": "./", "url": None}]'])
self.check(('', '', 0), results)
results = self.gclient(['sync'])
self.check(('', '', 0), results)
self.assertTree({})
results = self.gclient(['revinfo'])
self.check(('./: None\n', '', 0), results)
self.check(('', '', 0), self.gclient(['cleanup']))
self.check(('', '', 0), self.gclient(['diff']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['pack']))
self.check(('', '', 0), self.gclient(['revert']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['runhooks']))
self.assertTree({})
self.check(('', '', 0), self.gclient(['status']))
def testDifferentTopLevelDirectory(self):
# Check that even if the .gclient file does not mention the directory src
# itself, but it is included via dependencies, the .gclient file is used.
self.gclient(['config', self.svn_base + 'trunk/src.DEPS'])
deps = join(self.root_dir, 'src.DEPS')
os.mkdir(deps)
write(join(deps, 'DEPS'),
'deps = { "src": "%strunk/src" }' % (self.svn_base))
src = join(self.root_dir, 'src')
os.mkdir(src)
res = self.gclient(['status', '--jobs', '1'], src)
self.checkBlock(res[0], [('running', deps), ('running', src)])
class GClientSmokeSVN(GClientSmokeBase):
def setUp(self):
super(GClientSmokeSVN, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
def testSync(self):
# TODO(maruel): safesync.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '1'],
['running', 'running', 'running', 'running', 'deleting'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
results = self.gclient(
['sync', '--deps', 'mac', '-r', 'invalid@1', '--jobs', '1'])
self.checkBlock(results[0], [
'running', 'running',
# This is due to the way svn update is called for a single file when
# File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
self.checkString('Please fix your script, having invalid --revision flags '
'will soon considered an error.\n', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncNoSolutionName(self):
# When no solution name is provided, gclient uses the first solution listed.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.parseGclient(['sync', '--deps', 'mac', '-r', '1', '--jobs', '1'],
['running', 'running', 'running', 'running'])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Test unversioned checkout.
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
# Manually remove svn_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'svn_hooked1'))
# Test incremental versioned sync: sync backward.
self.parseGclient(
['sync', '--revision', 'src@1', '--deps', 'mac',
'--delete_unversioned_trees', '--jobs', '8'],
['running', 'running', 'running', 'running', 'deleting'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "custom_deps" : {\n'
# Remove 2 deps, change 1, add 1.
' "src/other": None,\n'
' "src/third_party/foo": \'%(base)s/third_party/prout\',\n'
' "src/file/other": None,\n'
' "new_deps": "/trunk/src/third_party",\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/prout@2', 'src/third_party/foo'),
('trunk/src/third_party@2', 'new_deps'))
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testSyncCustomDepsNoDeps(self):
if not self.enabled:
return
out = (
'solutions = [\n'
# This directory has no DEPS file.
' { "name" : "src/third_party",\n'
' "url" : "%(base)s/src/third_party",\n'
' "custom_deps" : {\n'
# Add 1.
' "src/other": \'/trunk/other\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
fileobj = open(os.path.join(self.root_dir, '.gclient'), 'w')
fileobj.write(out)
fileobj.close()
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'],
untangle=True)
tree = self.mangle_svn_tree(
('trunk/src/third_party@2', 'src/third_party'),
('trunk/other@2', 'src/other'))
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString(join('third_party', 'foo'), out[0][4])
self.checkString('hi', out[1][1])
self.assertEquals(5, len(out[0]))
self.assertEquals(2, len(out[1]))
# Revert implies --force implies running hooks without looking at pattern
# matching.
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
# src, src/other is missing, src/other, src/third_party/foo is missing,
# src/third_party/foo, 2 svn hooks, 3 related to File().
self.assertEquals(10, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other'))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/svn_hooked1'] = 'svn_hooked1'
tree['src/svn_hooked2'] = 'svn_hooked2'
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('file', out[0][1])
self.checkString('other', out[0][2])
self.checkString('svn_hooked1', out[0][3])
self.checkString('svn_hooked2', out[0][4])
self.checkString(join('third_party', 'foo'), out[0][5])
self.assertEquals(6, len(out[0]))
self.assertEquals(1, len(out))
def testRevertAndStatusDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
write(join(self.root_dir, 'src', 'other', 'hi'), 'Hey!')
# Without --verbose, gclient won't output the directories without
# modification.
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.checkString('hi', out[1][1])
self.assertEquals(4, len(out[0]))
self.assertEquals(2, len(out[1]))
# So verify it works with --verbose.
out = self.parseGclient(
['status', '--deps', 'mac', '--verbose', '--jobs', '1'],
[['running', join(self.root_dir, 'src')],
['running', join(self.root_dir, 'src', 'other')],
['running', join(self.root_dir, 'src', 'third_party', 'fpp')],
['running', join(self.root_dir, 'src', 'third_party', 'prout')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.checkString('hi', out[1][1])
self.assertEquals(4, len(out[0]))
self.assertEquals(2, len(out[1]))
self.assertEquals(1, len(out[2]))
self.assertEquals(1, len(out[3]))
self.assertEquals(4, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching.
# TODO(maruel): In general, gclient revert output is wrong. It should output
# the file list after some ___ running 'svn status'
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
self.assertEquals(7, len(out))
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/prout@2', 'src/third_party/prout'))
self.assertTree(tree)
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[['running', join(self.root_dir, 'src')]])
out = self.svnBlockCleanup(out)
self.checkString('other', out[0][1])
self.checkString(join('third_party', 'fpp'), out[0][2])
self.checkString(join('third_party', 'prout'), out[0][3])
self.assertEquals(4, len(out[0]))
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.checkString(1, len(out[0]))
self.checkString(1, len(out[1]))
def testRunHooksDepsOs(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac', '--revision', 'src@1'])
out = self.parseGclient(['runhooks', '--deps', 'mac'], [])
self.assertEquals([], out)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)s/src\n'
'src/file/other: File("%(base)s/other/DEPS")\n'
'src/other: %(base)s/other\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)s/src@2\n'
'src/file/other: %(base)s/other/DEPS@2\n'
'src/other: %(base)s/other@2\n'
'src/third_party/foo: %(base)s/third_party/foo@1\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--snapshot'])
out = ('# Snapshot generated with gclient revinfo --snapshot\n'
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "deps_file" : "DEPS",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' "foo/bar": None,\n'
' "invalid": None,\n'
' "src/file/other": \'%(base)s/other/DEPS@2\',\n'
' "src/other": \'%(base)s/other@2\',\n'
' "src/third_party/foo": '
'\'%(base)s/third_party/foo@1\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
def testRevInfoAltDeps(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/', '--deps-file',
'DEPS.alt'])
self.gclient(['sync'])
results = self.gclient(['revinfo', '--snapshot'])
out = ('# Snapshot generated with gclient revinfo --snapshot\n'
'solutions = [\n'
' { "name" : "src",\n'
' "url" : "%(base)s/src",\n'
' "deps_file" : "DEPS.alt",\n'
' "managed" : True,\n'
' "custom_deps" : {\n'
' "src/other2": \'%(base)s/other@2\',\n'
' },\n'
' "safesync_url": "",\n'
' },\n'
']\n\n' %
{ 'base': self.svn_base + 'trunk' })
self.check((out, '', 0), results)
def testWrongDirectory(self):
# Check that we're not using a .gclient configuration which only talks
# about a subdirectory src when we're in a different subdirectory src-other.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
other_src = join(self.root_dir, 'src-other')
os.mkdir(other_src)
res = ('', 'Error: client not configured; see \'gclient config\'\n', 1)
self.check(res, self.gclient(['status'], other_src))
def testCorrectDirectory(self):
# Check that when we're in the subdirectory src, the .gclient configuration
# is used.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
src = join(self.root_dir, 'src')
res = self.gclient(['status', '--jobs', '1'], src)
self.checkBlock(res[0], [('running', src)])
def testInitialCheckoutNotYetDone(self):
# Check that gclient can be executed when the initial checkout hasn't been
# done yet.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.parseGclient(
['sync', '--jobs', '1'],
['running', 'running',
# This is due to the way svn update is called for a
# single file when File() is used in a DEPS file.
('running', os.path.join(self.root_dir, 'src', 'file', 'other')),
'running', 'running', 'running', 'running'])
def testInitialCheckoutFailed(self):
# Check that gclient can be executed from an arbitrary sub directory if the
# initial checkout has failed.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
self.gclient(['sync'])
# Cripple the checkout.
os.remove(join(self.root_dir, '.gclient_entries'))
src = join(self.root_dir, 'src')
res = self.gclient(['sync', '--jobs', '1'], src)
self.checkBlock(res[0],
['running', 'running', 'running'])
def testUnversionedRepository(self):
# Check that gclient automatically deletes crippled SVN repositories.
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
cmd = ['sync', '--jobs', '1', '--delete_unversioned_trees', '--reset']
self.assertEquals(0, self.gclient(cmd)[-1])
third_party = join(self.root_dir, 'src', 'third_party')
subprocess2.check_call(['svn', 'propset', '-q', 'svn:ignore', 'foo', '.'],
cwd=third_party)
# Cripple src/third_party/foo and make sure gclient still succeeds.
gclient_utils.rmtree(join(third_party, 'foo', '.svn'))
self.assertEquals(0, self.gclient(cmd)[-1])
class GClientSmokeSVNTransitive(GClientSmokeBase):
FAKE_REPOS_CLASS = FakeRepoTransitive
def setUp(self):
super(GClientSmokeSVNTransitive, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
def testSyncTransitive(self):
if not self.enabled:
return
self.gclient(['config', self.svn_base + 'trunk/src/'])
def test_case(parent, timestamp, fixed, output):
# We check out revision 'parent' and expect the following:
# - src/ is checked out at r'parent'
# - src/same_repo is checked out at r'parent' (due to --transitive)
# - src/same_repo_fixed is checked out at r'fixed'
# - src/different_repo is checked out at r'timestamp'
# (due to --transitive)
# - src/different_repo_fixed is checked out at r'fixed'
revisions = self.FAKE_REPOS.svn_revs
self.parseGclient(
['sync', '--transitive', '--revision', 'src@%d' % parent,
'--jobs', '1'], output)
self.assertTree({
'src/origin': revisions[parent]['trunk/src/origin'],
'src/DEPS': revisions[parent]['trunk/src/DEPS'],
'src/same_repo/origin': revisions[parent]['trunk/third_party/origin'],
'src/same_repo_fixed/origin':
revisions[fixed]['trunk/third_party/origin'],
'src/different_repo/origin':
revisions[timestamp]['trunk/third_party/origin'],
'src/different_repo_fixed/origin':
revisions[fixed]['trunk/third_party/origin'],
})
# Here are the test cases for checking out 'trunk/src' at r1, r2 and r3
# r1: Everything is normal
test_case(parent=1, timestamp=1, fixed=1,
output=['running', 'running', 'running', 'running', 'running'])
# r2: Svn will scan from r1 upwards until it finds a revision matching the
# given timestamp or it takes the next smallest one (which is r2 in this
# case).
test_case(parent=2, timestamp=2, fixed=1,
output=['running', 'running', 'running'])
# r3: Svn will scan from r1 upwards until it finds a revision matching the
# given timestamp or it takes the next smallest one. Since
# timestamp(r3) < timestamp(r2) svn will checkout r1.
# This happens often on http://googlecode.com but is unexpected to happen
# with svnserve (unless you manually change 'svn:date')
test_case(parent=3, timestamp=1, fixed=1,
output=['running', 'running', 'running'])
class GClientSmokeGIT(GClientSmokeBase):
def setUp(self):
super(GClientSmokeGIT, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_git()
def testSync(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', 'running', 'running'])
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
diffdir = os.path.join(self.root_dir, 'src', 'repo2', 'repo_renamed')
self.parseGclient(['sync', '--jobs', '1', '--revision',
'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees'],
['running', 'running', ('running', diffdir), 'deleting'])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
expect3 = ('running', os.path.join(self.root_dir, 'src', 'repo2', 'repo3'))
expect4 = ('running', os.path.join(self.root_dir, 'src', 'repo4'))
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running', expect3, expect4])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncIgnoredSolutionName(self):
"""TODO(maruel): This will become an error soon."""
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1',
'--revision', 'invalid@' + self.githash('repo_1', 1)],
['running', 'running', 'running', 'running', 'running'],
'Please fix your script, having invalid --revision flags '
'will soon considered an error.\n')
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testSyncNoSolutionName(self):
if not self.enabled:
return
# When no solution name is provided, gclient uses the first solution listed.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1',
'--revision', self.githash('repo_1', 1)],
['running', 'running', 'running', 'running'])
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
self.assertTree(tree)
def testSyncJobs(self):
if not self.enabled:
return
# TODO(maruel): safesync.
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Test unversioned checkout.
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running', 'running', 'running', 'running'],
untangle=True)
# TODO(maruel): http://crosbug.com/3582 hooks run even if not matching, must
# add sync parsing to get the list of updated files.
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Manually remove git_hooked1 before synching to make sure it's not
# recreated.
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
# Test incremental versioned sync: sync backward.
expect3 = ('running',
os.path.join(self.root_dir, 'src', 'repo2', 'repo_renamed'))
self.parseGclient(
['sync', '--revision', 'src@' + self.githash('repo_1', 1),
'--deps', 'mac', '--delete_unversioned_trees', '--jobs', '8'],
['running', 'running', expect3, 'deleting'],
untangle=True)
tree = self.mangle_git_tree(('repo_1@1', 'src'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
# Test incremental sync: delete-unversioned_trees isn't there.
expect4 = os.path.join(self.root_dir, 'src', 'repo2', 'repo3')
expect5 = os.path.join(self.root_dir, 'src', 'repo4')
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '8'],
['running', 'running', 'running',
('running', expect4),
('running', expect5)],
untangle=True)
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_3@2', 'src/repo2/repo_renamed'),
('repo_4@2', 'src/repo4'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
# Tested in testSync.
self.gclient(['sync', '--deps', 'mac'])
write(join(self.root_dir, 'src', 'repo2', 'hi'), 'Hey!')
expected1 = ('running', os.path.join(self.root_dir, 'src'))
expected2 = ('running', os.path.join(expected1[1], 'repo2'))
expected3 = ('running', os.path.join(expected2[1], 'repo_renamed'))
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'],
[expected1, expected2, expected3])
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(3, len(out))
# Revert implies --force implies running hooks without looking at pattern
# matching. For each expected path, 'git reset' and 'git clean' are run, so
# there should be two results for each. The last two results should reflect
# writing git_hooked1 and git_hooked2.
expected4 = ('running', self.root_dir)
out = self.parseGclient(['revert', '--deps', 'mac', '--jobs', '1'],
[expected1, expected1,
expected2, expected2,
expected3, expected3,
expected4, expected4])
self.assertEquals(8, len(out))
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
results = self.gclient(['status', '--deps', 'mac', '--jobs', '1'])
out = results[0].splitlines(False)
# TODO(maruel): http://crosbug.com/3584 It should output the unversioned
# files.
self.assertEquals(6, len(out))
def testRunHooks(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
os.remove(join(self.root_dir, 'src', 'git_hooked1'))
os.remove(join(self.root_dir, 'src', 'git_hooked2'))
# runhooks runs all hooks even if not matching by design.
out = self.parseGclient(['runhooks', '--deps', 'mac'],
['running', 'running'])
self.assertEquals(1, len(out[0]))
self.assertEquals(1, len(out[1]))
tree = self.mangle_git_tree(('repo_1@2', 'src'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
self.assertTree(tree)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', self.git_base + 'repo_1', '--name', 'src'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(base)srepo_1\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3\n' %
{
'base': self.git_base,
'hash2': self.githash('repo_2', 1)[:7],
})
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(base)srepo_1@%(hash1)s\n'
'src/repo2: %(base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(base)srepo_3@%(hash3)s\n' %
{
'base': self.git_base,
'hash1': self.githash('repo_1', 2),
'hash2': self.githash('repo_2', 1),
'hash3': self.githash('repo_3', 2),
})
self.check((out, '', 0), results)
class GClientSmokeBoth(GClientSmokeBase):
def setUp(self):
super(GClientSmokeBoth, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn() and self.FAKE_REPOS.set_up_git()
def testMultiSolutions(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running', 'running',
# This is due to the way svn update is called for a single
# file when File() is used in a DEPS file.
('running', self.root_dir + '/src/file/other'),
'running', 'running', 'running', 'running', 'running', 'running',
'running', 'running'])
tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree.update(self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other')))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testMultiSolutionsJobs(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
# There is no guarantee that the ordering will be consistent.
(stdout, stderr, returncode) = self.gclient(
['sync', '--deps', 'mac', '--jobs', '8'])
stdout = self.untangle(stdout)
self.checkString('', stderr)
self.assertEquals(0, returncode)
results = self.splitBlock(stdout)
self.assertEquals(12, len(results))
tree = self.mangle_git_tree(('repo_1@2', 'src-git'),
('repo_2@1', 'src/repo2'),
('repo_3@2', 'src/repo2/repo_renamed'))
tree.update(self.mangle_svn_tree(
('trunk/src@2', 'src'),
('trunk/third_party/foo@1', 'src/third_party/foo'),
('trunk/other@2', 'src/other')))
tree['src/file/other/DEPS'] = (
self.FAKE_REPOS.svn_revs[2]['trunk/other/DEPS'])
tree['src/git_hooked1'] = 'git_hooked1'
tree['src/git_hooked2'] = 'git_hooked2'
tree['src/svn_hooked1'] = 'svn_hooked1'
self.assertTree(tree)
def testMultiSolutionsMultiRev(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.parseGclient(
['sync', '--deps', 'mac', '--jobs', '1', '--revision', '1',
'-r', 'src-git@' + self.githash('repo_1', 1)],
['running', 'running', 'running', 'running',
'running', 'running', 'running', 'running'],
expected_stderr=
'You must specify the full solution name like --revision src@1\n'
'when you have multiple solutions setup in your .gclient file.\n'
'Other solutions present are: src-git.\n')
tree = self.mangle_git_tree(('repo_1@1', 'src-git'),
('repo_2@2', 'src/repo2'),
('repo_3@1', 'src/repo2/repo3'),
('repo_4@2', 'src/repo4'))
tree.update(self.mangle_svn_tree(
('trunk/src@1', 'src'),
('trunk/third_party/foo@2', 'src/third_party/fpp'),
('trunk/other@1', 'src/other'),
('trunk/third_party/foo@2', 'src/third_party/prout')))
self.assertTree(tree)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
out = ('src: %(svn_base)s/src/\n'
'src-git: %(git_base)srepo_1\n'
'src/file/other: File("%(svn_base)s/other/DEPS")\n'
'src/other: %(svn_base)s/other\n'
'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(git_base)srepo_3\n'
'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
'svn_base': self.svn_base + 'trunk',
'git_base': self.git_base,
'hash2': self.githash('repo_2', 1)[:7],
}
self.check((out, '', 0), results)
results = self.gclient(['revinfo', '--deps', 'mac', '--actual'])
out = ('src: %(svn_base)s/src/@2\n'
'src-git: %(git_base)srepo_1@%(hash1)s\n'
'src/file/other: %(svn_base)s/other/DEPS@2\n'
'src/other: %(svn_base)s/other@2\n'
'src/repo2: %(git_base)srepo_2@%(hash2)s\n'
'src/repo2/repo_renamed: %(git_base)srepo_3@%(hash3)s\n'
'src/third_party/foo: %(svn_base)s/third_party/foo@1\n') % {
'svn_base': self.svn_base + 'trunk',
'git_base': self.git_base,
'hash1': self.githash('repo_1', 2),
'hash2': self.githash('repo_2', 1),
'hash3': self.githash('repo_3', 2),
}
self.check((out, '', 0), results)
def testRecurse(self):
if not self.enabled:
return
self.gclient(['config', '--spec',
'solutions=['
'{"name": "src",'
' "url": "' + self.svn_base + 'trunk/src/"},'
'{"name": "src-git",'
'"url": "' + self.git_base + 'repo_1"}]'])
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['recurse', '-j1', 'sh', '-c',
'echo $GCLIENT_SCM,$GCLIENT_URL,`pwd`'])
entries = [tuple(line.split(','))
for line in results[0].strip().split('\n')]
logging.debug(entries)
bases = {'svn': self.svn_base, 'git': self.git_base}
expected_source = [
('svn', 'trunk/src/', 'src'),
('git', 'repo_1', 'src-git'),
('svn', 'trunk/other', 'src/other'),
('git', 'repo_2@' + self.githash('repo_2', 1)[:7], 'src/repo2'),
('git', 'repo_3', 'src/repo2/repo_renamed'),
('svn', 'trunk/third_party/foo@1', 'src/third_party/foo'),
]
expected = [(scm, bases[scm] + url, os.path.join(self.root_dir, path))
for (scm, url, path) in expected_source]
self.assertEquals(sorted(entries), sorted(expected))
class GClientSmokeFromCheckout(GClientSmokeBase):
# WebKit abuses this. It has a .gclient and a DEPS from a checkout.
def setUp(self):
super(GClientSmokeFromCheckout, self).setUp()
self.enabled = self.FAKE_REPOS.set_up_svn()
os.rmdir(self.root_dir)
if self.enabled:
usr, pwd = self.FAKE_REPOS.USERS[0]
subprocess2.check_call(
['svn', 'checkout', self.svn_base + '/trunk/webkit',
self.root_dir, '-q',
'--non-interactive', '--no-auth-cache',
'--username', usr, '--password', pwd])
def testSync(self):
if not self.enabled:
return
self.parseGclient(['sync', '--deps', 'mac', '--jobs', '1'],
['running', 'running'])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
def testRevertAndStatus(self):
if not self.enabled:
return
self.gclient(['sync'])
# TODO(maruel): This is incorrect.
out = self.parseGclient(['status', '--deps', 'mac', '--jobs', '1'], [])
# Revert implies --force implies running hooks without looking at pattern
# matching.
results = self.gclient(['revert', '--deps', 'mac', '--jobs', '1'])
out = self.splitBlock(results[0])
self.assertEquals(2, len(out))
self.checkString(2, len(out[0]))
self.checkString(2, len(out[1]))
self.checkString('foo', out[1][1])
self.checkString('', results[1])
self.assertEquals(0, results[2])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
# TODO(maruel): This is incorrect.
out = self.parseGclient(['status', '--deps', 'mac'], [])
def testRunHooks(self):
if not self.enabled:
return
# Hooks aren't really tested for now since there is no hook defined.
self.gclient(['sync', '--deps', 'mac'])
out = self.parseGclient(['runhooks', '--deps', 'mac'], ['running'])
self.assertEquals(1, len(out))
self.assertEquals(2, len(out[0]))
self.assertEquals(3, len(out[0][0]))
self.checkString('foo', out[0][1])
tree = self.mangle_svn_tree(
('trunk/webkit@2', ''),
('trunk/third_party/foo@1', 'foo/bar'))
self.assertTree(tree)
def testRevInfo(self):
if not self.enabled:
return
self.gclient(['sync', '--deps', 'mac'])
results = self.gclient(['revinfo', '--deps', 'mac'])
expected = (
'./: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
'', 0)
self.check(expected, results)
# TODO(maruel): To be added after the refactor.
#results = self.gclient(['revinfo', '--snapshot'])
#expected = (
# './: None\nfoo/bar: %strunk/third_party/foo@1\n' % self.svn_base,
# '', 0)
#self.check(expected, results)
def testRest(self):
if not self.enabled:
return
self.gclient(['sync'])
# TODO(maruel): This is incorrect, it should run on ./ too.
self.parseGclient(
['cleanup', '--deps', 'mac', '--verbose', '--jobs', '1'],
[('running', join(self.root_dir, 'foo', 'bar'))])
self.parseGclient(
['diff', '--deps', 'mac', '--verbose', '--jobs', '1'],
[('running', join(self.root_dir, 'foo', 'bar'))])
if __name__ == '__main__':
if '-v' in sys.argv:
logging.basicConfig(level=logging.DEBUG)
if '-c' in sys.argv:
COVERAGE = True
sys.argv.remove('-c')
if os.path.exists('.coverage'):
os.remove('.coverage')
os.environ['COVERAGE_FILE'] = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'.coverage')
unittest.main()
| 40.052592
| 80
| 0.559595
|
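Much of the smoke test above is output parsing: untangle() regroups interleaved lines from parallel jobs by their leading "N>" prefix before splitBlock() carves the result into per-command blocks. Below is a simplified, standalone sketch of the regrouping step; it drops the assertion about prefix ordering and uses the more conventional (\d+) capture group, so it is an illustration of the idea rather than the exact gclient_smoketest code.

import re

def untangle(stdout):
    # Group "N>..." lines by task number N, keep unprefixed lines in original order.
    tasks, remaining = {}, []
    for line in stdout.splitlines():
        m = re.match(r'^(\d+)>(.*)$', line)
        if m:
            tasks.setdefault(int(m.group(1)), []).append(m.group(2))
        else:
            remaining.append(line)
    out = []
    for key in sorted(tasks):
        out.extend(tasks[key])
    out.extend(remaining)
    return '\n'.join(out)

print(untangle("1>clone repo_1\n2>clone repo_2\n1>checkout repo_1\n2>checkout repo_2"))
# clone repo_1
# checkout repo_1
# clone repo_2
# checkout repo_2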
a1fa3906866949782c0334a8bad6aea15b3b5685
| 1,841
|
py
|
Python
|
main.py
|
umchemurziev/avito
|
c6125693aa00c2d933b37e32c7d94b7bd12a8288
|
[
"MIT"
] | null | null | null |
main.py
|
umchemurziev/avito
|
c6125693aa00c2d933b37e32c7d94b7bd12a8288
|
[
"MIT"
] | null | null | null |
main.py
|
umchemurziev/avito
|
c6125693aa00c2d933b37e32c7d94b7bd12a8288
|
[
"MIT"
] | null | null | null |
import asyncio
async def get_matrix(url):
import requests
async def url_to_matrix(url):
loop = asyncio.get_event_loop()
res = await loop.run_in_executor(None, requests.get, url)
if res.status_code in range(400, 601):
print("Error: ", res.status_code)
return []
matrix = []
temp = []
n = (len(res.text.split("\n")) - 2) // 2
m = 0
for elem in res.text.split():
if elem.isdigit():
temp.append(int(elem))
m += 1
if m == n:
matrix.append(temp)
temp = []
m = 0
return matrix
async def read_matrix(matrix):
res = []
n = len(matrix)
m = 0
for v in range(n // 2):
            # Walk down the left column of the current layer
for i in range(n - m):
res.append(matrix[i + v][v])
            # Walk right along the bottom row
for i in range(v + 1, n - v):
res.append(matrix[-v - 1][i])
            # Walk up the right column
for i in range(v + 1, n - v):
res.append(matrix[-i - 1][-v - 1])
            # Walk left along the top row, stopping before the starting cell
for i in range(v + 1, n - (v + 1)):
res.append(matrix[v][-i - 1])
m += 2
return res
matrix = await url_to_matrix(url)
read = await read_matrix(matrix)
return read
# SOURCE_URL = 'https://raw.githubusercontent.com/avito-tech/python-trainee-assignment/main/matrix.txt'
# TRAVERSAL = [
# 10, 50, 90, 130,
# 140, 150, 160, 120,
# 80, 40, 30, 20,
# 60, 100, 110, 70,
# ]
# def test_get_matrix():
# assert asyncio.run(get_matrix(SOURCE_URL)) == TRAVERSAL
# test_get_matrix()
| 26.3
| 103
| 0.507876
|
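The commented-out test at the bottom of main.py documents the expected result: a counter-clockwise spiral read of the assignment's 4x4 matrix, starting at the top-left corner and going down first. The following is a self-contained sketch of the same traversal without the HTTP and asyncio plumbing (the spiral_ccw name is illustrative, not part of the original file):

def spiral_ccw(matrix):
    # Counter-clockwise spiral: down the left column, along the bottom,
    # up the right column, back along the top, then recurse inward.
    res = []
    n = len(matrix)
    for v in range(n // 2):
        res.extend(matrix[i][v] for i in range(v, n - v))                       # left column, top to bottom
        res.extend(matrix[n - v - 1][i] for i in range(v + 1, n - v))           # bottom row, left to right
        res.extend(matrix[n - i - 1][n - v - 1] for i in range(v + 1, n - v))   # right column, bottom to top
        res.extend(matrix[v][n - i - 1] for i in range(v + 1, n - v - 1))       # top row, right to left
    return res

m = [[10, 20, 30, 40],
     [50, 60, 70, 80],
     [90, 100, 110, 120],
     [130, 140, 150, 160]]
print(spiral_ccw(m))
# [10, 50, 90, 130, 140, 150, 160, 120, 80, 40, 30, 20, 60, 100, 110, 70]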
2d7357064dda4c2ce56b74614059d38806d37c1b
| 2,704
|
py
|
Python
|
src/ipyradiant/query/namespace_manager.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
src/ipyradiant/query/namespace_manager.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
src/ipyradiant/query/namespace_manager.py
|
lnijhawan/ipyradiant
|
d804e9031ef39c1ea75fedd52d110302c065ad84
|
[
"BSD-3-Clause"
] | null | null | null |
""" a namespace manager
"""
# Copyright (c) 2021 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
import re
import ipywidgets as W
import traitlets as T
from rdflib.namespace import RDF, RDFS, XSD
default_ns = {
"rdfs": RDFS,
"rdf": RDF,
"xsd": XSD,
}
def collapse_namespace(namespaces, cell):
"""TODO"""
uf_link = """<a href=\"{}" target=\"_blank\">{}</a>"""
or_statement = "|".join([uri for _, uri in namespaces])
pattern = f"({or_statement}).*"
quick_check = re.match(pattern, str(cell))
if quick_check:
for term, uri in namespaces:
if cell.startswith(uri):
return uf_link.format(cell, str(cell).replace(uri, term + ":"))
else:
return uf_link.format(cell, cell)
class NamespaceManager(W.VBox):
"""
TODO perform validation on the user_namespaces_value to ensure valid prefixes exist?
TODO better default namespaces? (maybe module import?)
"""
default_namespaces = T.Instance(W.HTML)
included_namespaces_value = T.Unicode()
user_namespaces = T.Instance(W.Textarea)
user_namespaces_value = T.Unicode()
namespaces = T.Unicode()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.children = tuple([self.default_namespaces, self.user_namespaces])
self.namespaces = str(self.included_namespaces_value)
@T.default("default_namespaces")
def make_default_namespaces(self):
default_namespaces = W.HTML(
"""
<p style="color:blue;font-size:12px;">PREFIX xsd:
<i><http://www.w3.org/2001/XMLSchema#></i></p>
<p style="color:blue;font-size:12px;">PREFIX rdfs:
<i><http://www.w3.org/2000/01/rdf-schema#></i></p>
<p style="color:blue;font-size:12px;">PREFIX rdf:
<i><http://www.w3.org/1999/02/22-rdf-syntax-ns#></i></p>
"""
)
return default_namespaces
@T.default("user_namespaces")
def make_default_user_namespaces(self):
user_namespaces = W.Textarea(
placeholder="PREFIX ex: <https://www.example.org/>",
layout=W.Layout(width="80%"),
)
T.link((user_namespaces, "value"), (self, "user_namespaces_value"))
return user_namespaces
@T.default("included_namespaces_value")
def make_included_namespaces_value(self):
return "\n".join([f"PREFIX {ns}: <{uri}>" for ns, uri in default_ns.items()])
@T.observe("user_namespaces_value")
def update_namespaces(self, changes):
self.namespaces = "\n".join([self.included_namespaces_value, changes.new])
| 33.382716
| 88
| 0.626849
|
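collapse_namespace() in the module above turns a full URI into an HTML link whose text uses the prefix:suffix form when the URI starts with one of the known namespaces. A hedged usage sketch, assuming the package imports as ipyradiant.query.namespace_manager (inferred from the src/ipyradiant/query/namespace_manager.py layout shown) and that rdflib is installed:

# Import path inferred from the repo layout above; adjust if the installed package differs.
from ipyradiant.query.namespace_manager import collapse_namespace, default_ns

namespaces = [(prefix, str(uri)) for prefix, uri in default_ns.items()]
cell = "http://www.w3.org/2000/01/rdf-schema#label"
print(collapse_namespace(namespaces, cell))
# Expected form: <a href="http://www.w3.org/2000/01/rdf-schema#label" target="_blank">rdfs:label</a>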
0f9eb7bdac3d9a52777dda6ba114a6c05c60f1f8
| 706
|
py
|
Python
|
src/core/path_settings.py
|
wu-clan/automated_api
|
6c41ddc3b900608403855afb9e740ce31e24186f
|
[
"MulanPSL-1.0"
] | null | null | null |
src/core/path_settings.py
|
wu-clan/automated_api
|
6c41ddc3b900608403855afb9e740ce31e24186f
|
[
"MulanPSL-1.0"
] | null | null | null |
src/core/path_settings.py
|
wu-clan/automated_api
|
6c41ddc3b900608403855afb9e740ce31e24186f
|
[
"MulanPSL-1.0"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
import os
from src.core.conf import settings
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Test case parameter YAML files
YAML_FILE = os.path.join(BASE_DIR, 'data')
# Test case parameter XLSX files
XLSX_FILE = os.path.join(BASE_DIR, 'data')
# Test case parameter template file
TEMPLATE_XLSX_FILE = os.path.join(BASE_DIR, 'data', 'DemoAPITestCase.xlsx')
# Test case directory
TEST_CASES = os.path.join(BASE_DIR, 'testcase', settings.PROJECT)
# Log directory
LOG_PATH = os.path.join(BASE_DIR, 'log')
# Excel test reports
EXCEL_REPORT = os.path.join(BASE_DIR, 'report', 'excel_report')
# HTML test reports
HTML_REPORT = os.path.join(BASE_DIR, 'report', 'html_report')
# YAML test reports
YAML_REPORT = os.path.join(BASE_DIR, 'report', 'yaml_report')
| 22.0625
| 75
| 0.730878
|
767b6b09189226c8156ad211d41e6290df060db1
| 2,063
|
py
|
Python
|
body/stretch_body/hello_utils.py
|
PickNikRobotics/stretch_body
|
6d8fd9cea3292c19c4c2256f9c6cceea56ff3e5f
|
[
"RSA-MD"
] | null | null | null |
body/stretch_body/hello_utils.py
|
PickNikRobotics/stretch_body
|
6d8fd9cea3292c19c4c2256f9c6cceea56ff3e5f
|
[
"RSA-MD"
] | null | null | null |
body/stretch_body/hello_utils.py
|
PickNikRobotics/stretch_body
|
6d8fd9cea3292c19c4c2256f9c6cceea56ff3e5f
|
[
"RSA-MD"
] | 1
|
2021-05-04T16:29:26.000Z
|
2021-05-04T16:29:26.000Z
|
import yaml
import math
import os
import time
def print_stretch_re_use():
print("For use with S T R E T C H (TM) RESEARCH EDITION from Hello Robot Inc.\n")
def create_time_string():
t = time.localtime()
time_string = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + str(t.tm_sec).zfill(2)
return time_string
def deg_to_rad(x):
return math.pi*x/180.0
def rad_to_deg(x):
return 180.0*x/math.pi
def get_fleet_id():
return os.environ['HELLO_FLEET_ID']
def set_fleet_id(id):
os.environ['HELLO_FLEET_ID']=id
def get_fleet_directory():
return os.environ['HELLO_FLEET_PATH']+'/'+get_fleet_id()+'/'
def read_fleet_yaml(fn):
    s = open(get_fleet_directory() + fn, 'r')
p = yaml.load(s,Loader=yaml.FullLoader)
if p is None:
return {}
else:
return p
def write_fleet_yaml(fn,rp):
with open(get_fleet_directory()+fn, 'w') as yaml_file:
yaml.dump(rp, yaml_file, default_flow_style=False)
class TimerStats():
def __init__(self):
self.av = None
self.mx = None
self.count = 0
def update(self, duration):
if self.av is None:
self.av = duration
else:
self.av = ((self.count * self.av) + duration) / (self.count + 1)
if self.mx is None:
self.mx = duration
elif self.mx < duration:
self.mx = duration
self.count = self.count + 1
def output_string(self):
out = 'timer: av = ' + str(self.av) + ' , max = ' + str(self.mx)
return out
def pretty_print(self):
        print('Timer Stat -- Avg: ' + str(self.av) + ' Max: ' + str(self.mx))
class ThreadServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
#Signal handler, must be set from main thread
def thread_service_shutdown(signum, frame):
print('Caught signal %d' % signum)
raise ThreadServiceExit
| 24.270588
| 159
| 0.62094
|
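TimerStats above keeps a running count, average, and maximum of measured durations via an incremental mean update. A minimal usage sketch; the import path assumes the body/stretch_body/hello_utils.py layout shown, and the timed work is a stand-in:

import time

# Import path follows the repo layout above; adjust if the package name differs.
from stretch_body.hello_utils import TimerStats

stats = TimerStats()
for _ in range(5):
    t0 = time.time()
    time.sleep(0.01)                  # stand-in for the work being timed
    stats.update(time.time() - t0)
print(stats.output_string())          # e.g. "timer: av = 0.010... , max = 0.011..."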
ba1ba48a5157edc7680048ddccb7d0756cb4d0ad
| 2,100
|
py
|
Python
|
lib/galaxy/workflow/steps.py
|
ramezrawas/galaxy-1
|
c03748dd49c060a68d07bce56eae33e0ba154414
|
[
"CC-BY-3.0"
] | 6
|
2018-11-03T22:43:35.000Z
|
2022-02-15T17:51:33.000Z
|
lib/galaxy/workflow/steps.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 7
|
2016-12-07T22:19:37.000Z
|
2019-01-30T15:04:26.000Z
|
lib/galaxy/workflow/steps.py
|
igorhollaender/OBSOLETE_sirv_dashboard
|
85aec60b80ef6f561d89398e3da5963d3d0f2aa4
|
[
"CC-BY-3.0"
] | 10
|
2017-04-10T21:40:22.000Z
|
2022-02-21T16:50:10.000Z
|
""" This module contains utility methods for reasoning about and ordering
workflow steps.
"""
import math
from galaxy.util.topsort import (
CycleError,
topsort,
topsort_levels
)
def attach_ordered_steps( workflow, steps ):
""" Attempt to topologically order steps and attach to workflow. If this
fails - the workflow contains cycles so it mark it as such.
"""
ordered_steps = order_workflow_steps( steps )
if ordered_steps:
workflow.has_cycles = False
for i, step in enumerate( ordered_steps ):
step.order_index = i
workflow.steps.append( step )
else:
workflow.has_cycles = True
workflow.steps = steps
def order_workflow_steps( steps ):
"""
Perform topological sort of the steps, return ordered or None
"""
position_data_available = True
for step in steps:
if not step.position or 'left' not in step.position or 'top' not in step.position:
position_data_available = False
if position_data_available:
        steps.sort(key=lambda s: math.sqrt(s.position['left'] ** 2 + s.position['top'] ** 2))
try:
edges = edgelist_for_workflow_steps( steps )
node_order = topsort( edges )
return [ steps[i] for i in node_order ]
except CycleError:
return None
def edgelist_for_workflow_steps( steps ):
"""
Create a list of tuples representing edges between ``WorkflowSteps`` based
on associated ``WorkflowStepConnection``s
"""
edges = []
steps_to_index = dict( ( step, i ) for i, step in enumerate( steps ) )
for step in steps:
edges.append( ( steps_to_index[step], steps_to_index[step] ) )
for conn in step.input_connections:
edges.append( ( steps_to_index[conn.output_step], steps_to_index[conn.input_step] ) )
return edges
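# Illustrative note (not part of the original module): for a two-step workflow
# where step B has one input connection fed by step A, and A, B are at indices
# 0 and 1, the edge list is [(0, 0), (1, 1), (0, 1)]. The self-edges ensure
# that steps without any connections still appear in the topological sort.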
def order_workflow_steps_with_levels( steps ):
try:
return topsort_levels( edgelist_for_workflow_steps( steps ) )
except CycleError:
return None
| 32.8125
| 169
| 0.664286
|
da1cad60f5ff393c8474cc1715db3e73e5351b04
| 37,365
|
py
|
Python
|
vistrails/packages/matplotlib/parse.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | 1
|
2020-03-13T20:56:21.000Z
|
2020-03-13T20:56:21.000Z
|
vistrails/packages/matplotlib/parse.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
vistrails/packages/matplotlib/parse.py
|
celiafish/VisTrails
|
d8cb575b8b121941de190fe608003ad1427ef9f6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import ast
import re
import sys
from xml.etree import ElementTree as ET
import docutils.core
import docutils.nodes
from itertools import izip
import inspect
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.docstring
def new_call(self, func):
return func
matplotlib.docstring.Substitution.__call__ = new_call
import matplotlib.pyplot
from matplotlib.artist import Artist, ArtistInspector
import matplotlib.cbook
# want to get lowercase accepts too
ArtistInspector._get_valid_values_regex = re.compile(
r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))", re.IGNORECASE)
from specs import SpecList, ModuleSpec, InputPortSpec, OutputPortSpec, \
AlternatePortSpec
# sys.path.append('/vistrails/src/git')
from vistrails.core.modules.utils import expand_port_spec_string
##############################################################################
# docutils parsing code
##############################################################################
def parse_docutils_thead(elt):
header = []
for child in elt.children:
if child.__class__ == docutils.nodes.row:
assert len(header) == 0, "More than one row in header"
for subchild in child.children:
if subchild.__class__ == docutils.nodes.entry:
header.append(parse_docutils_elt(subchild)[0].strip())
return header
def parse_docutils_tbody(elt):
rows = []
for child in elt.children:
if child.__class__ == docutils.nodes.row:
row = []
for subchild in child.children:
if subchild.__class__ == docutils.nodes.entry:
row.append(parse_docutils_elt(subchild)[0].strip())
rows.append(row)
return rows
def parse_docutils_table(elt):
header = []
rows = []
for child in elt.children:
if child.__class__ == docutils.nodes.tgroup:
for subchild in child.children:
if subchild.__class__ == docutils.nodes.thead:
header = parse_docutils_thead(subchild)
elif subchild.__class__ == docutils.nodes.tbody:
rows = parse_docutils_tbody(subchild)
print "== TABLE =="
print "HEADER:", header
print "ROWS:", '\n'.join(str(r) for r in rows)
return (header, rows)
def parse_docutils_term(elt):
terms = []
accepts = ""
for child in elt.children:
if child.__class__ == docutils.nodes.emphasis:
term = parse_docutils_elt(child)[0].strip()
if term in ('True', 'False') or accepts != "":
accepts += term
elif term != "None":
terms.append(term)
elif child.__class__ == docutils.nodes.Text:
if str(child).strip() not in [',', '/']:
accepts += str(child)
else:
accepts += parse_docutils_elt(child)[0]
accepts = accepts.strip()
if accepts.startswith(':'):
accepts = accepts[1:].strip()
return terms, accepts
def parse_docutils_deflist(elt):
print "GOT DEFLIST!"
args = []
term = None
definition = None
for child in elt.children:
assert child.__class__ == docutils.nodes.definition_list_item, "NO DEF LIST ITEM!"
for subchild in child.children:
if subchild.__class__ == docutils.nodes.term:
terms, accepts = parse_docutils_term(subchild)
print "TERMS:", terms
if accepts:
print "ACCEPTS:", accepts
elif subchild.__class__ == docutils.nodes.definition:
definition = parse_docutils_elt(subchild)[0].rstrip()
print "DEFINITION:", definition
for term in terms:
args.append((term, accepts, definition))
return args
def parse_docutils_elt(elt, last_text=""):
def get_last_block(cur_text):
num_newlines = 1
end_idx = len(cur_text)
while cur_text.endswith("\n\n" * num_newlines):
num_newlines += 1
end_idx -= 2
idx = cur_text.rfind("\n\n",0,end_idx)
if idx < 0:
idx = 0
else:
idx += 2
return cur_text[idx:].strip()
text = ""
args = []
tables = []
call_signatures = []
for child in elt.children:
if child.__class__ == docutils.nodes.Text:
ntext = ' '.join(s for s in str(child).split('\n'))
text += ntext
elif child.__class__ == docutils.nodes.system_message:
pass
elif child.__class__ == docutils.nodes.definition_list:
args.append((get_last_block(last_text + text),
parse_docutils_deflist(child)))
elif child.__class__ == docutils.nodes.table:
tables.append((get_last_block(last_text + text),) + \
parse_docutils_table(child))
elif isinstance(child, docutils.nodes.Inline):
(ntext, nargs, ntables, ncall_sigs) = \
parse_docutils_elt(child, last_text + text)
text += ntext
args += nargs
tables += ntables
call_signatures += ncall_sigs
else:
(ntext, nargs, ntables, ncall_sigs) = \
parse_docutils_elt(child, last_text + text)
if child.__class__ == docutils.nodes.literal_block:
check_str = (last_text + text).lower().strip()
if check_str.endswith("\ncall signature:") or \
check_str.endswith("\ncall signatures:"):
call_signatures.append(ntext)
text += ntext.strip() + "\n\n"
args += nargs
tables += ntables
call_signatures += ncall_sigs
return (text.rstrip(), args, tables, call_signatures)
def parse_docutils_str(docstring, should_print=False):
root = docutils.core.publish_doctree(docstring)
if should_print:
print root
return parse_docutils_elt(root)
##############################################################################
# util methods
##############################################################################
def capfirst(s):
return s[0].upper() + s[1:]
def pretty_name(s):
cap = True
new_s = ""
for i, c in enumerate(s):
if cap:
c = c.upper()
cap = False
if c != '_' or i == 0:
new_s += c
else:
cap = True
return new_s
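# Illustrative examples (not part of the original script):
#   capfirst('scatter')      -> 'Scatter'
#   pretty_name('plot_date') -> 'PlotDate'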
def get_value_and_type(s):
try:
val = eval(s)
if isinstance(val, type):
return (None, None)
except Exception:
val = s
port_type = get_type_from_val(val)
return (val, port_type)
def get_type_from_val(val):
if isinstance(val, float):
return "basic:Float"
elif isinstance(val, bool):
return "basic:Boolean"
elif isinstance(val, (int, long)):
return "basic:Integer"
elif isinstance(val, basestring):
return "basic:String"
elif isinstance(val, list):
return "basic:List"
return None
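# Illustrative behaviour of the two helpers above (not part of the original
# script):
#   get_type_from_val(0.5)     -> 'basic:Float'
#   get_type_from_val(True)    -> 'basic:Boolean'
#   get_type_from_val(3)       -> 'basic:Integer'
#   get_type_from_val('solid') -> 'basic:String'
#   get_value_and_type('1.5')  -> (1.5, 'basic:Float')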
def resolve_port_type(port_types, port_spec):
port_types_set = set(p for p in port_types if p is not None)
was_set = False
if port_spec.port_type is not None:
port_types_set.add(port_spec.port_type)
if len(port_types_set) == 1:
port_spec.port_type = next(iter(port_types_set))
was_set = True
elif len(port_types_set) == 2:
if 'basic:Float' in port_types_set and \
'basic:Integer' in port_types_set:
port_spec.port_type = 'basic:Float'
was_set = True
elif 'basic:List' in port_types_set:
port_spec.port_type = 'basic:List'
base_name = port_spec.name
port_spec.name = base_name + "Sequence"
port_types_set.discard('basic:List')
alternate_spec = \
AlternatePortSpec(name=base_name + "Scalar",
port_type=next(iter(port_types_set)))
port_spec.alternate_specs.append(alternate_spec)
was_set = True
if not was_set:
if "color" in port_spec.name:
port_spec.port_type = "basic:Color"
port_spec.translations = "translate_color"
elif port_spec.name == "x":
port_spec.port_type = "basic:List"
elif port_spec.name == "y":
port_spec.port_type = "basic:List"
else:
port_spec.port_type = None
# # FIXME
# # what to do with scalar/sequence-type args
# elif len(port_types_set) == 2 and 'basic:List' in port_types_set:
# port_type = 'basic:List'
# else:
# port_type = None
# return port_type
def assign_port_values(port_spec, values, default_val):
assign_port_spec = None
if port_spec.defaults is not None and len(port_spec.defaults) > 0:
current_default = port_spec.defaults
port_spec.defaults = None
else:
current_default = []
if len(port_spec.alternate_specs) == 0:
assign_port_spec = port_spec
else:
port_types = set()
for value in values + current_default + \
([default_val] if default_val is not None else []):
port_type = get_type_from_val(value)
if port_type is not None:
port_types.add(port_type)
if len(port_types) == 1:
for ps in [port_spec] + port_spec.alternate_specs:
if ps.port_type == next(iter(port_types)):
assign_port_spec = ps
elif len(port_types) > 1:
raise Exception("Multiple value types found!")
if assign_port_spec is not None:
if len(values) > 0:
assign_port_spec.entry_types = ['enum']
assign_port_spec.values = [values]
if len(current_default) > 0:
assign_port_spec.defaults = current_default
elif default_val is not None:
assign_port_spec.defaults = [default_val]
def parse_description(desc):
key_to_type = {'string': 'basic:String',
'integer': 'basic:Integer',
'sequence': 'basic:List',
'float': 'basic:Float',
'boolean': 'basic:Boolean',
'scalar': 'basic:Float',
'vector': 'basic:List',
'list': 'basic:List'}
port_types = []
option_strs = []
default_val = None
allows_none = False
default_paren_re = re.compile(r"((\S*)\s+)?\(default:?(\s+(\S*))?\)",
re.IGNORECASE)
default_is_re = re.compile(r"default\s+is\s+(\S*)", re.IGNORECASE)
if '|' in desc:
m = re.search("\[([\s\S]*?)\]", desc)
if m:
opt_str = m.group(1)
else:
opt_str = desc
opts = opt_str.split('|')
for opt in opts:
opt = opt.strip()
m = default_paren_re.search(opt)
if m:
(_, before_res, _, after_res) = m.groups()
if after_res:
assert default_val is None, ('Multiple defaults: '
'"%s" "%s"' % (default_val, after_res))
default_val = after_res
opt = after_res
elif before_res:
assert default_val is None, ('Multiple defaults: '
'"%s" "%s"' % (default_val, after_res))
default_val = before_res
opt = before_res
found_type = False
opt_lower = opt.lower()
if opt_lower == "none":
found_type = True
allows_none = True
elif opt_lower == "true" or opt_lower == "false":
found_type = True
port_types.append("basic:Boolean")
else:
for key in key_to_type:
if key in opt_lower:
found_type = True
port_types.append(key_to_type[key])
if not found_type:
(val, port_type) = get_value_and_type(opt)
option_strs.append(val)
if port_type is not None:
port_types.append(port_type)
found_type = True
if default_val is None:
m = default_paren_re.search(desc)
if m:
(_, before_res, _, after_res) = m.groups()
if after_res:
default_val = after_res
elif before_res:
default_val = before_res
else:
m = default_is_re.search(desc)
if m:
(default_val,) = m.groups()
if default_val.endswith('.') or default_val.endswith(','):
default_val = default_val[:-1]
if default_val:
(default_val, port_type) = get_value_and_type(default_val)
if port_type is not None:
port_types.append(port_type)
should_print = False
if len(port_types) == 0:
for key, port_type in key_to_type.iteritems():
if key in desc:
port_types.append(port_type)
return (port_types, option_strs, default_val, allows_none)
def parse_translation(rows, should_reverse=True):
t = {}
port_types = []
values = []
for row in rows:
(val1, port_type1) = get_value_and_type(row[0])
(val2, port_type2) = get_value_and_type(row[1])
if should_reverse:
if val2 != None:
port_types.append(port_type2)
values.append(val2)
t[val2] = val1
else:
if val1 != None:
port_types.append(port_type1)
values.append(val1)
t[val1] = val2
return (t, port_types, values)
def do_translation_override(port_specs, names, rows, opts):
if 'name' in opts:
names = opts['name']
if names is None:
raise ValueError("Must specify name of port to use translation for")
if isinstance(names, basestring) or not matplotlib.cbook.iterable(names):
names = [names]
should_reverse = opts.get('reverse', True)
values_only = opts.get('values_only', False)
(t, port_type, values) = \
parse_translation(rows, should_reverse)
for name in names:
print "TRANSLATING", name
if name not in port_specs:
port_specs[name] = InputPortSpec(name)
port_specs[name].entry_types = ['enum']
port_specs[name].values = [values]
if not values_only:
port_specs[name].translations = t
def get_names(obj, default_module_base, default_super_base,
prefix="Mpl", suffix=""):
module_name = None
super_name = None
if isinstance(obj, tuple):
if len(obj) > 2:
super_name = obj[2]
if len(obj) < 2:
raise ValueError("Need to specify 2- or 3-tuple")
(obj, module_name) = obj[:2]
if module_name is None:
module_name = "%s%s%s" % (prefix,
pretty_name(default_module_base(obj)),
suffix)
if super_name is None:
super_name = "%s%s%s" % (prefix,
pretty_name(default_super_base(obj)),
suffix)
return (obj, module_name, super_name)
##############################################################################
# main methods
##############################################################################
def parse_argspec(obj_or_str):
if isinstance(obj_or_str, basestring):
obj_or_str = obj_or_str.strip()
if not obj_or_str.endswith(":"):
obj_or_str += ":"
if not obj_or_str.startswith("def "):
obj_or_str = "def " + obj_or_str
try:
tree = ast.parse(obj_or_str + "\n pass")
except SyntaxError:
# cannot parse the argspec
print "*** CANNOT PARSE", obj_or_str
return []
argspec_name = tree.body[0].name
argspec_args = [a.id for a in tree.body[0].args.args]
print tree.body[0].args.defaults
argspec_defaults = []
for i, d in enumerate(tree.body[0].args.defaults):
try:
d_val = ast.literal_eval(d)
except ValueError:
d_val = None
argspec_defaults.append(d_val)
else:
argspec = inspect.getargspec(obj_or_str)
argspec_args = argspec.args
argspec_defaults = argspec.defaults
if not argspec_defaults:
start_defaults = len(argspec_args) + 1
else:
start_defaults = len(argspec_args) - len(argspec_defaults)
port_specs_list = []
has_self = False
for i, arg in enumerate(argspec_args):
if i == 0 and arg == "self":
has_self = True
continue
port_spec = InputPortSpec(arg)
port_spec.arg_pos = (i-1) if has_self else i
if i >= start_defaults:
port_spec.required = False
default_val = argspec_defaults[i-start_defaults]
if default_val is not None:
port_spec.defaults = [default_val]
port_type = get_type_from_val(default_val)
if port_type is not None:
port_spec.port_type = port_type
else:
port_spec.required = True
port_specs_list.append(port_spec)
return port_specs_list
def process_docstring(docstring, port_specs, parent, table_overrides):
(cleaned_docstring, args, tables, call_sigs) = \
parse_docutils_str(docstring)
if len(call_sigs) > 0:
for call_sig in call_sigs:
port_specs_list = parse_argspec(call_sig)
for port_spec in port_specs_list:
if port_spec.arg in port_specs:
# have to reconcile the two
old_port_spec = port_specs[port_spec.arg]
resolve_port_type([port_spec.port_type], old_port_spec)
if old_port_spec.defaults is None:
if port_spec.defaults is not None:
assign_port_values(old_port_spec, [],
port_spec.defaults[0])
# old_port_spec.defaults = port_spec.defaults
elif old_port_spec.defaults != port_spec.defaults:
# keep it as the old spec is
print "*** Different defaults!" + \
str(old_port_spec.defaults) + \
" : " + str(port_spec.defaults)
assign_port_values(old_port_spec, [],
old_port_spec.defaults[0])
else:
port_specs[port_spec.arg] = port_spec
output_port_specs = []
for (deflist_intro, deflist) in args:
print "PROCESSING DEFLIST", deflist_intro
if re.search("return value", deflist_intro, re.IGNORECASE):
print " -> RETURN VALUE"
for (name, accepts, port_doc) in deflist:
(port_types, option_strs, default_val, allows_none) = \
parse_description(accepts)
(pt2, _, dv2, _) = parse_description(port_doc)
port_types.extend(pt2)
if default_val is None:
default_val = dv2
oport = OutputPortSpec(name, docstring=port_doc)
resolve_port_type(port_types, oport)
output_port_specs.append(oport)
elif (re.search("argument", deflist_intro, re.IGNORECASE) or
re.search("kwarg", deflist_intro, re.IGNORECASE)):
print " -> ARGUMENTS"
for (name, accepts, port_doc) in deflist:
if name not in port_specs:
port_specs[name] = InputPortSpec(name, docstring=port_doc)
else:
port_specs[name].docstring = port_doc
(port_types, option_strs, default_val, allows_none) = \
parse_description(accepts)
(pt2, _, dv2, _) = parse_description(port_doc)
port_types.extend(pt2)
if default_val is None:
default_val = dv2
resolve_port_type(port_types, port_specs[name])
assign_port_values(port_specs[name], option_strs, default_val)
for (table_intro, header, rows) in tables:
print "GOT TABLE", table_intro, rows[0]
table_key = parent + (table_intro,)
if table_key in table_overrides:
(override_type, opts) = table_overrides[table_key]
if override_type == "translation":
do_translation_override(port_specs, None, rows, opts)
continue
elif override_type == "ports":
table_intro = "kwarg"
elif override_type == "skip":
continue
if re.search("return value", table_intro, re.IGNORECASE):
print " -> RETURN"
if len(rows[0]) != 2:
raise ValueError("row that has more/less than 2 columns!")
for (name, port_doc) in rows:
(port_types, option_strs, default_val, allows_none) = \
parse_description(port_doc)
oport = OutputPortSpec(name, docstring=port_doc)
resolve_port_type(port_types, oport)
output_port_specs.append(oport)
elif (re.search("argument", table_intro, re.IGNORECASE) or
re.search("kwarg", table_intro, re.IGNORECASE)):
print " -> ARGUMENT"
if len(rows[0]) != 2:
raise ValueError("row that has more/less than 2 columns!")
for (name, port_doc) in rows:
if name not in port_specs:
port_specs[name] = InputPortSpec(name, docstring=port_doc)
else:
port_specs[name].docstring = port_doc
(port_types, option_strs, default_val, allows_none) = \
parse_description(port_doc)
resolve_port_type(port_types, port_specs[name])
assign_port_values(port_specs[name], option_strs, default_val)
else:
raise ValueError("Unknown table: %s\n %s %s" % (
parent, table_intro, header))
return cleaned_docstring, output_port_specs
def parse_plots(plot_types, table_overrides):
def get_module_base(n):
return n
def get_super_base(n):
return "plot"
module_specs = []
for plot in plot_types:
port_specs = {}
print "========================================"
print plot
print "========================================"
(plot, module_name, super_name) = \
get_names(plot, get_module_base, get_super_base, "Mpl", "")
try:
plot_obj = getattr(matplotlib.pyplot, plot)
except AttributeError:
print '*** CANNOT ADD PLOT "%s";' \
'IT DOES NOT EXIST IN THIS MPL VERSION ***' % plot
continue
port_specs_list = parse_argspec(plot_obj)
for port_spec in port_specs_list:
port_specs[port_spec.arg] = port_spec
docstring = plot_obj.__doc__
if plot == 'contour':
# want to change the double newline to single newline...
print "&*&* FINDING:", \
docstring.find("*extent*: [ *None* | (x0,x1,y0,y1) ]\n\n")
docstring = docstring.replace("*extent*: [ *None* | (x0,x1,y0,y1) ]\n\n",
"*extent*: [ *None* | (x0,x1,y0,y1) ]\n")
if plot == 'annotate':
docstring = docstring % dict((k,v) for k, v in matplotlib.docstring.interpd.params.iteritems() if k == 'Annotation')
elif plot == 'barbs':
docstring = docstring % dict((k,v) for k,v in matplotlib.docstring.interpd.params.iteritems() if k == 'barbs_doc')
cleaned_docstring, output_port_specs = \
process_docstring(docstring, port_specs, ('pyplot', plot),
table_overrides)
# for port_spec in port_specs.itervalues():
# if port_spec.defaults is not None:
# port_spec.defaults = [str(v) for v in port_spec.defaults]
# if port_spec.values is not None:
# port_spec.values = [[str(v) for v in port_spec.values[0]]]
# for alt_ps in port_spec.alternate_specs:
# if alt_ps.defaults is not None:
# alt_ps.defaults = [str(v) for v in alt_ps.defaults]
# if alt_ps.values is not None:
# alt_ps.values = [[str(v) for v in alt_ps.values[0]]]
module_specs.append(ModuleSpec(module_name, super_name,
"matplotlib.pyplot.%s" % plot,
cleaned_docstring, port_specs.values(),
output_port_specs))
my_specs = SpecList(module_specs)
return my_specs
_get_accepts_regex = re.compile(
r"([\s\S]*)\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))([\s\S]*)",
re.IGNORECASE)
def parse_artists(artist_types, table_overrides={}):
def get_module_name(obj):
return obj.__name__
def get_super_name(obj):
for base in obj.__bases__:
if issubclass(base, Artist):
return base.__name__
return ""
module_specs = []
for klass in artist_types:
(klass, module_name, super_name) = \
get_names(klass, get_module_name, get_super_name, "Mpl",
"Properties")
port_specs = {}
insp = ArtistInspector(klass)
klass_name = klass.__name__
klass_qualname = klass.__module__ + "." + klass_name
for (s, t) in insp._get_setters_and_targets():
print "** %s **" % s
if t.rsplit('.',1)[0] != klass_qualname:
# let inheritance work
continue
if s in port_specs:
raise ValueError('duplicate port "%s"' % s)
port_spec = InputPortSpec(s)
port_specs[s] = port_spec
accepts_raw = insp.get_valid_values(s)
(accepts, deflists, tables, call_sigs) = \
parse_docutils_str(accepts_raw)
if len(deflists) + len(tables) > 0:
raise ValueError("accepts has deflists and/or tables")
(port_types, option_strs, default_val, allows_none) = \
parse_description(accepts)
if default_val is not None:
port_spec.default_val = default_val
if len(option_strs) > 0:
port_spec.entry_types = ['enum']
port_spec.values = [option_strs]
port_spec.hide = False
docstring = getattr(insp.o, 'set_' + s).__doc__
if docstring is None:
docstring = ""
else:
docstring = docstring % matplotlib.docstring.interpd.params
match = _get_accepts_regex.search(docstring)
if match is not None:
print "STARTING DOCSTRING:", docstring
groups = match.groups()
if len(groups) > 2 and groups[2]:
docstring = groups[0] + groups[2]
else:
docstring = groups[0]
print "FIXED DOCSTRING:", docstring
(cleaned_docstring, args, tables, call_sigs) = \
parse_docutils_str(docstring)
port_spec.docstring = cleaned_docstring
translations = None
for (table_intro, header, rows) in tables:
print "TABLE:", table_intro
if (klass.__name__, s, table_intro) in table_overrides:
(override_type, opts) = \
table_overrides[(klass.__name__, s, table_intro)]
if override_type == "translation":
do_translation_override(port_specs, s, rows, opts)
continue
elif override_type == "ports":
table_intro = "kwarg"
elif override_type == "skip":
continue
if len(header) != 2:
raise ValueError("Table not two columns!")
if translations is not None:
raise ValueError("Two translations in one attr")
(translations, pt2, values) = parse_translation(rows)
port_spec.translations = translations
port_spec.values = [values]
port_types.extend(pt2)
resolve_port_type(port_types, port_spec)
constructor_port_specs = {}
port_specs_list = parse_argspec(klass.__init__)
for port_spec in port_specs_list:
constructor_port_specs[port_spec.arg] = port_spec
constructor_docstring = klass.__init__.__doc__
if constructor_docstring is not None:
_, output_port_specs = process_docstring(constructor_docstring,
constructor_port_specs,
(klass.__name__,
'__init__'),
table_overrides)
for arg, ps in constructor_port_specs.iteritems():
if arg not in port_specs:
ps.constructor_arg = True
ps.required = False
port_specs[arg] = ps
module_spec = ModuleSpec(module_name, super_name, klass_qualname,
klass.__doc__, port_specs.values())
module_specs.append(module_spec)
my_specs = SpecList(module_specs)
return my_specs
def run_artists():
import matplotlib.axes
import matplotlib.axis
import matplotlib.collections
import matplotlib.figure
import matplotlib.image
import matplotlib.lines
import matplotlib.patches
import matplotlib.text
artist_py_modules = [matplotlib.axes,
matplotlib.axis,
matplotlib.collections,
matplotlib.figure,
matplotlib.image,
matplotlib.lines,
matplotlib.patches,
matplotlib.text,
]
exclude = set([])
artist_types = set() # (Artist, None, "MplProperties")]
for py_module in artist_py_modules:
for cls_name, cls in inspect.getmembers(py_module, inspect.isclass):
if cls_name in exclude:
continue
if issubclass(cls, Artist) and cls != Artist:
artist_types.add(cls)
print "ARTIST TYPES:", artist_types
artist_types = [(Artist, None, "MplProperties")] + \
list(sorted(artist_types, key=lambda x: list(reversed(x.mro()))))
print "SORTED ARTIST TYPES:", artist_types
# FIXME want this to be indexed by artist name, too...
artist_overrides = {('Axes', 'aspect', 'aspect'):
('translation', {'reverse': False,
'values_only': True}),
# FIXME may want documentation from adjustable?
('Axes', 'aspect', 'adjustable'):
('skip', {}),
# FIXME may want documentation from anchor?
('Axes', 'aspect', 'anchor'):
('skip', {}),
('ConnectionPatch', '__init__', "Valid keys are"):
('ports', {}),
('ConnectionPatch', '__init__', "coordsA and coordsB are strings that indicate the coordinates of xyA and xyB."):
('translation', {'name': ['coordsA', 'coordsB'],
'reverse': False,
'values_only': True}),
('Annotation', '__init__', "If the dictionary has a key arrowstyle, a FancyArrowPatch instance is created with the given dictionary and is drawn. Otherwise, a YAArow patch instance is created and drawn. Valid keys for YAArow are"):
('skip', {}),
('Annotation', '__init__', "Valid keys for FancyArrowPatch are"):
('skip', {}),
('Annotation', '__init__', "xycoords and textcoords are strings that indicate the coordinates of xy and xytext."):
('translation', {'name': ['xycoords', 'textcoords'],
'reverse': False,
'values_only': True}),
}
specs = parse_artists(artist_types, artist_overrides)
specs.write_to_xml("mpl_artists_raw.xml")
def run_plots():
# from matplotlib's boilerplate.py
plot_types = ['acorr',
'arrow',
'axhline',
'axhspan',
'axvline',
'axvspan',
'bar',
'barh',
'broken_barh',
'boxplot',
'cohere',
'clabel',
'contour',
'contourf',
'csd',
'errorbar',
'fill',
'fill_between',
'fill_betweenx',
'hexbin',
'hist',
'hist2d',
'hlines',
'imshow',
'loglog',
'pcolor',
'pcolormesh',
'pie',
# add plot later
# 'plot',
'plot_date',
'psd',
'quiver',
'quiverkey',
'scatter',
'semilogx',
'semilogy',
'specgram',
'stackplot',
'stem',
'step',
'streamplot',
'tricontour',
'tricontourf',
'tripcolor',
'triplot',
'vlines',
'xcorr',
'barbs',
]
plot_types += ['spy',
'polar',
]
# FIXME added to keep existing code happy for now
plot_types += ['legend',
'annotate',
('plot', 'MplLinePlot')]
table_overrides = {('pyplot', 'plot', 'The following format string characters are accepted to control the line style or marker:'):
('translation', {'name': 'marker'}),
('pyplot', 'plot', 'The following color abbreviations are supported:'):
('skip', {}),
('pyplot', 'legend', 'The location codes are'):
('translation', {'name': 'loc',
'reverse': False}),
('pyplot', 'legend', 'Padding and spacing between various elements use following keywords parameters. These values are measure in font-size units. E.g., a fontsize of 10 points and a handlelength=5 implies a handlelength of 50 points. Values from rcParams will be used if None.'):
('ports', {}),
('pyplot', 'annotate', "If the dictionary has a key arrowstyle, a FancyArrowPatch instance is created with the given dictionary and is drawn. Otherwise, a YAArow patch instance is created and drawn. Valid keys for YAArow are"):
('skip', {}),
('pyplot', 'annotate', "Valid keys for FancyArrowPatch are"):
('skip', {}),
('pyplot', 'annotate', "xycoords and textcoords are strings that indicate the coordinates of xy and xytext."):
('translation', {'name': ['xycoords', 'textcoords'],
'reverse': False,
'values_only': True}),
}
specs = parse_plots(plot_types, table_overrides)
specs.write_to_xml("mpl_plots_raw.xml")
def run(which="all"):
if which == "all" or which == "artists":
run_artists()
if which == "all" or which == "plots":
run_plots()
def get_docutils(plot):
import matplotlib.pyplot
plot_obj = getattr(matplotlib.pyplot, plot)
(_, _, _, call_sigs) = parse_docutils_str(plot_obj.__doc__, True)
print call_sigs
if __name__ == '__main__':
if len(sys.argv) <= 1:
run()
elif len(sys.argv) == 2:
run(sys.argv[1])
else:
raise TypeError("usage: python parse.py [all|artists|plots]")
| 39.75
| 304
| 0.526375
|
3610827665e06acb76502cb0aa5189d29dd66ed5
| 21,619
|
py
|
Python
|
axelrod/strategies/memoryone.py
|
Jessegoodspeed/Axelrod
|
bdb1c909e9a37a1527fd1094f45586bc5f853414
|
[
"MIT"
] | null | null | null |
axelrod/strategies/memoryone.py
|
Jessegoodspeed/Axelrod
|
bdb1c909e9a37a1527fd1094f45586bc5f853414
|
[
"MIT"
] | null | null | null |
axelrod/strategies/memoryone.py
|
Jessegoodspeed/Axelrod
|
bdb1c909e9a37a1527fd1094f45586bc5f853414
|
[
"MIT"
] | null | null | null |
"""Memory One strategies. Note that there are Memory One strategies in other
files, including titfortat.py and zero_determinant.py"""
import warnings
from typing import Tuple
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class WinStayLoseShift(Player):
"""
Win-Stay Lose-Shift, also called Pavlov.
Names:
- Win Stay Lose Shift: [Nowak1993]_
- WSLS: [Stewart2012]_
- Pavlov: [Kraines1989]_
"""
name = "Win-Stay Lose-Shift"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if not self.history:
return C
# React to the opponent's last move
last_round = (self.history[-1], opponent.history[-1])
if last_round == (C, C) or last_round == (D, D):
return C
return D
class MemoryOnePlayer(Player):
"""
Uses a four-vector for strategies based on the last round of play,
(P(C|CC), P(C|CD), P(C|DC), P(C|DD)). Win-Stay Lose-Shift is set as
the default player if four_vector is not given.
Intended to be used as an abstract base class or to at least be supplied
    with an initializing four_vector.
Names
- Memory One: [Nowak1990]_
"""
name = "Generic Memory One Player"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(
self,
four_vector: Tuple[float, float, float, float] = None,
initial: Action = C,
) -> None:
"""
Parameters
----------
four_vector: list or tuple of floats of length 4
The response probabilities to the preceding round of play
( P(C|CC), P(C|CD), P(C|DC), P(C|DD) )
initial: C or D
The initial move
Special Cases
-------------
Alternator is equivalent to MemoryOnePlayer((0, 0, 1, 1), C)
Cooperator is equivalent to MemoryOnePlayer((1, 1, 1, 1), C)
Defector is equivalent to MemoryOnePlayer((0, 0, 0, 0), D)
Random is equivalent to MemoryOnePlayer((0.5, 0.5, 0.5, 0.5))
(with a random choice for the initial state)
TitForTat is equivalent to MemoryOnePlayer((1, 0, 1, 0), C)
WinStayLoseShift is equivalent to MemoryOnePlayer((1, 0, 0, 1), C)
See also: The remaining strategies in this file
Multiple strategies in titfortat.py
Grofman, Joss in axelrod_tournaments.py
"""
super().__init__()
self._initial = initial
self.set_initial_four_vector(four_vector)
def set_initial_four_vector(self, four_vector):
if four_vector is None:
four_vector = (1, 0, 0, 1)
warnings.warn("Memory one player is set to default (1, 0, 0, 1).")
self.set_four_vector(four_vector)
def set_four_vector(self, four_vector: Tuple[float, float, float, float]):
if not all(0 <= p <= 1 for p in four_vector):
raise ValueError(
"An element in the probability vector, {}, is not "
"between 0 and 1.".format(str(four_vector))
)
self._four_vector = dict(
zip([(C, C), (C, D), (D, C), (D, D)], four_vector)
)
def _post_init(self):
# Adjust classifiers
values = set(self._four_vector.values())
self.classifier["stochastic"] = any(0 < x < 1 for x in values)
if all(x == 0 for x in values) or all(x == 1 for x in values):
self.classifier["memory_depth"] = 0
def strategy(self, opponent: Player) -> Action:
if len(opponent.history) == 0:
return self._initial
# Determine which probability to use
p = self._four_vector[(self.history[-1], opponent.history[-1])]
# Draw a random number in [0, 1] to decide
try:
return self._random.random_choice(p)
except AttributeError:
return D if p == 0 else C
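# Minimal usage sketch (illustrative, not part of the original module): the
# four-vector (1, 0, 1, 0) reproduces Tit For Tat, i.e. cooperate whenever the
# opponent's last move was C and defect whenever it was D.
#
#   player = MemoryOnePlayer((1, 0, 1, 0))
#   # player._four_vector == {(C, C): 1, (C, D): 0, (D, C): 1, (D, D): 0}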
class WinShiftLoseStay(MemoryOnePlayer):
"""Win-Shift Lose-Stay, also called Reverse Pavlov.
Names:
- WSLS: [Li2011]_
"""
name = "Win-Shift Lose-Stay"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (0, 1, 1, 0)
super().__init__(four_vector)
self._initial = initial
class CoopWhenBothDef(MemoryOnePlayer):
"""Player only cooperates when both players had defected.
Names:
"""
name = "Coop When Both Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (0, 0, 0, 1)
super().__init__(four_vector)
self._initial = initial
class CoopWhenBothDef1(MemoryOnePlayer):
"""Player cooperates initially and when both players defect.
Names:
"""
name = "Coop When Both Defect 1"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 0, 0, 1)
super().__init__(four_vector)
self._initial = initial
class TwoDefect(MemoryOnePlayer):
"""Player cooperates only after they receive the temptation payoff.
Names:
"""
name = "Two Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (0, 0, 1, 0)
super().__init__(four_vector)
self._initial = initial
class TwoCoop(MemoryOnePlayer):
"""Player cooperates initially and after they receive the temptation payoff.
Names:
"""
name = "Two Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 0, 1, 0)
super().__init__(four_vector)
self._initial = initial
class CuriousDefector(MemoryOnePlayer):
"""Player cooperates initially and then defects thereafter.
Names:
"""
name = "Curious Defector"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 0, 0, 0)
super().__init__(four_vector)
self._initial = initial
class SuckerDefect(MemoryOnePlayer):
"""Player cooperates only after receiving sucker's payoff.
Names:
"""
name = "Sucker Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (0, 1, 0, 0)
super().__init__(four_vector)
self._initial = initial
class SuckerCoop(MemoryOnePlayer):
"""Player cooperates initially and after receiving sucker's payoff.
Names:
"""
name = "Sucker Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 1, 0, 0)
super().__init__(four_vector)
self._initial = initial
class WinShiftLoseStayCoop(MemoryOnePlayer):
"""Same as Win-Shift Lose-Stay but initial move is to cooperate.
Names:
"""
name = "Win-Shift Lose-Stay Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 1, 1, 0)
super().__init__(four_vector)
self._initial = initial
class SevenDefect(MemoryOnePlayer):
"""Player defects initially and defects after receving reward payoff.
Names:
"""
name = "Seven Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (0, 1, 1, 1)
super().__init__(four_vector)
self._initial = initial
class SevenCoop(MemoryOnePlayer):
"""Player defects only after receving reward payoff.
Names:
"""
name = "Seven Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (0, 1, 1, 1)
super().__init__(four_vector)
self._initial = initial
class StubbornDef(MemoryOnePlayer):
"""
    Player only cooperates after receiving the reward payoff, but this will never happen
    because the player initially defects and will only defect otherwise - a catch-22.
    This strategy is the exact opposite of Seven Coop.
Names:
"""
name = "Stubborn Def"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 0, 0, 0)
super().__init__(four_vector)
self._initial = initial
class GrimTrigger(MemoryOnePlayer):
"""
    Cooperates until the other player defects, from which point the player will only defect.
    This strategy is similar to Stubborn Def but the initial move is to cooperate.
    This strategy is the exact opposite of Seven Defect.
Names:
"""
name = "Grim Trigger"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (1, 0, 0, 0)
super().__init__(four_vector)
self._initial = initial
class WinStayLoseShiftDef(MemoryOnePlayer):
"""This strategy is similar to Win-Stay Lose-Shift but the initial move is to defect.
Names:
"""
name = "Win-Stay Lose-Shift Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 0, 0, 1)
super().__init__(four_vector)
self._initial = initial
class BitterCooperatorDef(MemoryOnePlayer):
"""
This strategy initially defects and then defects only when it receives sucker's payoff.
Names:
"""
name = "Bitter Cooperator Def"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 0, 1, 1)
super().__init__(four_vector)
self._initial = initial
class BitterCooperator(MemoryOnePlayer):
"""
    This strategy is to defect only when it receives the sucker's payoff.
Names:
"""
name = "Bitter Cooperator"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (1, 0, 1, 1)
super().__init__(four_vector)
self._initial = initial
class ThirteenDefect(MemoryOnePlayer):
"""
This strategy is to defect initially and only after receiving the temptation payoff.
Names:
"""
name = "Thirteen Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 1, 0, 1)
super().__init__(four_vector)
self._initial = initial
class ThirteenCoop(MemoryOnePlayer):
"""
This strategy is to defect only after receiving the temptation payoff.
Names:
"""
name = "Thirteen Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (1, 1, 0, 1)
super().__init__(four_vector)
self._initial = initial
class FourteenDefect(MemoryOnePlayer):
"""
This strategy is to defect initially and only after receiving the punishment payoff.
Names:
"""
name = "Fourteen Defect"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 1, 1, 0)
super().__init__(four_vector)
self._initial = initial
class FourteenCoop(MemoryOnePlayer):
"""
This strategy is to defect only after receiving the punishment payoff.
Names:
"""
name = "Fourteen Coop"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = C) -> None:
four_vector = (1, 1, 1, 0)
super().__init__(four_vector)
self._initial = initial
class CooperatorDef(MemoryOnePlayer):
"""
    This strategy is to defect initially and then cooperate thereafter.
Names:
"""
name = "Cooperator Def"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, initial: Action = D) -> None:
four_vector = (1, 1, 1, 1)
super().__init__(four_vector)
self._initial = initial
class GTFT(MemoryOnePlayer):
"""Generous Tit For Tat Strategy.
Names:
- Generous Tit For Tat: [Nowak1993]_
- Naive peace maker: [Gaudesi2016]_
- Soft Joss: [Gaudesi2016]_
"""
name = "GTFT"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self, p: float = None) -> None:
"""
Parameters
p, float
A parameter used to compute the four-vector
Special Cases
TitForTat is equivalent to GTFT(0)
"""
self.p = p
super().__init__()
def set_initial_four_vector(self, four_vector):
pass
def receive_match_attributes(self):
(R, P, S, T) = self.match_attributes["game"].RPST()
if self.p is None:
self.p = min(1 - (T - R) / (R - S), (R - P) / (T - P))
four_vector = [1, self.p, 1, self.p]
self.set_four_vector(four_vector)
def __repr__(self) -> str:
assert self.p is not None
return "%s: %s" % (self.name, round(self.p, 2))
class FirmButFair(MemoryOnePlayer):
"""A strategy that cooperates on the first move, and cooperates except after
receiving a sucker payoff.
Names:
- Firm But Fair: [Frean1994]_"""
name = "Firm But Fair"
def __init__(self) -> None:
four_vector = (1, 0, 1, 2 / 3)
super().__init__(four_vector)
self.set_four_vector(four_vector)
class StochasticCooperator(MemoryOnePlayer):
"""Stochastic Cooperator.
Names:
- Stochastic Cooperator: [Adami2013]_
"""
name = "Stochastic Cooperator"
def __init__(self) -> None:
four_vector = (0.935, 0.229, 0.266, 0.42)
super().__init__(four_vector)
self.set_four_vector(four_vector)
class StochasticWSLS(MemoryOnePlayer):
"""
Stochastic WSLS, similar to Generous TFT. Note that this is not the same as
    Stochastic WSLS described in [Amaral2016]_; that strategy is a modification
of WSLS that learns from the performance of other strategies.
Names:
- Stochastic WSLS: Original name by Marc Harper
"""
name = "Stochastic WSLS"
def __init__(self, ep: float = 0.05) -> None:
"""
Parameters
ep, float
A parameter used to compute the four-vector -- the probability of
cooperating when the previous round was CD or DC
Special Cases
WinStayLoseShift is equivalent to StochasticWSLS(0)
"""
self.ep = ep
four_vector = (1.0 - ep, ep, ep, 1.0 - ep)
super().__init__(four_vector)
self.set_four_vector(four_vector)
class SoftJoss(MemoryOnePlayer):
"""
Defects with probability 0.9 when the opponent defects, otherwise
emulates Tit-For-Tat.
Names:
- Soft Joss: [Prison1998]_
"""
name = "Soft Joss"
def __init__(self, q: float = 0.9) -> None:
"""
Parameters
q, float
A parameter used to compute the four-vector
Special Cases
Cooperator is equivalent to SoftJoss(0)
TitForTat is equivalent to SoftJoss(1)
"""
self.q = q
four_vector = (1.0, 1 - q, 1, 1 - q)
super().__init__(four_vector)
def __repr__(self) -> str:
return "%s: %s" % (self.name, round(self.q, 2))
class ALLCorALLD(Player):
"""This strategy is at the parameter extreme of the ZD strategies (phi = 0).
It simply repeats its last move, and so mimics ALLC or ALLD after round one.
If the tournament is noisy, there will be long runs of C and D.
    For now the starting choice is random (cooperate with probability 0.6), but that
    was an arbitrary choice at implementation time.
Names:
- ALLC or ALLD: Original name by Marc Harper
- Repeat: [Akin2015]_
"""
name = "ALLCorALLD"
classifier = {
"memory_depth": 1, # Memory-one Four-Vector (1, 1, 0, 0)
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
if len(self.history) == 0:
return self._random.random_choice(0.6)
return self.history[-1]
class ReactivePlayer(MemoryOnePlayer):
"""
A generic reactive player. Defined by 2 probabilities conditional on the
opponent's last move: P(C|C), P(C|D).
Names:
- Reactive: [Nowak1989]_
"""
name = "Reactive Player"
def __init__(self, probabilities: Tuple[float, float]) -> None:
four_vector = (*probabilities, *probabilities)
super().__init__(four_vector)
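# Illustrative example (not part of the original module): ReactivePlayer((0.9, 0.1))
# expands to the four-vector (0.9, 0.1, 0.9, 0.1), so the response depends only on
# the opponent's last move: cooperate with probability 0.9 after C and 0.1 after D.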
| 27.193711
| 91
| 0.601369
|
b5234ee349e289b304fef3186e31859dd9165d62
| 4,890
|
py
|
Python
|
frappe/hooks.py
|
indictranstech/harware-frappe
|
92a67b7b780e242de3722163c32811781cf3492c
|
[
"MIT"
] | null | null | null |
frappe/hooks.py
|
indictranstech/harware-frappe
|
92a67b7b780e242de3722163c32811781cf3492c
|
[
"MIT"
] | null | null | null |
frappe/hooks.py
|
indictranstech/harware-frappe
|
92a67b7b780e242de3722163c32811781cf3492c
|
[
"MIT"
] | 2
|
2016-07-28T06:00:16.000Z
|
2018-03-21T19:59:50.000Z
|
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "frappe"
app_title = "Frappe Framework"
app_publisher = "Frappe Technologies"
app_description = "Full stack web framework with Python, Javascript, MariaDB, Redis, Node"
app_icon = "octicon octicon-circuit-board"
app_color = "orange"
source_link = "https://github.com/frappe/frappe"
app_license = "MIT"
app_email = "info@frappe.io"
before_install = "frappe.utils.install.before_install"
after_install = "frappe.utils.install.after_install"
# website
app_include_js = [
"assets/js/desk.min.js",
"assets/js/editor.min.js",
"assets/js/list.min.js",
"assets/js/form.min.js",
"assets/js/report.min.js"
]
app_include_css = [
"assets/css/desk.min.css",
"assets/css/list.min.css",
"assets/css/form.min.css",
"assets/css/report.min.css",
"assets/css/module.min.css"
]
web_include_js = [
"website_script.js"
]
bootstrap = "assets/frappe/css/bootstrap.css"
web_include_css = [
"assets/css/frappe-web.css"
]
website_route_rules = [
{"from_route": "/blog", "to_route": "Blog Post"},
{"from_route": "/blog/<category>", "to_route": "Blog Post"}
]
write_file_keys = ["file_url", "file_name"]
notification_config = "frappe.core.notifications.get_notification_config"
before_tests = "frappe.utils.install.before_tests"
website_generators = ["Web Page", "Blog Post", "Blog Category", "Web Form"]
email_append_to = ["Event", "ToDo", "Communication"]
calendars = ["Event"]
# login
on_session_creation = [
"frappe.core.doctype.communication.feed.login_feed",
"frappe.core.doctype.user.user.notifify_admin_access_to_system_manager"
]
# permissions
permission_query_conditions = {
"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
"ToDo": "frappe.desk.doctype.todo.todo.get_permission_query_conditions",
"User": "frappe.core.doctype.user.user.get_permission_query_conditions",
"Note": "frappe.desk.doctype.note.note.get_permission_query_conditions",
}
has_permission = {
"Event": "frappe.desk.doctype.event.event.has_permission",
"ToDo": "frappe.desk.doctype.todo.todo.has_permission",
"User": "frappe.core.doctype.user.user.has_permission",
"Note": "frappe.desk.doctype.note.note.has_permission",
"Communication": "frappe.core.doctype.communication.communication.has_permission"
}
standard_queries = {
"User": "frappe.core.doctype.user.user.user_query"
}
doc_events = {
"*": {
"after_insert": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"validate": "frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"on_update": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
"frappe.core.doctype.communication.feed.update_feed"
],
"after_rename": "frappe.desk.notifications.clear_doctype_notifications",
"on_submit": [
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts",
],
"on_cancel": [
"frappe.desk.notifications.clear_doctype_notifications",
"frappe.email.doctype.email_alert.email_alert.trigger_email_alerts"
],
"on_trash": "frappe.desk.notifications.clear_doctype_notifications"
}
}
scheduler_events = {
"all": [
"frappe.email.bulk.flush",
"frappe.email.doctype.email_account.email_account.pull",
"frappe.email.doctype.email_account.email_account.notify_unreplied",
"frappe.utils.error.collect_error_snapshots",
],
"daily": [
"frappe.email.bulk.clear_outbox",
"frappe.desk.notifications.clear_notifications",
"frappe.core.doctype.scheduler_log.scheduler_log.set_old_logs_as_seen",
"frappe.desk.doctype.event.event.send_event_digest",
"frappe.sessions.clear_expired_sessions",
"frappe.email.doctype.email_alert.email_alert.trigger_daily_alerts",
"frappe.async.remove_old_task_logs",
],
"daily_long": [
"frappe.integrations.doctype.dropbox_backup.dropbox_backup.take_backups_daily"
],
"weekly_long": [
"frappe.integrations.doctype.dropbox_backup.dropbox_backup.take_backups_weekly"
]
}
default_background = "/assets/frappe/images/ui/into-the-dawn.jpg"
get_translated_dict = {
("doctype", "System Settings"): "frappe.geo.country_info.get_translated_dict",
("page", "setup-wizard"): "frappe.geo.country_info.get_translated_dict"
}
sounds = [
{"name": "email", "src": "/assets/frappe/sounds/email.mp3", "volume": 0.1},
{"name": "submit", "src": "/assets/frappe/sounds/submit.mp3", "volume": 0.1},
{"name": "cancel", "src": "/assets/frappe/sounds/cancel.mp3", "volume": 0.1},
{"name": "delete", "src": "/assets/frappe/sounds/delete.mp3", "volume": 0.05},
{"name": "click", "src": "/assets/frappe/sounds/click.mp3", "volume": 0.05},
{"name": "error", "src": "/assets/frappe/sounds/error.mp3", "volume": 0.1},
# {"name": "alert", "src": "/assets/frappe/sounds/alert.mp3"},
# {"name": "chime", "src": "/assets/frappe/sounds/chime.mp3"},
]
| 32.6
| 90
| 0.743763
|
233f778bab334ed7079c3699c38c8cfa8d5ae182
| 608
|
py
|
Python
|
senza/manaus/utils.py
|
mschwitalla/senza
|
301a43fde41db194cbb80c68271692d1fe2212db
|
[
"Apache-2.0"
] | 106
|
2015-03-30T14:15:15.000Z
|
2021-07-26T07:30:11.000Z
|
senza/manaus/utils.py
|
mschwitalla/senza
|
301a43fde41db194cbb80c68271692d1fe2212db
|
[
"Apache-2.0"
] | 547
|
2015-04-13T09:58:50.000Z
|
2021-01-26T11:20:35.000Z
|
senza/manaus/utils.py
|
mschwitalla/senza
|
301a43fde41db194cbb80c68271692d1fe2212db
|
[
"Apache-2.0"
] | 102
|
2015-04-01T08:09:53.000Z
|
2020-11-05T09:05:28.000Z
|
"""
Generic functions related to AWS/Boto/Manaus but don't belong to any specific
component
"""
from typing import Dict, Optional # noqa: F401 pylint: disable=locally-disabled, unused-import
from botocore.exceptions import ClientError
__all__ = ["extract_client_error_code"]
def extract_client_error_code(exception: ClientError) -> Optional[str]:
"""
Extracts the client error code from a boto ClientError exception. Returns
None if it fails.
"""
error = exception.response.get('Error', {}) # type: Dict[str, Optional[str]]
error_code = error.get('Code')
return error_code
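# Illustrative example (hypothetical response shape): for a ClientError whose
# ``exception.response`` is {"Error": {"Code": "AccessDenied", "Message": "..."}},
# extract_client_error_code(exception) returns "AccessDenied"; if the "Error"
# key is missing, it returns None.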
| 28.952381
| 95
| 0.730263
|
c1e978d629fad1246666a9feefb6eb348fc5f2b9
| 353
|
py
|
Python
|
photoStore/Store/migrations/0002_rename_cvr_url_photo_photo_url.py
|
j-ahn94/JPhotoStore
|
308c018b6e1163e7d00d1e5df9fc84d7157f85c1
|
[
"MIT"
] | null | null | null |
photoStore/Store/migrations/0002_rename_cvr_url_photo_photo_url.py
|
j-ahn94/JPhotoStore
|
308c018b6e1163e7d00d1e5df9fc84d7157f85c1
|
[
"MIT"
] | null | null | null |
photoStore/Store/migrations/0002_rename_cvr_url_photo_photo_url.py
|
j-ahn94/JPhotoStore
|
308c018b6e1163e7d00d1e5df9fc84d7157f85c1
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.6 on 2021-08-09 03:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Store', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='photo',
old_name='cvr_url',
new_name='photo_url',
),
]
| 18.578947
| 47
| 0.572238
|
9c49de4c8411b96b5ba67fb50f94f815a1088ee2
| 5,500
|
py
|
Python
|
main.py
|
rAcHekLoS/plugin.video.owncast
|
419b40db0cffd3f659b1fa2e70f80ef19867f267
|
[
"MIT"
] | 2
|
2021-12-13T02:33:42.000Z
|
2021-12-13T03:18:43.000Z
|
main.py
|
rAcHekLoS/plugin.video.owncast
|
419b40db0cffd3f659b1fa2e70f80ef19867f267
|
[
"MIT"
] | null | null | null |
main.py
|
rAcHekLoS/plugin.video.owncast
|
419b40db0cffd3f659b1fa2e70f80ef19867f267
|
[
"MIT"
] | null | null | null |
# Module: main
# Author: rache_klos
# Created on: 16.01.2021
import sys
from urllib.parse import urlencode, parse_qsl
import resources.lib.owncast as owncast
import xbmcgui
import xbmcplugin
import xbmcaddon
import xbmc
import inputstreamhelper
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
def get_url(**kwargs):
# Create a URL for calling the plugin recursively from the given set of keyword arguments.
return '{0}?{1}'.format(_url, urlencode(kwargs))
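# Illustrative example (the plugin id below is hypothetical): with
# _url == 'plugin://plugin.video.owncast/', calling
# get_url(action='listing', category='Music') returns
# 'plugin://plugin.video.owncast/?action=listing&category=Music'.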
def get_videos(category):
# Call Owncast global directory
VIDEOS = owncast.owncast_directory()
return VIDEOS[category]
def list_categories():
# Set plugin category.
xbmcplugin.setPluginCategory(_handle, 'Owncast streams')
# Set plugin content.
xbmcplugin.setContent(_handle, 'videos')
# Call Owncast global directory
VIDEOS = owncast.owncast_directory()
# Get video categories
categories = VIDEOS.keys()
# Iterate through categories
for category in categories:
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category)
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
list_item.setArt({'thumb': VIDEOS[category][0]['thumb']})
# Set additional info for the list item.
list_item.setInfo('video', {'title': category,
'plot': category,
'mediatype': 'video'})
# Create a URL for a plugin recursive call.
url = get_url(action='listing', category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
# Set plugin category.
xbmcplugin.setPluginCategory(_handle, category)
# Set plugin content.
xbmcplugin.setContent(_handle, 'videos')
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through videos.
for video in videos:
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video['name'])
# Set additional info for the list item.
# 'mediatype' is needed for skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': video['name'],
'genre': video['genre'],
'tagline': video['title'],
'plot': '[COLOR blue]' + video['description'] + '[/COLOR]',
'plotoutline': video['description'],
'mediatype': 'video'})
# Set graphics for the list item.
list_item.setArt({'thumb': video['thumb']})
# Set 'IsPlayable' property to 'true'.
list_item.setProperty('IsPlayable', 'true')
# Create a URL for a plugin recursive call.
url = get_url(action='play', video=video['url'])
# Add the list item to a virtual Kodi folder.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
addon = xbmcaddon.Addon()
use_inputstream = addon.getSetting('use_inputstream')
if use_inputstream == 'true':
is_helper = inputstreamhelper.Helper('mpd')
play_item.setProperty('inputstream', is_helper.inputstream_addon)
play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'] + '/hls/stream.m3u8')
# Check for Play and Ping Owncast instance
owncast.start_ping(params)
xbmc.log("Start Owncast Stream")
else:
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
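# ---------------------------------------------------------------------------
# Hedged sketch (not part of the add-on): shows the parameter round-trip that
# get_url() and router() above rely on, using only the standard library. The
# plugin base URL is a hypothetical stand-in for sys.argv[0]; never called here.
# ---------------------------------------------------------------------------
def _url_roundtrip_example():
    from urllib.parse import urlencode, parse_qsl
    base = 'plugin://plugin.video.owncast/'
    url = '{0}?{1}'.format(base, urlencode({'action': 'listing', 'category': 'Music'}))
    # url -> 'plugin://plugin.video.owncast/?action=listing&category=Music'
    params = dict(parse_qsl(url.split('?', 1)[1]))
    # router() would read these params and dispatch to list_videos('Music')
    return params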
| 40.145985
| 95
| 0.651091
|
0b86505c7945c6983de12e0c13859acfae164158
| 2,093
|
py
|
Python
|
1010.pairs-of-songs-with-total-durations-divisible-by-60.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
1010.pairs-of-songs-with-total-durations-divisible-by-60.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
1010.pairs-of-songs-with-total-durations-divisible-by-60.py
|
windard/leeeeee
|
0107a5f95746592ca4fe78d2b5875cf65b1910e7
|
[
"MIT"
] | null | null | null |
# coding=utf-8
#
# @lc app=leetcode id=1010 lang=python
#
# [1010] Pairs of Songs With Total Durations Divisible by 60
#
# https://leetcode.com/problems/pairs-of-songs-with-total-durations-divisible-by-60/description/
#
# algorithms
# Easy (45.72%)
# Likes: 198
# Dislikes: 16
# Total Accepted: 15.3K
# Total Submissions: 33.5K
# Testcase Example: '[30,20,150,100,40]'
#
# In a list of songs, the i-th song has a duration of time[i] seconds.
#
# Return the number of pairs of songs for which their total duration in seconds
# is divisible by 60. Formally, we want the number of indices i < j with
# (time[i] + time[j]) % 60 == 0.
#
#
#
# Example 1:
#
#
# Input: [30,20,150,100,40]
# Output: 3
# Explanation: Three pairs have a total duration divisible by 60:
# (time[0] = 30, time[2] = 150): total duration 180
# (time[1] = 20, time[3] = 100): total duration 120
# (time[1] = 20, time[4] = 40): total duration 60
#
#
#
# Example 2:
#
#
# Input: [60,60,60]
# Output: 3
# Explanation: All three pairs have a total duration of 120, which is divisible
# by 60.
#
#
#
#
#
# Note:
#
#
# 1 <= time.length <= 60000
# 1 <= time[i] <= 500
#
#
class Solution(object):
def _numPairsDivisibleBy60(self, time):
"""
:type time: List[int]
:rtype: int
"""
# Time Limit
count = 0
for i in range(len(time)):
for j in range(i+1, len(time)):
if not (time[i] + time[j]) % 60:
count += 1
return count
def numPairsDivisibleBy60(self, time):
"""
:type time: List[int]
:rtype: int
"""
data = {}
for t in time:
t = t % 60
data[t] = data.get(t, 0) + 1
count = 0
# remainders 0 and 30 only pair within their own bucket: C(n, 2) pairs each
zeros = data.get(0)
if zeros and zeros > 1:
    count += zeros * (zeros - 1) // 2
thirties = data.get(30)
if thirties and thirties > 1:
    count += thirties * (thirties - 1) // 2
# remainders i and 60 - i pair with each other for 1 <= i <= 29
for i in range(1, 30):
    count += data.get(i, 0) * data.get(60 - i, 0)
return count
# HashMap
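# Hedged usage sketch (not part of the submission): checks the remainder-bucket
# solution against the two examples from the problem statement above.
if __name__ == '__main__':
    sol = Solution()
    assert sol.numPairsDivisibleBy60([30, 20, 150, 100, 40]) == 3
    assert sol.numPairsDivisibleBy60([60, 60, 60]) == 3
    print('problem-statement examples pass')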
| 22.505376
| 96
| 0.543239
|
de88017dae0e946f8affb05c31fb94d0b8c991b5
| 868
|
py
|
Python
|
azure/mgmt/resource/__init__.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/resource/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/resource/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .features import FeatureClient
from .locks import ManagementLockClient
from .policy import PolicyClient
from .resources import ResourceManagementClient
from .subscriptions import SubscriptionClient
from .links import ManagementLinkClient
from .managedapplications import ApplicationClient
from .version import VERSION
__version__ = VERSION
__all__ = [
'FeatureClient',
'ManagementLockClient',
'PolicyClient',
'ResourceManagementClient',
'SubscriptionClient',
'ManagementLinkClient',
'ApplicationClient'
]
| 31
| 76
| 0.650922
|
3a8ef011486cdac22808f79c8724afbad31c1939
| 3,969
|
py
|
Python
|
mopy/MusicInfo.py
|
motools/mopy
|
c5f76b02300f2c086cfbf48acb00774deeb858fb
|
[
"DOC"
] | 10
|
2015-04-15T16:00:26.000Z
|
2022-01-15T21:50:29.000Z
|
mopy/MusicInfo.py
|
motools/mopy
|
c5f76b02300f2c086cfbf48acb00774deeb858fb
|
[
"DOC"
] | null | null | null |
mopy/MusicInfo.py
|
motools/mopy
|
c5f76b02300f2c086cfbf48acb00774deeb858fb
|
[
"DOC"
] | 2
|
2015-10-16T08:37:13.000Z
|
2017-10-04T15:14:55.000Z
|
"""
MusicInfo.py
Holds a collection of Music Ontology objects
Created by Chris Sutton on 2007-08-13.
Copyright (c) 2007 Chris Sutton. All rights reserved.
"""
from logging import log, error, warning, info, debug
from mopy import model
import random
class MusicInfoException(Exception):
def __init__(self, message) :
self.message = message
def __str__(self) :
return self.message
class MusicInfo(object):
def __init__(self, objects=None, namespaceBindings = model.namespaceBindings):
if objects==None:
objects = []
self.MainIdx = {}
for obj in objects:
self.add(obj)
self.namespaceBindings = namespaceBindings
def add(self, obj, URI=None):
if URI == None:
# See if we have an existing blind node object :
if hasattr(obj, "URI") == False or obj.URI == None or isBlind(obj):
#raise MusicInfoException("Tried to add object "+str(obj)+" with no URI !")
found = self.findExistingBlindObj(obj)
if found == None:
if not isBlind(obj):
debug(" Assigning a blind URI for "+str(obj).replace("\n","|"))
obj.URI = getBlindURI()
else:
debug("Already know blind obj "+str(obj).replace("\n","|"))
obj.URI = found.URI # Update other references to this blind node
return
URI = obj.URI
if not self.haveURI(URI):
# Add object :
self.MainIdx[URI] = obj
if not hasattr(obj, "shortname"):
raise MusicInfoException("No shortname property for object " + str(obj) + ", did it come from the MO model ?")
if not hasattr(self, obj.shortname+"Idx"):
setattr(self, obj.shortname+"Idx", {})
getattr(self, obj.shortname+"Idx")[URI] = obj
else:
existing = self.MainIdx[URI]
if isinstance(obj, type(existing)):
keep = obj # The (possibly) more specific subclass
add = existing
elif isinstance(existing, type(obj)):
keep = existing
add = obj
else:
raise MusicInfoException("Tried to add two objects for the same URI and classes are a mismatch !"\
+"\n Existing : "+str(existing)+"\nAdding : "+str(obj))
debug("Merging Existing : "+str(existing)+"\nAdding : "+str(obj))
for propName in keep._props.keys():
if add._props.has_key(propName):
for v in add._props[propName]:
#debug("Adding "+str(v).replace("\n","|")+" to "+str(keep).replace("\n","|")+" as "+propName)
keep._props[propName].add(v)
# Update references
self.MainIdx[URI] = keep
if not hasattr(self, keep.shortname+"Idx"):
setattr(self, keep.shortname+"Idx", {})
getattr(self, keep.shortname+"Idx")[URI] = keep
if keep != existing:
del getattr(self, existing.shortname+"Idx")[URI]
for obj in self.MainIdx.values():
for propSet in obj._props.values():
outdatedRefs = [v for v in propSet if v==existing]
for v in outdatedRefs:
debug("Updating reference in "+str(obj).replace("\n","|"))
propSet.remove(v)
propSet.add(keep)
def haveURI(self, uri):
return self.MainIdx.has_key(uri)
def findExistingBlindObj(self, o):
if not hasattr(o, "shortname"):
raise MusicInfoException("No shortname property for object " + str(o) + ", did it come from the MO model ?")
if not hasattr(self, o.shortname+"Idx"):
return None
idx = getattr(self, o.shortname+"Idx")
for obj in idx.values():
if obj.URI.startswith("blind:"):
# test properties
match = True
try:
for propName in obj._props.keys():
if obj._props[propName] != o._props[propName]:
#print("Disregarding "+str(obj).replace("\n","|")+" due to property "+propName+" differing.\n")
match = False
except:
match = False
if match == True:
info("Found object "+str(obj).replace("\n","|")+" to match "+str(o).replace("\n","|"))
return obj
return None
def isBlind(obj):
return hasattr(obj,"URI") and obj.URI != None and obj.URI.startswith("blind:")
def getBlindURI(s=None):
if s==None:
s=str(hex(random.getrandbits(64))[2:-1])
return "blind:"+s
| 32.532787
| 114
| 0.647518
|
259e650eb8c9f7af0a93be390dd09be6758cf20c
| 1,418
|
py
|
Python
|
moonlight/settings/dev.py
|
ad-free/moonlight
|
b60bf8668f2559c67425c683d2c9103f7685fdc1
|
[
"MIT"
] | null | null | null |
moonlight/settings/dev.py
|
ad-free/moonlight
|
b60bf8668f2559c67425c683d2c9103f7685fdc1
|
[
"MIT"
] | null | null | null |
moonlight/settings/dev.py
|
ad-free/moonlight
|
b60bf8668f2559c67425c683d2c9103f7685fdc1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import logging
from moonlight.settings.base import *
from django.utils.translation import gettext_lazy as _
from django.conf import settings
logger = logging.getLogger("")
logger.info("Starting on development environment")
DEBUG = True
ALLOWED_HOSTS += ["127.0.0.1", "localhost", "172.16.2.201"]
INSTALLED_APPS += []
TIME_ZONE = "Asia/Ho_Chi_Minh"
USE_TZ = True
USE_I18N = True
LANGUAGES = (
("en", _("English")),
("vn", _("Vietnamese")),
)
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
"datefmt": "%d/%b/%Y-%H:%M:%S",
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"file": {
"level": "INFO",
"class": "logging.handlers.TimedRotatingFileHandler",
"filename": os.path.join(settings.LOG_DIR, "api.log"),
"when": "D",
"interval": 1,
"backupCount": 10,
"formatter": "verbose",
},
},
"loggers": {
"": {"handlers": ["file"], "level": os.getenv("DJANGO_LOG_LEVEL", "INFO"), },
"root": {
"handlers": ["file"],
"level": os.getenv("DJANGO_LOG_LEVEL", "ERROR"),
},
},
}
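# Hedged usage sketch (not part of the settings): once Django applies the LOGGING
# dict above, any module picks up the rotating api.log handler through the root
# ("") logger; the module name "moonlight.api" is a hypothetical example.
def _example_logging_usage():
    import logging as _logging
    example_logger = _logging.getLogger("moonlight.api")
    example_logger.info("request handled")  # propagates to "" -> <LOG_DIR>/api.log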
| 25.321429
| 86
| 0.543724
|
45c75cf1958080efd70fa04d600ed0ac0da2756d
| 26,521
|
py
|
Python
|
tensorflow/python/ops/stateful_random_ops_test.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 1
|
2018-02-25T02:08:34.000Z
|
2018-02-25T02:08:34.000Z
|
tensorflow/python/ops/stateful_random_ops_test.py
|
huonw/tensorflow
|
85f47254af7cc230a4a031998dffe770b7edbb9d
|
[
"Apache-2.0"
] | 58
|
2021-11-22T05:41:28.000Z
|
2022-01-19T01:33:40.000Z
|
tensorflow/python/ops/stateful_random_ops_test.py
|
eniktab/tensorflow_build
|
af6b342154e5ba16543d3fc351fad26c230fe567
|
[
"Apache-2.0"
] | 1
|
2022-03-18T04:26:38.000Z
|
2022-03-18T04:26:38.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateful_random_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from absl.testing import parameterized
import numpy as np
from tensorflow.python.distribute import values as dist_values
from tensorflow.python.distribute.mirrored_strategy import MirroredStrategy
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.kernel_tests.random import util as \
random_test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_stateful_random_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import stateful_random_ops as \
random
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
g_seeded = None
g_unseeded = None
GPU_FLOATS = [dtypes.float16, dtypes.float32, dtypes.float64]
CPU_FLOATS = GPU_FLOATS + [dtypes.bfloat16]
FLOATS = GPU_FLOATS
INTS = [dtypes.int32, dtypes.int64]
class StatefulRandomOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(StatefulRandomOpsTest, self).setUp()
physical_devices = config.list_physical_devices("CPU")
config.set_logical_device_configuration(
physical_devices[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testCreateRNGStateIntSeed(self):
"""Tests `create_rng_state` when `seed` is int."""
# using leading 'F' to test overflow tolerance
state = random.create_rng_state(0xFFFF222233334444FFAA666677778888,
random.RNG_ALG_PHILOX)
self.assertAllEqual(
list(map(random._uint_to_int,
[0xFFAA666677778888, 0xFFFF222233334444] +
[0] * (random.PHILOX_STATE_SIZE - 2))),
state)
def assertAllDifferent(self, tensors):
"""Checks that there are no duplicate elements anywhere among the tensors.
Args:
tensors: a list of tensors. They can have different shapes.
"""
tensors = [array_ops.reshape(t, shape=[-1]) for t in tensors]
ls = array_ops.concat(tensors, axis=0).numpy().tolist()
self.assertAllEqual(len(ls), len(set(ls)))
@test_util.run_v2_only
def testNonDeterministicInts(self):
"""Tests that non_deterministic_ints returns different results every time.
This test is flaky, but with very low probability of failing.
"""
shape = [2, 3]
dtype = dtypes.int64
a = random.non_deterministic_ints(shape=shape, dtype=dtype)
self.assertAllEqual(shape, a.shape)
self.assertEqual(dtype, a.dtype)
b = random.non_deterministic_ints(shape, dtype=dtype)
self.assertAllDifferent([a, b])
@test_util.run_v2_only
def testBatchSeeds(self):
"""Test for batch seeds.
"""
shape = [2, 3]
count = 6
gen = random.Generator.from_seed(1234)
keys1 = gen._make_int64_keys(shape=shape)
keys2 = gen._make_int64_keys(shape=shape)
self.assertAllDifferent([keys1, keys2])
seeds1 = gen.make_seeds(count=count)
seeds2 = gen.make_seeds(count=count)
self.assertAllDifferent([seeds1[0, :], seeds2[0, :]])
gens = gen.split(count=count)
self.assertAllEqual(count, len(gens))
randoms = [g.uniform_full_int(shape=shape, dtype=dtypes.int32)
for g in gens]
self.assertAllDifferent(randoms)
# Tests graph mode.
@def_function.function
def f():
return gen.make_seeds(count=count)
for _ in range(3):
f()
def assertRegex(self, pattern, text):
self.assertTrue(
re.search(pattern, text),
"Can't find pattern '%s' in text '%s'" % (pattern, text))
@test_util.run_v2_only
@test_util.run_cuda_only
def testCrossDeviceSplit(self):
"""Tests that a CPU RNG can split into RNGs on GPU.
"""
with ops.device("/device:CPU:0"):
gen = random.Generator.from_seed(1234) # gen is on CPU
self.assertRegex("CPU", gen.state.device)
with ops.device(test_util.gpu_device_name()):
gens = gen.split(count=10) # gens are on GPU
self.assertRegex("GPU", gens[0].state.device)
@test_util.run_v2_only
def testReset(self):
shape = [2, 3]
gen = random.Generator.from_seed(0)
for resetter in [
lambda g: g.reset(state=[1, 2, 3]),
lambda g: g.reset_from_seed(1234),
lambda g: g.reset_from_key_counter(key=1, counter=[2, 3]),
]:
resetter(gen)
expected_normal = gen.normal(shape)
@def_function.function
def f(resetter):
resetter(gen)
return gen.normal(shape)
def check_results(expected_normal, v):
self.assertAllEqual(expected_normal, v)
check_results(expected_normal, f(resetter))
check_results(expected_normal, f(resetter))
@test_util.run_v2_only
def testGeneratorCreation(self):
"""Tests generator creation, in both eager and tf.function.
The interaction between Generator creation and defun should be the same as
tf.Variable.
"""
shape = [2, 3]
alg = random.RNG_ALG_PHILOX
for constructor in [
lambda: random.Generator(state=[1, 2, 3], alg=alg),
lambda: random.Generator.from_seed(1234),
lambda: random.Generator.from_key_counter( # pylint: disable=g-long-lambda
key=1, counter=[2, 3], alg=alg),
]:
gen = constructor()
# Tests tf.function
expected_normal1 = gen.normal(shape)
expected_normal2 = gen.normal(shape)
global g_seeded
g_seeded = None
@def_function.function
def f(constructor):
global g_seeded
# defun'ed function should only create variables once
if g_seeded is None:
g_seeded = constructor()
return g_seeded.normal(shape)
def check_results(expected_normal, v):
self.assertAllEqual(expected_normal, v)
check_results(expected_normal1, f(constructor))
check_results(expected_normal2, f(constructor))
@parameterized.parameters([
("philox", random.RNG_ALG_PHILOX, random.Algorithm.PHILOX),
("threefry", random.RNG_ALG_THREEFRY, random.Algorithm.THREEFRY)])
@test_util.run_v2_only
def testAlg(self, name, int_id, enum_id):
g_by_name = random.Generator.from_seed(1234, name)
g_by_int = random.Generator.from_seed(1234, int_id)
g_by_enum = random.Generator.from_seed(1234, enum_id)
self.assertEqual(g_by_name.algorithm, g_by_int.algorithm)
self.assertEqual(g_by_name.algorithm, g_by_enum.algorithm)
@test_util.run_v2_only
def testGeneratorCreationWithVar(self):
"""Tests creating generator with a variable.
"""
alg = random.RNG_ALG_PHILOX
state = [1, 2, 3]
var = variables.Variable(state, dtype=random.STATE_TYPE)
g = random.Generator(state=state, alg=alg)
g_var = random.Generator(state=var, alg=alg)
shape = [2, 3]
g.normal(shape)
g_var.normal(shape)
self.assertAllEqual(g.state.read_value(), var.read_value())
@test_util.run_v2_only
def testGeneratorCreationUnseeded(self):
"""Tests generator creation, the unseeded case."""
shape = [2, 3]
global g_unseeded
g_unseeded = None
@def_function.function
def f():
global g_unseeded
# defun'ed function should only create variables once
if g_unseeded is None:
g_unseeded = random.Generator.from_non_deterministic_state()
return g_unseeded.normal(shape)
self.assertAllEqual(shape, f().shape)
@test_util.run_v2_only
def testGeneratorCopy(self):
"""Tests copying a generator."""
g = random.Generator.from_seed(0)
g_copy = random.Generator(g)
self.assertAllEqual(g.algorithm, g_copy.algorithm)
self.assertAllEqual(g.state.read_value(), g_copy.state.read_value())
# Tests tf.function
global g_seeded
g_seeded = None
# Do the same in tf.function
@def_function.function
def f():
global g_seeded
# defun'ed function should only create variables once
if g_seeded is None:
g_seeded = random.Generator(g)
self.assertAllEqual(g.algorithm, g_seeded.algorithm)
self.assertAllEqual(g.state.read_value(), g_seeded.state.read_value())
f()
@test_util.run_v1_only(
("This test is specifically for checking TF1 compatibility. "
"It cannot run under TF2."))
def testTF1(self):
seed = 1234
shape = [2, 3]
expected_normal1 = constant_op.constant(
[[0.9356609, 1.0854305, -0.93788373],
[-0.50615472, 1.31697023, 0.71375787]], dtype=dtypes.float32)
expected_normal2 = constant_op.constant(
[[-0.3964749, 0.8369565, -0.30946946],
[1.1206646, 1.00852597, -0.10185789]], dtype=dtypes.float32)
with self.cached_session() as sess:
gen1 = random.Generator.from_seed(seed)
gen2 = random.Generator.from_non_deterministic_state()
sess.run((gen1._state_var.initializer, gen2._state_var.initializer))
r1 = gen1.normal(shape, dtype=dtypes.float32)
r2 = gen2.normal(shape, dtype=dtypes.float32)
def f():
return sess.run((r1, r2))
def check_results(expected_normal, v1, v2):
self.assertAllClose(expected_normal, v1, rtol=1e-5, atol=1e-5)
self.assertAllEqual(shape, v2.shape)
check_results(expected_normal1, *f())
check_results(expected_normal2, *f())
@test_util.run_v2_only
@test_util.also_run_as_tf_function
def testEagerAndDefun(self):
"""A simple test to make sure the op works in eager and defunned mode."""
random.get_global_generator().normal((3,))
@test_util.run_v2_only
def testOpSeedSelectionAfterSetSeed(self):
"""Tests that op-seed selection is reset after reseting global generator.
Fixing GitHub issue 9171:
https://github.com/tensorflow/tensorflow/issues/9171
"""
shape = (3,)
random.get_global_generator().reset_from_seed(1)
a = random.get_global_generator().normal(shape)
random.get_global_generator().reset_from_seed(1)
b = random.get_global_generator().normal(shape)
self.assertAllEqual(a, b)
# Now do the above again using accelerated ('defun'ed) computation
@def_function.function
def f():
return random.get_global_generator().normal(shape)
random.get_global_generator().reset_from_seed(1)
c = f()
random.get_global_generator().reset_from_seed(1)
d = f()
self.assertAllEqual(c, d)
self.assertAllEqual(a, c)
@test_util.run_v2_only
def testOpSeedSelectionNotSensitive(self):
"""Test that op-seed selection is not sensitive to trivial changes.
Test that op-seed selection is not sensitive to trivial computation
(i.e. graph) changes.
Fixing b/32087099
"""
def f(include_print):
shape = constant_op.constant([5])
if include_print:
shape = logging_ops.Print(shape, [shape])
return random.get_global_generator().normal(shape)
def compare(fst_includes_print, snd_includes_print):
random.get_global_generator().reset_from_seed(50)
fst = f(fst_includes_print)
random.get_global_generator().reset_from_seed(50)
snd = f(snd_includes_print)
self.assertAllEqual(fst, snd)
# Now do the above again using accelerated (defunned) 'f'.
# Running 'f' with two different Boolean arguments should cause
# two different graphs to be generated, hence demonstrating the
# insensitivity to graph changes.
f_acc = def_function.function(f)
random.get_global_generator().reset_from_seed(50)
fst = f_acc(fst_includes_print)
random.get_global_generator().reset_from_seed(50)
snd = f_acc(snd_includes_print)
self.assertAllEqual(fst, snd)
compare(False, False)
compare(True, True)
compare(True, False)
@test_util.run_v2_only
def testKey(self):
key = 1234
gen = random.Generator(state=[0, 0, key], alg=random.RNG_ALG_PHILOX)
got = gen.key
self.assertAllEqual(key, got)
@def_function.function
def f():
return gen.key
got = f()
self.assertAllEqual(key, got)
@test_util.run_v2_only
def testSkip(self):
key = 1234
counter = 5678
gen = random.Generator(state=[counter, 0, key], alg=random.RNG_ALG_PHILOX)
delta = 432
gen.skip(delta)
new_counter = gen._state_var[0]
self.assertAllEqual(counter + delta * 256, new_counter)
def _sameAsOldRandomOps(self, device, floats):
def compare(dtype, old, new):
seed1, seed2 = 79, 25
# note how the two seeds for the old op correspond to the seed for the new
# op
with ops.device(device):
gen = random.Generator(state=[0, seed2, seed1],
alg=random.RNG_ALG_PHILOX)
# create a graph for the old op in order to call it many times
@def_function.function
def run_old():
with ops.device(device):
return old(dtype, seed1, seed2)
def run_new():
with ops.device(device):
return new(dtype, gen)
for _ in range(100):
self.assertAllEqual(run_old(), run_new())
shape = constant_op.constant([4, 7])
minval = 128
maxval = 256
# passing `dtype` around to compress go/gpylint-faq#cell-var-from-loop and
# go/gpylint-faq#undefined-loop-variable
def old_normal(dtype, seed1, seed2):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_normal(dtype, gen):
return gen._standard_normal(shape, dtype=dtype)
def old_truncated_normal(dtype, seed1, seed2):
return gen_random_ops.truncated_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_truncated_normal(dtype, gen):
return gen._truncated_normal(shape, dtype=dtype)
def old_uniform_int(dtype, seed1, seed2):
minval2 = constant_op.constant(minval, dtype=dtype)
maxval2 = constant_op.constant(maxval, dtype=dtype)
return gen_random_ops.random_uniform_int(
shape, minval=minval2, maxval=maxval2, seed=seed1, seed2=seed2)
def new_uniform_int(dtype, gen):
return gen.uniform(shape, minval=minval, maxval=maxval, dtype=dtype)
def old_uniform(dtype, seed1, seed2):
return gen_random_ops.random_uniform(
shape, dtype=dtype, seed=seed1, seed2=seed2)
def new_uniform(dtype, gen):
return gen._uniform(shape, dtype=dtype)
for dtype in floats:
compare(dtype, old_normal, new_normal)
compare(dtype, old_truncated_normal, new_truncated_normal)
compare(dtype, old_uniform, new_uniform)
for dtype in INTS:
compare(dtype, old_uniform_int, new_uniform_int)
@test_util.run_v2_only
def testSameAsOldRandomOpsCPU(self):
"""Tests that the generated numbers are the same as the old random_ops.py.
The CPU version.
"""
self._sameAsOldRandomOps("/device:CPU:0", CPU_FLOATS)
@test_util.run_v2_only
@test_util.run_cuda_only
def testSameAsOldRandomOpsGPU(self):
"""Tests that the generated numbers are the same as the old random_ops.py.
The GPU version.
"""
self._sameAsOldRandomOps(test_util.gpu_device_name(), GPU_FLOATS)
@parameterized.parameters(INTS + [dtypes.uint32, dtypes.uint64])
@test_util.run_v2_only
@test_util.run_cuda_only
def testGPUEqualsCPU(self, dtype):
"""Tests that GPU and CPU generate the same integer outputs."""
seed = 1234
shape = [315, 49]
with ops.device("/device:CPU:0"):
cpu = random.Generator.from_seed(seed).uniform_full_int(
shape=shape, dtype=dtype)
with ops.device(test_util.gpu_device_name()):
gpu = random.Generator.from_seed(seed).uniform_full_int(
shape=shape, dtype=dtype)
self.assertAllEqual(cpu, gpu)
@parameterized.parameters(FLOATS + INTS)
@test_util.run_v2_only
def testUniformIsInRange(self, dtype):
minval = 2
maxval = 33
size = 1000
gen = random.Generator.from_seed(1234)
x = gen.uniform(
shape=[size], dtype=dtype, minval=minval, maxval=maxval).numpy()
self.assertTrue(np.all(x >= minval))
self.assertTrue(np.all(x < maxval))
@parameterized.parameters(FLOATS)
@test_util.run_v2_only
def testNormalIsFinite(self, dtype):
gen = random.Generator.from_seed(1234)
x = gen.normal(shape=[10000], dtype=dtype).numpy()
self.assertTrue(np.all(np.isfinite(x)))
@parameterized.parameters(FLOATS + INTS)
@test_util.run_v2_only
def testDistributionOfUniform(self, dtype):
"""Use Pearson's Chi-squared test to test for uniformity."""
n = 1000
seed = 12
gen = random.Generator.from_seed(seed)
maxval = 1
if dtype.is_integer:
maxval = 100
x = gen.uniform(shape=[n], maxval=maxval, dtype=dtype).numpy()
if maxval > 1:
# Normalize y to range [0, 1).
x = x.astype(float) / maxval
# Tests that the values are distributed amongst 10 bins with equal
# probability. 16.92 is the Chi^2 value for 9 degrees of freedom with
# p=0.05. This test is probabilistic and would be flaky if the random
# seed were not fixed.
val = random_test_util.chi_squared(x, 10)
self.assertLess(val, 16.92)
@parameterized.parameters(FLOATS)
@test_util.run_v2_only
def testDistributionOfNormal(self, dtype):
"""Use Anderson-Darling test to test distribution appears normal."""
n = 1000
gen = random.Generator.from_seed(1234)
x = gen.normal(shape=[n], dtype=dtype).numpy()
# The constant 2.492 is the 5% critical value for the Anderson-Darling
# test where the mean and variance are known. This test is probabilistic
# so to avoid flakiness the seed is fixed.
self.assertLess(
random_test_util.anderson_darling(x.astype(float)), 2.492)
@test_util.run_v2_only
def testErrors(self):
"""Tests that proper errors are raised.
"""
shape = [2, 3]
gen = random.Generator.from_seed(1234)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"must have shape \[\], not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, [0, 0], shape)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
r"must have shape \[\], not"):
gen_stateful_random_ops.rng_skip(
gen.state.handle, gen.algorithm, [0, 0])
with self.assertRaisesWithPredicateMatch(
TypeError, "EagerTensor of dtype int64"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 1.1, shape)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"Unsupported algorithm id"):
gen_stateful_random_ops.stateful_standard_normal_v2(
gen.state.handle, 123, shape)
var = variables.Variable([0, 0], dtype=dtypes.int32)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"dtype of RNG state variable must be int64, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
var = variables.Variable([[0]], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"RNG state must have one and only one dimension, not"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
var = variables.Variable([0], dtype=dtypes.int64)
with self.assertRaisesWithPredicateMatch(
errors.InvalidArgumentError,
"For the Philox algorithm, the size of state must be at least"):
gen_stateful_random_ops.stateful_standard_normal_v2(
var.handle, random.RNG_ALG_PHILOX, shape)
@test_util.run_v2_only
def testGetGlobalGeneratorWithXla(self):
"""Demonstrates using the global generator with XLA."""
if not config.list_physical_devices("XLA_CPU"):
self.skipTest("No XLA_CPU device available.")
random.set_global_generator(None)
@def_function.function(experimental_compile=True)
def make_seed():
generator = random.get_global_generator()
state = array_ops.identity(generator.state, name="state")
return generator.uniform_full_int((2,), dtypes.int32, name="seed"), state
with ops.device("/device:XLA_CPU:0"):
seed, state = make_seed()
self.assertTrue(np.all(np.isfinite(seed.numpy())))
random.get_global_generator().reset(state)
self.assertAllEqual(make_seed()[0], seed)
@test_util.run_v2_only
def testSetGlobalGeneratorBadWithDefun(self):
"""Demonstrates that set_global_generator don't work properly with defun.
"""
shape = (3,)
@def_function.function
def f():
return random.get_global_generator().normal(shape)
random.set_global_generator(random.Generator.from_seed(50))
with self.assertRaisesWithPredicateMatch(
errors.NotFoundError, "Resource .+ does not exist"):
_ = f()
random.set_global_generator(random.Generator.from_seed(50))
_ = f()
@test_util.run_v2_only
def testFunctionArg(self):
"""Tests that RNG can be used as tf.function's argument.
"""
shape = [2, 3]
@def_function.function
def f(gen):
return gen.normal(shape)
g1 = random.Generator.from_seed(1)
g2 = random.Generator.from_seed(1)
res1 = f(g1)
res2 = g2.normal(shape)
self.assertAllEqual(res1, res2)
self.assertAllEqual(g1.state.read_value(), g2.state.read_value())
@test_util.run_v2_only
def testLimitedRetracingWithCompositeTensors(self):
"""Tests that RNGs with the same shape/dtype won't cause retracing.
"""
trace_count = [0]
@def_function.function
def f(x):
trace_count[0] += 1
return x.normal([])
f(random.Generator.from_seed(1))
f(random.Generator.from_seed(2))
self.assertEqual(trace_count[0], 1)
def testMostSpecificCompatibleType(self):
"""Tests GeneratorSpec.most_specific_compatible_type.
"""
spec = random.GeneratorSpec(shape=(2, 3), dtype=dtypes.int32)
res = spec.most_specific_compatible_type(
random.GeneratorSpec(shape=(2, 3), dtype=dtypes.int32))
self.assertEqual(spec, res)
with self.assertRaisesWithPredicateMatch(ValueError, ""):
spec.most_specific_compatible_type(
tensor_spec.TensorSpec(shape=(2, 3), dtype=dtypes.int32))
with self.assertRaisesWithPredicateMatch(ValueError, ""):
spec.most_specific_compatible_type(
random.GeneratorSpec(shape=(2, 4), dtype=dtypes.int32))
with self.assertRaisesWithPredicateMatch(ValueError, ""):
spec.most_specific_compatible_type(
random.GeneratorSpec(shape=(2, 3), dtype=dtypes.int64))
@test_util.run_v2_only
@test_util.run_cuda_only
def testMirroredStratSeq(self):
"""Tests RNG/MirrorStrategy interaction #1.
If an RNG is created outside strategy.scope(), all replicas will access the
same RNG object, and accesses are serialized.
"""
shape = [3, 4]
dtype = dtypes.int32
gen = random.Generator.from_seed(1234)
strat = MirroredStrategy(devices=["/cpu:0", test_util.gpu_device_name()])
with strat.scope():
def f():
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops.stack([t1, t2])
return t
results = strat.extended.call_for_each_replica(
fn=f)
values = results.values
self.assertAllEqual(2, len(values))
self.assertAllDifferent(values)
@test_util.run_v2_only
def testMirroredStratParaSyncDisallowed(self):
"""Tests that generator creation in MirroredStrategy is disallowed.
"""
creators = [
lambda: random.Generator.from_seed(1234),
random.Generator.from_non_deterministic_state,
]
shape = [3, 4]
dtype = dtypes.int32
strat = MirroredStrategy(devices=["cpu:0", "cpu:1"])
for creator in creators:
with strat.scope():
with self.assertRaisesWithPredicateMatch(
ValueError, "disallowed"):
creator() # pylint: disable=cell-var-from-loop
def f():
gen = creator() # pylint: disable=cell-var-from-loop
return gen.uniform_full_int(shape=shape, dtype=dtype)
with self.assertRaisesWithPredicateMatch(
ValueError, "disallowed"):
strat.extended.call_for_each_replica(fn=f)
@test_util.run_v2_only
def testMirroredStratParaAsync(self):
"""Tests RNG/MirrorStrategy interaction #2.
The user can create n independent RNGs outside strategy.scope(), where n
is the number of replicas, and give one to each replica. The replicas can
thus get different random-number streams.
"""
shape = [3, 4]
dtype = dtypes.int32
gens = random.get_global_generator().split(count=2)
devices = ["cpu:0", "cpu:1"]
strat = MirroredStrategy(devices=devices)
# Use `PerReplica` to specify which `gen` is sent to which replica
gens = dist_values.PerReplica([[g] for g in gens])
with strat.scope():
def f(gen):
t1 = gen.uniform_full_int(shape=shape, dtype=dtype)
t2 = gen.uniform_full_int(shape=shape, dtype=dtype)
t = array_ops.stack([t1, t2])
return t
results = strat.extended.call_for_each_replica(
fn=f, args=gens)
local_results = strat.experimental_local_results(results)
self.assertAllEqual(2, len(local_results))
self.assertAllDifferent(local_results)
if __name__ == "__main__":
test.main()
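def _public_generator_sketch():
  """Hedged sketch (not part of the test suite): the public tf.random.Generator
  API that mirrors the seeding/splitting behaviour exercised above. Assumes a
  TensorFlow 2.2+ installation; never called by this module."""
  import tensorflow as tf
  g = tf.random.Generator.from_seed(1234)
  sample = g.normal([2, 3])                      # deterministic given the seed
  children = g.split(count=2)                    # independent child streams
  ints = children[0].uniform([2, 3], minval=0, maxval=10, dtype=tf.int32)
  return sample, ints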
| 36.58069
| 83
| 0.693752
|
29acd6cc0f1d88b3fb812cfc88899a839d394061
| 5,230
|
py
|
Python
|
sdk/python/pulumi_aws/elasticloadbalancing/load_balancer_cookie_stickiness_policy.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticloadbalancing/load_balancer_cookie_stickiness_policy.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/elasticloadbalancing/load_balancer_cookie_stickiness_policy.py
|
johnktims/pulumi-aws
|
c838bc79043f5376c66fc66275a1e012edd3ab7d
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class LoadBalancerCookieStickinessPolicy(pulumi.CustomResource):
cookie_expiration_period: pulumi.Output[float]
"""
The time period after which
the session cookie should be considered stale, expressed in seconds.
"""
lb_port: pulumi.Output[float]
"""
The load balancer port to which the policy
should be applied. This must be an active listener on the load
balancer.
"""
load_balancer: pulumi.Output[str]
"""
The load balancer to which the policy
should be attached.
"""
name: pulumi.Output[str]
"""
The name of the stickiness policy.
"""
def __init__(__self__, resource_name, opts=None, cookie_expiration_period=None, lb_port=None, load_balancer=None, name=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a load balancer cookie stickiness policy, which allows an ELB to control the sticky session lifetime of the browser.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] cookie_expiration_period: The time period after which
the session cookie should be considered stale, expressed in seconds.
:param pulumi.Input[float] lb_port: The load balancer port to which the policy
should be applied. This must be an active listener on the load
balancer.
:param pulumi.Input[str] load_balancer: The load balancer to which the policy
should be attached.
:param pulumi.Input[str] name: The name of the stickiness policy.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['cookie_expiration_period'] = cookie_expiration_period
if lb_port is None:
raise TypeError("Missing required property 'lb_port'")
__props__['lb_port'] = lb_port
if load_balancer is None:
raise TypeError("Missing required property 'load_balancer'")
__props__['load_balancer'] = load_balancer
__props__['name'] = name
super(LoadBalancerCookieStickinessPolicy, __self__).__init__(
'aws:elasticloadbalancing/loadBalancerCookieStickinessPolicy:LoadBalancerCookieStickinessPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, cookie_expiration_period=None, lb_port=None, load_balancer=None, name=None):
"""
Get an existing LoadBalancerCookieStickinessPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] cookie_expiration_period: The time period after which
the session cookie should be considered stale, expressed in seconds.
:param pulumi.Input[float] lb_port: The load balancer port to which the policy
should be applied. This must be an active listener on the load
balancer.
:param pulumi.Input[str] load_balancer: The load balancer to which the policy
should be attached.
:param pulumi.Input[str] name: The name of the stickiness policy.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["cookie_expiration_period"] = cookie_expiration_period
__props__["lb_port"] = lb_port
__props__["load_balancer"] = load_balancer
__props__["name"] = name
return LoadBalancerCookieStickinessPolicy(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
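# Hedged usage sketch (not part of the generated SDK): minimal instantiation of the
# resource above inside a Pulumi program; the ELB name is a hypothetical placeholder.
def _example_usage():
    return LoadBalancerCookieStickinessPolicy(
        "example-policy",
        load_balancer="example-elb-name",
        lb_port=80,
        cookie_expiration_period=600)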
| 45.877193
| 175
| 0.68088
|
19e9989e269fa632e9d77a25b898989d08c1f54a
| 610
|
py
|
Python
|
raven/response/_command/_commands/commands.py
|
jawahar273/Tx
|
d595ebd347194402984505a051f842854ce0fc9f
|
[
"MIT"
] | null | null | null |
raven/response/_command/_commands/commands.py
|
jawahar273/Tx
|
d595ebd347194402984505a051f842854ce0fc9f
|
[
"MIT"
] | null | null | null |
raven/response/_command/_commands/commands.py
|
jawahar273/Tx
|
d595ebd347194402984505a051f842854ce0fc9f
|
[
"MIT"
] | null | null | null |
from raven.response.abstract_response import BaseResponse
from raven.config import FOR_OUTPUT
class Commands(BaseResponse):
def __init__(self, scope=None):
super(Commands, self).__init__(self, scope=scope)
def get_class_name(self):
return self.__class__.__name__
def render(self, txObject, pretty=None):
self.class_name = self.get_class_name() # class name
super(Commands, self).render(class_name=self.class_name, sub_path="_command")
return self.render_template.render(
output_values=txObject.get(FOR_OUTPUT), pretty=pretty
)
| 25.416667
| 85
| 0.706557
|
171aa91ca52df0ba1bf66946f533381acc836279
| 837
|
py
|
Python
|
login/login/session.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 7
|
2020-09-01T21:52:37.000Z
|
2022-02-25T16:00:08.000Z
|
login/login/session.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 4
|
2021-09-10T22:15:09.000Z
|
2022-03-25T22:17:43.000Z
|
login/login/session.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 9
|
2020-09-01T21:53:39.000Z
|
2022-03-30T12:03:04.000Z
|
from common.session import Session
from login.keys.blowfish import BlowfishKey
from login.keys.rsa import L2RsaKey
from login.keys.session import SessionKey
from login.keys.xor import LoginXorKey
class LoginSession(Session):
def __init__(self, protocol):
super().__init__()
self.id = Int32.random()
self.protocol = protocol
self.state = None
self.rsa_key = L2RsaKey.generate()
self.blowfish_key = BlowfishKey.generate()
self.session_key = SessionKey()
self.xor_key = LoginXorKey()
self.protocol_version = 50721
self.blowfish_enabled = False
@classmethod
def by_username(cls, username):
for session_id, session in cls.data().items():
if session["account"].username == username:
return {session_id: session}
| 32.192308
| 55
| 0.670251
|
a58ea8e90668d78c8ef6c6289c3f9b4de1eaf326
| 32,693
|
py
|
Python
|
src/graph_models.py
|
satyakisikdar/infinity-mirror
|
d8b44aa75f1052318dc1c40a770a7ad07e38abb7
|
[
"MIT"
] | 5
|
2020-03-13T02:54:03.000Z
|
2022-03-18T02:33:12.000Z
|
src/graph_models.py
|
satyakisikdar/infinity-mirror
|
d8b44aa75f1052318dc1c40a770a7ad07e38abb7
|
[
"MIT"
] | 2
|
2021-11-10T19:47:00.000Z
|
2022-02-10T01:24:59.000Z
|
src/graph_models.py
|
satyakisikdar/infinity-mirror
|
d8b44aa75f1052318dc1c40a770a7ad07e38abb7
|
[
"MIT"
] | 1
|
2021-05-24T21:54:44.000Z
|
2021-05-24T21:54:44.000Z
|
"""
Container for different graph models
"""
import abc
import math
import os
import platform
import random
import subprocess as sub
from itertools import combinations
from time import time
from typing import List, Dict, Any, Union, Set, Tuple
from tqdm import tqdm
import networkx as nx
import numpy as np
from scipy import sparse
from src.graph_io import networkx_to_graphtool, graphtool_to_networkx
from src.graph_stats import GraphStats
from src.utils import ColorPrint as CP
from src.utils import check_file_exists, load_pickle, delete_files, get_blank_graph, get_graph_from_prob_matrix
__all__ = ['BaseGraphModel', 'ErdosRenyi', 'UniformRandom', 'ChungLu', 'BTER', '_BTER', 'CNRG', 'HRG', 'Kronecker',
'GraphAutoEncoder', 'SBM', 'GraphForge', 'NetGAN', 'BUGGE']
class BaseGraphModel:
__slots__ = ['input_graph', 'initial_gname', 'model_name', 'params', 'trial']
def __init__(self, model_name: str, input_graph: nx.Graph, trial: int, **kwargs) -> None:
self.input_graph: nx.Graph = input_graph # networkX graph to be fitted
assert self.input_graph.name != '', 'Input graph does not have a name'
self.initial_gname: str = input_graph.name # name of the initial graph
self.model_name: str = model_name # name of the model
self.trial = trial # run id prevents files from getting clobbered
self.params: Dict[Any] = {} # dictionary of model parameters
return
@abc.abstractmethod
def _fit(self) -> None:
"""
Fits the parameters of the model
:return:
"""
pass
@abc.abstractmethod
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
"""
Generates one graph with given gname and gen_id
"""
pass
def update(self, new_input_graph: nx.Graph) -> None:
"""
Update the model to (a) update the input graph, (b) fit the parameters
:return:
"""
CP.print_none('Updating graph')
self.input_graph = new_input_graph
self._fit() # re-fit the parameters
return
def generate(self, num_graphs: int, gen_id: int) -> List[nx.Graph]:
"""
Generates num_graphs many graphs by repeatedly calling _gen
maybe use a generator
:param num_graphs:
:param gen_id: generation id
:param trial: trial keeps things separate when run in parallel
:return:
"""
generated_graphs = []
for i in range(num_graphs):
g = self._gen(gen_id=gen_id, gname=f'{self.input_graph.name}_{gen_id}_{self.trial}_{i + 1}')
if not isinstance(g, nx.Graph):
g = nx.Graph(g) # make it into an undirected graph with no parallel edges
self_loops = list(nx.selfloop_edges(g))
g.remove_edges_from(self_loops) # remove self loops
generated_graphs.append(g)
assert len(generated_graphs) == num_graphs, f'Unable to generate {num_graphs} graphs'
return generated_graphs
def __str__(self) -> str:
st = f'name: "{self.model_name}", input_graph: "{self.input_graph.name}", trial: {self.trial}'
if len(self.params) > 0:
st += f'params: {self.params}'
return st
def __repr__(self) -> str:
return str(self)
class BUGGE(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='BUGGE', input_graph=input_graph, trial=trial)
self.rule_min = 2
self.rule_max = 5
CP.print_blue(f'Rule sizes: min: {self.rule_min}, max: {self.rule_max}')
return
def _fit(self) -> None:
from src.bugge.generation import fit
input_graph = nx.DiGraph(self.input_graph) # BUGGE needs a directed graph
model = fit(input_graph, rule_min=self.rule_min, rule_max=self.rule_max)
self.params['model'] = model
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
from src.bugge.generation import generate
assert 'model' in self.params, 'BUGGE model is not trained'
g = generate(model=self.params['model'])
g = nx.Graph(g)
g.name = gname
g.gen_id = gen_id
return g
class ErdosRenyi(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='Erdos-Renyi', input_graph=input_graph, trial=trial)
if 'seed' in kwargs:
seed = kwargs['seed']
else:
seed = None
self.params['seed'] = seed
return
def _fit(self) -> None:
"""
G(n, p)
n: number of nodes
p: probability of edges
<m>: expected number of edges
for fitting, p = <m> / (n * (n - 1) / 2)
:return:
"""
n = self.input_graph.order()
m = self.input_graph.size()
self.params['n'] = n
self.params['p'] = m / (n * (n - 1) / 2)
return
def _gen(self, gname: str, gen_id: int, ) -> nx.Graph:
assert 'n' in self.params and 'p' in self.params, 'Improper parameters for Erdos-Renyi'
g = nx.fast_gnp_random_graph(n=self.params['n'], p=self.params['p'], seed=self.params['seed'])
g.name = gname
g.gen_id = gen_id
return g
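# Hedged usage sketch (not part of the module): the fit/update/generate workflow
# described in BaseGraphModel, shown with the ErdosRenyi model on a small
# built-in graph; never called at import time.
def _erdos_renyi_example():
    g = nx.karate_club_graph()
    g.name = 'karate'
    model = ErdosRenyi(input_graph=g, trial=1)
    model._fit()                                    # sets params['n'] and params['p']
    return model.generate(num_graphs=3, gen_id=1)   # three simple graphs with ~<m> edges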
class UniformRandom(BaseGraphModel):
"""
A graph is chosen uniformly at random from the set of all graphs with n nodes and m edges.
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='Uniform-Random', input_graph=input_graph, trial=trial)
if 'seed' in kwargs:
seed = kwargs['seed']
else:
seed = None
self.params['seed'] = seed
return
def _fit(self):
n = self.input_graph.order()
m = self.input_graph.size()
self.params['n'] = n
self.params['m'] = m
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'n' in self.params and 'm' in self.params, 'Improper parameters for Uniform Random'
g = nx.gnm_random_graph(n=self.params['n'], m=self.params['m'], seed=self.params['seed'])
g.name = gname
g.gen_id = gen_id
return g
class ChungLu(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='Chung-Lu', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
self.params['degree_seq'] = sorted([d for n, d in self.input_graph.degree()], reverse=True) # degree sequence
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'degree_seq' in self.params, 'improper parameters for Chung-Lu'
try:
g = nx.configuration_model(self.params['degree_seq']) # fit the model to the degree seq
except nx.NetworkXError: # config model failed
raise Exception('Generation failed!')
else: # gets called only if the exception is not thrown
g = nx.Graph(g) # make it into a simple graph
g.remove_edges_from(nx.selfloop_edges(g)) # remove self-loops
g.name = gname
g.gen_id = gen_id
return g
class _BTER(BaseGraphModel):
"""
BTER model by Tammy Kolda
feastpack implementation at https://www.sandia.gov/~tgkolda/feastpack/feastpack_v1.2.zip
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='BTER', input_graph=input_graph, trial=trial)
# self.prep_environment()
return
def _fit(self) -> None:
pass # the matlab code does the fitting
def prep_environment(self) -> None:
"""
Prepare environment - check for MATLAB
:return:
"""
completed_process = sub.run('matlab -h', shell=True, stdout=sub.DEVNULL)
assert completed_process.returncode == 0, 'MATLAB not found'
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
g = self.input_graph
# fix BTER to use the directory..
CP.print_blue('Starting BTER...')
graph_path = f'./src/bter/{g.name}_{self.trial}.mat'
np.savetxt(graph_path, nx.to_numpy_matrix(g), fmt='%d')
matlab_code = [
'mex -largeArrayDims tricnt_mex.c;',
'mex -largeArrayDims ccperdegest_mex.c;',
f"G = dlmread('{g.name}_{self.trial}.mat');",
'G = sparse(G);',
f"graphname = '{g.name}_{self.trial}';",
'',
'nnodes = size(G, 1);',
'nedges = nnz(G) / 2;',
r"fprintf('nodes: %d edges: %d\n', nnodes, nedges);",
'',
'nd = accumarray(nonzeros(sum(G,2)),1);',
"maxdegree = find(nd>0,1,'last');",
r"fprintf('Maximum degree: %d\n', maxdegree);",
'',
'[ccd,gcc] = ccperdeg(G);',
r"fprintf('Global clustering coefficient: %.2f\n', gcc);",
'',
r"fprintf('Running BTER...\n');",
't1=tic;',
'[E1,E2] = bter(nd,ccd);',
'toc(t1);',
r"fprintf('Number of edges created by BTER: %d\n', size(E1,1) + size(E2,1));",
'',
"fprintf('Turning edge list into adjacency matrix (including dedup)...');",
't2=tic;',
'G_bter = bter_edges2graph(E1,E2);',
'toc(t2);',
r"fprintf('Number of edges in dedup''d graph: %d\n', nnz(G)/2);",
'',
'G_bter = full(G_bter);',
r"dlmwrite('{}_{}_bter.mat', G_bter, ' ');".format(g.name, self.trial),
'quit;'
]
matlab_code_filename = f'{g.name}_{self.trial}_code.m'
matlab_code_path = f'./src/bter/{matlab_code_filename}'
print('\n'.join(matlab_code), file=open(matlab_code_path, 'w'))
output_path = f'./src/bter/{g.name}_{self.trial}_bter.mat'
start_time = time()
completed_process = sub.run(f'cd src/bter; cat {matlab_code_filename} | matlab -nosplash -nodesktop',
shell=True,
stdout=sub.DEVNULL, stderr=sub.DEVNULL)
CP.print_blue(f'BTER ran in {round(time() - start_time, 3)} secs')
if completed_process.returncode != 0 or not check_file_exists(output_path):
CP.print_blue('BTER failed!')
raise Exception('Generation failed!')
else:
bter_mat = np.loadtxt(output_path, dtype=int)
g_bter = nx.from_numpy_matrix(bter_mat, create_using=nx.Graph())
g_bter.name = gname
g_bter.gen_id = gen_id
delete_files(graph_path, output_path, matlab_code_path)
return g_bter
class BTER(BaseGraphModel):
"""
BTER model by Tammy Kolda
feastpack implementation at https://www.sandia.gov/~tgkolda/feastpack/feastpack_v1.2.zip
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='BTER', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
# find degree distribution and avg clustering by degree
g_stats = GraphStats(self.input_graph, trial=-1, dataset=self.initial_gname, iteration=-1, model=self.model_name)
self.params['n'] = self.input_graph.order()
self.params['degree_dist'] = g_stats.degree_dist(normalized=False) # we need the counts
self.params['degree_seq'] = g_stats['degree_seq']
self.params['avg_cc_by_deg'] = g_stats.clustering_coefficients_by_degree()
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'degree_dist' in self.params and 'avg_cc_by_deg' in self.params and 'n' in self.params, \
'insufficient parameters for BTER'
n, avg_cc_by_deg = self.params['n'], self.params['avg_cc_by_deg']
degree_seq, degree_dist = self.params['degree_seq'], self.params['degree_dist']
g = nx.empty_graph(n=n) # adding n isolated nodes
# preprocessing
# step 1: assign n1 nodes to have degree 1, n2 nodes to have degree 2, ...
assigned_deg: Dict[int, int] = {node: degree_seq[node] for node in g.nodes()} # deg seq is sorted
nx.set_node_attributes(g, values=assigned_deg, name='assigned_deg')
# step 2: partition all nodes into affinity blocks, ideally blocks with degree d as d+1 nodes - no edges yet
# ignore degree 1 nodes
node2block: Dict[int, int] = {} # keyed by node, vals are block id
block_members: Dict[int, Tuple[int, Set[int]]] = {} # keyed by block_id, vals: expected degree, set of members
idx = 0
block_id = 0
while idx < n - 1: # idx is node id
deg = assigned_deg[idx]
if deg == 1: # skip the degree 1 nodes
idx += 1
continue
for j in range(deg + 1): # assign deg+1 nodes to degree block of degree deg
node = idx + j
if node > n - 1: # if node > n, break
break
node2block[node] = block_id # assign node to block
if block_id not in block_members: # update block_members data structure
block_members[
block_id] = deg, set() # first item is the expected degree, second is the set of members
block_members[block_id][1].add(node)
block_id += 1 # update block id
idx += deg + 1 # skip deg + 1 nodes
# phase 1
# step 3: add edges within each affinity block by fitting a dense ER graph depending on avg cc by degree
phase1_edges = []
for block_id, (exp_deg, members) in block_members.items():
clustering_coeff = avg_cc_by_deg[exp_deg]
prob = math.pow(clustering_coeff, 1 / 3)
for u, v in combinations(members, 2):
r = random.random()
if r <= prob:
g.add_edge(u, v)
phase1_edges.append((u, v))
# phase 2
# step 4: Add edges between blocks by using excess degree. Expected degree: d_i, already incident: d_j. excess degree: d_i - d_j.
# Create a CL graph based on the excess degrees
excess_degs = {node: max(0, assigned_deg[node] - g.degree(node))
for node in g.nodes()} # dictionary of excess degs
if sum(excess_degs.values()) % 2 != 0:  # excess degrees do not sum to an even number
    # pick the node with the largest excess degree; max() passes each (node, degree) pair to the key
    max_deg_node, max_deg = max(excess_degs.items(), key=lambda kv: kv[1])
    excess_degs[max_deg_node] -= 1  # decrease it by 1 to make the sum even
phase2_graph = nx.configuration_model(excess_degs.values(), create_using=nx.Graph())
selfloops = list(nx.selfloop_edges(phase2_graph))
phase2_graph.remove_edges_from(selfloops)
g.add_edges_from(phase2_graph.edges())
g.name = gname
g.gen_id = gen_id
return g
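# Hedged worked example (not part of the model): in phase 1 above, an affinity
# block built for expected degree d = 4 holds d + 1 = 5 nodes, so there are
# C(5, 2) = 10 candidate edges; with a clustering coefficient of 0.343 for degree 4,
# each edge is kept with probability 0.343 ** (1/3) = 0.7, i.e. 7 edges in expectation.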
class CNRG(BaseGraphModel):
"""
Satyaki's Clustering-Based Node Replacement Grammars https://github.com/satyakisikdar/cnrg
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='CNRG', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
from src.cnrg.runner import get_grammar
grammar = get_grammar(self.input_graph, name=self.input_graph.name)
self.params['grammar'] = grammar
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'grammar' in self.params, 'Improper params. Grammar object is missing.'
from src.cnrg.runner import generate_graph
light_g = generate_graph(target_n=self.input_graph.order(), rule_dict=self.params['grammar'].rule_dict,
tolerance_bounds=0.01) # exact generation
g = nx.Graph()
g.add_edges_from(light_g.edges())
g.name = gname
g.gen_id = gen_id
return g
class HRG(BaseGraphModel):
"""
Sal's Hyperedge Replacement Graph Grammars https://github.com/abitofalchemy/hrg-nm
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='HRG', input_graph=input_graph, trial=trial)
self.prep_environment()
return
def _fit(self) -> None:
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
pass # HRGs can generate multiple graphs at once
def _make_graph(self, graph) -> nx.Graph:
"""
This is needed since HRGs use NetworkX 1.x and that's incompatible with 2.x
:param graph:
:return:
"""
custom_g = nx.Graph()
custom_g.name = graph.name
for u, nbrs in graph.edge.items():
for v in nbrs.keys():
custom_g.add_edge(u, v)
return custom_g
def prep_environment(self) -> None:
"""
Prepare the Python environment
:return:
"""
if check_file_exists('./envs/hrg'):
return
CP.print_blue('Making virtual environment for HRG')
sub.run(
'python2 -m pip install --user virtualenv; python2 -m virtualenv -p python2 ./envs/hrg;. ./envs/hrg/bin/activate; which python2;',
shell=True,
stdout=sub.DEVNULL) # create and activate environment
if 'Linux' not in platform.platform():
completed_process = sub.run(
'export CC=gcc-9; export CXX=g++-9;. ./envs/hrg/bin/activate; python2 -m pip install -r ./envs/requirements_hrg.txt',
                shell=True, stdout=sub.DEVNULL)  # install requirements for HRG
else:
completed_process = sub.run(
'. ./envs/hrg/bin/activate; python2 -m pip install -r ./envs/requirements_hrg.txt',
                shell=True, stdout=sub.DEVNULL)  # install requirements for HRG
assert completed_process.returncode == 0, 'Error while creating environment for HRG'
return
def generate(self, num_graphs: int, gen_id: int) -> Union[List[nx.Graph], None]:
edgelist_path = f'./src/hrg/{self.initial_gname}_{self.trial}.g'
nx.write_edgelist(self.input_graph, edgelist_path, data=False)
output_pickle_path = f'./src/hrg/Results/{self.initial_gname}_{self.trial}_hstars.pickle'
completed_process = sub.run(
f'. ./envs/hrg/bin/activate; cd src/hrg; python2 exact_phrg.py --orig {self.initial_gname}_{self.trial}.g --trials {num_graphs}; deactivate;',
shell=True, stdout=sub.DEVNULL)
if completed_process.returncode != 0 or not check_file_exists(output_pickle_path):
CP.print_blue(f'Error in HRG: "{self.input_graph.name}"')
raise Exception('Generation failed!')
else:
generated_graphs = []
gen_graphs = load_pickle(output_pickle_path)
if not isinstance(gen_graphs, list) or len(gen_graphs) != num_graphs:
raise Exception('Generation failed!')
for i, gen_graph in enumerate(gen_graphs):
gen_graph = self._make_graph(gen_graph)
gen_graph.name = f'{self.input_graph.name}_{self.trial}_{i + 1}' # adding the number of graph
gen_graph.gen_id = gen_id
generated_graphs.append(gen_graph)
if not isinstance(generated_graphs, list) or len(generated_graphs) != num_graphs:
print('HRG failed')
raise Exception('Generation failed!')
# delete_files(edgelist_path, output_pickle_path)
return generated_graphs
class Kronecker(BaseGraphModel):
"""
Kronecker Graph Model from SNAP
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='Kronecker', input_graph=input_graph, trial=trial)
if 'Linux' in platform.platform():
self.kronfit_exec = './kronfit_dac'
self.krongen_exec = './krongen_dac'
else:
self.kronfit_exec = './kronfit_mac'
self.krongen_exec = './krongen_mac'
return
def _fit(self) -> None:
"""
call KronFit
"""
output_file = f'./src/kronecker/{self.initial_gname}_{self.trial}-fit'
# write edgelist to the path, but graph needs to start from 1
g = nx.convert_node_labels_to_integers(self.input_graph, first_label=1, label_attribute='old_label')
directed_g = g.to_directed() # kronecker expects a directed graph
edgelist_path = f'./src/kronecker/{self.initial_gname}_{self.trial}.txt'
nx.write_edgelist(directed_g, edgelist_path, data=False)
bash_code = f'cd src/kronecker; {self.kronfit_exec} -i:{self.initial_gname}_{self.trial}.txt -o:{self.initial_gname}_{self.trial}-fit -s:50000'
completed_process = sub.run(bash_code, shell=True) # , stdout=sub.PIPE)
if completed_process.returncode != 0:
CP.print_blue(f'Error in KronFit: "{self.input_graph.name}"')
raise Exception('Generation failed!')
elif not check_file_exists(output_file):
CP.print_blue(f'Error in KronFit: "{self.input_graph.name}"')
raise Exception('Generation failed!')
else:
with open(output_file) as f:
last_line = f.readlines()[-1]
last_line = last_line.replace(']', '')
matrix = last_line[last_line.find('[') + 1:]
# CP.print_blue('Initiator matrix:', matrix)
self.params['initiator_matrix'] = matrix
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
"""
call KronGen
"""
orig_n = self.input_graph.order()
kron_iters = int(math.log2(orig_n)) # floor of log2 gives a bound on kronecker iteration count
if math.fabs(2 ** kron_iters - orig_n) > math.fabs(2 ** (kron_iters + 1) - orig_n):
kron_iters += 1
assert 'initiator_matrix' in self.params, 'Initiator matrix not found'
matrix = self.params['initiator_matrix']
output_file = f'./src/kronecker/{self.initial_gname}_{self.trial}_kron.txt'
if len(matrix) == 0: # KronFit failed
CP.print_blue(f'Error in KronGen: "{self.input_graph.name}"')
raise Exception('Generation failed!')
else:
            bash_code = f'cd src/kronecker; {self.krongen_exec} -o:{self.initial_gname}_{self.trial}_kron.txt -m:"{matrix}" -i:{kron_iters}'
completed_process = sub.run(bash_code, shell=True, stdout=sub.PIPE)
if completed_process.returncode != 0 or not check_file_exists(output_file):
CP.print_blue(f'Error in KronGen: "{self.input_graph.name}"')
raise Exception('Generation failed!')
else:
graph = nx.read_edgelist(output_file, nodetype=int, create_using=nx.Graph())
graph.name = gname
delete_files(output_file)
graph.gen_id = gen_id
return graph
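# Illustrative arithmetic for the kron_iters rounding in _gen above (not original code): for
# orig_n = 300, log2(300) is about 8.23 so kron_iters starts at 8, and |2**8 - 300| = 44 is
# smaller than |2**9 - 300| = 212, so 8 is kept; for orig_n = 400, |2**8 - 400| = 144 exceeds
# |2**9 - 400| = 112, so kron_iters is bumped to 9 and KronGen targets 512 nodes.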
class SBM(BaseGraphModel):
"""
Stochastic Block Model - degree corrected
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='SBM', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
import graph_tool.all as gt # local import
gt_g = networkx_to_graphtool(self.input_graph) # convert to graphtool obj
state = gt.minimize_blockmodel_dl(gt_g) # run SBM fit
self.params['state'] = state
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
import graph_tool.all as gt # local import
assert 'state' in self.params, 'missing parameter: state for SBM'
state = self.params['state']
gen_gt_g = gt.generate_sbm(state.b.a,
gt.adjacency(state.get_bg(), state.get_ers()).T) # returns a graphtool graph
g = graphtool_to_networkx(gen_gt_g)
g.name = gname
g.gen_id = gen_id
return g
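# A hedged usage sketch for the SBM wrapper above (the karate-club graph and the names below are
# illustrative, and graph-tool must be installed for the local imports in _fit/_gen to resolve):
#   model = SBM(input_graph=nx.karate_club_graph(), trial=0)
#   model._fit()                                    # infers the block structure with graph-tool
#   g_new = model._gen(gname='karate_sbm_0', gen_id=1)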
class GraphAutoEncoder(BaseGraphModel):
"""
Graph auto-encoders - AE, VAE, LinearAE, LinearVAE, DeepGAE, DeepGVAE
"""
def __init__(self, input_graph: nx.Graph, kind: str, trial: int, **kwargs) -> None:
assert kind in ('GCN_AE', 'GCN_VAE', 'Linear_AE', 'Linear_VAE', 'Deep_GCN_AE', 'Deep_GCN_VAE'), f'improper kind: {kind}'
super().__init__(model_name=kind, input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
from src.autoencoders.fit import fit_model
prob_mat, thresh_mat = fit_model(self.input_graph, model_name=self.model_name.lower())
self.params['prob_mat'] = sparse.csr_matrix(prob_mat)
self.params['thresh_mat'] = sparse.csr_matrix(thresh_mat)
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'prob_mat' in self.params, 'prob_mat not found'
assert 'thresh_mat' in self.params, 'thresh_mat not found'
g = get_graph_from_prob_matrix(self.params['thresh_mat'], thresh=0.5)
g.name = gname
g.gen_id = gen_id
return g
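# Assumption worth flagging: get_graph_from_prob_matrix is a project helper not shown in this
# excerpt; from its use above it presumably includes an edge (u, v) whenever the matrix entry is
# at least `thresh` (0.5 in the call above).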
class GraphVAE(BaseGraphModel):
"""
Graph Variational Autoencoder - from T. Kipf
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='GraphVAE', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
from src.gae.fit import fit_vae
adj_mat = nx.adjacency_matrix(self.input_graph) # converts the graph into a sparse adj mat
prob_mat = fit_vae(adj_matrix=adj_mat)
self.params['prob_mat'] = sparse.csr_matrix(prob_mat) # turn this into a sparse CSR matrix
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'prob_mat' in self.params, 'Improper params. Prob matrix object is missing.'
g = get_graph_from_prob_matrix(self.params['prob_mat'], thresh=0.5)
g.name = gname
g.gen_id = gen_id
print(f'{gname}, {gen_id}, {g.order(), g.size()}')
return g
class GraphAE(BaseGraphModel):
"""
Graph Autoencoder - from T. Kipf
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='GraphAE', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
from src.gae.fit import fit_ae
adj_mat = nx.adjacency_matrix(self.input_graph) # converts the graph into a sparse adj mat
prob_mat = fit_ae(adj_matrix=adj_mat)
self.params['prob_mat'] = prob_mat
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
assert 'prob_mat' in self.params, 'Improper params. Prob matrix object is missing.'
g = get_graph_from_prob_matrix(self.params['prob_mat'])
g.name = gname
g.gen_id = gen_id
return g
class GraphForge(BaseGraphModel):
"""
Spectral Graph Forge by Baldesi et al
Copy 50% of the original
"""
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='GraphForge', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
return # does not need to fit
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
g = nx.spectral_graph_forge(self.input_graph, alpha=0.5)
g.name = gname
g.gen_id = gen_id
return g
class NetGAN(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='NetGAN', input_graph=input_graph, trial=trial)
return
def _fit(self) -> None:
from src.netgan.fit import fit
sparse_adj = nx.to_scipy_sparse_matrix(self.input_graph)
try:
scores, tg_sum = fit(sparse_adj)
except Exception as e:
CP.print_orange(f'NetGAN fit failed\n{e}')
scores, tg_sum = None, None
self.params['scores'] = scores
self.params['tg_sum'] = tg_sum
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
from src.netgan.netgan.utils import graph_from_scores
assert 'scores' in self.params
assert 'tg_sum' in self.params
if self.params['scores'] is None or self.params['tg_sum'] is None:
CP.print_orange('NetGAN gen failed')
raise Exception('Generation failed!')
else:
gen_mat = graph_from_scores(self.params['scores'], self.params['tg_sum'])
g = nx.from_numpy_array(gen_mat, create_using=nx.Graph())
g.name = gname
g.gen_id = gen_id
return g
class _NetGAN(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='NetGAN', input_graph=input_graph, trial=trial)
self.prep_environment()
return
def prep_environment(self) -> None:
proc = sub.run('conda init bash; . ~/.bashrc; conda activate netgan', shell=True, stdout=sub.DEVNULL)
os.makedirs('./src/netgan/dumps', exist_ok=True) # make the directory to store the dumps
if proc.returncode == 0: # conda environment exists
return
CP.print_blue('Making conda environment for NetGAN')
proc = sub.run('conda env create -f ./envs/netgan.yml', shell=True,
stdout=sub.DEVNULL) # create and activate environment
assert proc.returncode == 0, 'Error while creating env for NetGAN'
return
def _fit(self) -> None:
dump = f'./src/netgan/dumps'
gname = f'{self.input_graph.name}_{self.trial}'
path = f'{dump}/{gname}.g'
nx.write_edgelist(self.input_graph, path, data=False)
proc = sub.run(
f'conda init bash; . ~/.bashrc; conda activate netgan; python src/netgan/fit.py {gname} {path}; conda deactivate',
shell=True) # , stderr=sub.DEVNULL)#, stdout=sub.DEVNULL)
assert proc.returncode == 0, 'NetGAN fit did not work'
assert check_file_exists(f'{dump}/{gname}.pkl.gz'), f'pickle not found at {dump}/{gname}.pkl.gz'
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
pass # NetGAN can generate multiple graphs at once
def generate(self, num_graphs: int, gen_id: int) -> List[nx.Graph]:
dump = f'./src/netgan/dumps'
gname = f'{self.input_graph.name}_{self.trial}'
pickle_path = f'{dump}/{gname}.pkl.gz'
proc = sub.run(
f'conda init bash; . ~/.bashrc; conda activate netgan; python src/netgan/gen.py {gname} {pickle_path} {num_graphs}',
shell=True) # , stdout=sub.DEVNULL)
assert proc.returncode == 0, 'error in NetGAN generate'
output_pickle_path = f'{dump}/{gname}_graphs.pkl.gz'
generated_graphs = []
for i, gen_graph in enumerate(load_pickle(output_pickle_path)):
gen_graph.name = f'{self.input_graph.name}_{self.trial}_{i + 1}' # adding the number of graph
gen_graph.gen_id = gen_id
generated_graphs.append(gen_graph)
delete_files(output_pickle_path)
return generated_graphs
class GraphRNN(BaseGraphModel):
def __init__(self, input_graph: nx.Graph, trial: int, **kwargs) -> None:
super().__init__(model_name='GraphRNN', input_graph=input_graph, trial=trial)
os.makedirs('./src/graphrnn/dumps', exist_ok=True) # make the directory to store the dumps
return
def _fit(self) -> None:
from src.graphrnn.fit import fit
graphs = []
for _ in range(10):
graphs.append(self.input_graph)
args, model, output = fit(graphs)
self.params['args'] = args
self.params['model'] = model
self.params['output'] = output
return
def _gen(self, gname: str, gen_id: int) -> nx.Graph:
from src.graphrnn.gen import gen
assert 'args' in self.params
assert 'model' in self.params
assert 'output' in self.params
gen_graphs = gen(args=self.params['args'], model=self.params['model'], output=self.params['output'])
g = gen_graphs[0] # gen_graphs is a list of graphs
g.name = gname
g.gen_id = gen_id
return g
| 37.535017
| 154
| 0.609427
|
25e1affaab88b5563c102a8de04d789f2b288a54
| 1,262
|
py
|
Python
|
seg3/map.py
|
iViolinSolo/2018MathYouKnow
|
b7027ef874c69c751ddfeac2a0b861de8d49e5b0
|
[
"Apache-2.0"
] | null | null | null |
seg3/map.py
|
iViolinSolo/2018MathYouKnow
|
b7027ef874c69c751ddfeac2a0b861de8d49e5b0
|
[
"Apache-2.0"
] | null | null | null |
seg3/map.py
|
iViolinSolo/2018MathYouKnow
|
b7027ef874c69c751ddfeac2a0b861de8d49e5b0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: violinsolo
# Created on 19/09/2018
import json
import pygal.maps.world
from country_codes import get_country_code
from pygal.style import RotateStyle
from pygal.style import LightColorizedStyle as LCS, RotateStyle as RS
# Load the population data into a list
filename = 'population_data.json'
with open(filename) as f:
pop_data = json.load(f)
# Collect the 2010 population of each country
cc_populations = {}
for pop_dict in pop_data:
if pop_dict['Year'] == '2010':
country_name = pop_dict['Country Name']
population = int(float(pop_dict['Value']))
code = get_country_code(country_name)
if code:
cc_populations[code] = population
# Split the countries into 3 groups by population
cc_pops_1, cc_pops_2, cc_pops_3 = {}, {}, {}
for cc, pop in cc_populations.items():
if pop < 10000000:
cc_pops_1[cc] = pop
elif pop < 1000000000:
cc_pops_2[cc] = pop
else:
cc_pops_3[cc] = pop
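# Illustrative examples of the cut-offs above: a country of 8 million people lands in cc_pops_1,
# one of 300 million lands in cc_pops_2, and only countries above one billion (China and India
# in 2010) land in cc_pops_3.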
# See how many countries are in each group
print(len(cc_pops_1), len(cc_pops_2), len(cc_pops_3))
wm_style = RS('#336699', base_style=LCS)
wm = pygal.maps.world.World(style=wm_style)
wm.title = ('World Population in 2010, by Country')
wm.add('0-10m', cc_pops_1)
wm.add('10m-1bn', cc_pops_2)
wm.add('>1bn', cc_pops_3)
wm.render_to_file('world_population.svg')
| 25.755102
| 69
| 0.690174
|
1791a9b2b4749a94e1650ffea58a07ac0c71a649
| 1,361
|
py
|
Python
|
examples/copy_data_scripts/main.py
|
masoncusack/azmlops
|
abf9e6d394ed2930c7a40eb8863b3a45eb018db3
|
[
"MIT"
] | 1
|
2021-04-21T08:07:31.000Z
|
2021-04-21T08:07:31.000Z
|
examples/copy_data_scripts/main.py
|
masoncusack/azmlops
|
abf9e6d394ed2930c7a40eb8863b3a45eb018db3
|
[
"MIT"
] | 3
|
2021-04-29T15:04:23.000Z
|
2021-05-12T16:01:19.000Z
|
examples/copy_data_scripts/main.py
|
masoncusack/azmlops
|
abf9e6d394ed2930c7a40eb8863b3a45eb018db3
|
[
"MIT"
] | 1
|
2021-04-21T14:14:25.000Z
|
2021-04-21T14:14:25.000Z
|
from azureml.core import Run
import argparse
from os import makedirs, path
from shutil import copyfile
def copy_data(input_file_path, output_file_path, run):
"""
Copy input file to output file
"""
run.log("copy_data Job", 1)
with open(input_file_path, 'r') as reader:
print(f"Input file Data: {reader.read()}")
makedirs(path.dirname(output_file_path), exist_ok=True)
copyfile(input_file_path, output_file_path)
run.log("copy_data Job", 2)
if __name__ == "__main__":
RUN = Run.get_context()
# Get Parameters
PARSER = argparse.ArgumentParser("job")
PARSER.add_argument("--input_path", type=str, help="input data", required=True)
PARSER.add_argument("--output_path", type=str, help="output data", required=True)
PARSER.add_argument("--input_file", type=str, help="input file name", required=True)
PARSER.add_argument("--output_file", type=str, help="output file name", required=True)
ARGS = PARSER.parse_args()
# Prepare full file paths
input_file_path = f"{ARGS.input_path}/{ARGS.input_file}"
output_file_path = f"{ARGS.output_path}/{ARGS.output_file}"
print(f"Input file: {input_file_path}")
print(f"Output file: {output_file_path}")
# Call job entry point
copy_data(input_file_path, output_file_path, RUN)
RUN.complete()
| 30.931818
| 90
| 0.688464
|
0ef82b4db2380fea047a7080ebc2d014e52b885a
| 1,804
|
py
|
Python
|
fast_ft/server.py
|
practice9420/fast-ft
|
e0e5f2553bdd74a6770ccd3e2b8cf31abc56f6c9
|
[
"MIT"
] | 4
|
2021-02-08T07:20:27.000Z
|
2021-07-04T10:25:51.000Z
|
fast_ft/server.py
|
practice9420/fast-ft
|
e0e5f2553bdd74a6770ccd3e2b8cf31abc56f6c9
|
[
"MIT"
] | null | null | null |
fast_ft/server.py
|
practice9420/fast-ft
|
e0e5f2553bdd74a6770ccd3e2b8cf31abc56f6c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
import os
import sys
from pathlib import Path
# After packaging, add the current directory to the path so that the project's packages can be imported
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from settings import Config
from fast_ft import create_app, socket_server
from utils.make_qrcode import get_inner_ip, open_browser, make_qrcode_
from utils.health_examination import net_is_used
from utils.process_argv import process_argv
# Project root path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Upload path
upload = os.path.join(BASE_DIR, "upload/{}")
def main():
kwargs = process_argv(sys.argv[1:])
host = kwargs.get("host") if kwargs.get("host") else "0.0.0.0"
port = int(kwargs.get("port")) if kwargs.get("port") else 5000
    # Check whether the upload directory exists; create it if it does not
if not Path(upload.format("")).exists():
os.mkdir(upload.format(""))
    # Generate the QR code
inner_ip = get_inner_ip()
Config.global_inner_ip = inner_ip
if net_is_used(port, inner_ip):
for i in range(10):
port += 1
if not net_is_used(port, inner_ip):
break
Config.global_port = port
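    # Illustrative behaviour of the check above: if the chosen port (5000 by default) is already
    # in use, the loop retries port+1, port+2, ... up to ten times and keeps the first free one.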
make_url = "http://{}:{}".format(inner_ip, port)
save_path = os.path.join(BASE_DIR, "static/images/qrcode/")
make_qrcode_(make_url=make_url, save_path=save_path, qrcode_name="{}.png".format(inner_ip))
    # Automatically open the browser
if kwargs.get("open_browser", True):
open_url = "http://{}:{}".format(inner_ip, port)
open_browser(open_url)
app = create_app(debug=True)
# app.run(host=host, port=port)
socket_server.run(app, host=host, port=port)
# -----------------------------------------------------------------------------
# Main entry point
# -----------------------------------------------------------------------------
if __name__ == "__main__" or "__main__" in sys.argv:
main()
| 32.214286
| 95
| 0.620288
|
fa05e2f355f3aba63ebe75f263e757aaba2f7d0f
| 9,561
|
py
|
Python
|
trace-viewer/third_party/web_dev_style/web_dev_style/css_checker.py
|
yinquan529/platform-external-chromium-trace
|
8252ae6b83ea65cf871e7981e981da07379f5a0f
|
[
"BSD-3-Clause"
] | 1
|
2019-01-22T02:36:35.000Z
|
2019-01-22T02:36:35.000Z
|
trace-viewer/third_party/web_dev_style/web_dev_style/css_checker.py
|
yinquan529/platform-external-chromium-trace
|
8252ae6b83ea65cf871e7981e981da07379f5a0f
|
[
"BSD-3-Clause"
] | null | null | null |
trace-viewer/third_party/web_dev_style/web_dev_style/css_checker.py
|
yinquan529/platform-external-chromium-trace
|
8252ae6b83ea65cf871e7981e981da07379f5a0f
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium WebUI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
# TODO(dbeam): Real CSS parser? pycss? http://code.google.com/p/pycss/
class CSSChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def RunChecks(self):
# We use this a lot, so make a nick name variable.
re = self.input_api.re
def _collapseable_hex(s):
return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])
def _is_gray(s):
return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]
def _remove_all(s):
return _remove_grit(_remove_ats(_remove_comments(s)))
def _remove_ats(s):
return re.sub(re.compile(r'@\w+.*?{(.*{.*?})+.*?}', re.DOTALL), '\\1', s)
def _remove_comments(s):
return re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', s)
def _remove_grit(s):
grit_reg = r'<if[^>]+>.*?<\s*/\s*if[^>]*>|<include[^>]+>'
return re.sub(re.compile(grit_reg, re.DOTALL), '', s)
def _rgb_from_hex(s):
if len(s) == 3:
r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
else:
r, g, b = s[0:2], s[2:4], s[4:6]
return int(r, base=16), int(g, base=16), int(b, base=16)
def alphabetize_props(contents):
errors = []
for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
semis = map(lambda t: t.strip(), rule.group(1).split(';'))[:-1]
rules = filter(lambda r: ': ' in r, semis)
props = map(lambda r: r[0:r.find(':')], rules)
if props != sorted(props):
errors.append(' %s;\n' % (';\n '.join(rules)))
return errors
def braces_have_space_before_and_nothing_after(line):
return re.search(r'(?:^|\S){|{\s*\S+\s*$', line)
def classes_use_dashes(line):
# Intentionally dumbed down version of CSS 2.1 grammar for class without
# non-ASCII, escape chars, or whitespace.
m = re.search(r'\.(-?[_a-zA-Z0-9-]+).*[,{]\s*$', line)
return (m and (m.group(1).lower() != m.group(1) or
m.group(1).find('_') >= 0))
# Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
frame_reg = r'\s*\d+%\s*{\s*[_a-zA-Z0-9-]+:(\s*[_a-zA-Z0-9-]+)+\s*;\s*}\s*'
def close_brace_on_new_line(line):
return (line.find('}') >= 0 and re.search(r'[^ }]', line) and
not re.match(frame_reg, line))
def colons_have_space_after(line):
return re.search(r'(?<!data):(?!//)\S[^;]+;\s*', line)
def favor_single_quotes(line):
return line.find('"') >= 0
# Shared between hex_could_be_shorter and rgb_if_not_gray.
hex_reg = (r'#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})(?=[^_a-zA-Z0-9-]|$)'
r'(?!.*(?:{.*|,\s*)$)')
def hex_could_be_shorter(line):
m = re.search(hex_reg, line)
return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))
small_seconds = r'(?:^|[^_a-zA-Z0-9-])(0?\.[0-9]+)s(?!-?[_a-zA-Z0-9-])'
def milliseconds_for_small_times(line):
return re.search(small_seconds, line)
def no_data_uris_in_source_files(line):
return re.search(r'\(\s*\'?\s*data:', line)
def one_rule_per_line(line):
return re.search(r'[_a-zA-Z0-9-](?<!data):(?!//)[^;]+;\s*[^ }]\s*', line)
any_reg = re.compile(r':(?:-webkit-)?any\(.*?\)', re.DOTALL)
multi_sels = re.compile(r'(?:}[\n\s]*)?([^,]+,(?=[^{}]+?{).*[,{])\s*$',
re.MULTILINE)
def one_selector_per_line(contents):
errors = []
for b in re.finditer(multi_sels, re.sub(any_reg, '', contents)):
errors.append(' ' + b.group(1).strip().splitlines()[-1:][0])
return errors
def rgb_if_not_gray(line):
m = re.search(hex_reg, line)
return (m and not _is_gray(m.group(1)))
def suggest_ms_from_s(line):
ms = int(float(re.search(small_seconds, line).group(1)) * 1000)
return ' (replace with %dms)' % ms
def suggest_rgb_from_hex(line):
suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
for h in re.finditer(hex_reg, line)]
return ' (replace with %s)' % ', '.join(suggestions)
def suggest_short_hex(line):
h = re.search(hex_reg, line).group(1)
return ' (replace with #%s)' % (h[0] + h[2] + h[4])
hsl = r'hsl\([^\)]*(?:[, ]|(?<=\())(?:0?\.?)?0%'
zeros = (r'^.*(?:^|\D)'
r'(?:\.0|0(?:\.0?|px|em|%|in|cm|mm|pc|pt|ex|deg|g?rad|m?s|k?hz))'
r'(?:\D|$)(?=[^{}]+?}).*$')
def zero_length_values(contents):
errors = []
for z in re.finditer(re.compile(zeros, re.MULTILINE), contents):
first_line = z.group(0).strip().splitlines()[0]
if not re.search(hsl, first_line):
errors.append(' ' + first_line)
return errors
added_or_modified_files_checks = [
{ 'desc': 'Alphabetize properties and list vendor specific (i.e. '
'-webkit) above standard.',
'test': alphabetize_props,
'multiline': True,
},
{ 'desc': 'Start braces ({) end a selector, have a space before them '
'and no rules after.',
'test': braces_have_space_before_and_nothing_after,
},
{ 'desc': 'Classes use .dash-form.',
'test': classes_use_dashes,
},
{ 'desc': 'Always put a rule closing brace (}) on a new line.',
'test': close_brace_on_new_line,
},
{ 'desc': 'Colons (:) should have a space after them.',
'test': colons_have_space_after,
},
{ 'desc': 'Use single quotes (\') instead of double quotes (") in '
'strings.',
'test': favor_single_quotes,
},
{ 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
'test': hex_could_be_shorter,
'after': suggest_short_hex,
},
{ 'desc': 'Use milliseconds for time measurements under 1 second.',
'test': milliseconds_for_small_times,
'after': suggest_ms_from_s,
},
{ 'desc': 'Don\'t use data URIs in source files. Use grit instead.',
'test': no_data_uris_in_source_files,
},
{ 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
'test': one_rule_per_line,
},
{ 'desc': 'One selector per line (what not to do: a, b {}).',
'test': one_selector_per_line,
'multiline': True,
},
{ 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
'test': rgb_if_not_gray,
'after': suggest_rgb_from_hex,
},
{ 'desc': 'Make all zero length terms (i.e. 0px) 0 unless inside of '
'hsl() or part of @keyframe.',
'test': zero_length_values,
'multiline': True,
},
]
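    # Examples of lines the checks above would flag (illustrative only):
    #   "color: #ffffff;"    triggers hex_could_be_shorter and suggests "#fff"
    #   "transition: 0.2s;"  triggers milliseconds_for_small_times and suggests "200ms"
    #   "color: #ff0000;"    triggers rgb_if_not_gray and suggests "rgb(255, 0, 0)"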
results = []
try: # Workaround AffectedFiles exploding on deleted files.
affected_files = self.input_api.AffectedFiles(include_deletes=False,
file_filter=self.file_filter)
except:
affected_files = []
files = []
for f in affected_files:
# Remove all /*comments*/, @at-keywords, and grit <if|include> tags; we're
# not using a real parser. TODO(dbeam): Check alpha in <if> blocks.
file_contents = _remove_all('\n'.join(f.NewContents()))
files.append((f.LocalPath(), file_contents))
# Only look at CSS files for now.
for f in filter(lambda f: f[0].endswith('.css'), files):
file_errors = []
for check in added_or_modified_files_checks:
        # If the check is multiline, it receives the whole file and gives us
# back a list of things wrong. If the check isn't multiline, we pass it
# each line and the check returns something truthy if there's an issue.
if ('multiline' in check and check['multiline']):
check_errors = check['test'](f[1])
if len(check_errors) > 0:
# There are currently no multiline checks with ['after'].
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors).rstrip()))
else:
check_errors = []
lines = f[1].splitlines()
for lnum in range(0, len(lines)):
line = lines[lnum]
if check['test'](line):
error = ' ' + line.strip()
if 'after' in check:
error += check['after'](line)
check_errors.append(error)
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors)))
if file_errors:
results.append(self.output_api.PresubmitPromptWarning(
'%s:\n%s' % (f[0], '\n\n'.join(file_errors))))
if results:
# Add your name if you're here often mucking around in the code.
authors = ['dbeam@chromium.org']
results.append(self.output_api.PresubmitNotifyResult(
'Was the CSS checker useful? Send feedback or hate mail to %s.' %
', '.join(authors)))
return results
| 39.672199
| 81
| 0.559042
|
809624890ea90a40d1ea56404d6663aab15f6bde
| 4,574
|
py
|
Python
|
core/rawlint.py
|
armoha/python-lnp
|
aaee5582e4024f839b8155360292a427fc5639e2
|
[
"0BSD"
] | null | null | null |
core/rawlint.py
|
armoha/python-lnp
|
aaee5582e4024f839b8155360292a427fc5639e2
|
[
"0BSD"
] | null | null | null |
core/rawlint.py
|
armoha/python-lnp
|
aaee5582e4024f839b8155360292a427fc5639e2
|
[
"0BSD"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Linter for raw files. Ported from Lethosor's Lua script:
https://github.com/lethosor/dfhack-scripts/blob/master/raw-lint.lua"""
from __future__ import print_function, unicode_literals, absolute_import
import os
from .dfraw import DFRaw
from . import log
# TODO: Handle older versions correctly
# For example, 40d and earlier use object names MATGLOSS and DESCRIPTOR
valid_objnames = [
'BODY_DETAIL_PLAN',
'BODY',
'BUILDING',
'CREATURE_VARIATION',
'CREATURE',
'DESCRIPTOR_COLOR',
'DESCRIPTOR_PATTERN',
'DESCRIPTOR_SHAPE',
'ENTITY',
'INORGANIC',
'INTERACTION',
'ITEM',
'LANGUAGE',
'MATERIAL_TEMPLATE',
'PLANT',
'REACTION',
'TISSUE_TEMPLATE',
]
objname_overrides = {
'b_detail_plan': 'BODY_DETAIL_PLAN',
'c_variation': 'CREATURE_VARIATION',
}
def check_file(path):
"""Validates the raw file located at <path>. Error details are printed to
the log with level WARNING. Returns True/False."""
# pylint:disable=too-many-branches
file_ok = True
if not path.endswith('.txt'):
log.w('Unrecognized filename')
return False
contents = DFRaw.read(path)
filename = os.path.basename(path)[:-4]
try:
realname = contents.splitlines()[0]
except IndexError:
realname = ''
try:
rawname = realname.split()[0]
except IndexError:
rawname = realname
# Everything before first whitespace must match filename
if not (realname == realname.lstrip() and rawname == filename):
log.w('Name mismatch: expected %s, found %s' % (filename, rawname))
file_ok = False
objname = filename
check_objnames = []
for k, v in objname_overrides.items():
if filename.startswith(k) and v in valid_objnames:
check_objnames.append(v)
for o in valid_objnames:
if filename.upper().startswith(o):
check_objnames.append(o)
if check_objnames:
found = False
for i, objname in enumerate(check_objnames):
objname = '[OBJECT:' + objname.upper() + ']'
if objname in contents:
found = True
check_objnames[i] = objname
if not found:
log.w('None of %s found' % ', '.join(check_objnames))
file_ok = False
else:
log.w('No valid object names')
file_ok = False
return file_ok
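# An illustrative example of what check_file expects (the file name is hypothetical): a raw file
# named "creature_foo.txt" must start with the token "creature_foo" on its first line and must
# contain "[OBJECT:CREATURE]" somewhere in its body to pass validation.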
def check_folder(path):
"""Validates all raw files in <path> and its subfolders. Problems with
individual files are printed to the log with level WARNING. General problems
are printed to the log with level ERROR.
Returns:
(passed, failed)
two lists of paths of files that passed or failed, respectively"""
log.push_prefix('RawLint')
files = []
for d in os.walk(path):
files += [os.path.join(d[0], f) for f in d[2]]
passed = []
failed = []
if not files:
log.e('Could not find any files in '+path)
for f in files:
f_parts = f.split(os.sep)
if (f.endswith('.txt') and 'notes' not in f_parts and
'examples and notes' not in f_parts and 'text' not in f_parts):
log.push_prefix(f)
has_passed = check_file(f)
log.pop_prefix()
if has_passed:
passed.append(f)
else:
failed.append(f)
log.pop_prefix()
return (passed, failed)
def check_df(path):
"""Validates the raw/objects folder in the Dwarf Fortress folder located at
    <path>. Problems with individual files are printed to the log with level
WARNING. General problems are printed to the log with level ERROR.
Returns:
(passed, failed)
two lists of paths of files that passed or failed, respectively"""
return check_folder(os.path.join(path, 'raw', 'objects'))
def check_folder_bool(path):
"""Returns True if all raw files in <path> pass validation. Problems with
individual files are printed to the log with level WARNING. General
problems are printed to the log with level ERROR."""
p, f = check_folder(path)
return len(f) == 0 and len(p) != 0
def check_df_bool(path):
"""Validates the raw/objects folder in the Dwarf Fortress folder located at
<path> and returns True if all files pass validation. Problems with
individual files are printed to the log with level WARNING. General
problems are printed to the log with level ERROR."""
p, f = check_df(path)
return len(f) == 0 and len(p) != 0
| 33.144928
| 80
| 0.636861
|
c1df5fda640d09ca221ab1f93f0546c68274ac3b
| 11,223
|
py
|
Python
|
tools/Polygraphy/polygraphy/backend/trt/runner.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | 1
|
2021-08-23T01:15:16.000Z
|
2021-08-23T01:15:16.000Z
|
tools/Polygraphy/polygraphy/backend/trt/runner.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | null | null | null |
tools/Polygraphy/polygraphy/backend/trt/runner.py
|
spradius/TensorRT
|
eb5de99b523c76c2f3ae997855ad86d3a1e86a31
|
[
"Apache-2.0"
] | 1
|
2022-03-29T12:39:29.000Z
|
2022-03-29T12:39:29.000Z
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import copy
import time
from collections import OrderedDict
from polygraphy import cuda, func, mod, util
from polygraphy.backend.base import BaseRunner
from polygraphy.backend.trt import util as trt_util
from polygraphy.logger import G_LOGGER
np = mod.lazy_import("numpy")
trt = mod.lazy_import("tensorrt")
@mod.export()
class TrtRunner(BaseRunner):
"""
Runs inference using TensorRT.
Note that runners are not designed for production deployment and should generally
be used only for prototyping, testing, and debugging.
"""
def __init__(self, engine, name=None):
"""
Args:
engine (Callable() -> Union[trt.ICudaEngine, trt.IExecutionContext]):
A callable that can supply either a TensorRT engine or execution context.
If an engine is provided, the runner will create a context automatically.
This callable is invoked whenever the runner is activated.
Alternatively, the engine or context may be supplied directly instead of
through a callable, in which case the runner will *not* take ownership of it,
and therefore will not destroy it.
name (str):
The human-readable name prefix to use for this runner.
A runner count and timestamp will be appended to this prefix.
"""
super().__init__(name=name, prefix="trt-runner")
self._engine_or_context = engine
@func.constantmethod
def get_input_metadata_impl(self):
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
# This function always uses binding names of the 0th profile.
return trt_util.get_input_metadata_from_engine(self.context.engine, start_binding, end_binding)
def activate_impl(self):
def make_buffers(engine):
"""
Creates empty host and device buffers for the specified engine.
Always uses binding names from Profile 0.
"""
device_buffers = OrderedDict()
host_output_buffers = OrderedDict()
for idx in range(trt_util.get_bindings_per_profile(engine)):
binding = engine[idx]
dtype = trt_util.np_dtype_from_trt(engine.get_binding_dtype(binding))
device_buffers[binding] = cuda.DeviceArray(dtype=dtype)
if not engine.binding_is_input(binding):
host_output_buffers[binding] = np.empty(shape=tuple(), dtype=dtype)
G_LOGGER.extra_verbose("Created device buffers: {:}".format(device_buffers))
return device_buffers, host_output_buffers
engine_or_context, owning = util.invoke_if_callable(self._engine_or_context)
if isinstance(engine_or_context, trt.ICudaEngine):
self.engine = engine_or_context
self.owns_engine = owning
self.context = self.engine.create_execution_context()
self.owns_context = True
if not self.context:
G_LOGGER.critical("Invalid Context. See error log for details.")
elif isinstance(engine_or_context, trt.IExecutionContext):
self.engine = None
self.owns_engine = False
self.context = engine_or_context
self.owns_context = owning
else:
G_LOGGER.critical(
"Invalid Engine or Context. Please ensure the engine was built correctly. See error log for details."
)
if not owning:
G_LOGGER.verbose(
"Object was provided directly instead of via a Callable. This runner will not assume ownership. "
"Please ensure it is freed."
)
self.device_buffers, self.host_output_buffers = make_buffers(self.context.engine)
self.stream = cuda.Stream()
def set_profile(self, index):
"""
Sets the active optimization profile for this runner.
The runner must already be active (see ``__enter__()`` or ``activate()``).
This only applies if your engine was built with multiple
optimization profiles.
In TensorRT 8.0 and newer, the profile will be set asynchronously
using this runner's CUDA stream (``runner.stream``).
By default, the runner uses the first profile (profile 0).
Args:
index (int):
The index of the optimization profile to use.
"""
if not self.is_active:
G_LOGGER.critical("{:35} | Must be activated prior to calling set_profile()".format(self.name))
try:
self.context.set_optimization_profile_async
except AttributeError:
self.context.active_optimization_profile = index
else:
self.context.set_optimization_profile_async(index, self.stream.ptr)
def _set_shapes_from_feed_dict(self, feed_dict):
"""
Sets context shapes according to the provided feed_dict.
Note that ``infer()`` will call this function automatically, and hence
you should only use it if you plan to use this runner's context manually.
Args:
feed_dict (OrderedDict[str, numpy.ndarray]):
A mapping of input tensor names to corresponding input NumPy arrays.
Returns:
Tuple[int, int]: The start and end binding indices of the modified bindings.
"""
def is_dynamic_shape_input(binding):
try:
self.context.engine.get_profile_shape_input(0, binding)
return True
except RuntimeError:
return False
start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
for name, inp in feed_dict.items():
binding = start_binding + self.context.engine[name]
# Only set shapes if required.
# get_shape/get_binding_shape will return what a shape input/data input is currently set to.
if is_dynamic_shape_input(binding): # For input shape tensors
if isinstance(inp, cuda.DeviceView):
G_LOGGER.critical(
"A DeviceView was provided for input: {:}, but since this is a "
"shape tensor, it must reside in host memory. "
"Please use a NumPy array instead. ".format(name)
)
if tuple(self.context.get_shape(binding)) != tuple(inp):
G_LOGGER.verbose("Setting shape binding: {:} (index: {:}) to: {:}".format(name, binding, inp))
self.context.set_shape_input(binding, inp)
elif util.is_shape_dynamic(self.context.engine.get_binding_shape(binding)):
shape = inp.shape
if tuple(self.context.get_binding_shape(binding)) != tuple(shape):
G_LOGGER.verbose("Setting binding: {:} (index: {:}) to shape: {:}".format(name, binding, shape))
self.context.set_binding_shape(binding, shape)
if not self.context.all_binding_shapes_specified:
G_LOGGER.critical(
"Some input shapes were not specified.\n"
"Note: Network inputs are: {:}".format(self.get_input_metadata())
)
if not self.context.all_shape_inputs_specified:
G_LOGGER.critical(
"Some shape inputs were not specified.\n"
"Note: Network inputs are: {:}".format(self.get_input_metadata())
)
return start_binding, end_binding
def infer_impl(self, feed_dict):
start_binding, end_binding = self._set_shapes_from_feed_dict(feed_dict)
# Resize output device buffers - host buffers will be automatically resized by copy_to
for binding in range(start_binding, end_binding):
if not self.context.engine.binding_is_input(binding):
name = self.context.engine[binding - start_binding] # Use profile 0 binding names for all buffers.
shape = tuple(self.context.get_binding_shape(binding))
self.device_buffers[name].resize(shape)
start = time.time()
# Use a shallow copy in case we need to replace our allocated buffers with provided DeviceViews.
dev_bufs = copy.copy(self.device_buffers)
for name, buffer in feed_dict.items():
if isinstance(buffer, cuda.DeviceView):
dev_bufs[name] = buffer
elif isinstance(buffer, np.ndarray):
dev_bufs[name].copy_from(buffer, self.stream)
else:
G_LOGGER.critical(
"Unrecognized type in feed_dict: {:} for input: {:}.\n"
"Please provide either a NumPy array or Polygraphy DeviceView. ".format(type(buffer).__name__, name)
)
# Need to offset bindings in case the active profile is not 0.
bindings = [0] * start_binding + [buf.ptr for buf in dev_bufs.values()]
success = self.context.execute_async_v2(bindings=bindings, stream_handle=self.stream.ptr)
if not success:
G_LOGGER.critical("Model execution failed. Please see the log messages above for details")
for name, buffer in self.host_output_buffers.items():
self.host_output_buffers[name] = dev_bufs[name].copy_to(buffer, self.stream)
self.stream.synchronize()
end = time.time()
self.inference_time = end - start
return self.host_output_buffers
def deactivate_impl(self):
with contextlib.ExitStack() as stack:
if self.owns_engine:
stack.enter_context(self.engine)
if self.owns_context:
stack.enter_context(self.context)
[buf.free() for buf in self.device_buffers.values()]
self.stream.free()
del (
self.engine,
self.owns_engine,
self.context,
self.owns_context,
self.device_buffers,
self.host_output_buffers,
self.stream,
)
# Note: This can be removed once TRT 6 support is dropped.
def infer(self, feed_dict, check_inputs=None):
# Disable checks by default on TRT 6.0 due to implicit batch semantics.
if mod.version(trt.__version__) < mod.version("7.0"):
return super().infer(feed_dict, util.default(check_inputs, False))
return super().infer(feed_dict, util.default(check_inputs, True))
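# A hedged usage sketch (the engine path and tensor name are illustrative, and EngineFromBytes is
# assumed to be importable from polygraphy.backend.trt as in typical Polygraphy releases):
#   from polygraphy.backend.trt import EngineFromBytes
#   load_engine = EngineFromBytes(open("model.engine", "rb").read())
#   with TrtRunner(load_engine) as runner:
#       outputs = runner.infer(feed_dict={"input": np.zeros((1, 3, 224, 224), dtype=np.float32)})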
| 42.673004
| 120
| 0.632451
|
18855288c565f93dab59ce8b6a217d27394588dc
| 1,833
|
py
|
Python
|
neutron_plugin_contrail/tests/test_plugins_opencontrail_quota_driver.py
|
alexelshamouty/tf-neutron-plugin
|
3effc5e80f3fa0d8d0252d5f994a36386b987f7f
|
[
"Apache-2.0"
] | 3
|
2021-09-07T05:02:24.000Z
|
2022-02-11T04:25:43.000Z
|
neutron_plugin_contrail/tests/test_plugins_opencontrail_quota_driver.py
|
alexelshamouty/tf-neutron-plugin
|
3effc5e80f3fa0d8d0252d5f994a36386b987f7f
|
[
"Apache-2.0"
] | 1
|
2021-09-27T08:05:08.000Z
|
2021-09-27T08:05:08.000Z
|
neutron_plugin_contrail/tests/test_plugins_opencontrail_quota_driver.py
|
alexelshamouty/tf-neutron-plugin
|
3effc5e80f3fa0d8d0252d5f994a36386b987f7f
|
[
"Apache-2.0"
] | 5
|
2020-07-14T07:52:05.000Z
|
2022-03-24T15:08:02.000Z
|
import mock
import unittest
from neutron_plugin_contrail.plugins.opencontrail.quota.driver import QuotaDriver
class ContrailPluginQuotaDriverTest(unittest.TestCase):
def setUp(self):
print("setup quota")
def test_testenv(self):
print("testenv quota ok")
def test_get_tenant_quotas_arg(self):
"""Call neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver.[ _get_quotas, get_all_quotas ]"""
class MockContext():
tenant_id = 'f00dbeef012f411b89d68928ee8703ee'
class MockResource():
name = 'default'
default = -1
def __init__(self, name='default', default=-1):
self.name = name
self.default = default
driver = QuotaDriver()
ctx = MockContext()
foo_quotas = {'network': 5}
default_quotas = {'network': MockResource('network', 5)}
target_tenant = 'f00dbeef012f411b89d68928ee8703ee'
# TODO: restore next block - there is no such method _get_quotas
# and conf is not mocked
return
with mock.patch.object(QuotaDriver,
'get_tenant_quotas',
return_value=foo_quotas) as get_tenant_quotas:
quotas = driver._get_quotas(ctx,
default_quotas,
['network'])
self.assertEqual(quotas, foo_quotas)
quotas = driver.get_all_quotas(ctx,
default_quotas)
self.assertEqual(quotas[0], foo_quotas)
get_tenant_quotas.assert_called_once_with(ctx,
default_quotas,
target_tenant)
| 33.944444
| 120
| 0.554283
|
2db6ca8d1a677d7b043d93c3607206d047339b96
| 606
|
py
|
Python
|
var/spack/repos/builtin/packages/perl-term-readline-gnu/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/perl-term-readline-gnu/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/perl-term-readline-gnu/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTermReadlineGnu(PerlPackage):
"""Perl extension for the GNU Readline/History Library."""
homepage = "https://metacpan.org/pod/Term::ReadLine::Gnu"
url = "https://cpan.metacpan.org/authors/id/H/HA/HAYASHI/Term-ReadLine-Gnu-1.36.tar.gz"
version('1.36', sha256='9a08f7a4013c9b865541c10dbba1210779eb9128b961250b746d26702bab6925')
depends_on('readline')
| 35.647059
| 96
| 0.747525
|
ca5355b7bd7804ad997c7576d81d6a659e3cb9ca
| 3,178
|
py
|
Python
|
tasks/transform_classification_groups.py
|
reidmv/reidmv-pe_architecture
|
33317df518d8d80ef8f7d7a47096c522eb196809
|
[
"Apache-2.0"
] | null | null | null |
tasks/transform_classification_groups.py
|
reidmv/reidmv-pe_architecture
|
33317df518d8d80ef8f7d7a47096c522eb196809
|
[
"Apache-2.0"
] | null | null | null |
tasks/transform_classification_groups.py
|
reidmv/reidmv-pe_architecture
|
33317df518d8d80ef8f7d7a47096c522eb196809
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""This module takes two classification outputs from source and targer puppet infrastructure and
takes the user defintions from the source and adds them to the infrastructure defintions of the
target. Allowing the ability to restore a backup of user node definitions"""
import json
import sys
params = json.load(sys.stdin)
source_classification_file = params['source_directory']+"/classification_backup.json"
target_classification_file = params['working_directory']+"/classification_backup.json"
transformed_classification_file = params['working_directory']+"/transformed_classification.json"
def removesubgroups(data_rsg,id_rsg):
"""
This definition allows us to traverse recursively down the json groups finding all children of
the pe infrastructure and to remove them.
Inputs are the resource group and parent ID of the resource groups
Returns
-------
data_rsg : list
The resource groups which did not have the parent ID
"""
    groups = list(filter(lambda x: x["parent"] == id_rsg, data_rsg))
for group in groups:
subid = group["id"]
        data_rsg = list(filter(lambda x: x["id"] != subid, data_rsg))  # pylint: disable=cell-var-from-loop
data_rsg = removesubgroups(data_rsg,subid)
return data_rsg
# This definition allows us to traverse down the PE infrastructure tree and find all groups
def addsubgroups(data_asg,id_asg,peinf_asg):
"""
This definition allows us to traverse recursively down the json groups finding all groups in
the pe infrastructure tree and adding them to a list recursively and then returning the list.
Inputs are the list of all resource groups, infrastructure resource groups found so far and
parent ID of infrastructure groups
Returns
-------
    peinf_asg : list
        The accumulated list of PE infrastructure resource groups found so far
"""
    groups = list(filter(lambda x: x["parent"] == id_asg, data_asg))
peinf_asg = peinf_asg + groups
for group in groups:
subid = group["id"]
peinf_asg = addsubgroups(data_asg,subid,peinf_asg)
return peinf_asg
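# An illustrative (hypothetical) walk-through of the two helpers above: given groups A (id 1,
# parent 0), B (id 2, parent 1) and C (id 3, parent 2), removesubgroups(data, 1) drops B and then,
# recursively, C, while addsubgroups(data, 1, []) returns [B, C] by collecting every descendant
# of group id 1.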
# open the backup classification
with open(source_classification_file) as data_file:
data = json.load(data_file)
# open the DR server classification
with open(target_classification_file) as data_fileDR:
data_DR = json.load(data_fileDR)
# find the infrastructure group and its ID
peinf = list(filter(lambda x: x["name"] == "PE Infrastructure", data))
group_id = peinf[0]["id"]
# remove this group from the list and recursively remove all sub groups
data = list(filter(lambda x: x["id"] != group_id, data))
data = removesubgroups(data,group_id)
# find the dr infrastructure group and its ID
peinf_DR = list(filter(lambda x: x["name"] == "PE Infrastructure", data_DR))
id_DR = peinf_DR[0]["id"]
# Recursively go through inf groups to get the full tree
peinf_DR = addsubgroups(data_DR,id_DR,peinf_DR)
# Add the contents of the backup classification without pe inf to the DR pe inf groups
# and write to a file
peinf_transformed_groups = data + peinf_DR
with open(transformed_classification_file, 'w') as fp:
json.dump(peinf_transformed_groups, fp)
| 40.227848
| 103
| 0.741976
|
a64c2a1c46f3b4c8efb174e0dc082f3b368a4228
| 1,094
|
py
|
Python
|
meiduo/meiduo/apps/users/utils.py
|
q934744153/meiduo_mall
|
6f5b0ae38cc80c21ddca161c428fea09584d4b95
|
[
"MIT"
] | null | null | null |
meiduo/meiduo/apps/users/utils.py
|
q934744153/meiduo_mall
|
6f5b0ae38cc80c21ddca161c428fea09584d4b95
|
[
"MIT"
] | null | null | null |
meiduo/meiduo/apps/users/utils.py
|
q934744153/meiduo_mall
|
6f5b0ae38cc80c21ddca161c428fea09584d4b95
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.backends import ModelBackend
from django.db.models import Q
from users.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django import http
from django.conf import settings
from meiduo.utils.secret import SecretOauth
class UsernameMobileAuthBackend(ModelBackend):
def authenticate(self,request,username = None,password=None,**kwargs):
try:
user = User.objects.get(Q(username=username)|Q(mobile=username))
except User.DoesNotExist as e:
return None
if user.check_password(password):
return user
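# A hedged usage note for UsernameMobileAuthBackend above: the Q(username)|Q(mobile) lookup lets a
# user log in with either value in the same field, e.g.
# authenticate(request, username='13800000000', password='...') matches on `mobile`
# (the number is illustrative).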
class LoginRequiredJSONMixin(LoginRequiredMixin):
def handle_no_permission(self):
return http.JsonResponse({'code': 400, 'errmsg': '用户未登录'})
def generate_verify_email_url(request):
# 1.user_id email
data_dict = {
'user_id': request.user.id,
'email': request.user.email
}
    # 2. Encrypt the parameters
dumps_data = SecretOauth().dumps(data_dict)
    # 3. Concatenate the full activation URL
verify_url = settings.EMAIL_VERIFY_URL + dumps_data
return verify_url
| 28.051282
| 76
| 0.712066
|
e556786e37fc3aebbb80cf3e219ee7817e3bceac
| 1,115
|
py
|
Python
|
cfg/base/GlobalConfig.py
|
vancebs/EasyCoding3
|
e78db51096ba07ef1b274becca4817a00ca0ef31
|
[
"Apache-2.0"
] | null | null | null |
cfg/base/GlobalConfig.py
|
vancebs/EasyCoding3
|
e78db51096ba07ef1b274becca4817a00ca0ef31
|
[
"Apache-2.0"
] | null | null | null |
cfg/base/GlobalConfig.py
|
vancebs/EasyCoding3
|
e78db51096ba07ef1b274becca4817a00ca0ef31
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# coding=utf-8
##########################################
# Easy Coding 3 global config File #
##########################################
class GlobalConfig(object):
# The dir where you keep all your projects
cfgGlobalBaseDir = '/home/hufan/platforms'
# URL where you download your project
# Example:
# URL: "ssh://fan.hu@172.16.11.162:29418/quic/manifests.git"
# SET: "ssh://fan.hu@172.16.11.162:29418/"
#
# URL: "git@172.16.11.162:quic/manifests.git"
# SET: "git@172.16.11.162:"
cfgGlobalUrlRepoPull = 'git@172.16.11.162:'
# URL where you push your code.
# Example:
# URL: "ssh://fan.hu@172.16.11.162:29418/quic/manifests.git"
# SET: "ssh://fan.hu@172.16.11.162:29418/"
#
# URL: "git@172.16.11.162:quic/manifests.git"
# SET: "git@172.16.11.162:"
cfgGlobalUrlRepoPush = 'ssh://fan.hu@172.16.11.162:29418/'
# Name which is required to input by repo
cfgGlobalUserName = 'fan.hu'
# Email which is required to input by repo
cfgGlobalUserEmail = 'fan.hu@t2mobile.com'
| 30.972222
| 68
| 0.569507
|
b7485d8b2ab338e6d4d9eae5b3f5ebf85b6de6c8
| 20,096
|
py
|
Python
|
thing/views/character.py
|
skyride/evething-2
|
e0778a539b7f8a56667b2508293ca7e9f515283f
|
[
"BSD-2-Clause"
] | 21
|
2017-05-24T00:06:07.000Z
|
2019-08-06T04:31:18.000Z
|
thing/views/character.py
|
skyride/evething-2
|
e0778a539b7f8a56667b2508293ca7e9f515283f
|
[
"BSD-2-Clause"
] | 11
|
2017-05-23T23:58:57.000Z
|
2018-05-27T03:21:30.000Z
|
thing/views/character.py
|
skyride/evething-2
|
e0778a539b7f8a56667b2508293ca7e9f515283f
|
[
"BSD-2-Clause"
] | 10
|
2017-06-08T18:23:51.000Z
|
2021-09-05T06:03:59.000Z
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
import random
import re
from collections import OrderedDict
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from core.util import json_response
from thing.models import * # NOPEP8
from thing.stuff import * # NOPEP8
def character_sheet(request, character_name):
"""Display a character page"""
characters = Character.objects.select_related('config', 'details', 'corporation__alliance')
characters = characters.filter(esitoken__status=True)
characters = characters.distinct()
char = get_object_or_404(characters, name=character_name)
# Check access
public = True
if request.user.is_authenticated() and char.esitoken.user == request.user:
public = False
# If it's for public access, make sure this character is visible
if public and not char.config.is_public:
raise Http404
return character_common(request, char, public=public)
def character_anonymous(request, anon_key):
"""Display an anonymized character page"""
char = get_object_or_404(Character.objects.select_related('config', 'details'), config__anon_key=anon_key)
return character_common(request, char, anonymous=True)
def character_common(request, char, public=True, anonymous=False):
"""Common code for character views"""
tt = TimerThing('character_common')
utcnow = datetime.datetime.utcnow()
# I don't know how this happens but hey, let's fix it here
if char.config is None:
char.config = CharacterConfig.objects.create(
character=char,
)
# Do various visibility things here instead of in awful template code
show = {
'implants': not anonymous and (not public or char.config.show_implants),
'queue': anonymous or not public or char.config.show_skill_queue,
'standings': not anonymous and (not public or char.config.show_standings),
'wallet': not anonymous and (not public or char.config.show_wallet),
'jump_clones': not anonymous and (not public or char.config.show_jumpclones)
}
# Retrieve skill queue
queue = []
training_id = None
# training_level = None
queue_duration = None
if show['queue']:
queue = list(SkillQueue.objects.select_related('skill__item', 'character__corporation', 'character__details').filter(character=char, end_time__gte=utcnow).order_by('end_time'))
if queue:
training_id = queue[0].skill.item.id
# training_level = queue[0].to_level
queue_duration = total_seconds(queue[-1].end_time - utcnow)
queue[0].z_complete = queue[0].get_complete_percentage()
tt.add_time('skill queue')
# Try retrieving skill data from cache
cache_key = 'character:skills:%s' % (char.id)
skill_data = cache.get(cache_key)
# Not cached, fetch from database and cache
if skill_data is None:
# Retrieve the list of skills and group them by market group
skills = OrderedDict()
cur = None
# Fake MarketGroup for unpublished skills
total_sp = 0
unpub_mg = MarketGroup(id=0, name="Unpublished")
unpub_mg.z_total_sp = 0
skills[unpub_mg] = []
css = CharacterSkill.objects.filter(character=char)
css = css.select_related('skill__item__market_group')
css = css.order_by('skill__item__market_group__name', 'skill__item__name')
for cs in css:
mg = cs.skill.item.market_group or unpub_mg
if mg != cur:
cur = mg
cur.z_total_sp = 0
skills[cur] = []
cs.z_icons = []
# level 5 skill = 5 special icons
if cs.level == 5:
cs.z_icons.extend(['star level5'] * 5)
cs.z_class = "level5"
# 0-4 = n icons
else:
cs.z_icons.extend(['star'] * cs.level)
# training skill can have a training icon
if show['queue'] and cs.skill.item.id == training_id:
cs.z_icons.append('star training-highlight')
cs.z_training = True
cs.z_class = "training-highlight"
# add partially trained SP to the total
total_sp += int(queue[0].get_completed_sp(cs, utcnow))
# partially trained skills get a partial icon
elif cs.points > cs.skill.get_sp_at_level(cs.level):
cs.z_icons.append('star-o training-highlight')
# then fill out the rest with empty icons
cs.z_icons.extend(['star-o'] * (5 - len(cs.z_icons)))
skills[cur].append(cs)
cur.z_total_sp += cs.points
if cur is not unpub_mg:
total_sp += cs.points
# Move the fake MarketGroup to the end if it has any skills
k, v = skills.popitem(False)
if v:
skills[k] = v
skill_data = (total_sp, skills)
cache.set(cache_key, skill_data, 300)
# Data was cached
else:
total_sp, skills = skill_data
tt.add_time('skill group')
# Retrieve skillplans
# user_ids = APIKey.objects.filter(characters__name=char.name).values_list('user_id', flat=True)
if anonymous is False and request.user.is_authenticated():
plans = SkillPlan.objects.filter(
Q(user=request.user)
| Q(visibility=SkillPlan.GLOBAL_VISIBILITY)
)
# |
# (
# Q(user__in=user_ids)
# &
# Q(visibility=SkillPlan.PUBLIC_VISIBILITY)
# )
else:
plans = SkillPlan.objects.filter(visibility=SkillPlan.GLOBAL_VISIBILITY)
plans = plans.select_related('user')
# Sort out the plans and apply icon states
user_plans = []
public_plans = []
for sp in plans:
if sp.visibility == SkillPlan.PRIVATE_VISIBILITY:
sp.z_icon = 'lock fa-fw'
elif sp.visibility == SkillPlan.PUBLIC_VISIBILITY:
sp.z_icon = 'eye fa-fw'
elif sp.visibility == SkillPlan.GLOBAL_VISIBILITY:
sp.z_icon = 'globe fa-fw'
if sp.user_id == request.user.id:
user_plans.append(sp)
else:
public_plans.append(sp)
tt.add_time('skill plans')
if show['standings']:
# Try retrieving standings data from cache
cache_key = 'character:standings:%s' % (char.id)
standings_data = cache.get(cache_key)
# Not cached, fetch from database and cache
if standings_data is None:
faction_standings = list(char.factionstanding_set.select_related().all())
corp_standings = list(char.corporationstanding_set.select_related().all())
standings_data = (faction_standings, corp_standings)
cache.set(cache_key, standings_data, 300)
# Data was cached
else:
faction_standings, corp_standings = standings_data
else:
faction_standings = []
corp_standings = []
# Icons
if request.user.id is not None:
show_item_icons = request.user.profile.show_item_icons
else:
show_item_icons = True
# Render template
out = render_page(
'thing/character.html',
{
'char': char,
'public': public,
'anonymous': anonymous,
'show': show,
'total_sp': total_sp,
'skills': skills,
'queue': queue,
'queue_duration': queue_duration,
'user_plans': user_plans,
'public_plans': public_plans,
'faction_standings': faction_standings,
'corp_standings': corp_standings,
'user': request.user,
'show_item_icons': show_item_icons
},
request,
)
tt.add_time('template')
if settings.DEBUG:
tt.finished()
return out
ANON_KEY_RE = re.compile(r'^[a-z0-9]{16}$')
ANON_KEY_CHOICES = 'abcdefghijklmnopqrstuvwxyz0123456789'
@login_required
def character_settings(request, character_name):
chars = Character.objects.filter(name=character_name, esitoken__user=request.user).distinct()
if chars.count() == 0:
raise Http404
char = chars[0]
char.config.is_public = ('public' in request.POST)
char.config.show_implants = ('implants' in request.POST)
char.config.show_skill_queue = ('queue' in request.POST)
char.config.show_standings = ('standings' in request.POST)
char.config.show_wallet = ('wallet' in request.POST)
char.config.show_jumpclones = ('jumpclones' in request.POST)
# User wants to enable anonymous key
if 'anon-key-toggle' in request.POST:
anon_key = request.POST.get('anon-key', '').lower()
# Provided key is OK, use that
if ANON_KEY_RE.match(anon_key):
char.config.anon_key = anon_key
# Generate a new key
else:
char.config.anon_key = ''.join([random.choice(ANON_KEY_CHOICES) for i in range(16)])
else:
char.config.anon_key = ''
char.config.save()
return json_response(dict(anon_key=char.config.anon_key))
@login_required
def character_mastery(request, character_name):
return _character_mastery(request, 'thing.views.character_skillplan', character_name)
@login_required
def character_anonymous_mastery(request, anon_key):
return _character_mastery(request, 'thing.views.character_anonymous_skillplan', anon_key)
def _character_mastery(request, view, arg):
name = request.POST.get('mastery-input', None)
if name is None:
return json_response({'error': 'No search value specified.'})
if len(name) < 2:
return json_response({'error': 'Please specify at least two letters.'})
results = SkillPlan.objects.filter(visibility=SkillPlan.MASTERY_VISIBILITY,
name__icontains=name)
plans = []
for plan in results:
plans.append({
'url': reverse(view, args=[arg, plan.id]),
'name': plan.name
})
return json_response({'plans': plans})
def character_skillplan(request, character_name, skillplan_id):
"""Display a SkillPlan for a character"""
public = True
# If the user is logged in, check if the character belongs to them
if request.user.is_authenticated():
try:
character = Character.objects.select_related('config', 'details').distinct().get(name=character_name, esitoken__user=request.user)
except Character.DoesNotExist:
pass
else:
public = False
qs = Q(visibility=SkillPlan.GLOBAL_VISIBILITY) | Q(visibility=SkillPlan.MASTERY_VISIBILITY) | Q(user=request.user)
skillplan = get_object_or_404(SkillPlan.objects.prefetch_related('entries'), qs, pk=skillplan_id)
# Not logged in or character does not belong to user
if public is True:
character = get_object_or_404(Character.objects.select_related('config', 'details'), name=character_name, config__is_public=True)
qs = Q(visibility=SkillPlan.GLOBAL_VISIBILITY) | Q(visibility=SkillPlan.MASTERY_VISIBILITY)
if request.user.is_authenticated():
qs |= Q(user=request.user)
skillplan = get_object_or_404(SkillPlan.objects.prefetch_related('entries'), qs, pk=skillplan_id)
return character_skillplan_common(request, character, skillplan, public=public)
def character_anonymous_skillplan(request, anon_key, skillplan_id):
"""Display a SkillPlan for an anonymous character"""
character = get_object_or_404(Character.objects.select_related('config', 'details'), config__anon_key=anon_key)
qs = Q(visibility=SkillPlan.GLOBAL_VISIBILITY) | Q(visibility=SkillPlan.MASTERY_VISIBILITY)
skillplan = get_object_or_404(SkillPlan.objects.prefetch_related('entries'), qs, pk=skillplan_id)
return character_skillplan_common(request, character, skillplan, anonymous=True)
def character_skillplan_common(request, character, skillplan, public=True, anonymous=False):
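# Walks the plan entries in order: remap entries update the attribute set used for
# SP/hour, skill entries are flagged as injected / trained / partially trained /
# currently training, and the remaining training time is summed for the template.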
tt = TimerThing('skillplan_common')
utcnow = datetime.datetime.utcnow()
implants_visible = not public
# Check our GET variables
implants = request.GET.get('implants', '')
if implants.isdigit() and 0 <= int(implants) <= 5:
implants = int(implants)
elif implants_visible is True:
implants = 0
else:
implants = 3
show_trained = ('show_trained' in request.GET)
tt.add_time('init')
# Try retrieving learned data from cache
cache_key = 'character_skillplan:learned:%s' % (character.id)
learned = cache.get(cache_key)
# Not cached, fetch from database and cache
if learned is None:
learned = {}
for cs in CharacterSkill.objects.filter(character=character).select_related('skill__item'):
learned[cs.skill.item.id] = cs
cache.set(cache_key, learned, 300)
tt.add_time('char skills')
# Possibly get training information
training_skill = None
if anonymous is True or public is False or character.config.show_skill_queue is True:
sqs = list(SkillQueue.objects.select_related('skill__item').filter(character=character, end_time__gte=utcnow))
if sqs:
training_skill = sqs[0]
tt.add_time('training')
# Initialise stat stuff
if character.details:
remap_stats = dict(
int_attribute=character.details.int_attribute,
mem_attribute=character.details.mem_attribute,
per_attribute=character.details.per_attribute,
wil_attribute=character.details.wil_attribute,
cha_attribute=character.details.cha_attribute,
)
else:
remap_stats = dict(
int_attribute=0,
mem_attribute=0,
per_attribute=0,
wil_attribute=0,
cha_attribute=0,
)
implant_stats = {}
for stat in ('int', 'mem', 'per', 'wil', 'cha'):
k = '%s_bonus' % (stat)
if implants == 0 and implants_visible is True:
implant_stats[k] = getattr(character.details, k, 0)
else:
implant_stats[k] = implants
# Iterate over all entries in this skill plan
entries = []
total_remaining = 0.0
for entry in skillplan.entries.select_related('sp_remap', 'sp_skill__skill__item__item_group'):
# It's a remap entry
if entry.sp_remap is not None:
# Delete the previous remap if there are two in a row; that makes no sense
if entries and entries[-1].sp_remap is not None:
entries.pop()
remap_stats['int_attribute'] = entry.sp_remap.int_stat
remap_stats['mem_attribute'] = entry.sp_remap.mem_stat
remap_stats['per_attribute'] = entry.sp_remap.per_stat
remap_stats['wil_attribute'] = entry.sp_remap.wil_stat
remap_stats['cha_attribute'] = entry.sp_remap.cha_stat
# It's a skill entry
if entry.sp_skill is not None:
skill = entry.sp_skill.skill
# If this skill is already learned
cs = learned.get(skill.item.id, None)
if cs is not None:
# Mark it as injected if level 0
if cs.level == 0:
entry.z_injected = True
# It might already be trained
elif cs.level >= entry.sp_skill.level:
# If we don't care about trained skills, skip this skill entirely
if not show_trained:
continue
entry.z_trained = True
# check if current skill SP > level SP AND planned skill lvl - 1 = learned skill level
elif cs.points > cs.skill.get_sp_at_level(cs.level) and entry.sp_skill.level - 1 == cs.level:
required_sp = cs.skill.get_sp_at_level(cs.level + 1) - cs.skill.get_sp_at_level(cs.level)
sp_done = cs.points - cs.skill.get_sp_at_level(cs.level)
entry.z_sp_done = sp_done
entry.z_percent_trained = round(sp_done / float(required_sp) * 100, 1)
entry.z_partial_trained = True
# Not learned, need to buy it
else:
entry.z_buy = True
# Calculate SP/hr
if remap_stats:
entry.z_sppm = skill.get_sppm_stats(remap_stats, implant_stats)
else:
if public is True or anonymous is True:
entry.z_sppm = skill.get_sp_per_minute(character, implants=implant_stats)
else:
entry.z_sppm = skill.get_sp_per_minute(character)
# 0 sppm is bad
entry.z_sppm = max(1, entry.z_sppm)
entry.z_spph = int(entry.z_sppm * 60)
# Calculate time remaining
if training_skill is not None and training_skill.skill_id == entry.sp_skill.skill_id and training_skill.to_level == entry.sp_skill.level:
entry.z_remaining = total_seconds(training_skill.end_time - utcnow)
entry.z_training = True
entry.z_percent_trained = training_skill.get_complete_percentage()
elif hasattr(entry, 'z_partial_trained'):
remaining_sp = skill.get_sp_at_level(entry.sp_skill.level) - skill.get_sp_at_level(entry.sp_skill.level - 1)
entry.z_remaining = (remaining_sp - entry.z_sp_done) / entry.z_sppm * 60
entry.z_total_time = remaining_sp / entry.z_sppm * 60
else:
entry.z_remaining = (skill.get_sp_at_level(entry.sp_skill.level) - skill.get_sp_at_level(entry.sp_skill.level - 1)) / entry.z_sppm * 60
# Add time remaining to total
if not hasattr(entry, 'z_trained'):
total_remaining += entry.z_remaining
entries.append(entry)
tt.add_time('skillplan loop')
out = render_page(
'thing/character_skillplan.html',
{
'show_trained': show_trained,
'implants': implants,
'implants_visible': implants_visible,
'anonymous': anonymous,
'char': character,
'skillplan': skillplan,
'entries': entries,
'total_remaining': total_remaining,
'user': request.user
},
request,
)
tt.add_time('template')
if settings.DEBUG:
tt.finished()
return out
| 37.703565
| 184
| 0.63749
|
4337d329f3378b8ca9e7e8a6a1ce2063d48c24ce
| 687
|
py
|
Python
|
mytests/dodo_5.py
|
OliverTED/doit
|
a6f75f312390aba352c3f00680cd32609323dbc2
|
[
"MIT"
] | null | null | null |
mytests/dodo_5.py
|
OliverTED/doit
|
a6f75f312390aba352c3f00680cd32609323dbc2
|
[
"MIT"
] | null | null | null |
mytests/dodo_5.py
|
OliverTED/doit
|
a6f75f312390aba352c3f00680cd32609323dbc2
|
[
"MIT"
] | null | null | null |
def task_file2():
yield {'basename' : 'file2',
'file_dep' : ["missing.txt"],
'targets' : ["tmp2"],
'actions' : ["date > tmp2"],
}
def task_file3():
yield {'basename' : 'file3',
'file_dep' : ["tmp2"],
'targets' : ["tmp3"],
'actions' : ["date > tmp3"],
}
def task_file4():
yield {'basename' : 'file4',
'file_dep' : ["tmp3"],
'targets' : ["tmp4"],
'actions' : ["date > tmp4"],
}
def task_file5():
yield {'basename' : 'file5',
'file_dep' : ["tmp4"],
'targets' : ["tmp5"],
'actions' : ["date > tmp5"],
}
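# Together these tasks form a linear dependency chain: file2 reads an input that
# does not exist (missing.txt) and each later task consumes the previous task's
# target (tmp2 -> tmp3 -> tmp4 -> tmp5).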
| 27.48
| 41
| 0.41048
|
a9759307772efec4fa0bec5aa3fac4bde191c093
| 1,635
|
py
|
Python
|
src/BTrees/tests/testPersistency.py
|
pfw/BTrees
|
e9d3bfc301c24ebe2ef8c6818926de41da342f47
|
[
"ZPL-2.1"
] | 66
|
2015-02-22T23:33:22.000Z
|
2021-12-13T07:14:58.000Z
|
src/BTrees/tests/testPersistency.py
|
pfw/BTrees
|
e9d3bfc301c24ebe2ef8c6818926de41da342f47
|
[
"ZPL-2.1"
] | 160
|
2015-01-05T21:47:16.000Z
|
2022-03-09T07:12:30.000Z
|
src/BTrees/tests/testPersistency.py
|
pfw/BTrees
|
e9d3bfc301c24ebe2ef8c6818926de41da342f47
|
[
"ZPL-2.1"
] | 21
|
2015-04-03T04:28:27.000Z
|
2021-12-02T06:21:06.000Z
|
##############################################################################
#
# Copyright (c) 2020 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from unittest import TestCase
from ..OOBTree import OOBTree
from .common import _skip_wo_ZODB, ZODBAccess
BUCKET_SIZE = OOBTree.max_leaf_size
class TestPersistency(ZODBAccess, TestCase):
@_skip_wo_ZODB
def test_empty_bucket_persistency(self):
from transaction import commit
root = self._getRoot()
try:
# tree with 3 buckets (internal implementation details)
tree = OOBTree(
dict((i, i) for i in range(3 * BUCKET_SIZE // 2 + 2)))
root["tree"] = tree
commit()
# almost clear the second bucket keeping the last element
for i in range(BUCKET_SIZE // 2, BUCKET_SIZE - 1):
del tree[i]
commit()
del tree[BUCKET_SIZE - 1] # remove the last element
commit()
tree._check()
tree._p_deactivate()
tree._check() # fails in case of bad persistency
finally:
self._closeRoot(root)
| 35.543478
| 78
| 0.584098
|
890bdd03f6706e33d09ea256ff5e59e7b1c27adb
| 1,431
|
py
|
Python
|
lib/bes/python/python_installer_base.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
lib/bes/python/python_installer_base.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
lib/bes/python/python_installer_base.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
from abc import abstractmethod, ABCMeta
from collections import namedtuple
from bes.common.check import check
from bes.system.compat import with_metaclass
from bes.script.blurber import blurber
class python_installer_base(with_metaclass(ABCMeta, object)):
def __init__(self, blurber):
check.check_blurber(blurber)
self.blurber = blurber
@abstractmethod
def available_versions(self, include_all):
'Return a list of python versions available to install.'
raise NotImplementedError('available_versions')
@abstractmethod
def installed_versions(self):
'Return a list of installed python versions.'
raise NotImplementedError('installed_versions')
@abstractmethod
def install(self, full_version):
'Install the major.minor.revision full version of python.'
raise NotImplementedError('install_full_version')
@abstractmethod
def uninstall(self, full_version):
'Uninstall the major.minor.revision full version of python.'
raise NotImplementedError('uninstall_full_version')
def blurb(self, message, output = None, fit = False):
'Print a blurb'
self.blurber.blurb(message, output = output, fit = fit)
def blurb_verbose(self, message, output = None, fit = False):
'Print a blurb but only in verbose mode'
self.blurber.blurb_verbose(message, output = output, fit = fit)
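# A minimal sketch of a concrete installer, for illustration only; the class name,
# version strings and messages below are hypothetical and not part of the bes package.
class null_python_installer(python_installer_base):
  'Illustrative no-op installer that only shows the expected interface.'
  def available_versions(self, include_all):
    return [ '3.8.10', '3.9.5' ]
  def installed_versions(self):
    return []
  def install(self, full_version):
    self.blurb('pretending to install python {}'.format(full_version))
  def uninstall(self, full_version):
    self.blurb('pretending to uninstall python {}'.format(full_version))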
| 31.8
| 90
| 0.740042
|
6b7ea195c68d39e50481dfb64e6364e3693b13a3
| 2,238
|
py
|
Python
|
addons/l10n_de_purchase/models/purchase.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/l10n_de_purchase/models/purchase.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/l10n_de_purchase/models/purchase.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
from odoo import models, fields, api, _
from odoo.tools import format_date
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
l10n_de_template_data = fields.Binary(compute='_compute_l10n_de_template_data')
l10n_de_document_title = fields.Char(compute='_compute_l10n_de_document_title')
l10n_de_addresses = fields.Binary(compute='_compute_l10n_de_addresses')
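# Computed helpers intended for the German (l10n_de) report layout:
# l10n_de_template_data collects (label, value) pairs and l10n_de_addresses
# collects (label, partner) pairs for rendering.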
def _compute_l10n_de_template_data(self):
for record in self:
record.l10n_de_template_data = data = []
if record.state == 'draft':
data.append((_("Request for Quotation No."), record.name))
elif record.state in ['sent', 'to approve', 'purchase', 'done']:
data.append((_("Purchase Order No."), record.name))
elif record.state == 'cancel':
data.append((_("Cancelled Purchase Order No."), record.name))
if record.user_id:
data.append((_("Purchase Representative"), record.user_id.name))
if record.partner_ref:
data.append((_("Order Reference"), record.partner_ref))
if record.date_order:
data.append((_("Order Date"), format_date(self.env, record.date_order)))
if record.incoterm_id:
data.append((_("Incoterm"), record.incoterm_id.code))
def _compute_l10n_de_document_title(self):
for record in self:
if record.state == 'draft':
record.l10n_de_document_title = _("Request for Quotation")
elif record.state in ['sent', 'to approve', 'purchase', 'done']:
record.l10n_de_document_title = _("Purchase Order")
elif record.state == 'cancel':
record.l10n_de_document_title = _("Cancelled Purchase Order")
def _compute_l10n_de_addresses(self):
for record in self:
record.l10n_de_addresses = data = []
if record.dest_address_id:
data.append((_("Shipping Address:"), record.dest_address_id))
elif 'picking_type_id' in record._fields and record.picking_type_id.warehouse_id:
data.append((_("Shipping Address:"), record.picking_type_id.warehouse_id.partner_id))
| 45.673469
| 101
| 0.632261
|
90f7ac64caffb645020448c467e272bca78102fd
| 641
|
py
|
Python
|
python/path-sum-iii.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | 3
|
2020-10-10T00:14:23.000Z
|
2022-03-02T21:16:29.000Z
|
python/path-sum-iii.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | null | null | null |
python/path-sum-iii.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | 1
|
2021-09-14T05:16:54.000Z
|
2021-09-14T05:16:54.000Z
|
import collections
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def pathSum(self, root: TreeNode, s: int) -> int:
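# Prefix-sum DFS: dic counts how often each running root-to-node sum has occurred
# on the current path; a downward path ending at the current node sums to s exactly
# when (curr_sum - s) was seen before, so dic[curr_sum - s] such paths end here.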
def dfs(node, prev_sum, dic):
if not node: return 0
curr_sum = prev_sum + node.val
res = dic[curr_sum - s]
dic[curr_sum] += 1
res += dfs(node.left, curr_sum, dic)
res += dfs(node.right, curr_sum, dic)
dic[curr_sum] -= 1
return res
return dfs(root, 0, collections.defaultdict(int, {0: 1}))
| 27.869565
| 65
| 0.50702
|
e2557f2f69bd39ef5f7b7d067d3e7b3d095ffabe
| 323
|
py
|
Python
|
litebot/__main__.py
|
rybot666/LiteBot
|
9598c021a59ee7983a1515c54a6cbc43bfcb5eb9
|
[
"MIT"
] | 22
|
2020-10-18T22:36:51.000Z
|
2022-03-27T07:49:25.000Z
|
litebot/__main__.py
|
rybot666/LiteBot
|
9598c021a59ee7983a1515c54a6cbc43bfcb5eb9
|
[
"MIT"
] | 8
|
2021-07-14T06:46:47.000Z
|
2021-08-17T06:09:52.000Z
|
litebot/__main__.py
|
rybot666/LiteBot
|
9598c021a59ee7983a1515c54a6cbc43bfcb5eb9
|
[
"MIT"
] | 7
|
2021-05-04T16:56:19.000Z
|
2021-10-12T05:44:31.000Z
|
from .litebot import LiteBot
from .core.components import DiscordComponents
def main():
bot_instance = LiteBot()
DiscordComponents(bot_instance)
bot_instance.start_server()
bot_instance.plugin_manager.load_plugins()
bot_instance.run(bot_instance.config["token"])
if __name__ == "__main__":
main()
| 24.846154
| 50
| 0.749226
|
a8696bb25458bea07ef6e957409d65c32bf81cac
| 4,424
|
py
|
Python
|
setup.py
|
initit/lexicon
|
91107879c6a6f966557c3ce1ee354aa22d8a654c
|
[
"MIT"
] | null | null | null |
setup.py
|
initit/lexicon
|
91107879c6a6f966557c3ce1ee354aa22d8a654c
|
[
"MIT"
] | null | null | null |
setup.py
|
initit/lexicon
|
91107879c6a6f966557c3ce1ee354aa22d8a654c
|
[
"MIT"
] | null | null | null |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path, listdir
version = 'unknown'
with open(path.join(path.dirname(path.abspath(__file__)), 'VERSION'), encoding='utf-8') as version_file:
version = version_file.read().strip()
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get a list of all the providers
current_filepath = path.join(here, 'lexicon', 'providers')
providers = [path.splitext(f)[0] for f in listdir(current_filepath) if path.isfile(path.join(current_filepath, f))]
providers = list(sorted(set(providers)))
providers.remove('base')
providers.remove('__init__')
# Define optional dependencies for specific providers.
# Each key of the dict should match a provider name.
extras_require = {
'namecheap': ['PyNamecheap'],
'route53': ['boto3'],
'softlayer': ['SoftLayer'],
'subreg': ['zeep'],
'transip': ['transip>=0.3.0'],
'plesk': ['xmltodict'],
'henet': ['beautifulsoup4'],
'hetzner': ['dnspython>=1.15.0', 'beautifulsoup4'],
'easyname': ['beautifulsoup4'],
'localzone': ['localzone'],
'gratisdns': ['beautifulsoup4'],
# Define dev/test dependencies
'dev': [
'pytest==4.6.5',
'pytest-cov==2.8.1',
'pytest-xdist==1.30.0',
'python-coveralls==2.9.3',
'vcrpy==2.1.0',
'mock==3.0.5',
]
}
# Add a 'full' extra, gathering all external dependencies for providers
extras_require['full'] = [dep for name, deps in extras_require.items() if name != 'dev' for dep in deps]
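# Example: `pip install dns-lexicon[route53]` pulls in boto3, while
# `pip install "dns-lexicon[full]"` installs every provider dependency above.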
setup(
name='dns-lexicon',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='Manipulate DNS records on various DNS providers in a standardized/agnostic way',
long_description=long_description,
long_description_content_type="text/markdown",
# The project's main homepage.
url='https://github.com/AnalogJ/lexicon',
# Author details
author='Jason Kulatunga',
author_email='jason@thesparktree.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Internet :: Name Service (DNS)',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='dns lexicon dns-lexicon dehydrated letsencrypt ' + ' '.join(providers),
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
'requests[security]',
'tldextract',
'future',
'cryptography',
'pyyaml',
],
extras_require=extras_require,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'lexicon=lexicon.cli:main',
],
},
test_suite='tests'
)
| 33.770992
| 115
| 0.663879
|
7066fb9c40052bf54812111bbc55652c2d194baf
| 3,604
|
py
|
Python
|
classification/models/CausalNormClassifier.py
|
kratos236/ICU-loss
|
ca05e54c33c03da6ac7df2c0913ba08d05713531
|
[
"Apache-2.0"
] | null | null | null |
classification/models/CausalNormClassifier.py
|
kratos236/ICU-loss
|
ca05e54c33c03da6ac7df2c0913ba08d05713531
|
[
"Apache-2.0"
] | null | null | null |
classification/models/CausalNormClassifier.py
|
kratos236/ICU-loss
|
ca05e54c33c03da6ac7df2c0913ba08d05713531
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from utils import *
from os import path
import math
class Causal_Norm_Classifier(nn.Module):
def __init__(self, num_classes=1000, feat_dim=2048, use_effect=True, num_head=2, tau=16.0, alpha=3.0, gamma=0.03125, *args):
super(Causal_Norm_Classifier, self).__init__()
self.weight = nn.Parameter(torch.Tensor(num_classes, feat_dim).cuda(), requires_grad=True)
self.scale = tau / num_head # 16.0 / num_head
self.norm_scale = gamma # 1.0 / 32.0
self.alpha = alpha # 3.0
self.num_head = num_head
self.head_dim = feat_dim // num_head
self.use_effect = use_effect
self.reset_parameters(self.weight)
self.relu = nn.ReLU(inplace=True)
def reset_parameters(self, weight):
stdv = 1. / math.sqrt(weight.size(1))
weight.data.uniform_(-stdv, stdv)
def forward(self, x, label, embed):
# calculate capsule normalized feature vector and predict
normed_w = self.multi_head_call(self.causal_norm, self.weight, weight=self.norm_scale)
normed_x = self.multi_head_call(self.l2_norm, x)
y = torch.mm(normed_x * self.scale, normed_w.t())
# remove the effect of confounder c during test
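# For each head, subtract the component of the normalised feature that lies along
# the supplied embedding c (the confounder), scaled by alpha * cos(x, c), before
# taking the dot product with the class weights; the per-head logits are then summed.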
if (not self.training) and self.use_effect:
self.embed = torch.from_numpy(embed).view(1, -1).to(x.device)
normed_c = self.multi_head_call(self.l2_norm, self.embed)
head_dim = x.shape[1] // self.num_head
x_list = torch.split(normed_x, head_dim, dim=1)
c_list = torch.split(normed_c, head_dim, dim=1)
w_list = torch.split(normed_w, head_dim, dim=1)
output = []
for nx, nc, nw in zip(x_list, c_list, w_list):
cos_val, sin_val = self.get_cos_sin(nx, nc)
y0 = torch.mm((nx - cos_val * self.alpha * nc) * self.scale, nw.t())
output.append(y0)
y = sum(output)
return y, None
def get_cos_sin(self, x, y):
cos_val = (x * y).sum(-1, keepdim=True) / torch.norm(x, 2, 1, keepdim=True) / torch.norm(y, 2, 1, keepdim=True)
sin_val = (1 - cos_val * cos_val).sqrt()
return cos_val, sin_val
def multi_head_call(self, func, x, weight=None):
assert len(x.shape) == 2
x_list = torch.split(x, self.head_dim, dim=1)
if weight:
y_list = [func(item, weight) for item in x_list]
else:
y_list = [func(item) for item in x_list]
assert len(x_list) == self.num_head
assert len(y_list) == self.num_head
return torch.cat(y_list, dim=1)
def l2_norm(self, x):
normed_x = x / torch.norm(x, 2, 1, keepdim=True)
return normed_x
def capsule_norm(self, x):
norm= torch.norm(x.clone(), 2, 1, keepdim=True)
normed_x = (norm / (1 + norm)) * (x / norm)
return normed_x
def causal_norm(self, x, weight):
norm= torch.norm(x, 2, 1, keepdim=True)
normed_x = x / (norm + weight)
return normed_x
def create_model(feat_dim, num_classes=1000, stage1_weights=False, dataset=None, log_dir=None, test=False, use_effect=True, num_head=None, tau=None, alpha=None, gamma=None, *args):
print('Loading Causal Norm Classifier with use_effect: {}, num_head: {}, tau: {}, alpha: {}, gamma: {}.'.format(str(use_effect), num_head, tau, alpha, gamma))
clf = Causal_Norm_Classifier(num_classes, feat_dim, use_effect=use_effect, num_head=num_head, tau=tau, alpha=alpha, gamma=gamma)
return clf
| 43.421687
| 180
| 0.617092
|
8156ff9f2623077a7d1af7b1ee96bf6621076123
| 6,157
|
py
|
Python
|
dataset cleaning.py
|
MbProg/BughouseAlphaZero
|
25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5
|
[
"MIT"
] | null | null | null |
dataset cleaning.py
|
MbProg/BughouseAlphaZero
|
25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5
|
[
"MIT"
] | null | null | null |
dataset cleaning.py
|
MbProg/BughouseAlphaZero
|
25d2f25417713a85b24eac3ce9a3a7f5c55ff5e5
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import csv
import re
import os
import pickle
from collections import Counter
import numpy as np
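# Filters a bughouse .bpgn dump into a CSV with one row per kept game:
# (time control, move list, result, result description, losing player or "drawn").
# Games are kept only if they ended by checkmate, resignation or draw, the average
# Elo of the four players is at least 1800, and the move string is long enough.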
def filter_bpgn_file(bpgn_file, output_file):
# check if the output file already exists. If so, delete it.
if os.path.exists(output_file):
os.remove(output_file)
else:
print("Can not delete the file as it doesn't exists")
f = open(bpgn_file, "r")
linereader = f.readlines()
cache = {}
line_counter = 0
list_of_game_outcomes = []
firstround = True
elo_counter = 0
elo_distribution = []
elo_list = []
low_elo_counter = 0
removed_matches = 0
matches_not_checkmated_nor_resigned_nor_drawn = 0
matches_with_too_few_moves = 0
saved_games = 0
for x in linereader:
line_counter += 1
if firstround or not("Event" in x):
firstround = False
value_of_curly_bracket = re.search(r'(?<=^{)(\S+)(?:\s)(\S+)', x) #gets the result description in the curly bracket like 'resigned' or 'checkmated'
if('1A.' in x):
if(x.startswith("{")):
moves = re.search(r"\}(.*)", x).group(1)
cache['moves'] = moves
else:
cache['moves'] = x
elif (value_of_curly_bracket):
cache["result_description"] = value_of_curly_bracket.group(2)
if not cache["result_description"] in list_of_game_outcomes:
list_of_game_outcomes.append(cache["result_description"])
cache["looser"] = value_of_curly_bracket.group(1)
else:
value = re.search(r'\"(.*)\"', x) #starts with " and ends with "
key = re.search(r'(?<=^.)(\w+)', x)
if(value and key):
if 'Elo' in x:
y = x.split('Elo',1)[1]
elovalue = re.search(r'\"(.*)\"',y)
elo_counter += 1
if elovalue and elovalue.group(1) != "":
elo_list.append(int(elovalue.group(1)))
if elo_counter == 4:
if elo_list:
elo_tuple = (elo_min, elo_max, elo_avg) = (min(elo_list), max(elo_list), sum(elo_list)/len(elo_list))
elo_distribution.append(elo_tuple[2])
elo_list = []
elo_counter = 0
if(key.group() in ["WhiteA", "WhiteB", "BlackA", "BlackB"]):
playername = re.search(r'^(.*?)\".*', value.group(1))
cache[playername.group(1)] = key.group()
else:
cache[key.group()] = value.group(1)
else:
if (cache["result_description"] == "checkmated}" or cache["result_description"] == "resigns}" or cache["result_description"] == "drawn") and ('moves' in cache) and ('Result' in cache) and elo_tuple[2] and (elo_tuple[2] >= 1800) and (len(cache['moves']) > 170):
dataelement = list()
time = re.search(r'^\w+', cache['TimeControl']).group() #get the time without increment e.g. get '180' from '180+0'
dataelement.append(time)
dataelement.append(cache['moves'])
dataelement.append(cache['Result'])
dataelement.append(cache['result_description'])
saved_games += 1
if(cache["result_description"] == "drawn"):
dataelement.append("drawn")
else:
dataelement.append(cache[cache["looser"]])
with open(output_file, 'a') as csvFile:
writer = csv.writer(csvFile)
writer.writerow(dataelement)
csvFile.close()
else:
removed_matches += 1
if not elo_tuple[2] or elo_tuple[2] < 1800:
low_elo_counter += 1
elif 'moves' in cache and len(cache['moves']) <= 170:
matches_with_too_few_moves += 1
else:
matches_not_checkmated_nor_resigned_nor_drawn += 1
cache = {}
print("all removed games: %i, games with to few moves: %i, games with too low Elo: %i, aborted games: %i, saved games: %i" % (removed_matches, matches_with_too_few_moves, low_elo_counter, matches_not_checkmated_nor_resigned_nor_drawn, saved_games))
print("creating dataset finished!")
# In the pre-cleaned dataset some games had just one move, which means that they were not played until checkmate, although in the tags they were declared as
# games that were played till the end.
# We check whether the move list from the input file has more than one move and save only those games into the output file.
def clean_dataset_from_games_with_just_one_move(input_file, output_file):
# check if the output file already exists. If so, delete it.
if os.path.exists(output_file):
os.remove(output_file)
else:
print("Can not delete the file as it doesn't exists")
line = 0
aborted_games = list()
with open(input_file) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader:
if not row or (row[0] in (None, "")): # checks if row is empty
# if (row[0] in (None, "")):
line += 1
else:
if(len(row[1]) < 18):
aborted_games.append(row[1])
line += 1
print(len(aborted_games))
with open(output_file, 'w') as output_file, open(input_file, 'r') as input_file :
line = 0
reader = csv.reader(input_file)
writer = csv.writer(output_file)
for row in reader:
if not row or (row[0] in (None, "")): # checks if row is empty
line += 1
else:
if row[1] in aborted_games:
continue
else:
writer.writerow(row)
line += 1
# filter_bpgn_file('export2005.bpgn', 'filtered_dataset_2005.csv')
| 43.978571
| 274
| 0.544583
|
53f7606db45d2a1f047e0ee0e34791a2086bf964
| 2,396
|
py
|
Python
|
supg/datasource/datasource.py
|
stanford-futuredata/supg
|
cff4e7eb9b657e79a8f1d8e245e23bcad543126c
|
[
"Apache-2.0"
] | 3
|
2020-08-31T21:03:31.000Z
|
2021-01-17T08:08:51.000Z
|
supg/datasource/datasource.py
|
stanford-futuredata/supg
|
cff4e7eb9b657e79a8f1d8e245e23bcad543126c
|
[
"Apache-2.0"
] | 1
|
2021-07-29T06:07:25.000Z
|
2021-07-29T07:04:57.000Z
|
supg/datasource/datasource.py
|
stanford-futuredata/supg
|
cff4e7eb9b657e79a8f1d8e245e23bcad543126c
|
[
"Apache-2.0"
] | 4
|
2020-09-03T03:13:37.000Z
|
2021-12-20T12:13:51.000Z
|
from typing import List, Sequence
import pandas as pd
import numpy as np
class DataSource:
def lookup(self, idxs: Sequence) -> np.ndarray:
raise NotImplementedError()
def filter(self, ids) -> np.ndarray:
labels = self.lookup(ids)
return np.array([ids[i] for i in range(len(ids)) if labels[i]])
def get_ordered_idxs(self) -> np.ndarray:
raise NotImplementedError()
def get_y_prob(self) -> np.ndarray:
raise NotImplementedError()
def lookup_yprob(self, ids) -> np.ndarray:
raise NotImplementedError()
class RealtimeDataSource(DataSource):
def __init__(
self,
y_pred,
y_true,
seed=123041,
):
self.y_pred = y_pred
self.y_true = y_true
self.random = np.random.RandomState(seed)
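# Order ids by descending proxy score; the random key only breaks ties,
# since np.lexsort sorts by its last key first.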
self.proxy_score_sort = np.lexsort((self.random.random(y_pred.size), y_pred))[::-1]
self.lookups = 0
def lookup(self, ids):
self.lookups += len(ids)
return self.y_true[ids]
def get_ordered_idxs(self) -> np.ndarray:
return self.proxy_score_sort
def get_y_prob(self) -> np.ndarray:
return self.y_pred[self.proxy_score_sort]
def lookup_yprob(self, ids) -> np.ndarray:
return self.y_pred[ids]
class DFDataSource(DataSource):
def __init__(
self,
df,
drop_p=None,
seed=123041
):
self.random = np.random.RandomState(seed)
if drop_p is not None:
pos = df[df['label'] == 1]
remove_n = int(len(pos) * drop_p)
drop_indices = self.random.choice(pos.index, remove_n, replace=False)
df = df.drop(drop_indices).reset_index(drop=True)
df.id = df.index
print(len(df[df['label'] == 1]) / len(df))
self.df_indexed = df.set_index(["id"])
self.df_sorted = df.sort_values(
["proxy_score"], axis=0, ascending=False).reset_index(drop=True)
self.lookups = 0
def lookup(self, ids):
self.lookups += len(ids)
return self.df_indexed.loc[ids]["label"].values
def get_ordered_idxs(self) -> np.ndarray:
return self.df_sorted["id"].values
def get_y_prob(self) -> np.ndarray:
return self.df_sorted["proxy_score"].values
def lookup_yprob(self, ids) -> np.ndarray:
return self.df_indexed.loc[ids]['proxy_score'].values
| 28.188235
| 91
| 0.604758
|
2b29ce1c95b5c09098c8204f246e9d218722e97b
| 3,247
|
py
|
Python
|
django_libphon/settings.py
|
Aladom/django-libphon
|
47814242d02fe7915293c49fcec25766553bbd01
|
[
"MIT"
] | null | null | null |
django_libphon/settings.py
|
Aladom/django-libphon
|
47814242d02fe7915293c49fcec25766553bbd01
|
[
"MIT"
] | null | null | null |
django_libphon/settings.py
|
Aladom/django-libphon
|
47814242d02fe7915293c49fcec25766553bbd01
|
[
"MIT"
] | null | null | null |
"""
Django settings for django_libphon project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'i dont care about the safety of this string'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'libphon',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_libphon.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_libphon.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| 25.769841
| 91
| 0.701571
|
1ec4475a2f79e19a0d151e2617ca12cae5ba445d
| 246
|
py
|
Python
|
models/effdet/config/__init__.py
|
Rakeshiva/vehicle-counting
|
b178780b4829c4e6f8e1089e57bc56cd57a93d0a
|
[
"MIT"
] | null | null | null |
models/effdet/config/__init__.py
|
Rakeshiva/vehicle-counting
|
b178780b4829c4e6f8e1089e57bc56cd57a93d0a
|
[
"MIT"
] | null | null | null |
models/effdet/config/__init__.py
|
Rakeshiva/vehicle-counting
|
b178780b4829c4e6f8e1089e57bc56cd57a93d0a
|
[
"MIT"
] | null | null | null |
from .config_utils import set_config_readonly, set_config_writeable
from .fpn_config import get_fpn_config
from .model_config import get_efficientdet_config, default_detection_model_configs
from .train_config import default_detection_train_config
| 61.5
| 82
| 0.906504
|
9d233b81cee11e16c102820c007ef1d9c8cb0d0a
| 749
|
py
|
Python
|
Python/101symmetric_tree.py
|
Apocrypse/LeetCode
|
3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39
|
[
"MIT"
] | 4
|
2020-03-17T03:08:51.000Z
|
2022-03-14T17:33:28.000Z
|
Python/101symmetric_tree.py
|
Apocrypse/LeetCode
|
3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39
|
[
"MIT"
] | null | null | null |
Python/101symmetric_tree.py
|
Apocrypse/LeetCode
|
3ada2605ce8c8f6dadebf37a30c9c00a0d1ede39
|
[
"MIT"
] | 3
|
2021-04-29T16:51:02.000Z
|
2022-03-19T17:37:56.000Z
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root:
return self.isSym(root.left, root.right)
return True
def isSym(self, left, right):
"""
:type left: TreeNode
:type right: TreeNode
:rtype: bool
"""
if right is None and left is None:
return True
if right and left and left.val == right.val:
return self.isSym(left.left, right.right) and self.isSym(left.right, right.left)
return False
| 25.827586
| 92
| 0.53271
|
3b6dd6da04b6f1be201d1ff50dfa1f080c52c71b
| 439
|
py
|
Python
|
Estruturas de dados/WHILE/DF064_Tratando_valores(gambiarra).py
|
Camilla-Carvalho/List-of-algorithms
|
b7b3a68332b370879351f08566edcd9ce81bfe53
|
[
"MIT"
] | 5
|
2020-11-12T02:53:15.000Z
|
2021-09-15T19:52:26.000Z
|
Estruturas de dados/WHILE/DF064_Tratando_valores(gambiarra).py
|
Camilla-Carvalho/List-of-algorithms
|
b7b3a68332b370879351f08566edcd9ce81bfe53
|
[
"MIT"
] | null | null | null |
Estruturas de dados/WHILE/DF064_Tratando_valores(gambiarra).py
|
Camilla-Carvalho/List-of-algorithms
|
b7b3a68332b370879351f08566edcd9ce81bfe53
|
[
"MIT"
] | null | null | null |
# Sum of several values with a stop condition
value = soma = cont = 0
while value != 999:
value = int(input('Digite um número [999 para parar]: '))
if value != 999:
soma += value
cont += 1
else:
soma += value
cont += 1
print(f'Você digitou {cont - 1} números e a soma entre eles foi {soma - 999}.')
print('Acabou')
# Do not do it this way, because the variable soma will end up with the wrong value
| 29.266667
| 87
| 0.6082
|
a46d670d7016573f4d985e2aa167a11cbc971365
| 20,443
|
py
|
Python
|
ansible/library/lambda.py
|
gshakir/aws-price-api
|
bcf03bd03eb7b408ba0b5a50620a42c6b36c3b23
|
[
"MIT"
] | null | null | null |
ansible/library/lambda.py
|
gshakir/aws-price-api
|
bcf03bd03eb7b408ba0b5a50620a42c6b36c3b23
|
[
"MIT"
] | null | null | null |
ansible/library/lambda.py
|
gshakir/aws-price-api
|
bcf03bd03eb7b408ba0b5a50620a42c6b36c3b23
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import base64
import os
import re
import sys
# TODO: used temporarily for backward compatibility with older versions of ansible but should be removed once included in the distro.
try:
import boto
except ImportError:
pass
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
from boto3.s3.transfer import S3Transfer
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
DOCUMENTATION = '''
---
module: lambda
short_description: Creates, updates or deletes AWS Lambda functions, configs and versions.
description:
- This module allows the management of AWS Lambda functions and their related resources via the Ansible
framework. It is idempotent and supports "Check" mode. Use M(lambda_alias) to manage lambda function aliases,
M(lambda_event) to manage event source mappings such as Kinesis streams, M(lambda_invoke)
to execute a lambda function and M(lambda_facts) to gather facts relating to one or more lambda functions.
version_added: "2.2"
author: Pierre Jodouin (@pjodouin)
options:
name:
description:
- The name you want to assign to the function. You can specify an unqualified function
name (for example, "Thumbnail") or you can specify Amazon Resource Name (ARN) of the function
(for example, 'arn:aws:lambda:us-west-2:account-id:function:ThumbNail'). AWS Lambda also allows you to
specify only the account ID qualifier (for example, 'account-id:Thumbnail'). Note that the length
constraint applies only to the ARN. If you specify only the function name, it is limited to 64 character
in length.
required: true
aliases: [ "function_name" ]
state:
description:
- Describes the desired state.
required: true
default: "present"
choices: ["present", "absent"]
runtime:
description:
- Runtime environment of the Lambda function. Cannot be changed after creating the function.
required: true
code_s3_bucket:
description:
- S3 bucket name where the .zip file containing your deployment package is stored.
This bucket must reside in the same AWS region where you are creating the Lambda function.
required: true
aliases: ['s3_bucket']
code_s3_key:
description:
- S3 object (the deployment package) key name you want to upload.
required: true
aliases: ['s3_key']
code_s3_object_version:
description:
- S3 object (the deployment package) version you want to upload.
required: false
aliases: ['s3_object_version']
local_path:
description:
- Complete local file path of the deployment package bundled in a ZIP archive.
required: true
handler:
description:
- The function within your code that Lambda calls to begin execution.
required: true
role:
description:
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access
any other AWS resources. If the role belongs to the same account, the simple role name can be used.
required: true
timeout:
description:
- The function execution time (in seconds) at which Lambda should terminate the function. Because the execution
time has cost implications, we recommend you set this value based on your expected execution time.
required: false
default: 3
memory_size:
description:
- The amount of memory, in MB, your Lambda function is given. Lambda uses this memory size to infer the amount of
CPU and memory allocated to your function. Your function use-case determines your CPU and memory requirements.
For example, a database operation might need less memory compared to an image processing function.
The value must be a multiple of 64 and between 128 and 1536.
required: false
default: 128
publish:
description:
- This boolean parameter is used to publish a version of the function from the current snapshot of $LATEST.
The code and configuration cannot be modified after publication.
required: false
default: false
description:
description:
- A short, user-defined function description. Lambda does not use this value. Assign a meaningful description
as you see fit. There is no documented limit.
required: false
version:
description:
- Version number of the Lambda function to be deleted. This parameter cannot be used with I(state=present).
A value of 0 is ignored.
required: false
vpc_subnet_ids:
description:
- If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of
subnet IDs. These must belong to the same VPC. You must provide at least one subnet ID.
required: false
aliases: ['subnet_ids']
vpc_security_group_ids:
description:
- If your Lambda function accesses resources in a VPC, you provide this parameter identifying the list of
security group IDs. You must provide at least one security group ID.
required: false
aliases: ['security_group_ids']
requirements:
- boto3
extends_documentation_fragment:
- aws
notes:
- Parameter C(version) is only used to delete a specific version of a lambda function. It cannot be used for
anything else as new versions get I(published) after which they cannot be modified.
'''
EXAMPLES = '''
---
# Simple example to create a lambda function and publish a version
- hosts: localhost
gather_facts: no
vars:
state: present
project_folder: /path/to/deployment/package
deployment_package: lambda.zip
account: 123456789012
version_to_delete: 0
tasks:
- name: AWS Lambda Function
lambda:
state: "{{ state | default('present') }}"
name: myLambdaFunction
publish: True
description: lambda function description
code_s3_bucket: package-bucket
code_s3_key: "lambda/{{ deployment_package }}"
local_path: "{{ project_folder }}/{{ deployment_package }}"
runtime: python2.7
timeout: 5
handler: lambda.handler
memory_size: 128
role: API2LambdaExecRole
version: "{{ version_to_delete }}"
vpc_subnet_ids:
- subnet-9993085c
- subnet-99910cc3
vpc_security_group_ids:
- sg-999b9ca8
- name: show results
debug: var=lambda_facts
'''
RETURN = '''
---
lambda_results:
description: dictionary of items returned by the API describing the function configuration
returned: success
type: dict
sample: lambda_results.Version can be useful when publishing a new version
'''
MIN_MEMORY_SIZE = 2 * 64
MAX_MEMORY_SIZE = 24 * 64
class AWSConnection:
"""
Create the connection object and client objects as required.
"""
def __init__(self, ansible_obj, resources, boto3=True):
try:
self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)
self.resource_client = dict()
if not resources:
resources = ['lambda']
resources.append('iam')
for resource in resources:
aws_connect_kwargs.update(dict(region=self.region,
endpoint=self.endpoint,
conn_type='client',
resource=resource
))
self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
# if region is not provided, then get default profile/session region
if not self.region:
self.region = self.resource_client['lambda'].meta.region_name
except (ClientError, ParamValidationError, MissingParametersError) as e:
ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
try:
self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
except (ClientError, ValueError, KeyError, IndexError):
self.account_id = ''
def client(self, resource='lambda'):
return self.resource_client[resource]
def pc(key):
"""
Changes a python key into its Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
:param key:
:return:
"""
return "".join([token.capitalize() for token in key.split('_')])
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict()
for param in module_params:
module_param = module.params.get(param, None)
if module_param:
api_params[pc(param)] = module_param
return api_params
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
function_name = module.params['function_name']
# validate function name
if not re.search('^[\w\-:]+$', function_name):
module.fail_json(
msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
)
if len(function_name) > 64:
module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
# validate local path of deployment package
local_path = module.params['local_path']
if not os.path.isfile(local_path):
module.fail_json(msg='Invalid local file path for deployment package: {0}'.format(local_path))
# parameter 'version' can only be used with state=absent
if module.params['state'] == 'present' and module.params['version'] > 0:
module.fail_json(msg="Cannot specify a version with state='present'.")
# validate memory_size
memory_size = module.params['memory_size']
if memory_size not in range(MIN_MEMORY_SIZE, MAX_MEMORY_SIZE + 1, 64):
module.fail_json(
msg='Parameter "memory_size" must be between {0} and {1} and be a multiple of 64.'.format(MIN_MEMORY_SIZE, MAX_MEMORY_SIZE)
)
# check if 'role' needs to be expanded in full ARN format
if not module.params['role'].startswith('arn:aws:iam:'):
role = module.params['role']
module.params['role'] = 'arn:aws:iam::{0}:role/{1}'.format(aws.account_id, role)
return
def upload_to_s3(module, aws):
"""
Upload local deployment package to s3.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
client = aws.client('s3')
s3 = S3Transfer(client)
local_path = module.params['local_path']
s3_bucket = module.params['s3_bucket']
s3_key = module.params['s3_key']
try:
s3.upload_file(local_path, s3_bucket, s3_key)
except Exception as e:
module.fail_json(msg='Error uploading package to s3: {0}'.format(e))
return
def get_local_package_hash(module):
"""
Returns the base64 encoded sha256 hash value for the deployment package at local_path.
:param module:
:return:
"""
local_path = module.params['local_path']
block_size = os.statvfs(local_path).f_bsize
hash_lib = hashlib.sha256()
with open(local_path, 'rb') as zip_file:
for data_chunk in iter(lambda: zip_file.read(block_size), b''):
hash_lib.update(data_chunk)
return base64.b64encode(hash_lib.digest())
def get_lambda_config(module, aws):
"""
Returns the lambda function configuration if it exists.
:param module: Ansible module reference
:param aws: AWS client connection
:return:
"""
client = aws.client('lambda')
# set API parameters
api_params = dict(FunctionName=module.params['function_name'])
if module.params['version'] > 0:
api_params.update(Qualifier=str(module.params['version']))
# check if function exists and get facts, including sha256 hash
try:
results = client.get_function_configuration(**api_params)
except (ClientError, ParamValidationError, MissingParametersError) as e:
if e.response['Error']['Code'] == 'ResourceNotFoundException':
results = None
else:
module.fail_json(msg='Error retrieving function configuration: {0}'.format(e))
return results
def lambda_function(module, aws):
"""
Adds, updates or deletes lambda function code and configuration.
:param module: Ansible module reference
:param aws: AWS client connection
:return dict:
"""
client = aws.client('lambda')
results = dict()
changed = False
current_state = 'absent'
state = module.params['state']
facts = get_lambda_config(module, aws)
if facts:
current_state = 'present'
if state == 'present':
if current_state == 'present':
# check if the code has changed
s3_hash = facts.get(pc('code_sha256'))
local_hash = get_local_package_hash(module)
if s3_hash != local_hash:
# code has changed so upload to s3
if not module.check_mode:
upload_to_s3(module, aws)
api_params = set_api_params(module, ('function_name', ))
api_params.update(set_api_params(module, ('s3_bucket', 's3_key', 's3_object_version')))
try:
if not module.check_mode:
results = client.update_function_code(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating function code: {0}'.format(e))
# check if config has changed
config_changed = False
config_params = ('role', 'handler', 'description', 'timeout', 'memory_size')
for param in config_params:
if module.params.get(param) != facts.get(pc(param)):
config_changed = True
break
# check if VPC config has changed
vpc_changed = False
vpc_params = ('subnet_ids', 'security_group_ids')
for param in vpc_params:
current_vpc_config = facts.get('VpcConfig', dict())
if sorted(module.params.get(param, [])) != sorted(current_vpc_config.get(pc(param), [])):
vpc_changed = True
break
if config_changed or vpc_changed:
api_params = set_api_params(module, ('function_name', ))
api_params.update(set_api_params(module, config_params))
if module.params.get('subnet_ids'):
api_params.update(VpcConfig=set_api_params(module, vpc_params))
else:
# to remove the VPC config, its parameters must be explicitly set to empty lists
api_params.update(VpcConfig=dict(SubnetIds=[], SecurityGroupIds=[]))
try:
if not module.check_mode:
results = client.update_function_configuration(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error updating function config: {0}'.format(e))
# check if function needs to be published
if changed and module.params['publish']:
api_params = set_api_params(module, ('function_name', 'description'))
try:
if not module.check_mode:
results = client.publish_version(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error publishing version: {0}'.format(e))
else: # create function
if not module.check_mode:
upload_to_s3(module, aws)
api_params = set_api_params(module, ('function_name', 'runtime', 'role', 'handler'))
api_params.update(set_api_params(module, ('memory_size', 'timeout', 'description', 'publish')))
api_params.update(Code=set_api_params(module, ('s3_bucket', 's3_key', 's3_object_version')))
api_params.update(VpcConfig=set_api_params(module, ('subnet_ids', 'security_group_ids')))
try:
if not module.check_mode:
results = client.create_function(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function: {0}'.format(e))
else: # state = 'absent'
if current_state == 'present':
# delete the function
api_params = set_api_params(module, ('function_name', ))
version = module.params['version']
if version > 0:
api_params.update(Qualifier=str(version))
try:
if not module.check_mode:
results = client.delete_function(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error deleting function: {0}'.format(e))
return dict(changed=changed, ansible_facts=dict(lambda_results=results or facts))
def main():
"""
Main entry point.
:return dict: ansible facts
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
function_name=dict(required=True, default=None, aliases=['name']),
runtime=dict(required=True, default=None),
role=dict(required=True, default=None),
handler=dict(required=True, default=None),
s3_bucket=dict(required=True, default=None, aliases=['code_s3_bucket']),
s3_key=dict(required=True, default=None, aliases=['code_s3_key']),
s3_object_version=dict(required=False, default=None, aliases=['code_s3_object_version']),
local_path=dict(required=True, default=None),
subnet_ids=dict(type='list', required=False, default=[], aliases=['vpc_subnet_ids']),
security_group_ids=dict(type='list', required=False, default=[], aliases=['vpc_security_group_ids']),
timeout=dict(type='int', required=False, default=3),
memory_size=dict(type='int', required=False, default=128),
description=dict(required=False, default=None),
publish=dict(type='bool', required=False, default=False),
version=dict(type='int', required=False, default=0),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[],
required_together=[['subnet_ids', 'security_group_ids']]
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['lambda', 's3'])
validate_params(module, aws)
results = lambda_function(module, aws)
module.exit_json(**results)
# ansible import module(s) kept at ~eof as recommended
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| 36.310835
| 135
| 0.652595
|
26685c1ed916123eae87f08fe1a134445d69a752
| 587
|
py
|
Python
|
examples/ping_client_requests.py
|
recht/thriftrw-python
|
aad5ee4e9ca21fe59c9bea479465615ef3825dec
|
[
"MIT"
] | 40
|
2016-02-18T18:01:23.000Z
|
2022-03-31T10:34:33.000Z
|
examples/ping_client_requests.py
|
recht/thriftrw-python
|
aad5ee4e9ca21fe59c9bea479465615ef3825dec
|
[
"MIT"
] | 56
|
2016-02-10T16:51:07.000Z
|
2020-02-07T05:28:49.000Z
|
examples/ping_client_requests.py
|
recht/thriftrw-python
|
aad5ee4e9ca21fe59c9bea479465615ef3825dec
|
[
"MIT"
] | 12
|
2016-03-29T17:29:28.000Z
|
2021-10-30T14:36:39.000Z
|
from __future__ import absolute_import, unicode_literals, print_function
import os.path
import requests
import thriftrw
ping = thriftrw.load(
os.path.join(os.path.dirname(__file__), 'ping.thrift'),
)
def main():
req = ping.Ping.ping.request('world')
response = requests.post(
'http://127.0.0.1:8888/thrift',
data=ping.dumps.message(req, seqid=42),
)
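    # The reply envelope should echo the method name and the seqid that was sent,
    # which the asserts below use to catch mismatched or out-of-order responses.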
reply = ping.loads.message(ping.Ping, response.content)
assert reply.name == 'ping'
assert reply.seqid == 42
resp = reply.body
print(resp)
if __name__ == "__main__":
main()
| 20.241379
| 72
| 0.666099
|
2842b6288c5162e51f047842f4ea0ee38694a97e
| 1,660
|
py
|
Python
|
chas/client/chaslib/server.py
|
Owen-Cochell/chas
|
303bf090e6db19a326888f3e31268f8035576255
|
[
"Apache-2.0"
] | null | null | null |
chas/client/chaslib/server.py
|
Owen-Cochell/chas
|
303bf090e6db19a326888f3e31268f8035576255
|
[
"Apache-2.0"
] | 1
|
2018-08-09T18:51:43.000Z
|
2020-08-31T20:54:00.000Z
|
chas/client/chaslib/server.py
|
Owen-Cochell/C.H.A.S
|
303bf090e6db19a326888f3e31268f8035576255
|
[
"Apache-2.0"
] | null | null | null |
# A class representing the CHAS server
# TODO: Add ping, test for socketserver discrepancies, ...
import uuid
class Server:
def __init__(self, chas, ip, port, sck, uu_id):
self.chas = chas # CHAS Masterclass
self.ip = ip # IP Address of the CHAS server
self.port = port # Websocket port of the CHAS server
self.sock = sck # Websocket
self.uuid = uu_id # UUID of this node
        self.auth = False  # Value determining whether we are authenticated
self.queue = [] # Message queue
def send(self, content, id_num, encoding='utf-8'):
# Function for sending data to CHAS server
data = {'id': id_num,
'uuid': self.uuid,
'content': content}
self.sock.write(data, encoding=encoding)
def add_queue(self, data):
self.queue.append(data)
return
def get(self, content, id_num, encoding='utf-8'):
# Function for getting data from CHAS server
content_uuid = str(uuid.uuid4())
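        # A fresh UUID tags this request so the matching reply can be picked out
        # of the shared message queue below.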
inner_data = {'content-uuid': content_uuid,
'content-id': id_num,
'content-status': 0,
'content-type': 0,
'content': content}
data = {'id': 3,
'uuid': self.uuid,
'content': inner_data}
        self.sock.write(data, encoding=encoding)
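        # Busy-wait on the shared queue until the server's reply carrying our
        # content-uuid arrives; replies for other requests are left untouched.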
while True:
for i in self.queue:
if i['content-uuid'] == content_uuid:
# Found our request:
message = i['content']
self.queue.remove(i)
return message[0]
| 24.776119
| 68
| 0.526506
|
99fd402a701bd31d613dabfb6b413198642cdadd
| 5,039
|
py
|
Python
|
films101.py
|
tgadf/movies
|
28ae1a0798029d3d3f1034aba456390d06e7e0dc
|
[
"MIT"
] | null | null | null |
films101.py
|
tgadf/movies
|
28ae1a0798029d3d3f1034aba456390d06e7e0dc
|
[
"MIT"
] | null | null | null |
films101.py
|
tgadf/movies
|
28ae1a0798029d3d3f1034aba456390d06e7e0dc
|
[
"MIT"
] | null | null | null |
import re
from time import sleep
from timeUtils import clock, elapsed
from ioUtils import saveFile, getFile
from fsUtils import setDir, isDir, mkDir, setFile, isFile, setSubFile
from fileUtils import getBaseFilename
from searchUtils import findSubPatternExt, findPatternExt, findExt, findNearest
from strUtils import convertCurrency
from webUtils import getWebData, getHTML
from movieDB import movieDB
from os import getcwd
import operator
##############################################################################################################################
# films101
##############################################################################################################################
class films101(movieDB):
def __init__(self, basedir=None):
self.name = "films101"
movieDB.__init__(self, dbdir=self.name)
###########################################################################################################################
# Get Box Office Weekend Files
###########################################################################################################################
def downloadFilms101YearlyData(self, year, outdir, debug=False):
url="http://www.films101.com/y{0}r.htm".format(year)
savename = setFile(outdir, "{0}.p".format(year))
if isFile(savename): return
try:
if debug:
print("Downloading/Saving {0}".format(savename))
getWebData(base=url, savename=savename, useSafari=False)
except:
return
sleep(2)
def getFilms101YearlyData(self, startYear = 1900, endYear = 2018, debug=False):
outdir = self.getDataDir()
if debug:
print("Data Directory: {0}".format(outdir))
#outdir = setDir(getBoxOfficeDir(), "data")
if not isDir(outdir): mkDir(outdir)
years = range(int(startYear), int(endYear)+1)
for year in years:
self.downloadFilms101YearlyData(year, outdir, debug)
###########################################################################################################################
# Parse Box Office Weekend Files
###########################################################################################################################
def parseFilms101YearlyData(self, ifile, debug=False):
if debug:
print(ifile)
htmldata = getFile(ifile)
bsdata = getHTML(htmldata)
movies = []
headertables = bsdata.findAll("table", {"class": "lsthdg"})
datatables = bsdata.findAll("table", {"class": "lstdta"})
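        # films101 renders each listing as a header table ("lsthdg") paired with a
        # data table ("lstdta"); the two lists are walked in parallel by index below.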
if len(headertables) < len(datatables):
print(headertables)
raise ValueError("Found {0} headers and {1} data tables".format(len(headertables), len(datatables)))
if debug:
print("Found {0} tables".format(len(datatables)))
for i in range(len(datatables)):
headertable = headertables[i]
tds = headertable.findAll("td")
headers = [x.text for x in tds if x is not None]
headers = [x.strip() for x in headers]
datatable = datatables[i]
trs = datatable.findAll("tr")
expect = len(trs)
for tr in trs:
tds = tr.findAll("td")
tds = [x.text for x in tds if x is not None]
if len(tds) != len(headers):
print(headers)
print(tds)
                    raise ValueError("Row has {0} cells but expected {1} headers".format(len(tds), len(headers)))
try:
mdata = dict(zip(headers, tds))
except:
print(headers)
print(tds)
raise ValueError("Could not combine headers and data")
try:
movie = mdata['TITLE']
except:
raise ValueError("Could not get movie name from TITLE key! {0}".format(mdata))
movies.append(movie)
if debug:
print("Found {0}/{1} movies".format(len(movies), expect))
return movies
def parseFilms101Data(self, debug=False):
outdir = self.getDataDir()
resultsdir = self.getResultsDir()
files = findExt(outdir, ext=".p")
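        # Each cached page was stored as <year>.p by getFilms101YearlyData; every
        # parsed title is paired with a default value of 10 before being written out.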
movies = {}
for ifile in sorted(files):
year = getBaseFilename(ifile)
results = self.parseFilms101YearlyData(ifile, debug=debug)
movies[year] = []
for movie in results:
movies[year].append([movie,10])
print("Found {0} movies in {1}".format(len(movies[year]),year))
savename = setFile(self.getResultsDir(), "{0}.json".format(self.name))
print("Saving {0} Years of films101 Data to {1}".format(len(movies), savename))
saveFile(savename, movies)
_, _ = clock("Last Run")
| 39.367188
| 127
| 0.471721
|
3c0c7cb44a0cb191dd8a2386338c09d8e7e5e996
| 22,601
|
py
|
Python
|
open/Edgecortix/code/mobilenetv2/SingleStream/python/main.py
|
wom-ai/inference_results_v1.0
|
af4bfffd5b6c4815f305a272cb42ae6de09f44e1
|
[
"Apache-2.0"
] | null | null | null |
open/Edgecortix/code/mobilenetv2/SingleStream/python/main.py
|
wom-ai/inference_results_v1.0
|
af4bfffd5b6c4815f305a272cb42ae6de09f44e1
|
[
"Apache-2.0"
] | 24
|
2021-07-19T01:09:35.000Z
|
2022-03-17T11:44:02.000Z
|
open/Edgecortix/code/mobilenetv2/SingleStream/python/main.py
|
wom-ai/inference_results_v1.0
|
af4bfffd5b6c4815f305a272cb42ae6de09f44e1
|
[
"Apache-2.0"
] | null | null | null |
"""
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import array
import collections
import json
import logging
import os
import sys
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
NANO_SEC = 1e9
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support
SUPPORTED_DATASETS = {
"imagenet":
(imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
{"image_size": [224, 224, 3]}),
"imagenet_pytorch":
(imagenet.Imagenet, dataset.pre_process_imagenet_pytorch, dataset.PostProcessArgMax(offset=0),
{"image_size": [224, 224, 3]}),
"imagenet_mobilenet":
(imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
{"image_size": [224, 224, 3]}),
"coco-300":
(coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
{"image_size": [300, 300, 3]}),
"coco-300-pt":
(coco.Coco, dataset.pre_process_coco_pt_mobilenet, coco.PostProcessCocoPt(False,0.3),
{"image_size": [300, 300, 3]}),
"coco-640-pt-yolov3":
(coco.Coco, dataset.pre_process_coco_pt_yolov3, coco.PostProcessCocoPt(True,0.001),
{"image_size": [640, 640, 3], "use_label_map": True}),
"coco-1200":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCoco(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-onnx":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoOnnx(),
{"image_size": [1200, 1200, 3]}),
"coco-1200-pt":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoPt(True,0.05),
{"image_size": [1200, 1200, 3],"use_label_map": True}),
"coco-1200-tf":
(coco.Coco, dataset.pre_process_coco_resnet34, coco.PostProcessCocoTf(),
{"image_size": [1200, 1200, 3],"use_label_map": False}),
}
# pre-defined command line options to simplify things. They are used as defaults and can be
# overwritten from command line
SUPPORTED_PROFILES = {
"defaults": {
"dataset": "imagenet",
"backend": "tensorflow",
"cache": 0,
"max-batchsize": 32,
},
# resnet
"resnet50-tf": {
"inputs": "input_tensor:0",
"outputs": "ArgMax:0",
"dataset": "imagenet",
"backend": "tensorflow",
"model-name": "resnet50",
},
"resnet50-onnxruntime": {
"dataset": "imagenet",
"outputs": "ArgMax:0",
"backend": "onnxruntime",
"model-name": "resnet50",
},
"resnet50-pt": {
"dataset": "imagenet_pytorch",
"backend": "pytorch-jit-traced",
"model-name": "resnet50",
},
"resnet50-pt32": {
"dataset": "imagenet_pytorch",
"backend": "pytorch-fp32",
"model-name": "resnet50",
},
"resnet50-edgecortix": {
"dataset": "imagenet_pytorch",
"backend": "edgecortix",
"model-name": "resnet50",
},
"mobilenetv2-edgecortix": {
"dataset": "imagenet_pytorch",
"backend": "edgecortix",
"model-name": "mobilenetv2",
},
# mobilenet
"mobilenet-tf": {
"inputs": "input:0",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"dataset": "imagenet_mobilenet",
"backend": "tensorflow",
"model-name": "mobilenet",
},
"mobilenet-onnxruntime": {
"dataset": "imagenet_mobilenet",
"outputs": "MobilenetV1/Predictions/Reshape_1:0",
"backend": "onnxruntime",
"model-name": "mobilenet",
},
# ssd-mobilenet
"ssd-mobilenet-tf": {
"inputs": "image_tensor:0",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"dataset": "coco-300",
"backend": "tensorflow",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-native",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-onnxruntime": {
"dataset": "coco-300",
"outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-mobilenet",
},
"ssd-mobilenet-pt": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-300-pt",
"backend": "pytorch-ssd-jit-traced",
"model-name": "ssd-mobilenet",
},
"yolov3-pt": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-640-pt-yolov3",
"backend": "pytorch-yolov3-jit-traced",
"model-name": "yolov3",
},
"yolov3-pt32": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-640-pt-yolov3",
"backend": "pytorch-yolov3-fp32",
"model-name": "yolov3",
},
# ssd-resnet34
"ssd-resnet34-tf": {
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"dataset": "coco-1200-tf",
"backend": "tensorflow",
"data-format": "NCHW",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-pytorch": {
"inputs": "image",
"outputs": "bboxes,labels,scores",
"dataset": "coco-1200-pt",
"backend": "pytorch-native",
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime": {
"dataset": "coco-1200-onnx",
"inputs": "image",
"outputs": "bboxes,labels,scores",
"backend": "onnxruntime",
"data-format": "NCHW",
"max-batchsize": 1,
"model-name": "ssd-resnet34",
},
"ssd-resnet34-onnxruntime-tf": {
"dataset": "coco-1200-tf",
"inputs": "image:0",
"outputs": "detection_bboxes:0,detection_classes:0,detection_scores:0",
"backend": "onnxruntime",
"data-format": "NHWC",
"model-name": "ssd-resnet34",
},
}
SCENARIO_MAP = {
"SingleStream": lg.TestScenario.SingleStream,
"MultiStream": lg.TestScenario.MultiStream,
"Server": lg.TestScenario.Server,
"Offline": lg.TestScenario.Offline,
}
last_timeing = []
def get_args():
"""Parse commandline."""
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--dataset-list", help="path to the dataset list")
parser.add_argument("--dataset-calibration-list", help="path to the dataset calibration list")
parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
parser.add_argument("--scenario", default="SingleStream",
help="mlperf benchmark scenario, one of " + str(list(SCENARIO_MAP.keys())))
parser.add_argument("--max-batchsize", type=int, help="max batch size in a single inference")
parser.add_argument("--model", required=True, help="model file")
parser.add_argument("--output", default="output", help="test results")
parser.add_argument("--inputs", help="model inputs")
parser.add_argument("--outputs", help="model outputs")
parser.add_argument("--backend", help="runtime to use")
parser.add_argument("--model-name", help="name of the mlperf model, ie. resnet50")
parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
parser.add_argument("--qps", type=int, help="target qps")
parser.add_argument("--cache", type=int, default=0, help="use cache")
parser.add_argument("--accuracy", action="store_true", help="enable accuracy pass")
parser.add_argument("--find-peak-performance", action="store_true", help="enable finding peak performance pass")
parser.add_argument("--debug", action="store_true", help="debug, turn traces on")
# file to use mlperf rules compliant parameters
parser.add_argument("--mlperf_conf", default="../../mlperf.conf", help="mlperf rules config")
# file for user LoadGen settings such as target QPS
parser.add_argument("--user_conf", default="user.conf", help="user config for user LoadGen settings such as target QPS")
# below will override mlperf rules compliant settings - don't use for official submission
parser.add_argument("--time", type=int, help="time to scan in seconds")
parser.add_argument("--count", type=int, help="dataset items to use")
parser.add_argument("--max-latency", type=float, help="mlperf max latency in pct tile")
parser.add_argument("--samples-per-query", type=int, help="mlperf multi-stream sample per query")
args = parser.parse_args()
    # don't use defaults in argparse. Instead we default to a dict, override that with a profile,
    # and take the result as the default unless the command line gives an explicit value.
defaults = SUPPORTED_PROFILES["defaults"]
if args.profile:
profile = SUPPORTED_PROFILES[args.profile]
defaults.update(profile)
for k, v in defaults.items():
kc = k.replace("-", "_")
if getattr(args, kc) is None:
setattr(args, kc, v)
if args.inputs:
args.inputs = args.inputs.split(",")
if args.outputs:
args.outputs = args.outputs.split(",")
if args.scenario not in SCENARIO_MAP:
parser.error("valid scanarios:" + str(list(SCENARIO_MAP.keys())))
return args
def get_backend(backend, dataset_path, dataset_calibration_list):
if backend == "tensorflow":
from backend_tf import BackendTensorflow
backend = BackendTensorflow()
elif backend == "onnxruntime":
from backend_onnxruntime import BackendOnnxruntime
backend = BackendOnnxruntime()
elif backend == "null":
from backend_null import BackendNull
backend = BackendNull()
elif backend == "pytorch":
from backend_pytorch import BackendPytorch
backend = BackendPytorch()
elif backend == "pytorch-native":
from backend_pytorch_native import BackendPytorchNative
backend = BackendPytorchNative()
elif backend == "pytorch-jit-traced":
from backend_pytorch_jit_traced import BackendPytorchJITTraced
backend = BackendPytorchJITTraced()
elif backend == "pytorch-fp32":
from backend_pytorch_fp32 import BackendPytorchFP32
backend = BackendPytorchFP32()
elif backend == "pytorch-ssd-jit-traced":
from backend_pytorch_ssd_jit_traced import BackendPytorchSSDJITTraced
backend = BackendPytorchSSDJITTraced()
elif backend == "pytorch-yolov3-jit-traced":
from backend_pytorch_yolov3_jit_traced import BackendPytorchYOLOv3JITTraced
backend = BackendPytorchYOLOv3JITTraced()
elif backend == "pytorch-yolov3-fp32":
from backend_pytorch_yolov3_fp32 import BackendPytorchYOLOv3FP32
backend = BackendPytorchYOLOv3FP32()
elif backend == "tflite":
from backend_tflite import BackendTflite
backend = BackendTflite()
elif backend == "edgecortix":
from backend_edgecortix import BackendEdgecortix
backend = BackendEdgecortix(dataset_path, dataset_calibration_list)
else:
raise ValueError("unknown backend: " + backend)
return backend
class Item:
"""An item that we queue for processing by the thread pool."""
def __init__(self, query_id, content_id, img, label=None):
self.query_id = query_id
self.content_id = content_id
self.img = img
self.label = label
self.start = time.time()
class RunnerBase:
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
self.take_accuracy = False
self.ds = ds
self.model = model
self.post_process = post_proc
self.threads = threads
self.take_accuracy = False
self.max_batchsize = max_batchsize
self.result_timing = []
def handle_tasks(self, tasks_queue):
pass
def start_run(self, result_dict, take_accuracy):
self.result_dict = result_dict
self.result_timing = []
self.take_accuracy = take_accuracy
self.post_process.start()
def run_one_item(self, qitem):
# run the prediction
processed_results = []
try:
results = self.model.predict({self.model.inputs[0]: qitem.img})
processed_results = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
if self.take_accuracy:
self.post_process.add_results(processed_results)
self.result_timing.append(time.time() - qitem.start)
except Exception as ex: # pylint: disable=broad-except
src = [self.ds.get_item_loc(i) for i in qitem.content_id]
log.error("thread: failed on contentid=%s, %s", src, ex)
# since post_process will not run, fake empty responses
processed_results = [[]] * len(qitem.query_id)
finally:
response_array_refs = []
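            # Keep a reference to every response array until QuerySamplesComplete
            # returns: loadgen reads the raw buffer addresses from buffer_info(),
            # so the arrays must not be garbage collected before then.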
response = []
for idx, query_id in enumerate(qitem.query_id):
response_array = array.array("B", np.array(processed_results[idx], np.float32).tobytes())
response_array_refs.append(response_array)
bi = response_array.buffer_info()
response.append(lg.QuerySampleResponse(query_id, bi[0], bi[1]))
lg.QuerySamplesComplete(response)
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.run_one_item(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
data, label = self.ds.get_samples(idx[i:i+bs])
self.run_one_item(Item(query_id[i:i+bs], idx[i:i+bs], data, label))
def finish(self):
pass
class QueueRunner(RunnerBase):
def __init__(self, model, ds, threads, post_proc=None, max_batchsize=128):
super().__init__(model, ds, threads, post_proc, max_batchsize)
self.tasks = Queue(maxsize=threads * 4)
self.workers = []
self.result_dict = {}
for _ in range(self.threads):
worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
worker.daemon = True
self.workers.append(worker)
worker.start()
def handle_tasks(self, tasks_queue):
"""Worker thread."""
while True:
qitem = tasks_queue.get()
if qitem is None:
# None in the queue indicates the parent want us to exit
tasks_queue.task_done()
break
self.run_one_item(qitem)
tasks_queue.task_done()
def enqueue(self, query_samples):
idx = [q.index for q in query_samples]
query_id = [q.id for q in query_samples]
if len(query_samples) < self.max_batchsize:
data, label = self.ds.get_samples(idx)
self.tasks.put(Item(query_id, idx, data, label))
else:
bs = self.max_batchsize
for i in range(0, len(idx), bs):
ie = i + bs
data, label = self.ds.get_samples(idx[i:ie])
self.tasks.put(Item(query_id[i:ie], idx[i:ie], data, label))
def finish(self):
# exit all threads
for _ in self.workers:
self.tasks.put(None)
for worker in self.workers:
worker.join()
def add_results(final_results, name, result_dict, result_list, took, show_accuracy=False):
percentiles = [50., 80., 90., 95., 99., 99.9]
buckets = np.percentile(result_list, percentiles).tolist()
buckets_str = ",".join(["{}:{:.4f}".format(p, b) for p, b in zip(percentiles, buckets)])
if result_dict["total"] == 0:
result_dict["total"] = len(result_list)
# this is what we record for each run
result = {
"took": took,
"mean": np.mean(result_list),
"percentiles": {str(k): v for k, v in zip(percentiles, buckets)},
"qps": len(result_list) / took,
"count": len(result_list),
"good_items": result_dict["good"],
"total_items": result_dict["total"],
}
acc_str = ""
if show_accuracy:
result["accuracy"] = 100. * result_dict["good"] / result_dict["total"]
acc_str = ", acc={:.3f}%".format(result["accuracy"])
if "mAP" in result_dict:
result["mAP"] = 100. * result_dict["mAP"]
acc_str += ", mAP={:.3f}%".format(result["mAP"])
# add the result to the result dict
final_results[name] = result
# to stdout
print("{} qps={:.2f}, mean={:.4f}, time={:.3f}{}, queries={}, tiles={}".format(
name, result["qps"], result["mean"], took, acc_str,
len(result_list), buckets_str))
def main():
global last_timeing
args = get_args()
log.info(args)
# find backend
backend = get_backend(args.backend, args.dataset_path, args.dataset_calibration_list)
# override image format if given
image_format = args.data_format if args.data_format else backend.image_format()
# --count applies to accuracy mode only and can be used to limit the number of images
# for testing. For perf model we always limit count to 200.
count_override = False
count = args.count
if count:
count_override = True
# dataset to use
wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
ds = wanted_dataset(data_path=args.dataset_path,
image_list=args.dataset_list,
name=args.dataset,
image_format=image_format,
pre_process=pre_proc,
use_cache=args.cache,
count=count, **kwargs)
# load model to backend
model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
final_results = {
"runtime": model.name(),
"version": model.version(),
"time": int(time.time()),
"cmdline": str(args),
}
mlperf_conf = os.path.abspath(args.mlperf_conf)
if not os.path.exists(mlperf_conf):
log.error("{} not found".format(mlperf_conf))
sys.exit(1)
user_conf = os.path.abspath(args.user_conf)
if not os.path.exists(user_conf):
log.error("{} not found".format(user_conf))
sys.exit(1)
if args.output:
output_dir = os.path.abspath(args.output)
os.makedirs(output_dir, exist_ok=True)
os.chdir(output_dir)
#
# make one pass over the dataset to validate accuracy
#
count = ds.get_item_count()
# warmup
ds.load_query_samples([0])
for _ in range(5):
img, _ = ds.get_samples([0])
_ = backend.predict({backend.inputs[0]: img})
ds.unload_query_samples(None)
scenario = SCENARIO_MAP[args.scenario]
runner_map = {
lg.TestScenario.SingleStream: RunnerBase,
lg.TestScenario.MultiStream: QueueRunner,
lg.TestScenario.Server: QueueRunner,
lg.TestScenario.Offline: QueueRunner
}
runner = runner_map[scenario](model, ds, args.threads, post_proc=post_proc, max_batchsize=args.max_batchsize)
def issue_queries(query_samples):
runner.enqueue(query_samples)
def flush_queries():
pass
def process_latencies(latencies_ns):
# called by loadgen to show us the recorded latencies
global last_timeing
last_timeing = [t / NANO_SEC for t in latencies_ns]
log_output_settings = lg.LogOutputSettings()
log_output_settings.outdir = output_dir
log_output_settings.copy_summary_to_stdout = False
log_settings = lg.LogSettings()
log_settings.enable_trace = args.debug
log_settings.log_output = log_output_settings
settings = lg.TestSettings()
settings.FromConfig(mlperf_conf, args.model_name, args.scenario)
settings.FromConfig(user_conf, args.model_name, args.scenario)
settings.scenario = scenario
settings.mode = lg.TestMode.PerformanceOnly
if args.accuracy:
settings.mode = lg.TestMode.AccuracyOnly
if args.find_peak_performance:
settings.mode = lg.TestMode.FindPeakPerformance
if args.time:
# override the time we want to run
settings.min_duration_ms = args.time * MILLI_SEC
settings.max_duration_ms = args.time * MILLI_SEC
if args.qps:
qps = float(args.qps)
settings.server_target_qps = qps
settings.offline_expected_qps = qps
if count_override:
settings.min_query_count = count
settings.max_query_count = count
if args.samples_per_query:
settings.multi_stream_samples_per_query = args.samples_per_query
if args.max_latency:
settings.server_target_latency_ns = int(args.max_latency * NANO_SEC)
settings.multi_stream_target_latency_ns = int(args.max_latency * NANO_SEC)
sut = lg.ConstructSUT(issue_queries, flush_queries, process_latencies)
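    # The second argument to ConstructQSL caps how many samples loadgen keeps
    # resident at once (the performance sample count); ds.load/unload_query_samples
    # bring those samples in and out of memory on demand.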
qsl = lg.ConstructQSL(count, min(count, 500), ds.load_query_samples, ds.unload_query_samples)
log.info("starting {}".format(scenario))
result_dict = {"good": 0, "total": 0, "scenario": str(scenario)}
runner.start_run(result_dict, args.accuracy)
lg.StartTestWithLogSettings(sut, qsl, settings, log_settings)
if not last_timeing:
last_timeing = runner.result_timing
if args.accuracy:
post_proc.finalize(result_dict, ds, output_dir=args.output)
add_results(final_results, "{}".format(scenario),
result_dict, last_timeing, time.time() - ds.last_loaded, args.accuracy)
runner.finish()
lg.DestroyQSL(qsl)
lg.DestroySUT(sut)
#
# write final results
#
if args.output:
with open("results.json", "w") as f:
json.dump(final_results, f, sort_keys=True, indent=4)
if __name__ == "__main__":
main()
| 36.336013
| 124
| 0.633777
|
ed0c05140639b92c31e97ba72cb399dd88caaa87
| 999
|
py
|
Python
|
examples/opencensus/python-app/app/views.py
|
gaybro8777/ApplicationInsights-LocalForwarder
|
fca29ddde58622dcd8ac51f5e16ee0a8c455654c
|
[
"MIT"
] | 24
|
2018-09-17T15:40:37.000Z
|
2019-04-28T11:37:20.000Z
|
examples/opencensus/python-app/app/views.py
|
gaybro8777/ApplicationInsights-LocalForwarder
|
fca29ddde58622dcd8ac51f5e16ee0a8c455654c
|
[
"MIT"
] | 10
|
2018-08-31T19:15:13.000Z
|
2019-03-27T16:25:32.000Z
|
examples/opencensus/python-app/app/views.py
|
gaybro8777/ApplicationInsights-LocalForwarder
|
fca29ddde58622dcd8ac51f5e16ee0a8c455654c
|
[
"MIT"
] | 9
|
2019-07-02T11:30:02.000Z
|
2021-09-18T09:05:48.000Z
|
from django.http import HttpResponse
from django.shortcuts import render
from opencensus.trace import config_integration
from opencensus.trace.exporters.ocagent import trace_exporter
from opencensus.trace import tracer as tracer_module
from opencensus.trace.propagation.trace_context_http_header_format import TraceContextPropagator
from opencensus.trace.exporters.transports.background_thread \
import BackgroundThreadTransport
import time
import os
import requests
INTEGRATIONS = ['httplib']
service_name = os.getenv('SERVICE_NAME', 'python-service')
config_integration.trace_integrations(INTEGRATIONS, tracer=tracer_module.Tracer(
exporter=trace_exporter.TraceExporter(
service_name=service_name,
endpoint=os.getenv('OCAGENT_TRACE_EXPORTER_ENDPOINT'),
transport=BackgroundThreadTransport),
propagator=TraceContextPropagator()))
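# Spans are exported to the OpenCensus agent endpoint (the Local Forwarder in this
# example) on a background thread, so request handling is never blocked on exporting.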
def call(request):
requests.get("http://go-app:50030/call")
return HttpResponse("hello world from " + service_name)
| 32.225806
| 96
| 0.811812
|
f54b8c8203744c41fdd5c840c406c09764c530d1
| 10,140
|
py
|
Python
|
tests/unit/plugins/service/test_gdns_publisher.py
|
bcleenders/gordon-gcp
|
0cd7d154891e94f9b0174bb4d8daa20cff594996
|
[
"Apache-2.0"
] | 14
|
2018-02-14T19:29:23.000Z
|
2021-09-15T12:15:57.000Z
|
tests/unit/plugins/service/test_gdns_publisher.py
|
bcleenders/gordon-gcp
|
0cd7d154891e94f9b0174bb4d8daa20cff594996
|
[
"Apache-2.0"
] | 101
|
2018-01-02T14:30:48.000Z
|
2021-04-08T20:08:24.000Z
|
tests/unit/plugins/service/test_gdns_publisher.py
|
bcleenders/gordon-gcp
|
0cd7d154891e94f9b0174bb4d8daa20cff594996
|
[
"Apache-2.0"
] | 4
|
2018-10-10T19:29:52.000Z
|
2022-03-19T09:56:37.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pytest
from gordon import interfaces
from gordon_gcp import exceptions
from gordon_gcp.plugins.service import event_consumer
from gordon_gcp.plugins.service import gdns_publisher
#####
# GDNS Publisher Fixtures
#####
@pytest.fixture
def config():
return {
'dns_zone': 'example.com.',
'project': 'fakeproject',
'api_version': 'v1',
'default_ttl': 300,
'publish_wait_timeout': 10
}
@pytest.fixture
def resource_record():
return {
'name': 'service.example.com.',
'rrdatas': ['127.0.0.1'],
'type': 'A',
'ttl': 3600
}
@pytest.fixture
def event_msg_data(resource_record):
return {
'action': 'additions',
'resourceName':
'projects/a-project-id/zones/a-zone-name/instances/an-instance',
'resourceRecords': [
resource_record,
{
'name': 'subservice.example.com.',
'rrdatas': ['127.0.0.2'],
'type': 'A',
'ttl': 3600
}
]
}
@pytest.fixture
def event_msg_data_with_invalid_zone(event_msg_data):
event_msg_data['resourceRecords'][0]['name'] = 'brokenexample.com.'
return event_msg_data
@pytest.fixture
def event_msg_data_with_no_resource_records(event_msg_data):
event_msg_data['resourceRecords'] = []
return event_msg_data
@pytest.fixture
def initial_changes_req():
return {
'kind': 'dns#change',
'additions': [
{
'kind': 'dns#resourceRecordSet',
'name': 'service.example.com.',
'type': 'A',
'ttl': 3600,
'rrdatas': ['127.0.0.1', '127.0.0.2']
}
]
}
@pytest.fixture
def initial_changes_pending_json_resp():
return """{
"kind": "dns#change",
"additions": [{
"kind": "dns#resourceRecordSet",
"name": "service.example.com.",
"rrdatas": ["127.0.0.1", "127.0.0.2"],
"type": "A",
"ttl": 3600
}],
"startTime": "2018-04-26T15:02:17.541Z",
"id": "999",
"status": "pending"
}"""
@pytest.fixture
def initial_changes_resp():
return {
'kind': 'dns#change',
"additions": [{
"kind": "dns#resourceRecordSet",
"name": "service.example.com.",
"rrdatas": ["127.0.0.1", "127.0.0.2"],
"type": "A",
"ttl": 3600
}],
'startTime': '2018-04-26T15:02:17.541Z',
'id': '999',
'status': 'done'
}
@pytest.fixture
def handled_conflict_changes_req():
return {
'kind': 'dns#change',
'additions': [
{
'kind': 'dns#resourceRecordSet',
'name': 'service.example.com.',
'type': 'A',
'ttl': 3600,
'rrdatas': ['127.0.0.1', '127.0.0.2']
}
],
'deletions': [
{
'kind': 'dns#resourceRecordSet',
'name': 'service.example.com.',
'type': 'A',
'ttl': 3600,
'rrdatas': ['127.0.0.1']
}
],
}
@pytest.fixture
def matching_zone_records():
return [{
'kind': 'dns#resourceRecordSet',
'name': 'service.example.com.',
'type': 'A',
'ttl': 3600,
'rrdatas': ['127.0.0.1']
}]
@pytest.fixture
def event_message(mocker, event_msg_data):
event_msg = mocker.MagicMock(event_consumer.GEventMessage)
event_msg.msg_id = 'some-id-1234567890'
event_msg.data = event_msg_data
event_msg.phase = ''
return event_msg
@pytest.fixture
def mock_dns_client(mocker, create_mock_coro, matching_zone_records,
initial_changes_resp):
client = mocker.MagicMock()
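    # create_mock_coro (a fixture assumed to be provided by the shared conftest)
    # pairs each async method with a plain Mock, so tests can set side effects and
    # make assertions on the "_*_mock" attributes while the code under test awaits
    # the coroutine.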
get_mock, get_coro = create_mock_coro()
get_mock.return_value = matching_zone_records
mocker.patch.object(client, 'get_records_for_zone', get_coro)
mocker.patch.object(client, '_get_records_for_zone_mock', get_mock)
post_mock, post_coro = create_mock_coro()
post_mock.return_value = initial_changes_resp
mocker.patch.object(client, 'publish_changes', post_coro)
mocker.patch.object(client, '_publish_changes_mock', post_mock)
done_mock, done_coro = create_mock_coro()
done_mock.return_value = True
mocker.patch.object(client, 'is_change_done', done_coro)
mocker.patch.object(client, '_is_change_done_mock', done_mock)
return client
@pytest.fixture
def gdns_publisher_instance(mocker, mock_dns_client, config, metrics):
pb = gdns_publisher.GDNSPublisher(config, metrics, mock_dns_client)
return pb
@pytest.fixture
def mock_sleep(mocker):
sleep = mocker.Mock()
async def mock_sleep(*args, **kwargs):
sleep(*args, **kwargs)
mocker.patch('asyncio.sleep', mock_sleep)
return sleep
#####
# GDNS Publisher Tests
#####
def test_implements_interface(gdns_publisher_instance, metrics):
"""GDNSPublisher implements IMessageHandler"""
assert interfaces.IMessageHandler.providedBy(gdns_publisher_instance)
assert interfaces.IMessageHandler.implementedBy(
gdns_publisher.GDNSPublisher)
assert 'publish' == gdns_publisher_instance.phase
@pytest.mark.asyncio
async def test_handle_message_raises_exception_on_invalid_zone(
gdns_publisher_instance, event_message,
event_msg_data_with_invalid_zone, caplog):
"""Ensure exception raised on invalid zone"""
event_message.data = event_msg_data_with_invalid_zone
with pytest.raises(exceptions.InvalidDNSZoneInMessageError) as error:
await gdns_publisher_instance.handle_message(
event_message)
assert error.match('Error when asserting zone for record:')
assert 2 == len(caplog.records)
@pytest.mark.asyncio
async def test_handle_message_handles_update_conflict(
gdns_publisher_instance, event_message,
initial_changes_req, handled_conflict_changes_req,
initial_changes_pending_json_resp, initial_changes_resp, caplog,
mocker, mock_dns_client):
"""Ensure changes with update conflicts are successfully published"""
event_message.data['resourceRecords'] = initial_changes_req['additions']
mock_dns_client._publish_changes_mock.side_effect = [
exceptions.GCPHTTPResponseError('409', 409),
initial_changes_pending_json_resp]
await gdns_publisher_instance.handle_message(event_message)
mock_dns_client._publish_changes_mock.assert_called_with(
'example.com.', handled_conflict_changes_req)
assert 3 == len(caplog.records)
@pytest.mark.asyncio
async def test_handle_message_raises_exception_on_publish_timeout(
gdns_publisher_instance, event_message, mock_dns_client,
initial_changes_req, mock_sleep, caplog, mocker):
"""Ensure exception raised when publish wait timeout exceeded."""
start = datetime.datetime(2018, 1, 1, 11, 30, 0)
mockdatetime = mocker.Mock()
mockdatetime.now = mocker.Mock(side_effect=[
start, start, start + datetime.timedelta(
seconds=gdns_publisher_instance.config['publish_wait_timeout'] + 1)
])
mocker.patch(
'gordon_gcp.plugins.service.gdns_publisher.datetime.datetime',
mockdatetime)
event_message.data['resourceRecords'] = initial_changes_req['additions']
mock_dns_client._is_change_done_mock.return_value = False
with pytest.raises(exceptions.GCPPublishRecordTimeoutError) as e:
await gdns_publisher_instance.handle_message(event_message)
assert e.match('Timed out while waiting for DNS changes to transition '
'to \'done\' status.')
mock_sleep.assert_called_with(1)
assert 2 == len(caplog.records)
http_exceptions = [
(('404', 404), exceptions.GCPHTTPResponseError),
(('500', 500), exceptions.GCPHTTPResponseError),
(('no_code',), exceptions.GCPHTTPError)
]
@pytest.mark.parametrize('exception_args,http_exception', http_exceptions)
@pytest.mark.asyncio
async def test_dispatch_changes_http_exceptions_raised(
gdns_publisher_instance, resource_record,
exception_args, http_exception):
"""Exception is raised when getting HTTP error from Google API."""
gdns_publisher_instance.dns_client._publish_changes_mock.side_effect = \
http_exception(*exception_args)
with pytest.raises(http_exception):
await gdns_publisher_instance._dispatch_changes(
resource_record, None, None, None)
@pytest.mark.asyncio
async def test_handle_message_no_resource_records(
gdns_publisher_instance, event_message,
event_msg_data_with_no_resource_records, caplog):
"""Ensure message with no resource records is logged"""
event_message.data = event_msg_data_with_no_resource_records
await gdns_publisher_instance.handle_message(event_message)
assert "Publisher received new message." in caplog.text
assert ('No records published or deleted as no resource records were'
' present' in caplog.text)
@pytest.mark.asyncio
async def test_get_rrsets_by_name_and_type(
gdns_publisher_instance, initial_changes_req,
handled_conflict_changes_req):
"""Test correctly handling an additions conflict."""
deletions = await gdns_publisher_instance._get_rrsets_by_name_and_type(
'example.com.', initial_changes_req['additions'][0])
assert handled_conflict_changes_req['deletions'] == deletions
| 30.820669
| 79
| 0.663609
|
bb8ed1633ed071882209b2133f463aaef826d5b6
| 322
|
py
|
Python
|
smb_report/config/docs.py
|
bhavesh95863/SMB-Report
|
672b6b6ec05382e15d7255c8073e560dffe7e917
|
[
"MIT"
] | null | null | null |
smb_report/config/docs.py
|
bhavesh95863/SMB-Report
|
672b6b6ec05382e15d7255c8073e560dffe7e917
|
[
"MIT"
] | null | null | null |
smb_report/config/docs.py
|
bhavesh95863/SMB-Report
|
672b6b6ec05382e15d7255c8073e560dffe7e917
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/smb_report"
# docs_base_url = "https://[org_name].github.io/smb_report"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Smb Report"
| 26.833333
| 68
| 0.726708
|
1b5a41e694514377d8ae94403c7b372695634444
| 1,804
|
py
|
Python
|
scripts/py_scripts/get_hpo_names.py
|
Elenadisa/PhenoClusters
|
8736c257bf9b498469388afe8df7a41ab7248622
|
[
"MIT"
] | null | null | null |
scripts/py_scripts/get_hpo_names.py
|
Elenadisa/PhenoClusters
|
8736c257bf9b498469388afe8df7a41ab7248622
|
[
"MIT"
] | null | null | null |
scripts/py_scripts/get_hpo_names.py
|
Elenadisa/PhenoClusters
|
8736c257bf9b498469388afe8df7a41ab7248622
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python
import functions as fn
##############################################################################################################################################
# OPTPARSE
##############################################################################################################################################
import optparse
parser = optparse.OptionParser()
parser.add_option("-d", "--dictionary", dest="main_dictionary",
help="dictionary", metavar="FILE")
parser.add_option("-A", "--key_col", dest="key_col_dictionary",
help="key id col for the main dictionary", type='int')
parser.add_option("-a", "--value_col", dest="value_col_dictionary",
help="value col for main dictionary", type='int')
parser.add_option("-l", "--file to analyse", dest="file_to_analyse",
help="dictionary to analyse", metavar="FILE")
parser.add_option("-B", "--key_id", dest="key_col_analyse",
help="key id col for the dictionary to analyse", type='int')
(options, args) = parser.parse_args()
###############################################################################################################################################
# MAIN
###############################################################################################################################################
hpo_dictionary = fn.build_dictionary(options.main_dictionary, options.key_col_dictionary, options.value_col_dictionary)
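# The dictionary presumably maps each HPO identifier to its human-readable name(s);
# every "<hpo>\t<cluster>" line of the cluster file is echoed with the name appended
# as a third column.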
cluster_file = open(options.file_to_analyse)
print("hpo" + "\t" + "cluster" + "\t" + "name")
for line in cluster_file:
line = line.rstrip("\n")
HPO, cluster = line.split("\t")
if HPO in hpo_dictionary:
print(line + "\t" + "".join(hpo_dictionary[HPO]))
| 47.473684
| 143
| 0.441796
|
b4ac18f651950bbc63d31233fcaf1be67757871d
| 13,964
|
py
|
Python
|
test/functional/feature_dbcrash.py
|
raininfotech/Guapcoin
|
3df58f6d5ffab0d6c4c97fb79ad9722891fad528
|
[
"MIT"
] | null | null | null |
test/functional/feature_dbcrash.py
|
raininfotech/Guapcoin
|
3df58f6d5ffab0d6c4c97fb79ad9722891fad528
|
[
"MIT"
] | null | null | null |
test/functional/feature_dbcrash.py
|
raininfotech/Guapcoin
|
3df58f6d5ffab0d6c4c97fb79ad9722891fad528
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import sys
import time
from test_framework.test_framework import GuapcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest]
try:
HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected)
except AttributeError:
pass
class ChainstateWriteCrashTest(GuapcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.rpc_timewait = 600
self.setup_clean_chain = False
# Need a bit of extra time for the nodes to start up for this test
self.chain_params = ['-nuparams=v5_shield:90000', '-nuparams=Guapcoin_v4.0:90000',
'-nuparams=Guapcoin_v3.4:90000', '-nuparams=Zerocoin_Public:90000',
'-nuparams=Zerocoin_v2:90000', '-nuparams=Zerocoin:90000',
'-nuparams=PoS_v2:90000', '-nuparams=PoS:90000']
# Set -maxmempool=0 to turn off mempool memory sharing with dbcache
# Set -rpcservertimeout=900 to reduce socket disconnects in this
# long-running test
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900"] + self.chain_params
# Set different crash ratios and cache sizes. Note that not all of
# -dbcache goes to the in-memory coins cache.
self.node0_args = ["-dbcrashratio=8", "-dbcache=4", "-dbbatchsize=200000"] + self.base_args
self.node1_args = ["-dbcrashratio=16", "-dbcache=8", "-dbbatchsize=200000"] + self.base_args
self.node2_args = ["-dbcrashratio=24", "-dbcache=16", "-dbbatchsize=200000"] + self.base_args
# Node3 is a normal node with default args, except will mine full blocks
self.node3_args = ["-blockmaxsize=1999000"] + self.chain_params # future: back port blockmaxweight
self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
# Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
after 60 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If guapcoind exits, then try again. wait_for_node_exit()
# should raise an exception if guapcoind doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, guapcoind isn't coming back up on restart. Could be a
# bug in guapcoind, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
        raise AssertionError("Unable to successfully restart node %d in allotted time" % node_index)
def submit_block_catch_error(self, node_index, block):
"""Try submitting a block to the given node.
Catch any exceptions that indicate the node has crashed.
Returns true if the block was submitted successfully; false otherwise."""
try:
self.nodes[node_index].submitblock(block)
return True
except http.client.BadStatusLine as e:
# Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
else:
raise
except tuple(HTTP_DISCONNECT_ERRORS) as e:
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
except OSError as e:
self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET, errno.EPROTOTYPE]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
def sync_node3blocks(self, block_hashes):
"""Use submitblock to sync node3's chain with the other nodes
If submitblock fails, restart the node and get the new utxo hash.
If any nodes crash while updating, we'll compare utxo hashes to
ensure recovery was successful."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, False)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
"""Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 10000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for _ in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
output_amount = (input_amount - FEE) // 3
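            # Spending two inputs to create three outputs grows the utxo set by one
            # entry per transaction, keeping plenty of coins available for later iterations.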
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for _ in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
def run_test(self):
# Track test coverage statistics
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
self.log.info("Prepped %d utxo entries", len(utxo_list))
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
# Syncing the blocks could cause nodes to crash, so the test begins here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Main test loop:
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized reorg.
for i in range(40):
self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
random_height = random.randint(starting_tip_height, current_height)
self.log.debug("At height %d, considering height %d", current_height, random_height)
if random_height > starting_tip_height:
# Randomly reorg from this point with some probability (1/4 for
# tip, 1/5 for tip-1, ...)
if random.random() < 1.0 / (current_height + 4 - random_height):
self.log.debug("Invalidating block at height %d", random_height)
self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = []
while current_height + 1 > self.nodes[3].getblockcount():
block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
self.log.debug("Syncing %d new blocks...", len(block_hashes))
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
self.log.debug("Node3 utxo count: %d", len(utxo_list))
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
# won't get crashes on shutdown at the end of the test.
self.verify_utxo_hash()
# Check the test coverage
self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
# If no nodes were restarted, we didn't test anything.
assert self.restart_counts != [0, 0, 0]
# Make sure we tested the case of crash-during-recovery.
assert self.crashed_on_restart > 0
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
self.log.warning("Node %d never crashed during utxo flush!", i)
if __name__ == "__main__":
ChainstateWriteCrashTest().main()
| 47.986254
| 120
| 0.639215
|
570106dad9961824d3a7d1324f945be54cf4e6d5
| 36,457
|
py
|
Python
|
sympy/geometry/tests/test_line.py
|
rvbcldud/sympy
|
7f6044484997a67c5af7a6319ed46745195d6b0b
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T04:35:56.000Z
|
2022-02-03T04:35:56.000Z
|
sympy/geometry/tests/test_line.py
|
rvbcldud/sympy
|
7f6044484997a67c5af7a6319ed46745195d6b0b
|
[
"BSD-3-Clause"
] | 3
|
2022-02-04T14:45:16.000Z
|
2022-02-04T14:45:45.000Z
|
sympy/geometry/tests/test_line.py
|
rvbcldud/sympy
|
7f6044484997a67c5af7a6319ed46745195d6b0b
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.core.numbers import (Float, Rational, oo, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import (acos, cos, sin)
from sympy.sets import EmptySet
from sympy.simplify.simplify import simplify
from sympy.functions.elementary.trigonometric import tan
from sympy.geometry import (Circle, GeometryError, Line, Point, Ray,
Segment, Triangle, intersection, Point3D, Line3D, Ray3D, Segment3D,
Point2D, Line2D)
from sympy.geometry.line import Undecidable
from sympy.geometry.polygon import _asa as asa
from sympy.utilities.iterables import cartes
from sympy.testing.pytest import raises, warns
x = Symbol('x', real=True)
y = Symbol('y', real=True)
z = Symbol('z', real=True)
k = Symbol('k', real=True)
x1 = Symbol('x1', real=True)
y1 = Symbol('y1', real=True)
t = Symbol('t', real=True)
a, b = symbols('a,b', real=True)
m = symbols('m', real=True)
def test_object_from_equation():
from sympy.abc import x, y, a, b
assert Line(3*x + y + 18) == Line2D(Point2D(0, -18), Point2D(1, -21))
assert Line(3*x + 5 * y + 1) == Line2D(
Point2D(0, Rational(-1, 5)), Point2D(1, Rational(-4, 5)))
assert Line(3*a + b + 18, x="a", y="b") == Line2D(
Point2D(0, -18), Point2D(1, -21))
assert Line(3*x + y) == Line2D(Point2D(0, 0), Point2D(1, -3))
assert Line(x + y) == Line2D(Point2D(0, 0), Point2D(1, -1))
assert Line(Eq(3*a + b, -18), x="a", y=b) == Line2D(
Point2D(0, -18), Point2D(1, -21))
# issue 22361
assert Line(x - 1) == Line2D(Point2D(1, 0), Point2D(1, 1))
assert Line(2*x - 2, y=x) == Line2D(Point2D(0, 1), Point2D(1, 1))
assert Line(y) == Line2D(Point2D(0, 0), Point2D(1, 0))
assert Line(2*y, x=y) == Line2D(Point2D(0, 0), Point2D(0, 1))
assert Line(y, x=y) == Line2D(Point2D(0, 0), Point2D(0, 1))
raises(ValueError, lambda: Line(x / y))
raises(ValueError, lambda: Line(a / b, x='a', y='b'))
raises(ValueError, lambda: Line(y / x))
raises(ValueError, lambda: Line(b / a, x='a', y='b'))
raises(ValueError, lambda: Line((x + 1)**2 + y))
def feq(a, b):
"""Test if two floating point values are 'equal'."""
t_float = Float("1.0E-10")
return -t_float < a - b < t_float
def test_angle_between():
a = Point(1, 2, 3, 4)
b = a.orthogonal_direction
o = a.origin
assert feq(Line.angle_between(Line(Point(0, 0), Point(1, 1)),
Line(Point(0, 0), Point(5, 0))).evalf(), pi.evalf() / 4)
assert Line(a, o).angle_between(Line(b, o)) == pi / 2
z = Point3D(0, 0, 0)
assert Line3D.angle_between(Line3D(z, Point3D(1, 1, 1)),
Line3D(z, Point3D(5, 0, 0))) == acos(sqrt(3) / 3)
# direction of points is used to determine angle
assert Line3D.angle_between(Line3D(z, Point3D(1, 1, 1)),
Line3D(Point3D(5, 0, 0), z)) == acos(-sqrt(3) / 3)
def test_closing_angle():
a = Ray((0, 0), angle=0)
b = Ray((1, 2), angle=pi/2)
assert a.closing_angle(b) == -pi/2
assert b.closing_angle(a) == pi/2
assert a.closing_angle(a) == 0
def test_smallest_angle():
a = Line(Point(1, 1), Point(1, 2))
b = Line(Point(1, 1),Point(2, 3))
assert a.smallest_angle_between(b) == acos(2*sqrt(5)/5)
def test_svg():
a = Line(Point(1, 1),Point(1, 2))
assert a._svg() == '<path fill-rule="evenodd" fill="#66cc99" stroke="#555555" stroke-width="2.0" opacity="0.6" d="M 1.00000000000000,1.00000000000000 L 1.00000000000000,2.00000000000000" marker-start="url(#markerReverseArrow)" marker-end="url(#markerArrow)"/>'
a = Segment(Point(1, 0),Point(1, 1))
assert a._svg() == '<path fill-rule="evenodd" fill="#66cc99" stroke="#555555" stroke-width="2.0" opacity="0.6" d="M 1.00000000000000,0 L 1.00000000000000,1.00000000000000" />'
a = Ray(Point(2, 3), Point(3, 5))
assert a._svg() == '<path fill-rule="evenodd" fill="#66cc99" stroke="#555555" stroke-width="2.0" opacity="0.6" d="M 2.00000000000000,3.00000000000000 L 3.00000000000000,5.00000000000000" marker-start="url(#markerCircle)" marker-end="url(#markerArrow)"/>'
def test_arbitrary_point():
l1 = Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
l2 = Line(Point(x1, x1), Point(y1, y1))
assert l2.arbitrary_point() in l2
assert Ray((1, 1), angle=pi / 4).arbitrary_point() == \
Point(t + 1, t + 1)
assert Segment((1, 1), (2, 3)).arbitrary_point() == Point(1 + t, 1 + 2 * t)
assert l1.perpendicular_segment(l1.arbitrary_point()) == l1.arbitrary_point()
assert Ray3D((1, 1, 1), direction_ratio=[1, 2, 3]).arbitrary_point() == \
Point3D(t + 1, 2 * t + 1, 3 * t + 1)
assert Segment3D(Point3D(0, 0, 0), Point3D(1, 1, 1)).midpoint == \
Point3D(S.Half, S.Half, S.Half)
assert Segment3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1)).length == sqrt(3) * sqrt((x1 - y1) ** 2)
assert Segment3D((1, 1, 1), (2, 3, 4)).arbitrary_point() == \
Point3D(t + 1, 2 * t + 1, 3 * t + 1)
raises(ValueError, (lambda: Line((x, 1), (2, 3)).arbitrary_point(x)))
def test_are_concurrent_2d():
l1 = Line(Point(0, 0), Point(1, 1))
l2 = Line(Point(x1, x1), Point(x1, 1 + x1))
assert Line.are_concurrent(l1) is False
assert Line.are_concurrent(l1, l2)
assert Line.are_concurrent(l1, l1, l1, l2)
assert Line.are_concurrent(l1, l2, Line(Point(5, x1), Point(Rational(-3, 5), x1)))
assert Line.are_concurrent(l1, Line(Point(0, 0), Point(-x1, x1)), l2) is False
def test_are_concurrent_3d():
p1 = Point3D(0, 0, 0)
l1 = Line(p1, Point3D(1, 1, 1))
parallel_1 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))
parallel_2 = Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0))
assert Line3D.are_concurrent(l1) is False
assert Line3D.are_concurrent(l1, Line(Point3D(x1, x1, x1), Point3D(y1, y1, y1))) is False
assert Line3D.are_concurrent(l1, Line3D(p1, Point3D(x1, x1, x1)),
Line(Point3D(x1, x1, x1), Point3D(x1, 1 + x1, 1))) is True
assert Line3D.are_concurrent(parallel_1, parallel_2) is False
def test_arguments():
"""Functions accepting `Point` objects in `geometry`
should also accept tuples, lists, and generators and
automatically convert them to points."""
from sympy.utilities.iterables import subsets
singles2d = ((1, 2), [1, 3], Point(1, 5))
doubles2d = subsets(singles2d, 2)
l2d = Line(Point2D(1, 2), Point2D(2, 3))
singles3d = ((1, 2, 3), [1, 2, 4], Point(1, 2, 6))
doubles3d = subsets(singles3d, 2)
l3d = Line(Point3D(1, 2, 3), Point3D(1, 1, 2))
singles4d = ((1, 2, 3, 4), [1, 2, 3, 5], Point(1, 2, 3, 7))
doubles4d = subsets(singles4d, 2)
l4d = Line(Point(1, 2, 3, 4), Point(2, 2, 2, 2))
# test 2D
test_single = ['contains', 'distance', 'equals', 'parallel_line', 'perpendicular_line', 'perpendicular_segment',
'projection', 'intersection']
for p in doubles2d:
Line2D(*p)
for func in test_single:
for p in singles2d:
getattr(l2d, func)(p)
# test 3D
for p in doubles3d:
Line3D(*p)
for func in test_single:
for p in singles3d:
getattr(l3d, func)(p)
# test 4D
for p in doubles4d:
Line(*p)
for func in test_single:
for p in singles4d:
getattr(l4d, func)(p)
def test_basic_properties_2d():
p1 = Point(0, 0)
p2 = Point(1, 1)
p10 = Point(2000, 2000)
p_r3 = Ray(p1, p2).random_point()
p_r4 = Ray(p2, p1).random_point()
l1 = Line(p1, p2)
l3 = Line(Point(x1, x1), Point(x1, 1 + x1))
l4 = Line(p1, Point(1, 0))
r1 = Ray(p1, Point(0, 1))
r2 = Ray(Point(0, 1), p1)
s1 = Segment(p1, p10)
p_s1 = s1.random_point()
assert Line((1, 1), slope=1) == Line((1, 1), (2, 2))
assert Line((1, 1), slope=oo) == Line((1, 1), (1, 2))
assert Line((1, 1), slope=oo).bounds == (1, 1, 1, 2)
assert Line((1, 1), slope=-oo) == Line((1, 1), (1, 2))
assert Line(p1, p2).scale(2, 1) == Line(p1, Point(2, 1))
assert Line(p1, p2) == Line(p1, p2)
assert Line(p1, p2) != Line(p2, p1)
assert l1 != Line(Point(x1, x1), Point(y1, y1))
assert l1 != l3
assert Line(p1, p10) != Line(p10, p1)
assert Line(p1, p10) != p1
assert p1 in l1 # is p1 on the line l1?
assert p1 not in l3
assert s1 in Line(p1, p10)
assert Ray(Point(0, 0), Point(0, 1)) in Ray(Point(0, 0), Point(0, 2))
assert Ray(Point(0, 0), Point(0, 2)) in Ray(Point(0, 0), Point(0, 1))
assert Ray(Point(0, 0), Point(0, 2)).xdirection == S.Zero
assert Ray(Point(0, 0), Point(1, 2)).xdirection == S.Infinity
assert Ray(Point(0, 0), Point(-1, 2)).xdirection == S.NegativeInfinity
assert Ray(Point(0, 0), Point(2, 0)).ydirection == S.Zero
assert Ray(Point(0, 0), Point(2, 2)).ydirection == S.Infinity
assert Ray(Point(0, 0), Point(2, -2)).ydirection == S.NegativeInfinity
assert (r1 in s1) is False
assert Segment(p1, p2) in s1
assert Ray(Point(x1, x1), Point(x1, 1 + x1)) != Ray(p1, Point(-1, 5))
assert Segment(p1, p2).midpoint == Point(S.Half, S.Half)
assert Segment(p1, Point(-x1, x1)).length == sqrt(2 * (x1 ** 2))
assert l1.slope == 1
assert l3.slope is oo
assert l4.slope == 0
assert Line(p1, Point(0, 1)).slope is oo
assert Line(r1.source, r1.random_point()).slope == r1.slope
assert Line(r2.source, r2.random_point()).slope == r2.slope
assert Segment(Point(0, -1), Segment(p1, Point(0, 1)).random_point()).slope == Segment(p1, Point(0, 1)).slope
assert l4.coefficients == (0, 1, 0)
assert Line((-x, x), (-x + 1, x - 1)).coefficients == (1, 1, 0)
assert Line(p1, Point(0, 1)).coefficients == (1, 0, 0)
# issue 7963
r = Ray((0, 0), angle=x)
assert r.subs(x, 3 * pi / 4) == Ray((0, 0), (-1, 1))
assert r.subs(x, 5 * pi / 4) == Ray((0, 0), (-1, -1))
assert r.subs(x, -pi / 4) == Ray((0, 0), (1, -1))
assert r.subs(x, pi / 2) == Ray((0, 0), (0, 1))
assert r.subs(x, -pi / 2) == Ray((0, 0), (0, -1))
for ind in range(0, 5):
assert l3.random_point() in l3
assert p_r3.x >= p1.x and p_r3.y >= p1.y
assert p_r4.x <= p2.x and p_r4.y <= p2.y
assert p1.x <= p_s1.x <= p10.x and p1.y <= p_s1.y <= p10.y
assert hash(s1) != hash(Segment(p10, p1))
assert s1.plot_interval() == [t, 0, 1]
assert Line(p1, p10).plot_interval() == [t, -5, 5]
assert Ray((0, 0), angle=pi / 4).plot_interval() == [t, 0, 10]
def test_basic_properties_3d():
p1 = Point3D(0, 0, 0)
p2 = Point3D(1, 1, 1)
p3 = Point3D(x1, x1, x1)
p5 = Point3D(x1, 1 + x1, 1)
l1 = Line3D(p1, p2)
l3 = Line3D(p3, p5)
r1 = Ray3D(p1, Point3D(-1, 5, 0))
r3 = Ray3D(p1, p2)
s1 = Segment3D(p1, p2)
assert Line3D((1, 1, 1), direction_ratio=[2, 3, 4]) == Line3D(Point3D(1, 1, 1), Point3D(3, 4, 5))
assert Line3D((1, 1, 1), direction_ratio=[1, 5, 7]) == Line3D(Point3D(1, 1, 1), Point3D(2, 6, 8))
assert Line3D((1, 1, 1), direction_ratio=[1, 2, 3]) == Line3D(Point3D(1, 1, 1), Point3D(2, 3, 4))
assert Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).direction_cosine == [1, 0, 0]
assert Line3D(Line3D(p1, Point3D(0, 1, 0))) == Line3D(p1, Point3D(0, 1, 0))
assert Ray3D(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))) == Ray3D(p1, Point3D(1, 0, 0))
assert Line3D(p1, p2) != Line3D(p2, p1)
assert l1 != l3
assert l1 != Line3D(p3, Point3D(y1, y1, y1))
assert r3 != r1
assert Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) in Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2))
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)) in Ray3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)).xdirection == S.Infinity
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)).ydirection == S.Infinity
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 2)).zdirection == S.Infinity
assert Ray3D(Point3D(0, 0, 0), Point3D(-2, 2, 2)).xdirection == S.NegativeInfinity
assert Ray3D(Point3D(0, 0, 0), Point3D(2, -2, 2)).ydirection == S.NegativeInfinity
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, -2)).zdirection == S.NegativeInfinity
assert Ray3D(Point3D(0, 0, 0), Point3D(0, 2, 2)).xdirection == S.Zero
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 0, 2)).ydirection == S.Zero
assert Ray3D(Point3D(0, 0, 0), Point3D(2, 2, 0)).zdirection == S.Zero
assert p1 in l1
assert p1 not in l3
assert l1.direction_ratio == [1, 1, 1]
assert s1.midpoint == Point3D(S.Half, S.Half, S.Half)
# Test zdirection
assert Ray3D(p1, Point3D(0, 0, -1)).zdirection is S.NegativeInfinity
def test_contains():
p1 = Point(0, 0)
r = Ray(p1, Point(4, 4))
r1 = Ray3D(p1, Point3D(0, 0, -1))
r2 = Ray3D(p1, Point3D(0, 1, 0))
r3 = Ray3D(p1, Point3D(0, 0, 1))
l = Line(Point(0, 1), Point(3, 4))
# Segment contains
assert Point(0, (a + b) / 2) in Segment((0, a), (0, b))
assert Point((a + b) / 2, 0) in Segment((a, 0), (b, 0))
assert Point3D(0, 1, 0) in Segment3D((0, 1, 0), (0, 1, 0))
assert Point3D(1, 0, 0) in Segment3D((1, 0, 0), (1, 0, 0))
assert Segment3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).contains([]) is True
assert Segment3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).contains(
Segment3D(Point3D(2, 2, 2), Point3D(3, 2, 2))) is False
# Line contains
assert l.contains(Point(0, 1)) is True
assert l.contains((0, 1)) is True
assert l.contains((0, 0)) is False
# Ray contains
assert r.contains(p1) is True
assert r.contains((1, 1)) is True
assert r.contains((1, 3)) is False
assert r.contains(Segment((1, 1), (2, 2))) is True
assert r.contains(Segment((1, 2), (2, 5))) is False
assert r.contains(Ray((2, 2), (3, 3))) is True
assert r.contains(Ray((2, 2), (3, 5))) is False
assert r1.contains(Segment3D(p1, Point3D(0, 0, -10))) is True
assert r1.contains(Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))) is False
assert r2.contains(Point3D(0, 0, 0)) is True
assert r3.contains(Point3D(0, 0, 0)) is True
assert Ray3D(Point3D(1, 1, 1), Point3D(1, 0, 0)).contains([]) is False
assert Line3D((0, 0, 0), (x, y, z)).contains((2 * x, 2 * y, 2 * z))
with warns(UserWarning):
assert Line3D(p1, Point3D(0, 1, 0)).contains(Point(1.0, 1.0)) is False
with warns(UserWarning):
assert r3.contains(Point(1.0, 1.0)) is False
def test_contains_nonreal_symbols():
u, v, w, z = symbols('u, v, w, z')
l = Segment(Point(u, w), Point(v, z))
p = Point(u*Rational(2, 3) + v/3, w*Rational(2, 3) + z/3)
assert l.contains(p)
def test_distance_2d():
p1 = Point(0, 0)
p2 = Point(1, 1)
half = S.Half
s1 = Segment(Point(0, 0), Point(1, 1))
s2 = Segment(Point(half, half), Point(1, 0))
r = Ray(p1, p2)
assert s1.distance(Point(0, 0)) == 0
assert s1.distance((0, 0)) == 0
assert s2.distance(Point(0, 0)) == 2 ** half / 2
assert s2.distance(Point(Rational(3) / 2, Rational(3) / 2)) == 2 ** half
assert Line(p1, p2).distance(Point(-1, 1)) == sqrt(2)
assert Line(p1, p2).distance(Point(1, -1)) == sqrt(2)
assert Line(p1, p2).distance(Point(2, 2)) == 0
assert Line(p1, p2).distance((-1, 1)) == sqrt(2)
assert Line((0, 0), (0, 1)).distance(p1) == 0
assert Line((0, 0), (0, 1)).distance(p2) == 1
assert Line((0, 0), (1, 0)).distance(p1) == 0
assert Line((0, 0), (1, 0)).distance(p2) == 1
assert r.distance(Point(-1, -1)) == sqrt(2)
assert r.distance(Point(1, 1)) == 0
assert r.distance(Point(-1, 1)) == sqrt(2)
assert Ray((1, 1), (2, 2)).distance(Point(1.5, 3)) == 3 * sqrt(2) / 4
assert r.distance((1, 1)) == 0
def test_dimension_normalization():
with warns(UserWarning):
assert Ray((1, 1), (2, 1, 2)) == Ray((1, 1, 0), (2, 1, 2))
def test_distance_3d():
p1, p2 = Point3D(0, 0, 0), Point3D(1, 1, 1)
p3 = Point3D(Rational(3) / 2, Rational(3) / 2, Rational(3) / 2)
s1 = Segment3D(Point3D(0, 0, 0), Point3D(1, 1, 1))
s2 = Segment3D(Point3D(S.Half, S.Half, S.Half), Point3D(1, 0, 1))
r = Ray3D(p1, p2)
assert s1.distance(p1) == 0
assert s2.distance(p1) == sqrt(3) / 2
assert s2.distance(p3) == 2 * sqrt(6) / 3
assert s1.distance((0, 0, 0)) == 0
assert s2.distance((0, 0, 0)) == sqrt(3) / 2
assert s1.distance(p1) == 0
assert s2.distance(p1) == sqrt(3) / 2
assert s2.distance(p3) == 2 * sqrt(6) / 3
assert s1.distance((0, 0, 0)) == 0
assert s2.distance((0, 0, 0)) == sqrt(3) / 2
# Line to point
assert Line3D(p1, p2).distance(Point3D(-1, 1, 1)) == 2 * sqrt(6) / 3
assert Line3D(p1, p2).distance(Point3D(1, -1, 1)) == 2 * sqrt(6) / 3
assert Line3D(p1, p2).distance(Point3D(2, 2, 2)) == 0
assert Line3D(p1, p2).distance((2, 2, 2)) == 0
assert Line3D(p1, p2).distance((1, -1, 1)) == 2 * sqrt(6) / 3
assert Line3D((0, 0, 0), (0, 1, 0)).distance(p1) == 0
assert Line3D((0, 0, 0), (0, 1, 0)).distance(p2) == sqrt(2)
assert Line3D((0, 0, 0), (1, 0, 0)).distance(p1) == 0
assert Line3D((0, 0, 0), (1, 0, 0)).distance(p2) == sqrt(2)
# Ray to point
assert r.distance(Point3D(-1, -1, -1)) == sqrt(3)
assert r.distance(Point3D(1, 1, 1)) == 0
assert r.distance((-1, -1, -1)) == sqrt(3)
assert r.distance((1, 1, 1)) == 0
assert Ray3D((0, 0, 0), (1, 1, 2)).distance((-1, -1, 2)) == 4 * sqrt(3) / 3
assert Ray3D((1, 1, 1), (2, 2, 2)).distance(Point3D(1.5, -3, -1)) == Rational(9) / 2
assert Ray3D((1, 1, 1), (2, 2, 2)).distance(Point3D(1.5, 3, 1)) == sqrt(78) / 6
def test_equals():
p1 = Point(0, 0)
p2 = Point(1, 1)
l1 = Line(p1, p2)
l2 = Line((0, 5), slope=m)
l3 = Line(Point(x1, x1), Point(x1, 1 + x1))
assert l1.perpendicular_line(p1.args).equals(Line(Point(0, 0), Point(1, -1)))
assert l1.perpendicular_line(p1).equals(Line(Point(0, 0), Point(1, -1)))
assert Line(Point(x1, x1), Point(y1, y1)).parallel_line(Point(-x1, x1)). \
equals(Line(Point(-x1, x1), Point(-y1, 2 * x1 - y1)))
assert l3.parallel_line(p1.args).equals(Line(Point(0, 0), Point(0, -1)))
assert l3.parallel_line(p1).equals(Line(Point(0, 0), Point(0, -1)))
assert (l2.distance(Point(2, 3)) - 2 * abs(m + 1) / sqrt(m ** 2 + 1)).equals(0)
assert Line3D(p1, Point3D(0, 1, 0)).equals(Point(1.0, 1.0)) is False
assert Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).equals(Line3D(Point3D(-5, 0, 0), Point3D(-1, 0, 0))) is True
assert Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)).equals(Line3D(p1, Point3D(0, 1, 0))) is False
assert Ray3D(p1, Point3D(0, 0, -1)).equals(Point(1.0, 1.0)) is False
assert Ray3D(p1, Point3D(0, 0, -1)).equals(Ray3D(p1, Point3D(0, 0, -1))) is True
assert Line3D((0, 0), (t, t)).perpendicular_line(Point(0, 1, 0)).equals(
Line3D(Point3D(0, 1, 0), Point3D(S.Half, S.Half, 0)))
assert Line3D((0, 0), (t, t)).perpendicular_segment(Point(0, 1, 0)).equals(Segment3D((0, 1), (S.Half, S.Half)))
assert Line3D(p1, Point3D(0, 1, 0)).equals(Point(1.0, 1.0)) is False
def test_equation():
p1 = Point(0, 0)
p2 = Point(1, 1)
l1 = Line(p1, p2)
l3 = Line(Point(x1, x1), Point(x1, 1 + x1))
assert simplify(l1.equation()) in (x - y, y - x)
assert simplify(l3.equation()) in (x - x1, x1 - x)
assert simplify(l1.equation()) in (x - y, y - x)
assert simplify(l3.equation()) in (x - x1, x1 - x)
assert Line(p1, Point(1, 0)).equation(x=x, y=y) == y
assert Line(p1, Point(0, 1)).equation() == x
assert Line(Point(2, 0), Point(2, 1)).equation() == x - 2
assert Line(p2, Point(2, 1)).equation() == y - 1
assert Line3D(Point(x1, x1, x1), Point(y1, y1, y1)
).equation() == (-x + y, -x + z)
assert Line3D(Point(1, 2, 3), Point(2, 3, 4)
).equation() == (-x + y - 1, -x + z - 2)
assert Line3D(Point(1, 2, 3), Point(1, 3, 4)
).equation() == (x - 1, -y + z - 1)
assert Line3D(Point(1, 2, 3), Point(2, 2, 4)
).equation() == (y - 2, -x + z - 2)
assert Line3D(Point(1, 2, 3), Point(2, 3, 3)
).equation() == (-x + y - 1, z - 3)
assert Line3D(Point(1, 2, 3), Point(1, 2, 4)
).equation() == (x - 1, y - 2)
assert Line3D(Point(1, 2, 3), Point(1, 3, 3)
).equation() == (x - 1, z - 3)
assert Line3D(Point(1, 2, 3), Point(2, 2, 3)
).equation() == (y - 2, z - 3)
def test_intersection_2d():
p1 = Point(0, 0)
p2 = Point(1, 1)
p3 = Point(x1, x1)
p4 = Point(y1, y1)
l1 = Line(p1, p2)
l3 = Line(Point(0, 0), Point(3, 4))
r1 = Ray(Point(1, 1), Point(2, 2))
r2 = Ray(Point(0, 0), Point(3, 4))
r4 = Ray(p1, p2)
r6 = Ray(Point(0, 1), Point(1, 2))
r7 = Ray(Point(0.5, 0.5), Point(1, 1))
s1 = Segment(p1, p2)
s2 = Segment(Point(0.25, 0.25), Point(0.5, 0.5))
s3 = Segment(Point(0, 0), Point(3, 4))
assert intersection(l1, p1) == [p1]
assert intersection(l1, Point(x1, 1 + x1)) == []
assert intersection(l1, Line(p3, p4)) in [[l1], [Line(p3, p4)]]
assert intersection(l1, l1.parallel_line(Point(x1, 1 + x1))) == []
assert intersection(l3, l3) == [l3]
assert intersection(l3, r2) == [r2]
assert intersection(l3, s3) == [s3]
assert intersection(s3, l3) == [s3]
assert intersection(Segment(Point(-10, 10), Point(10, 10)), Segment(Point(-5, -5), Point(-5, 5))) == []
assert intersection(r2, l3) == [r2]
assert intersection(r1, Ray(Point(2, 2), Point(0, 0))) == [Segment(Point(1, 1), Point(2, 2))]
assert intersection(r1, Ray(Point(1, 1), Point(-1, -1))) == [Point(1, 1)]
assert intersection(r1, Segment(Point(0, 0), Point(2, 2))) == [Segment(Point(1, 1), Point(2, 2))]
assert r4.intersection(s2) == [s2]
assert r4.intersection(Segment(Point(2, 3), Point(3, 4))) == []
assert r4.intersection(Segment(Point(-1, -1), Point(0.5, 0.5))) == [Segment(p1, Point(0.5, 0.5))]
assert r4.intersection(Ray(p2, p1)) == [s1]
assert Ray(p2, p1).intersection(r6) == []
assert r4.intersection(r7) == r7.intersection(r4) == [r7]
assert Ray3D((0, 0), (3, 0)).intersection(Ray3D((1, 0), (3, 0))) == [Ray3D((1, 0), (3, 0))]
assert Ray3D((1, 0), (3, 0)).intersection(Ray3D((0, 0), (3, 0))) == [Ray3D((1, 0), (3, 0))]
assert Ray(Point(0, 0), Point(0, 4)).intersection(Ray(Point(0, 1), Point(0, -1))) == \
[Segment(Point(0, 0), Point(0, 1))]
assert Segment3D((0, 0), (3, 0)).intersection(
Segment3D((1, 0), (2, 0))) == [Segment3D((1, 0), (2, 0))]
assert Segment3D((1, 0), (2, 0)).intersection(
Segment3D((0, 0), (3, 0))) == [Segment3D((1, 0), (2, 0))]
assert Segment3D((0, 0), (3, 0)).intersection(
Segment3D((3, 0), (4, 0))) == [Point3D((3, 0))]
assert Segment3D((0, 0), (3, 0)).intersection(
Segment3D((2, 0), (5, 0))) == [Segment3D((2, 0), (3, 0))]
assert Segment3D((0, 0), (3, 0)).intersection(
Segment3D((-2, 0), (1, 0))) == [Segment3D((0, 0), (1, 0))]
assert Segment3D((0, 0), (3, 0)).intersection(
Segment3D((-2, 0), (0, 0))) == [Point3D(0, 0)]
assert s1.intersection(Segment(Point(1, 1), Point(2, 2))) == [Point(1, 1)]
assert s1.intersection(Segment(Point(0.5, 0.5), Point(1.5, 1.5))) == [Segment(Point(0.5, 0.5), p2)]
assert s1.intersection(Segment(Point(4, 4), Point(5, 5))) == []
assert s1.intersection(Segment(Point(-1, -1), p1)) == [p1]
assert s1.intersection(Segment(Point(-1, -1), Point(0.5, 0.5))) == [Segment(p1, Point(0.5, 0.5))]
assert s1.intersection(Line(Point(1, 0), Point(2, 1))) == []
assert s1.intersection(s2) == [s2]
assert s2.intersection(s1) == [s2]
assert asa(120, 8, 52) == \
Triangle(
Point(0, 0),
Point(8, 0),
Point(-4 * cos(19 * pi / 90) / sin(2 * pi / 45),
4 * sqrt(3) * cos(19 * pi / 90) / sin(2 * pi / 45)))
assert Line((0, 0), (1, 1)).intersection(Ray((1, 0), (1, 2))) == [Point(1, 1)]
assert Line((0, 0), (1, 1)).intersection(Segment((1, 0), (1, 2))) == [Point(1, 1)]
assert Ray((0, 0), (1, 1)).intersection(Ray((1, 0), (1, 2))) == [Point(1, 1)]
assert Ray((0, 0), (1, 1)).intersection(Segment((1, 0), (1, 2))) == [Point(1, 1)]
assert Ray((0, 0), (10, 10)).contains(Segment((1, 1), (2, 2))) is True
assert Segment((1, 1), (2, 2)) in Line((0, 0), (10, 10))
assert s1.intersection(Ray((1, 1), (4, 4))) == [Point(1, 1)]
# This test is disabled because it hangs after rref changes which simplify
# intermediate results and return a different representation from when the
# test was written.
# # 16628 - this should be fast
# p0 = Point2D(Rational(249, 5), Rational(497999, 10000))
# p1 = Point2D((-58977084786*sqrt(405639795226) + 2030690077184193 +
# 20112207807*sqrt(630547164901) + 99600*sqrt(255775022850776494562626))
# /(2000*sqrt(255775022850776494562626) + 1991998000*sqrt(405639795226)
# + 1991998000*sqrt(630547164901) + 1622561172902000),
# (-498000*sqrt(255775022850776494562626) - 995999*sqrt(630547164901) +
# 90004251917891999 +
# 496005510002*sqrt(405639795226))/(10000*sqrt(255775022850776494562626)
# + 9959990000*sqrt(405639795226) + 9959990000*sqrt(630547164901) +
# 8112805864510000))
# p2 = Point2D(Rational(497, 10), Rational(-497, 10))
# p3 = Point2D(Rational(-497, 10), Rational(-497, 10))
# l = Line(p0, p1)
# s = Segment(p2, p3)
# n = (-52673223862*sqrt(405639795226) - 15764156209307469 -
# 9803028531*sqrt(630547164901) +
# 33200*sqrt(255775022850776494562626))
# d = sqrt(405639795226) + 315274080450 + 498000*sqrt(
# 630547164901) + sqrt(255775022850776494562626)
# assert intersection(l, s) == [
# Point2D(n/d*Rational(3, 2000), Rational(-497, 10))]
def test_line_intersection():
# see also test_issue_11238 in test_matrices.py
x0 = tan(pi*Rational(13, 45))
x1 = sqrt(3)
x2 = x0**2
x, y = [8*x0/(x0 + x1), (24*x0 - 8*x1*x2)/(x2 - 3)]
assert Line(Point(0, 0), Point(1, -sqrt(3))).contains(Point(x, y)) is True
def test_intersection_3d():
p1 = Point3D(0, 0, 0)
p2 = Point3D(1, 1, 1)
l1 = Line3D(p1, p2)
l2 = Line3D(Point3D(0, 0, 0), Point3D(3, 4, 0))
r1 = Ray3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
r2 = Ray3D(Point3D(0, 0, 0), Point3D(3, 4, 0))
s1 = Segment3D(Point3D(0, 0, 0), Point3D(3, 4, 0))
assert intersection(l1, p1) == [p1]
assert intersection(l1, Point3D(x1, 1 + x1, 1)) == []
assert intersection(l1, l1.parallel_line(p1)) == [Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1))]
assert intersection(l2, r2) == [r2]
assert intersection(l2, s1) == [s1]
assert intersection(r2, l2) == [r2]
assert intersection(r1, Ray3D(Point3D(1, 1, 1), Point3D(-1, -1, -1))) == [Point3D(1, 1, 1)]
assert intersection(r1, Segment3D(Point3D(0, 0, 0), Point3D(2, 2, 2))) == [
Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))]
assert intersection(Ray3D(Point3D(1, 0, 0), Point3D(-1, 0, 0)), Ray3D(Point3D(0, 1, 0), Point3D(0, -1, 0))) \
== [Point3D(0, 0, 0)]
assert intersection(r1, Ray3D(Point3D(2, 2, 2), Point3D(0, 0, 0))) == \
[Segment3D(Point3D(1, 1, 1), Point3D(2, 2, 2))]
assert intersection(s1, r2) == [s1]
assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).intersection(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) == \
[Point3D(2, 2, 1)]
assert Line3D((0, 1, 2), (0, 2, 3)).intersection(Line3D((0, 1, 2), (0, 1, 1))) == [Point3D(0, 1, 2)]
assert Line3D((0, 0), (t, t)).intersection(Line3D((0, 1), (t, t))) == \
[Point3D(t, t)]
assert Ray3D(Point3D(0, 0, 0), Point3D(0, 4, 0)).intersection(Ray3D(Point3D(0, 1, 1), Point3D(0, -1, 1))) == []
def test_is_parallel():
p1 = Point3D(0, 0, 0)
p2 = Point3D(1, 1, 1)
p3 = Point3D(x1, x1, x1)
l2 = Line(Point(x1, x1), Point(y1, y1))
l2_1 = Line(Point(x1, x1), Point(x1, 1 + x1))
assert Line.is_parallel(Line(Point(0, 0), Point(1, 1)), l2)
assert Line.is_parallel(l2, Line(Point(x1, x1), Point(x1, 1 + x1))) is False
assert Line.is_parallel(l2, l2.parallel_line(Point(-x1, x1)))
assert Line.is_parallel(l2_1, l2_1.parallel_line(Point(0, 0)))
assert Line3D(p1, p2).is_parallel(Line3D(p1, p2)) # same as in 2D
assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).is_parallel(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) is False
assert Line3D(p1, p2).parallel_line(p3) == Line3D(Point3D(x1, x1, x1),
Point3D(x1 + 1, x1 + 1, x1 + 1))
assert Line3D(p1, p2).parallel_line(p3.args) == \
Line3D(Point3D(x1, x1, x1), Point3D(x1 + 1, x1 + 1, x1 + 1))
assert Line3D(Point3D(4, 0, 1), Point3D(0, 4, 1)).is_parallel(Line3D(Point3D(0, 0, 1), Point3D(4, 4, 1))) is False
def test_is_perpendicular():
p1 = Point(0, 0)
p2 = Point(1, 1)
l1 = Line(p1, p2)
l2 = Line(Point(x1, x1), Point(y1, y1))
l1_1 = Line(p1, Point(-x1, x1))
# 2D
assert Line.is_perpendicular(l1, l1_1)
assert Line.is_perpendicular(l1, l2) is False
p = l1.random_point()
assert l1.perpendicular_segment(p) == p
# 3D
assert Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)),
Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))) is True
assert Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0)),
Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0))) is False
assert Line3D.is_perpendicular(Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)),
Line3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1))) is False
def test_is_similar():
p1 = Point(2000, 2000)
p2 = p1.scale(2, 2)
r1 = Ray3D(Point3D(1, 1, 1), Point3D(1, 0, 0))
r2 = Ray(Point(0, 0), Point(0, 1))
s1 = Segment(Point(0, 0), p1)
assert s1.is_similar(Segment(p1, p2))
assert s1.is_similar(r2) is False
assert r1.is_similar(Line3D(Point3D(1, 1, 1), Point3D(1, 0, 0))) is True
assert r1.is_similar(Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))) is False
def test_length():
s2 = Segment3D(Point3D(x1, x1, x1), Point3D(y1, y1, y1))
assert Line(Point(0, 0), Point(1, 1)).length is oo
assert s2.length == sqrt(3) * sqrt((x1 - y1) ** 2)
assert Line3D(Point3D(0, 0, 0), Point3D(1, 1, 1)).length is oo
def test_projection():
p1 = Point(0, 0)
p2 = Point3D(0, 0, 0)
p3 = Point(-x1, x1)
l1 = Line(p1, Point(1, 1))
l2 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))
l3 = Line3D(p2, Point3D(1, 1, 1))
r1 = Ray(Point(1, 1), Point(2, 2))
s1 = Segment(Point2D(0, 0), Point2D(0, 1))
s2 = Segment(Point2D(1, 0), Point2D(2, 1/2))
assert Line(Point(x1, x1), Point(y1, y1)).projection(Point(y1, y1)) == Point(y1, y1)
assert Line(Point(x1, x1), Point(x1, 1 + x1)).projection(Point(1, 1)) == Point(x1, 1)
assert Segment(Point(-2, 2), Point(0, 4)).projection(r1) == Segment(Point(-1, 3), Point(0, 4))
assert Segment(Point(0, 4), Point(-2, 2)).projection(r1) == Segment(Point(0, 4), Point(-1, 3))
assert s2.projection(s1) == EmptySet
assert l1.projection(p3) == p1
assert l1.projection(Ray(p1, Point(-1, 5))) == Ray(Point(0, 0), Point(2, 2))
assert l1.projection(Ray(p1, Point(-1, 1))) == p1
assert r1.projection(Ray(Point(1, 1), Point(-1, -1))) == Point(1, 1)
assert r1.projection(Ray(Point(0, 4), Point(-1, -5))) == Segment(Point(1, 1), Point(2, 2))
assert r1.projection(Segment(Point(-1, 5), Point(-5, -10))) == Segment(Point(1, 1), Point(2, 2))
assert r1.projection(Ray(Point(1, 1), Point(-1, -1))) == Point(1, 1)
assert r1.projection(Ray(Point(0, 4), Point(-1, -5))) == Segment(Point(1, 1), Point(2, 2))
assert r1.projection(Segment(Point(-1, 5), Point(-5, -10))) == Segment(Point(1, 1), Point(2, 2))
assert l3.projection(Ray3D(p2, Point3D(-1, 5, 0))) == Ray3D(Point3D(0, 0, 0), Point3D(Rational(4, 3), Rational(4, 3), Rational(4, 3)))
assert l3.projection(Ray3D(p2, Point3D(-1, 1, 1))) == Ray3D(Point3D(0, 0, 0), Point3D(Rational(1, 3), Rational(1, 3), Rational(1, 3)))
assert l2.projection(Point3D(5, 5, 0)) == Point3D(5, 0)
assert l2.projection(Line3D(Point3D(0, 1, 0), Point3D(1, 1, 0))).equals(l2)
def test_perpendicular_bisector():
s1 = Segment(Point(0, 0), Point(1, 1))
aline = Line(Point(S.Half, S.Half), Point(Rational(3, 2), Rational(-1, 2)))
on_line = Segment(Point(S.Half, S.Half), Point(Rational(3, 2), Rational(-1, 2))).midpoint
assert s1.perpendicular_bisector().equals(aline)
assert s1.perpendicular_bisector(on_line).equals(Segment(s1.midpoint, on_line))
assert s1.perpendicular_bisector(on_line + (1, 0)).equals(aline)
def test_raises():
d, e = symbols('a,b', real=True)
s = Segment((d, 0), (e, 0))
raises(TypeError, lambda: Line((1, 1), 1))
raises(ValueError, lambda: Line(Point(0, 0), Point(0, 0)))
raises(Undecidable, lambda: Point(2 * d, 0) in s)
raises(ValueError, lambda: Ray3D(Point(1.0, 1.0)))
raises(ValueError, lambda: Line3D(Point3D(0, 0, 0), Point3D(0, 0, 0)))
raises(TypeError, lambda: Line3D((1, 1), 1))
raises(ValueError, lambda: Line3D(Point3D(0, 0, 0)))
raises(TypeError, lambda: Ray((1, 1), 1))
raises(GeometryError, lambda: Line(Point(0, 0), Point(1, 0))
.projection(Circle(Point(0, 0), 1)))
def test_ray_generation():
assert Ray((1, 1), angle=pi / 4) == Ray((1, 1), (2, 2))
assert Ray((1, 1), angle=pi / 2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=-pi / 2) == Ray((1, 1), (1, 0))
assert Ray((1, 1), angle=-3 * pi / 2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5 * pi / 2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=5.0 * pi / 2) == Ray((1, 1), (1, 2))
assert Ray((1, 1), angle=pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=3.0 * pi) == Ray((1, 1), (0, 1))
assert Ray((1, 1), angle=4.0 * pi) == Ray((1, 1), (2, 1))
assert Ray((1, 1), angle=0) == Ray((1, 1), (2, 1))
assert Ray((1, 1), angle=4.05 * pi) == Ray(Point(1, 1),
Point(2, -sqrt(5) * sqrt(2 * sqrt(5) + 10) / 4 - sqrt(
2 * sqrt(5) + 10) / 4 + 2 + sqrt(5)))
assert Ray((1, 1), angle=4.02 * pi) == Ray(Point(1, 1),
Point(2, 1 + tan(4.02 * pi)))
assert Ray((1, 1), angle=5) == Ray((1, 1), (2, 1 + tan(5)))
assert Ray3D((1, 1, 1), direction_ratio=[4, 4, 4]) == Ray3D(Point3D(1, 1, 1), Point3D(5, 5, 5))
assert Ray3D((1, 1, 1), direction_ratio=[1, 2, 3]) == Ray3D(Point3D(1, 1, 1), Point3D(2, 3, 4))
assert Ray3D((1, 1, 1), direction_ratio=[1, 1, 1]) == Ray3D(Point3D(1, 1, 1), Point3D(2, 2, 2))
def test_symbolic_intersect():
# Issue 7814.
circle = Circle(Point(x, 0), y)
line = Line(Point(k, z), slope=0)
assert line.intersection(circle) == [Point(x + sqrt((y - z) * (y + z)), z), Point(x - sqrt((y - z) * (y + z)), z)]
def test_issue_2941():
def _check():
for f, g in cartes(*[(Line, Ray, Segment)] * 2):
l1 = f(a, b)
l2 = g(c, d)
assert l1.intersection(l2) == l2.intersection(l1)
# intersect at end point
c, d = (-2, -2), (-2, 0)
a, b = (0, 0), (1, 1)
_check()
# midline intersection
c, d = (-2, -3), (-2, 0)
_check()
def test_parameter_value():
t = Symbol('t')
p1, p2 = Point(0, 1), Point(5, 6)
l = Line(p1, p2)
assert l.parameter_value((5, 6), t) == {t: 1}
raises(ValueError, lambda: l.parameter_value((0, 0), t))
def test_bisectors():
r1 = Line3D(Point3D(0, 0, 0), Point3D(1, 0, 0))
r2 = Line3D(Point3D(0, 0, 0), Point3D(0, 1, 0))
bisections = r1.bisectors(r2)
assert bisections == [Line3D(Point3D(0, 0, 0), Point3D(1, 1, 0)),
Line3D(Point3D(0, 0, 0), Point3D(1, -1, 0))]
ans = [Line3D(Point3D(0, 0, 0), Point3D(1, 0, 1)),
Line3D(Point3D(0, 0, 0), Point3D(-1, 0, 1))]
l1 = (0, 0, 0), (0, 0, 1)
l2 = (0, 0), (1, 0)
for a, b in cartes((Line, Segment, Ray), repeat=2):
assert a(*l1).bisectors(b(*l2)) == ans
def test_issue_8615():
a = Line3D(Point3D(6, 5, 0), Point3D(6, -6, 0))
b = Line3D(Point3D(6, -1, 19/10), Point3D(6, -1, 0))
assert a.intersection(b) == [Point3D(6, -1, 0)]
| 44.030193
| 264
| 0.572538
|
87797d986df11052d6f5abdbf955094f31446415
| 253
|
py
|
Python
|
apps/api/apps.py
|
luisito666/Mt2Web.py
|
e37ca79a5a21373c8b773cf30c622beba6d28ec4
|
[
"MIT"
] | 5
|
2018-02-04T05:41:39.000Z
|
2021-11-09T10:51:58.000Z
|
apps/api/apps.py
|
vps-hosting/Mt2Web.py
|
e37ca79a5a21373c8b773cf30c622beba6d28ec4
|
[
"MIT"
] | 4
|
2020-11-22T16:07:47.000Z
|
2022-01-13T03:27:05.000Z
|
apps/api/apps.py
|
vps-hosting/Mt2Web.py
|
e37ca79a5a21373c8b773cf30c622beba6d28ec4
|
[
"MIT"
] | 4
|
2018-12-22T23:56:47.000Z
|
2021-07-31T11:00:54.000Z
|
# Copyright (c) 2017-2018 luispenagos91@gmail.com
# Distributed under the MIT Software Licence
# More information: http://www.opensource.org/licenses/mit-license.php
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'api'
| 25.3
| 68
| 0.774704
|
43b1d076df4d9abc5091b53124f1c4d3760d0238
| 894
|
py
|
Python
|
sips/lines/bov/utils/divy.py
|
AnandIJain/sip
|
7eadee83a1bf5d447efd42ebab69197a4e73d52a
|
[
"MIT"
] | 5
|
2019-09-11T15:44:56.000Z
|
2021-06-13T13:53:59.000Z
|
sips/lines/bov/utils/divy.py
|
AnandIJain/sip
|
7eadee83a1bf5d447efd42ebab69197a4e73d52a
|
[
"MIT"
] | 19
|
2019-09-26T21:17:24.000Z
|
2020-05-03T06:35:33.000Z
|
sips/lines/bov/utils/divy.py
|
anandijain/sips
|
7eadee83a1bf5d447efd42ebab69197a4e73d52a
|
[
"MIT"
] | 2
|
2019-09-05T16:55:59.000Z
|
2019-09-05T17:22:50.000Z
|
def divy_games(events):
"""
    we want to divvy up the games by the locations closest to our servers.
prereq: dataframe for arena latitude and longitudes,
as well as the events
1. determine location of each game based on home away (in terms of arena)
    2. look up the arena to find the latitude and longitude for all games in the dataframe
    3. compute the distance from each game to each of the 3 servers (LA, Chi, NY)
4. log the games for each location
5.
need a way to prevent requests to all_events
# testing
    1. improve logging to capture the request time of every request as an option
2. break down timing to see if the bulk of time is in latency via reqs
or in local allocs / fileio etc
3. if the bulk of time is spent on requests, it might be faster to req
all events every time (even just as a new-game check)
"""
| 35.76
| 78
| 0.671141
|
c75552d15441d7d484c6287e36425145c72d0f76
| 3,651
|
py
|
Python
|
pandas/core/computation/engines.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | 1
|
2019-05-04T03:42:25.000Z
|
2019-05-04T03:42:25.000Z
|
pandas/core/computation/engines.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/computation/engines.py
|
developing-coder/pandas
|
9feb3ad92cc0397a04b665803a49299ee7aa1037
|
[
"BSD-3-Clause"
] | 1
|
2020-08-21T08:00:10.000Z
|
2020-08-21T08:00:10.000Z
|
"""
Engine classes for :func:`~pandas.eval`
"""
import abc
from pandas.core.computation.align import _align, _reconstruct_object
from pandas.core.computation.ops import (
UndefinedVariableError, _mathops, _reductions)
import pandas.io.formats.printing as printing
_ne_builtins = frozenset(_mathops + _reductions)
class NumExprClobberingError(NameError):
pass
def _check_ne_builtin_clash(expr):
"""Attempt to prevent foot-shooting in a helpful way.
Parameters
----------
terms : Term
Terms can contain
"""
names = expr.names
overlap = names & _ne_builtins
if overlap:
s = ', '.join(map(repr, overlap))
raise NumExprClobberingError('Variables in expression "{expr}" '
'overlap with builtins: ({s})'
.format(expr=expr, s=s))
class AbstractEngine(metaclass=abc.ABCMeta):
"""Object serving as a base class for all engines."""
has_neg_frac = False
def __init__(self, expr):
self.expr = expr
self.aligned_axes = None
self.result_type = None
def convert(self):
"""Convert an expression for evaluation.
Defaults to return the expression as a string.
"""
return printing.pprint_thing(self.expr)
def evaluate(self):
"""Run the engine on the expression
This method performs alignment which is necessary no matter what engine
is being used, thus its implementation is in the base class.
Returns
-------
obj : object
The result of the passed expression.
"""
if not self._is_aligned:
self.result_type, self.aligned_axes = _align(self.expr.terms)
# make sure no names in resolvers and locals/globals clash
res = self._evaluate()
return _reconstruct_object(self.result_type, res, self.aligned_axes,
self.expr.terms.return_type)
@property
def _is_aligned(self):
return self.aligned_axes is not None and self.result_type is not None
@abc.abstractmethod
def _evaluate(self):
"""Return an evaluated expression.
Parameters
----------
env : Scope
The local and global environment in which to evaluate an
expression.
Notes
-----
Must be implemented by subclasses.
"""
pass
class NumExprEngine(AbstractEngine):
"""NumExpr engine class"""
has_neg_frac = True
def __init__(self, expr):
super().__init__(expr)
def convert(self):
return str(super().convert())
def _evaluate(self):
import numexpr as ne
# convert the expression to a valid numexpr expression
s = self.convert()
try:
env = self.expr.env
scope = env.full_scope
truediv = scope['truediv']
_check_ne_builtin_clash(self.expr)
return ne.evaluate(s, local_dict=scope, truediv=truediv)
except KeyError as e:
# python 3 compat kludge
try:
msg = e.message
except AttributeError:
msg = str(e)
raise UndefinedVariableError(msg)
class PythonEngine(AbstractEngine):
"""Evaluate an expression in Python space.
Mostly for testing purposes.
"""
has_neg_frac = False
def __init__(self, expr):
super().__init__(expr)
def evaluate(self):
return self.expr()
def _evaluate(self):
pass
_engines = {'numexpr': NumExprEngine, 'python': PythonEngine}
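# Usage sketch (not part of the original module): the public entry point is
# pandas.eval, which looks up one of the engines registered in `_engines` above;
# engine="numexpr" follows the same path but needs the numexpr package installed.
if __name__ == "__main__":
    import pandas as pd
    print(pd.eval("1 + 2 * 3", engine="python"))  # -> 7, evaluated via PythonEngine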
| 25.006849
| 79
| 0.603944
|
99e1fa07c2d88d2d963a9e7b623ac33db9635c12
| 2,692
|
py
|
Python
|
pathfinder/backend/size_change.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 1
|
2020-04-21T11:39:25.000Z
|
2020-04-21T11:39:25.000Z
|
pathfinder/backend/size_change.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 7
|
2020-02-12T01:08:01.000Z
|
2022-02-10T11:56:56.000Z
|
pathfinder/backend/size_change.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | null | null | null |
from typing import Dict
from pathfinder.backend.dice import Dice, DieType
from pathfinder.charsheet.constants import Size
DICE_PROGRESSION_CHART: Dict[Dice, int] = {
Dice(1, DieType.D1): 1,
Dice(1, DieType.D2): 2,
Dice(1, DieType.D3): 3,
Dice(1, DieType.D4): 4,
Dice(1, DieType.D6): 5,
Dice(1, DieType.D8): 6,
Dice(1, DieType.D10): 7,
Dice(2, DieType.D6): 8,
Dice(2, DieType.D8): 9,
Dice(3, DieType.D6): 10,
Dice(3, DieType.D8): 11,
Dice(4, DieType.D6): 12,
Dice(4, DieType.D8): 13,
Dice(6, DieType.D6): 14,
Dice(6, DieType.D8): 15,
Dice(8, DieType.D6): 16,
Dice(8, DieType.D8): 17,
Dice(12, DieType.D6): 18,
Dice(12, DieType.D8): 19,
Dice(16, DieType.D6): 20
}
INVERSE_DICE_PROGRESSION_CHART: Dict[int, Dice] = {v: k for k, v in DICE_PROGRESSION_CHART.items()}
def change_size(increase: bool, damage: Dice, initial_size: Size) -> Dice:
"""
Change the damage of a weapon up or down one size category
:param increase: Whether the size should be increased (True) or decreased (False)
:param damage: The initial damage Dice
    :param initial_size: The effective Size of the initial damage
:return: The changed damage Dice
:raises KeyError: When the rules for increase in size are ill-defined for the requested inputs
This can happen at the extremes (e.g reducing 1d1) or with some amounts of certain die types (e.g. 5d4)
"""
# Handle multiple d10s
if damage.dice >= 2 and damage.die_type == 10:
return Dice(damage.dice * 2 if increase else damage.dice, DieType.D8)
# Handle multiple d4s (the rules are ill-defined for some amounts, e.g. 5d4)
if damage.dice % 2 == 0 and damage.die_type == 4:
damage = Dice(damage.dice // 2, DieType.D8)
elif damage.dice % 3 == 0 and damage.die_type == 4:
damage = Dice((damage.dice // 3) * 2, DieType.D6)
# Handle d12s
if damage.die_type == 12:
damage = Dice(damage.dice * 2, DieType.D6)
original_index = 0
try:
original_index = DICE_PROGRESSION_CHART[damage]
except KeyError as e:
if damage.die_type == 6:
pass # TODO
elif damage.die_type == 8:
pass # TODO
else:
raise e
if increase:
index_change = 2
if initial_size <= Size.SMALL or original_index <= DICE_PROGRESSION_CHART[Dice(1, DieType.D6)]:
index_change = 1
else:
index_change = -2
if initial_size >= Size.MEDIUM or original_index <= DICE_PROGRESSION_CHART[Dice(1, DieType.D8)]:
index_change = -1
return INVERSE_DICE_PROGRESSION_CHART[original_index + index_change]
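# Usage sketch (illustrative; results follow the dice progression chart above and
# assume Dice is hashable by value, as the chart lookup already requires):
if __name__ == "__main__":
    print(change_size(True, Dice(1, DieType.D6), Size.MEDIUM))   # 1d6 scaled up -> 1d8
    print(change_size(False, Dice(2, DieType.D6), Size.MEDIUM))  # 2d6 scaled down -> 1d10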
| 34.512821
| 111
| 0.640416
|
5bce8a96fc34357aefc2eead17a45ec24add2ebb
| 838
|
py
|
Python
|
boa/tests/src/blockchain/SignatureTest.py
|
chisleu/neo-boa
|
799bb37f7e7862215f94f479cfe74a4dd4b8cba2
|
[
"MIT"
] | 2
|
2017-11-27T08:45:34.000Z
|
2021-03-08T03:08:56.000Z
|
boa/tests/src/blockchain/SignatureTest.py
|
chisleu/neo-boa
|
799bb37f7e7862215f94f479cfe74a4dd4b8cba2
|
[
"MIT"
] | 2
|
2018-02-13T07:30:09.000Z
|
2021-06-01T22:02:52.000Z
|
boa/tests/src/blockchain/SignatureTest.py
|
localhuman/neo-boa
|
799bb37f7e7862215f94f479cfe74a4dd4b8cba2
|
[
"MIT"
] | null | null | null |
from boa.code.builtins import verify_signature
from boa.blockchain.vm.Neo.Runtime import CheckWitness, Notify
OWNER_PUBKEY = b'\x02\xf7\xbchi\xdf-\xbew\xa6bE\x11\x16\xcc\x99\x9cx\xc3^\xedA\xa11c\x17\xa3\xef\xe3c@t2'
# this is the ScriptHash of the address that created the contract
# the hex string is b'f223483e4287bd7c3e85a7ca896943179cbbc246'
# the below is the hex version, unhexed and reversed
OWNER_HASH = b'F\xc2\xbb\x9c\x17Ci\x89\xca\xa7\x85>|\xbd\x87B>H#\xf2'
def Main(operation):
"""
:param operation:
:return:
"""
verify = CheckWitness(OWNER_HASH)
if verify:
print("ok!!")
else:
print("not ok!")
Notify(operation)
    # not sure how this works
    verify2 = verify_signature(operation, OWNER_PUBKEY)
    Notify(verify2)  # it returns false for now
return True
| 23.277778
| 105
| 0.700477
|
cb140d5f956694986dc62192aff61de78e7bb108
| 3,315
|
py
|
Python
|
oscn/request/cases.py
|
kendallcorner/oscn
|
6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c
|
[
"MIT"
] | null | null | null |
oscn/request/cases.py
|
kendallcorner/oscn
|
6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c
|
[
"MIT"
] | null | null | null |
oscn/request/cases.py
|
kendallcorner/oscn
|
6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c
|
[
"MIT"
] | null | null | null |
import requests
import warnings
from . import settings
from oscn.parse import append_parsers
oscn_url = settings.OSCN_URL
warnings.filterwarnings("ignore")
class OSCNrequest(object):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9'
}
def __init__(self, type='CF', county='tulsa', year='2018', number=1):
self.type = type
self.county = county
self.year = year
self.number = number
@property
def case_number(self):
return f'{self.type}-{self.year}-{self.number}'
@property
def source(self):
return f'{oscn_url}?db={self.county}&number={self.case_number}'
@property
def text(self):
return self.response.text
def _valid_response(self, resp):
if resp.status_code != 200:
return False
for msg in settings.INVALID_CASE_MESSAGES:
if msg in resp.text:
return False
return True
def _request(self):
params = {'db': self.county, 'number': self.case_number}
response = (
requests.post(oscn_url, params, headers=self.headers, verify=False)
)
if self._valid_response(response):
for msg in settings.UNUSED_CASE_MESSAGES:
if msg in response.text:
                    # skip unused case numbers and return the next valid response
                    self.number += 1
                    return self._request()
self.response = response
return self
else:
return None
# This next line adds properties to the OSCNrequest as a shortcut
# for parsing. This allows access to parse results such as:
# name = OSCNrequest.judge
# or
# counts = OSCNrequest.counts
append_parsers(OSCNrequest)
class Case(OSCNrequest):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._request()
class CaseList(OSCNrequest):
def _convert_str_arg(self, name, args):
if name in args:
if type(args[name]) is str:
# convert str to one element list
args[name] = [args[name]]
def _gen_requests(self):
for county in self.counties:
self.county = county
for year in self.years:
self.year = year
self.number = self.start
while True:
self.number += 1
if self.stop and self.number > self.stop:
break
next_case = self._request()
if next_case:
yield next_case
else:
break
        return  # the generator simply ends here; raising StopIteration would be a RuntimeError under PEP 479
def __init__(self, start=0, stop=False, **kwargs):
self.start = start if start == 0 else start-1
self.stop = stop
self._convert_str_arg('county', kwargs)
self._convert_str_arg('year', kwargs)
self.counties = kwargs['county']
self.years = kwargs['year']
self.all_cases = self._gen_requests()
super().__init__(number=self.start, **kwargs)
def __iter__(self):
return self
def __next__(self):
return next(self.all_cases)
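# Hypothetical usage sketch (issues live HTTP requests against OSCN; the county,
# year and number values are only examples):
if __name__ == '__main__':
    case = Case(type='CF', county='tulsa', year='2018', number=1)
    print(case.source)
    for c in CaseList(county='tulsa', year='2018', start=1, stop=3):
        print(c.case_number)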
| 28.826087
| 145
| 0.572851
|
5d186ebd92623d3ffd32246734ea40230e3cf64c
| 855
|
py
|
Python
|
arrays_strings/permutation/test_permutation_solution.py
|
rubenparedes0796/interactive-coding-challenges
|
4a942ff7a8b4a78052eae7e2b33d129683796570
|
[
"Apache-2.0"
] | 27,173
|
2015-07-06T12:36:05.000Z
|
2022-03-31T23:56:41.000Z
|
arrays_strings/permutation/test_permutation_solution.py
|
govind527/interactive-coding-challenges
|
358f2cc60426d5c4c3d7d580910eec9a7b393fa9
|
[
"Apache-2.0"
] | 143
|
2015-07-07T05:13:11.000Z
|
2021-12-07T17:05:54.000Z
|
arrays_strings/permutation/test_permutation_solution.py
|
govind527/interactive-coding-challenges
|
358f2cc60426d5c4c3d7d580910eec9a7b393fa9
|
[
"Apache-2.0"
] | 4,657
|
2015-07-06T13:28:02.000Z
|
2022-03-31T10:11:28.000Z
|
import unittest
class TestPermutation(unittest.TestCase):
def test_permutation(self, func):
self.assertEqual(func(None, 'foo'), False)
self.assertEqual(func('', 'foo'), False)
self.assertEqual(func('Nib', 'bin'), False)
self.assertEqual(func('act', 'cat'), True)
self.assertEqual(func('a ct', 'ca t'), True)
self.assertEqual(func('dog', 'doggo'), False)
print('Success: test_permutation')
def main():
test = TestPermutation()
permutations = Permutations()
test.test_permutation(permutations.is_permutation)
try:
permutations_alt = PermutationsAlt()
test.test_permutation(permutations_alt.is_permutation)
except NameError:
# Alternate solutions are only defined
# in the solutions file
pass
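# A minimal reference sketch for comparison (hypothetical; the real Permutations and
# PermutationsAlt classes are defined in the accompanying solution file), e.g.
# TestPermutation().test_permutation(PermutationsSketch().is_permutation):
class PermutationsSketch(object):
    def is_permutation(self, str1, str2):
        if str1 is None or str2 is None:
            return False
        return sorted(str1) == sorted(str2)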
if __name__ == '__main__':
main()
| 27.580645
| 62
| 0.647953
|
c282f14ff98583ee6a78ba1b309581830c65f705
| 5,998
|
py
|
Python
|
docspring/models/combined_submission_data.py
|
DocSpring/docspring-python
|
5afc7c88bdb52a96346364ff7dd2c7474a73c3de
|
[
"MIT"
] | null | null | null |
docspring/models/combined_submission_data.py
|
DocSpring/docspring-python
|
5afc7c88bdb52a96346364ff7dd2c7474a73c3de
|
[
"MIT"
] | null | null | null |
docspring/models/combined_submission_data.py
|
DocSpring/docspring-python
|
5afc7c88bdb52a96346364ff7dd2c7474a73c3de
|
[
"MIT"
] | 1
|
2020-12-20T05:14:04.000Z
|
2020-12-20T05:14:04.000Z
|
# coding: utf-8
"""
API v1
DocSpring is a service that helps you fill out and sign PDF templates. # noqa: E501
OpenAPI spec version: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class CombinedSubmissionData(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expires_in': 'int',
'metadata': 'object',
'password': 'str',
'submission_ids': 'list[str]',
'test': 'bool'
}
attribute_map = {
'expires_in': 'expires_in',
'metadata': 'metadata',
'password': 'password',
'submission_ids': 'submission_ids',
'test': 'test'
}
def __init__(self, expires_in=None, metadata=None, password=None, submission_ids=None, test=None): # noqa: E501
"""CombinedSubmissionData - a model defined in OpenAPI""" # noqa: E501
self._expires_in = None
self._metadata = None
self._password = None
self._submission_ids = None
self._test = None
self.discriminator = None
if expires_in is not None:
self.expires_in = expires_in
if metadata is not None:
self.metadata = metadata
if password is not None:
self.password = password
self.submission_ids = submission_ids
if test is not None:
self.test = test
@property
def expires_in(self):
"""Gets the expires_in of this CombinedSubmissionData. # noqa: E501
:return: The expires_in of this CombinedSubmissionData. # noqa: E501
:rtype: int
"""
return self._expires_in
@expires_in.setter
def expires_in(self, expires_in):
"""Sets the expires_in of this CombinedSubmissionData.
:param expires_in: The expires_in of this CombinedSubmissionData. # noqa: E501
:type: int
"""
self._expires_in = expires_in
@property
def metadata(self):
"""Gets the metadata of this CombinedSubmissionData. # noqa: E501
:return: The metadata of this CombinedSubmissionData. # noqa: E501
:rtype: object
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this CombinedSubmissionData.
:param metadata: The metadata of this CombinedSubmissionData. # noqa: E501
:type: object
"""
self._metadata = metadata
@property
def password(self):
"""Gets the password of this CombinedSubmissionData. # noqa: E501
:return: The password of this CombinedSubmissionData. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this CombinedSubmissionData.
:param password: The password of this CombinedSubmissionData. # noqa: E501
:type: str
"""
self._password = password
@property
def submission_ids(self):
"""Gets the submission_ids of this CombinedSubmissionData. # noqa: E501
:return: The submission_ids of this CombinedSubmissionData. # noqa: E501
:rtype: list[str]
"""
return self._submission_ids
@submission_ids.setter
def submission_ids(self, submission_ids):
"""Sets the submission_ids of this CombinedSubmissionData.
:param submission_ids: The submission_ids of this CombinedSubmissionData. # noqa: E501
:type: list[str]
"""
if submission_ids is None:
raise ValueError("Invalid value for `submission_ids`, must not be `None`") # noqa: E501
self._submission_ids = submission_ids
@property
def test(self):
"""Gets the test of this CombinedSubmissionData. # noqa: E501
:return: The test of this CombinedSubmissionData. # noqa: E501
:rtype: bool
"""
return self._test
@test.setter
def test(self, test):
"""Sets the test of this CombinedSubmissionData.
:param test: The test of this CombinedSubmissionData. # noqa: E501
:type: bool
"""
self._test = test
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CombinedSubmissionData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
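# Usage sketch (illustrative values; submission_ids is the only required attribute):
if __name__ == "__main__":
    data = CombinedSubmissionData(submission_ids=["sub_1", "sub_2"], test=True)
    print(data.to_dict())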
| 27.513761
| 116
| 0.588696
|
582d598c3f2e9089bb78a3f2fdac5ecac00c9b17
| 8,010
|
py
|
Python
|
src/modules/hunting/cves.py
|
maorkuriel/kube-hunter
|
e0bacd6e7bd5e728ad3c6010904574adb1687dc5
|
[
"Apache-2.0"
] | 1
|
2019-09-25T12:31:33.000Z
|
2019-09-25T12:31:33.000Z
|
src/modules/hunting/cves.py
|
maorkuriel/kube-hunter
|
e0bacd6e7bd5e728ad3c6010904574adb1687dc5
|
[
"Apache-2.0"
] | null | null | null |
src/modules/hunting/cves.py
|
maorkuriel/kube-hunter
|
e0bacd6e7bd5e728ad3c6010904574adb1687dc5
|
[
"Apache-2.0"
] | null | null | null |
import logging
import json
import requests
from ...core.events import handler
from ...core.events.types import Vulnerability, Event, K8sVersionDisclosure
from ...core.types import Hunter, ActiveHunter, KubernetesCluster, RemoteCodeExec, AccessRisk, InformationDisclosure, \
PrivilegeEscalation, DenialOfService, KubectlClient
from ..discovery.kubectl import KubectlClientEvent
from packaging import version
""" Cluster CVES """
class ServerApiVersionEndPointAccessPE(Vulnerability, Event):
"""Node is vulnerable to critical CVE-2018-1002105"""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Critical Privilege Escalation CVE", category=PrivilegeEscalation)
self.evidence = evidence
class ServerApiVersionEndPointAccessDos(Vulnerability, Event):
"""Node not patched for CVE-2019-1002100. Depending on your RBAC settings, a crafted json-patch could cause a Denial of Service."""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Denial of Service to Kubernetes API Server", category=DenialOfService)
self.evidence = evidence
class PingFloodHttp2Implementation(Vulnerability, Event):
"""Node not patched for CVE-2019-9512. an attacker could cause a Denial of Service by sending specially crafted HTTP requests."""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Possible Ping Flood Attack", category=DenialOfService)
self.evidence = evidence
class ResetFloodHttp2Implementation(Vulnerability, Event):
"""Node not patched for CVE-2019-9514. an attacker could cause a Denial of Service by sending specially crafted HTTP requests."""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Possible Reset Flood Attack", category=DenialOfService)
self.evidence = evidence
class ServerApiClusterScopedResourcesAccess(Vulnerability, Event):
"""Api Server not patched for CVE-2019-11247. API server allows access to custom resources via wrong scope"""
def __init__(self, evidence):
Vulnerability.__init__(self, KubernetesCluster, name="Arbitrary Access To Cluster Scoped Resources", category=PrivilegeEscalation)
self.evidence = evidence
""" Kubectl CVES """
class IncompleteFixToKubectlCpVulnerability(Vulnerability, Event):
"""The kubectl client is vulnerable to CVE-2019-11246, an attacker could potentially execute arbitrary code on the client's machine"""
def __init__(self, binary_version):
Vulnerability.__init__(self, KubectlClient, "Kubectl Vulnerable To CVE-2019-11246", category=RemoteCodeExec)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
class KubectlCpVulnerability(Vulnerability, Event):
"""The kubectl client is vulnerable to CVE-2019-1002101, an attacker could potentially execute arbitrary code on the client's machine"""
def __init__(self, binary_version):
Vulnerability.__init__(self, KubectlClient, "Kubectl Vulnerable To CVE-2019-1002101", category=RemoteCodeExec)
self.binary_version = binary_version
self.evidence = "kubectl version: {}".format(self.binary_version)
class CveUtils:
@staticmethod
def get_base_release(full_ver):
        # if a LegacyVersion, convert manually to a base version
if type(full_ver) == version.LegacyVersion:
return version.parse('.'.join(full_ver._version.split('.')[:2]))
else:
return version.parse('.'.join(map(str, full_ver._version.release[:2])))
@staticmethod
def to_legacy(full_ver):
        # convert the version to a version.LegacyVersion
return version.LegacyVersion('.'.join(map(str, full_ver._version.release)))
@staticmethod
def to_raw_version(v):
if type(v) != version.LegacyVersion:
return '.'.join(map(str, v._version.release))
return v._version
@staticmethod
def version_compare(v1, v2):
"""Function compares two versions, handling differences with convertion to LegacyVersion"""
# getting raw version, while striping 'v' char at the start. if exists.
# removing this char lets us safely compare the two version.
v1_raw, v2_raw = CveUtils.to_raw_version(v1).strip('v'), CveUtils.to_raw_version(v2).strip('v')
new_v1 = version.LegacyVersion(v1_raw)
new_v2 = version.LegacyVersion(v2_raw)
return CveUtils.basic_compare(new_v1, new_v2)
@staticmethod
def basic_compare(v1, v2):
return (v1 > v2) - (v1 < v2)
@staticmethod
def is_vulnerable(fix_versions, check_version):
"""Function determines if a version is vulnerable, by comparing to given fix versions by base release"""
vulnerable = False
check_v = version.parse(check_version)
base_check_v = CveUtils.get_base_release(check_v)
# default to classic compare, unless the check_version is legacy.
version_compare_func = CveUtils.basic_compare
if type(check_v) == version.LegacyVersion:
version_compare_func = CveUtils.version_compare
if check_version not in fix_versions:
# comparing each base release for a fix
for fix_v in fix_versions:
fix_v = version.parse(fix_v)
base_fix_v = CveUtils.get_base_release(fix_v)
# if the check version and the current fix have the same base release
if base_check_v == base_fix_v:
# when check_version is legacy, we use a custom compare func to handle differences between versions.
if version_compare_func(check_v, fix_v) == -1:
# determine vulnerable if smaller and with same base version
vulnerable = True
break
# if we didn't find a fix in the fix releases, check whether the version is smaller than the first fix
if not vulnerable and version_compare_func(check_v, version.parse(fix_versions[0])) == -1:
vulnerable = True
return vulnerable
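# Illustrative sketch (not part of the upstream module) of how is_vulnerable()
# behaves against the CVE-2018-1002105 fix list; the check versions below are made up:
#
#   CveUtils.is_vulnerable(["1.10.11", "1.11.5", "1.12.3"], "1.10.5")  # True: 1.10.5 < 1.10.11 on the same base release
#   CveUtils.is_vulnerable(["1.10.11", "1.11.5", "1.12.3"], "1.13.1")  # False: newer than every listed fix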
@handler.subscribe_once(K8sVersionDisclosure)
class K8sClusterCveHunter(Hunter):
"""K8s CVE Hunter
Checks if Node is running a Kubernetes version vulnerable to known CVEs
"""
def __init__(self, event):
self.event = event
def execute(self):
logging.debug('Api Cve Hunter determining vulnerable version: {}'.format(self.event.version))
cve_mapping = {
ServerApiVersionEndPointAccessPE: ["1.10.11", "1.11.5", "1.12.3"],
ServerApiVersionEndPointAccessDos: ["1.11.8", "1.12.6", "1.13.4"],
ResetFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
PingFloodHttp2Implementation: ["1.13.10", "1.14.6", "1.15.3"],
ServerApiClusterScopedResourcesAccess: ["1.13.9", "1.14.5", "1.15.2"]
}
for vulnerability, fix_versions in cve_mapping.items():
if CveUtils.is_vulnerable(fix_versions, self.event.version):
self.publish_event(vulnerability(self.event.version))
@handler.subscribe(KubectlClientEvent)
class KubectlCVEHunter(Hunter):
"""Kubectl CVE Hunter
Checks if the kubectl client is vulnerable to known CVEs
"""
def __init__(self, event):
self.event = event
def execute(self):
cve_mapping = {
KubectlCpVulnerability: ['1.11.9', '1.12.7', '1.13.5', '1.14.0'],
IncompleteFixToKubectlCpVulnerability: ['1.12.9', '1.13.6', '1.14.2']
}
logging.debug('Kubectl Cve Hunter determining vulnerable version: {}'.format(self.event.version))
for vulnerability, fix_versions in cve_mapping.items():
if CveUtils.is_vulnerable(fix_versions, self.event.version):
self.publish_event(vulnerability(binary_version=self.event.version))
| 47.117647
| 140
| 0.691511
|
2abf8b2d50aceaaed72bc9069a0c99290023b534
| 495
|
py
|
Python
|
src/logChunk/chunkingConstants.py
|
Yagniksuchak/CodeParser
|
8bf1a7432aba44e982f94d4525607d738bac658a
|
[
"BSD-3-Clause"
] | 1
|
2016-03-14T01:45:35.000Z
|
2016-03-14T01:45:35.000Z
|
src/logChunk/chunkingConstants.py
|
Yagniksuchak/CodeParser
|
8bf1a7432aba44e982f94d4525607d738bac658a
|
[
"BSD-3-Clause"
] | null | null | null |
src/logChunk/chunkingConstants.py
|
Yagniksuchak/CodeParser
|
8bf1a7432aba44e982f94d4525607d738bac658a
|
[
"BSD-3-Clause"
] | null | null | null |
#Constants for the keywords
SINGLE = "single"
BLOCK = "block"
INCLUDED = "included"
EXCLUDED = "excluded"
KEYLISTSIZE = 3
#Constants for which phase we are in
LOOKFORNAME = 1
LOOKFOREND = 2
LOOKFOREXCP = 3
LOOKFOREXCPEND = 4
#LineTypes
ADD = 1
REMOVE = 2
OTHER = 3
#Scope Change directions
INCREASE = 1
DECREASE = 2
#Comment Change Markers
UNMARKED = 0
UNCHANGED = 1
COMADD = 2
COMDEL = 3
TOTALADD = 4
TOTALDEL = 5
#Label for structures found outside of a function
MOCK = "NO_FUNC_CONTEXT"
| 15
| 49
| 0.735354
|
bcd25bf5cbd01829b08ef433dbbcaa0161f4616d
| 3,982
|
py
|
Python
|
ceilometer/sample.py
|
andymcc/ceilometer
|
fa3b047eb17152b30829eadd9220f12ca9949b4f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/sample.py
|
andymcc/ceilometer
|
fa3b047eb17152b30829eadd9220f12ca9949b4f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/sample.py
|
andymcc/ceilometer
|
fa3b047eb17152b30829eadd9220f12ca9949b4f
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Sample class for holding data about a metering event.
A Sample doesn't really do anything, but we need a way to
ensure that all of the appropriate fields have been filled
in by the plugins that create them.
"""
import copy
import uuid
from oslo_config import cfg
from oslo_utils import timeutils
OPTS = [
cfg.StrOpt('sample_source',
default='openstack',
help='Source for samples emitted on this instance.'),
]
# Fields explanation:
#
# Source: the source of this sample
# Name: the name of the meter, must be unique
# Type: the type of the meter, must be either:
# - cumulative: the value is incremented and never reset to 0
# - delta: the value is reset to 0 each time it is sent
# - gauge: the value is an absolute value and is not a counter
# Unit: the unit of the meter
# Volume: the sample value
# User ID: the user ID
# Project ID: the project ID
# Resource ID: the resource ID
# Timestamp: when the sample has been read
# Resource metadata: various metadata
# id: a uuid of the sample; it can be taken from the API when posting a sample via the API
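# A minimal, illustrative construction (not from this module); the meter name,
# unit, volume and IDs below are invented purely to show how the fields map:
#
#   Sample(name='cpu', type=TYPE_CUMULATIVE, unit='ns', volume=42,
#          user_id='some-user', project_id='some-project',
#          resource_id='some-instance')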
class Sample(object):
SOURCE_DEFAULT = "openstack"
def __init__(self, name, type, unit, volume, user_id, project_id,
resource_id, timestamp=None, resource_metadata=None,
source=None, id=None):
self.name = name
self.type = type
self.unit = unit
self.volume = volume
self.user_id = user_id
self.project_id = project_id
self.resource_id = resource_id
self.timestamp = timestamp
self.resource_metadata = resource_metadata or {}
self.source = source or self.SOURCE_DEFAULT
self.id = id or str(uuid.uuid1())
def as_dict(self):
return copy.copy(self.__dict__)
def __repr__(self):
return '<name: %s, volume: %s, resource_id: %s, timestamp: %s>' % (
self.name, self.volume, self.resource_id, self.timestamp)
@classmethod
def from_notification(cls, name, type, volume, unit,
user_id, project_id, resource_id,
message, timestamp=None, metadata=None, source=None):
if not metadata:
metadata = (copy.copy(message['payload'])
if isinstance(message['payload'], dict) else {})
metadata['event_type'] = message['event_type']
metadata['host'] = message['publisher_id']
ts = timestamp if timestamp else message['timestamp']
return cls(name=name,
type=type,
volume=volume,
unit=unit,
user_id=user_id,
project_id=project_id,
resource_id=resource_id,
timestamp=ts,
resource_metadata=metadata,
source=source)
def set_timestamp(self, timestamp):
self.timestamp = timestamp
def get_iso_timestamp(self):
return timeutils.parse_isotime(self.timestamp)
def setup(conf):
# NOTE(sileht): Instead of passing the cfg.CONF everywhere in ceilometer
# prepare_service will override this default
Sample.SOURCE_DEFAULT = conf.sample_source
TYPE_GAUGE = 'gauge'
TYPE_DELTA = 'delta'
TYPE_CUMULATIVE = 'cumulative'
TYPES = (TYPE_GAUGE, TYPE_DELTA, TYPE_CUMULATIVE)
| 34.327586
| 79
| 0.65344
|
9b787e9efb86b034c5d70e5b8019a9097ae5fe13
| 8,388
|
py
|
Python
|
scripts/build/builders/android.py
|
rachel-li-jci/connectedhomeip
|
7200076e7c690d9fbad4c1d3d13db1d11d87d259
|
[
"Apache-2.0"
] | null | null | null |
scripts/build/builders/android.py
|
rachel-li-jci/connectedhomeip
|
7200076e7c690d9fbad4c1d3d13db1d11d87d259
|
[
"Apache-2.0"
] | 1
|
2021-06-16T08:38:45.000Z
|
2021-06-16T08:38:45.000Z
|
scripts/build/builders/android.py
|
rachel-li-jci/connectedhomeip
|
7200076e7c690d9fbad4c1d3d13db1d11d87d259
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 Project CHIP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shlex
from enum import Enum, auto
from .builder import Builder
class AndroidBoard(Enum):
ARM = auto()
ARM64 = auto()
X64 = auto()
X86 = auto()
def TargetCpuName(self):
if self == AndroidBoard.ARM:
return 'arm'
elif self == AndroidBoard.ARM64:
return 'arm64'
elif self == AndroidBoard.X64:
return 'x64'
elif self == AndroidBoard.X86:
return 'x86'
else:
raise Exception('Unknown board type: %r' % self)
def AbiName(self):
if self == AndroidBoard.ARM:
return 'armeabi-v7a'
elif self == AndroidBoard.ARM64:
return 'arm64-v8a'
elif self == AndroidBoard.X64:
return 'x86_64'
elif self == AndroidBoard.X86:
return 'x86'
else:
raise Exception('Unknown board type: %r' % self)
class AndroidBuilder(Builder):
def __init__(self, root, runner, board: AndroidBoard):
super(AndroidBuilder, self).__init__(root, runner)
self.board = board
def validate_build_environment(self):
for k in ['ANDROID_NDK_HOME', 'ANDROID_HOME']:
if k not in os.environ:
raise Exception(
'Environment %s missing, cannot build android libraries' % k)
# SDK manager must be runnable to 'accept licenses'
sdk_manager = os.path.join(os.environ['ANDROID_HOME'], 'tools', 'bin',
'sdkmanager')
if not (os.path.isfile(sdk_manager) and os.access(sdk_manager, os.X_OK)):
raise Exception("'%s' is not executable by the current user" %
sdk_manager)
# In order to accept a license, the licenses folder is updated with the hash of the
# accepted license
android_home = os.environ['ANDROID_HOME']
licenses = os.path.join(android_home, 'licenses')
if not os.path.exists(licenses):
# Initial install may not have licenses at all
if not os.access(android_home, os.W_OK):
raise Exception(
"'%s' is NOT writable by the current user (needed to create licenses folder for accept)"
% android_home)
elif not os.access(licenses, os.W_OK):
raise Exception(
"'%s' is NOT writable by the current user (needed to accept licenses)"
% licenses)
def generate(self):
self._Execute([
'python3', 'build/chip/java/tests/generate_jars_for_test.py'
], title='Generating JARs for Java build rules test')
self._Execute([
'python3', 'third_party/android_deps/set_up_android_deps.py'
], title='Setting up Android deps through Gradle')
if not os.path.exists(self.output_dir):
# NRF does an in-place update of SDK tools
if not self._runner.dry_run:
self.validate_build_environment()
gn_args = {}
gn_args['target_os'] = 'android'
gn_args['target_cpu'] = self.board.TargetCpuName()
gn_args['android_ndk_root'] = os.environ['ANDROID_NDK_HOME']
gn_args['android_sdk_root'] = os.environ['ANDROID_HOME']
gn_args['chip_use_clusters_for_ip_commissioning'] = 'true'
args = '--args=%s' % (' '.join([
'%s="%s"' % (key, shlex.quote(value))
for key, value in gn_args.items()
]))
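# For illustration only: with the defaults above and made-up SDK/NDK paths on an
# ARM64 board, the assembled string looks roughly like
#   --args=target_os="android" target_cpu="arm64" android_ndk_root="/opt/android-ndk" android_sdk_root="/opt/android-sdk" chip_use_clusters_for_ip_commissioning="true"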
self._Execute([
'gn', 'gen', '--check', '--fail-on-unused-args', self.output_dir, args
],
title='Generating ' + self.identifier)
self._Execute([
'bash', '-c',
'yes | %s/tools/bin/sdkmanager --licenses >/dev/null' %
os.environ['ANDROID_HOME']
],
title='Accepting NDK licenses')
def _build(self):
self._Execute(['ninja', '-C', self.output_dir],
title='Building JNI ' + self.identifier)
# JNILibs will be copied as long as they reside in src/main/jniLibs/ABI:
# https://developer.android.com/studio/projects/gradle-external-native-builds#jniLibs
# to avoid redefinition in IDE mode, copy them to another place and add that path in build.gradle
# We do NOT use python builtins for copy, so that the 'execution commands' are available
# when using dry run.
jnilibs_dir = os.path.join(
self.root, 'src/android/CHIPTool/app/libs/jniLibs', self.board.AbiName())
libs_dir = os.path.join(self.root, 'src/android/CHIPTool/app/libs')
self._Execute(['mkdir', '-p', jnilibs_dir],
title='Prepare Native libs ' + self.identifier)
# TODO: Runtime dependencies should be computed by the build system rather than hardcoded
# GN supports getting these dependencies like:
# gn desc out/android-x64-chip_tool/ //src/controller/java runtime_deps
# gn desc out/android-x64-chip_tool/ //src/setup_payload/java runtime_deps
# However this assumes that the output folder has been populated, which will not be
# the case for `dry-run` executions. Hence this hardcoding here.
#
# If we unify the JNI libraries, libc++_shared.so may not be needed anymore, which could
# be another path of resolving this inconsistency.
for libName in ['libSetupPayloadParser.so', 'libCHIPController.so', 'libc++_shared.so']:
self._Execute(['cp', os.path.join(self.output_dir, 'lib', 'jni', self.board.AbiName(
), libName), os.path.join(jnilibs_dir, libName)])
jars = {
'CHIPController.jar': 'src/controller/java/CHIPController.jar',
'SetupPayloadParser.jar': 'src/setup_payload/java/SetupPayloadParser.jar',
'AndroidPlatform.jar': 'src/platform/android/AndroidPlatform.jar'
}
for jarName in jars.keys():
self._Execute(['cp', os.path.join(
self.output_dir, 'lib', jars[jarName]), os.path.join(libs_dir, jarName)])
# App compilation
self._Execute([
'%s/src/android/CHIPTool/gradlew' % self.root, '-p',
'%s/src/android/CHIPTool' % self.root,
'-PbuildDir=%s' % self.output_dir, 'assembleDebug'
],
title='Building APP ' + self.identifier)
def build_outputs(self):
outputs = {
'CHIPController.jar':
os.path.join(self.output_dir, 'lib',
'src/controller/java/CHIPController.jar'),
'AndroidPlatform.jar':
os.path.join(self.output_dir, 'lib',
'src/platform/android/AndroidPlatform.jar'),
'SetupPayloadParser.jar':
os.path.join(self.output_dir, 'lib',
'src/setup_payload/java/SetupPayloadParser.jar'),
'ChipTool-debug.apk':
os.path.join(self.output_dir, 'outputs', 'apk', 'debug',
'app-debug.apk'),
'jni/%s/libSetupPayloadParser.so' % self.board.AbiName():
os.path.join(self.output_dir, 'lib', 'jni',
self.board.AbiName(), 'libSetupPayloadParser.so'),
'jni/%s/libCHIPController.so' % self.board.AbiName():
os.path.join(self.output_dir, 'lib', 'jni',
self.board.AbiName(), 'libCHIPController.so'),
'jni/%s/libc++_shared.so' % self.board.AbiName():
os.path.join(self.output_dir, 'lib', 'jni',
self.board.AbiName(), 'libc++_shared.so'),
}
return outputs
| 41.731343
| 108
| 0.584645
|
2e194b8d8eba05ea79c0966ccc9cee208fc63a6c
| 3,065
|
py
|
Python
|
extract_notes.py
|
margrietpalm/reveal_tools
|
4e1b9d421fcbab0d2be7a66c0d3690c3c7229804
|
[
"MIT"
] | null | null | null |
extract_notes.py
|
margrietpalm/reveal_tools
|
4e1b9d421fcbab0d2be7a66c0d3690c3c7229804
|
[
"MIT"
] | null | null | null |
extract_notes.py
|
margrietpalm/reveal_tools
|
4e1b9d421fcbab0d2be7a66c0d3690c3c7229804
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Clean up images folder for reveal presentation"""
# from optparse import OptionParser
import argparse
import sys
import os
import glob
import shutil
import future
from itertools import groupby
from operator import itemgetter
__author__ = "Margriet Palm"
__copyright__ = "Copyright 2018"
__credits__ = "Margriet Palm"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Margriet Palm"
def parse_args():
# read arguments
# create parser
parser = argparse.ArgumentParser(description='Extract speaker notes from reveal slides')
parser.add_argument('-q', '--quiet', dest="quiet", action="store_true", help="suppress output")
parser.add_argument('-o', '--outfile', dest='outfile', default='notes.md')
parser.add_argument('--to-pdf', action="store_true", help="convert to pdf")
parser.add_argument('--to-html', action="store_true", help="convert to html")
parser.add_argument('--number-slides', action="store_true")
parser.add_argument('mdfile', nargs='?', default='slides.md', help="path to the slides markdown file (default: %(default)s)")
return parser.parse_args()
def get_structure(fn):
f = open(fn)
lines = f.readlines()
blanks = [i for i, l in enumerate(lines) if len(l.strip()) == 0]
sections = {1: []}
section = 1
i0 = 0
for k, g in groupby(enumerate(blanks), lambda x: x[1] - x[0]):
elines = list(map(itemgetter(1), g))
if len(elines) < 2:
continue
text = ''.join([lines[i] for i in range(i0, elines[0])])
sections[section].append(text)
if len(elines) > 2:
section += 1
sections[section] = []
i0 = elines[-1] + 1
return sections
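# Reading of the expected slides.md layout (inferred from get_structure() above,
# not documented upstream): a run of two or more blank lines ends a slide, and a
# run of three or more blank lines additionally starts a new section, e.g.
#
#   ## Slide 1          <- section 1
#   <2 blank lines>
#   ## Slide 2          <- still section 1
#   <3+ blank lines>
#   ## Slide 3          <- section 2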
def extract_notes(sections, number_slides=False):
notes = ''
parts = list(sections.keys())
parts.sort()
for p in parts:
notes += '# Part {}\n\n'.format(p)
for i, slide in enumerate(sections[p], 1):
lines = slide.split('\n')
notes += '##'
if number_slides:
notes += '## {}.{} '.format(p, i)
notes += lines[0] + '\n'
inotes = len(lines)
for j, l in enumerate(lines):
if l.startswith('Note:'):
inotes = j
break
for j in range(inotes + 1, len(lines)):
notes += lines[j] + '\n'
notes += '\n'
return notes
def main():
opt = parse_args()
sections = get_structure(opt.mdfile)
notes = extract_notes(sections, opt.number_slides)
f = open(opt.outfile, 'w')
f.write(notes)
f.close()
if opt.to_pdf:
template = '/' + '/'.join(os.path.realpath(__file__).split('/')[1:-1]) + '/templates/eisvogel.tex'
os.system('pandoc {} -o {} --from gfm --template {} '
'--listings'.format(opt.outfile, opt.outfile.replace('.md', '.pdf'), template))
if opt.to_html:
os.system('pandoc {} -o {} --from gfm'.format(opt.outfile, opt.outfile.replace('.md', '.html')))
if __name__ == "__main__":
main()
| 31.927083
| 120
| 0.585971
|
637488264a3624069b06a94000425c17f46c7f69
| 713
|
py
|
Python
|
test/conanfile.py
|
kannkyo/boilerplate-conan
|
3621fd0a4dad2bcf746fb6ad3b78f99554708e50
|
[
"MIT"
] | null | null | null |
test/conanfile.py
|
kannkyo/boilerplate-conan
|
3621fd0a4dad2bcf746fb6ad3b78f99554708e50
|
[
"MIT"
] | 1
|
2021-11-05T22:33:34.000Z
|
2021-11-05T22:33:34.000Z
|
test/conanfile.py
|
kannkyo/boilerplate-conan
|
3621fd0a4dad2bcf746fb6ad3b78f99554708e50
|
[
"MIT"
] | null | null | null |
import os
from conans import ConanFile, CMake, tools
class SampleTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
requires = "cunit/2.1.3"
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is
# in "test_package"
cmake.configure()
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
if not tools.cross_building(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
| 26.407407
| 78
| 0.570827
|
791b25d466d7f1dc79e978d2eb34a6779a7fc0df
| 4,793
|
py
|
Python
|
lib/gps.py
|
ifurusato/ros
|
77b1361e78f68f00ba2d3e3db908bb5ce0f973f5
|
[
"MIT"
] | 9
|
2020-10-12T08:49:55.000Z
|
2021-07-23T14:20:05.000Z
|
lib/gps.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 12
|
2020-07-22T19:08:58.000Z
|
2022-02-03T03:17:03.000Z
|
lib/gps.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 3
|
2020-07-19T20:43:19.000Z
|
2022-03-02T09:15:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2021-03-06
# modified: 2021-03-09
#
import itertools
from pa1010d import PA1010D, PPS
from colorama import init, Fore, Style
init()
from lib.logger import Level, Logger
class GPS(object):
'''
A wrapper around the PA1010d GPS library. Provides individual
properties for each of the GPS outputs.
'''
def __init__(self, level):
self._log = Logger("gps", level)
self._counter = itertools.count()
self._gps = PA1010D()
# self._gps.set_pps(mode=PPS.ALWAYS)
self._gps.set_pps(mode=PPS.ONLY_2D_3D)
self.clear()
self._log.info('ready.')
# ..........................................................................
def clear(self):
self._timestamp, self._latitude, self._longitude, self._altitude, \
self._sat_count, self._quality, self._speed, self._mf_type, \
self._pdop, self._vdop, self._hdop = (None,)*11
# ..........................................................................
def read(self):
self._count = next(self._counter)
result = self._gps.update()
if result:
_data = self._gps.data
if _data.get('timestamp') == None:
self.clear()
else:
self._timestamp = _data.get('timestamp')
self._latitude = _data.get('latitude')
self._longitude = _data.get('longitude')
self._altitude = _data.get('altitude')
self._sat_count = _data.get('num_sats')
self._quality = _data.get('gps_qual')
self._speed = _data.get('speed_over_ground')
self._mf_type = _data.get('mode_fix_type')
self._pdop = _data.get('pdop')
self._vdop = _data.get('vdop')
self._hdop = _data.get('hdop')
else:
return None
# ..........................................................................
def display(self):
if self._timestamp == None:
self._log.info(Fore.CYAN + ' [{:06d}]'.format(self._count) + Fore.YELLOW + ' GPS returned null: no satellites found.')
else:
try:
_color = Fore.BLUE if self._sat_count == 0 else Fore.YELLOW
self._log.info(Fore.CYAN + ' [{:06d}]'.format(self._count) + _color \
+ Fore.GREEN + ' time: {}; {} sat; q{};'.format(self._timestamp, self._sat_count, self._quality) \
+ Fore.WHITE + ' lati-long: {:6.4f}, {:6.4f}; alt: {:5.2f}m; speed: {}m/s;'.format(self._latitude, self._longitude, self._altitude, self._speed) )
# + Fore.BLACK + ' fix type: {} PDOP: {} VDOP: {} HDOP: {}'.format(self._mf_type, self._pdop, self._vdop, self._hdop) )
except Exception:
pass
# ..........................................................................
@property
def timestamp(self):
return self._timestamp
# ..........................................................................
@property
def latitude(self):
return self._latitude
# ..........................................................................
@property
def longitude(self):
return self._longitude
# ..........................................................................
@property
def altitude(self):
return self._altitude
# ..........................................................................
@property
def satellites(self):
return self._sat_count
# ..........................................................................
@property
def quality(self):
return self._quality
# ..........................................................................
@property
def speed(self):
return self._speed
# ..........................................................................
@property
def mode_fix_type(self):
return self._mf_type
# ..........................................................................
@property
def pdop(self):
return self._pdop
# ..........................................................................
@property
def vdop(self):
return self._vdop
# ..........................................................................
@property
def hdop(self):
return self._hdop
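# Example usage (illustrative only, not part of the original file); Level comes
# from lib.logger imported above, and the readings depend on the attached PA1010D:
#
#   gps = GPS(Level.INFO)
#   gps.read()
#   gps.display()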
#EOF
| 35.768657
| 170
| 0.440851
|
3ae332081debc19c5bfb920e480a08600d912fbb
| 10,064
|
py
|
Python
|
src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_blob_live_scenarios.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | 2
|
2021-06-05T17:51:26.000Z
|
2021-11-17T11:17:56.000Z
|
src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_blob_live_scenarios.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | 1
|
2020-06-12T01:39:40.000Z
|
2020-06-12T01:39:40.000Z
|
src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_blob_live_scenarios.py
|
ConnectionMaster/azure-cli-extensions
|
08d184f4efeac397c1ffcd21a83d651f4fad2782
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
from datetime import datetime, timedelta
from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, StorageAccountPreparer,
JMESPathCheck, JMESPathCheckExists, NoneCheck, api_version_constraint)
from azure.cli.core.profiles import ResourceType
from ..storage_test_util import StorageScenarioMixin
@api_version_constraint(ResourceType.MGMT_STORAGE, min_api='2016-12-01')
class StorageBlobUploadLiveTests(LiveScenarioTest):
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_128mb_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 128 * 1024, 'block')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_64mb_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 64 * 1024, 'block')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_256mb_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 256 * 1024, 'block')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_1G_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 1024 * 1024, 'block')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_2G_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 2 * 1024 * 1024,
'block')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload_10G_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 10 * 1024 * 1024,
'block', skip_download=True)
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_page_blob_upload_10G_file(self, resource_group, storage_account):
self.verify_blob_upload_and_download(resource_group, storage_account, 10 * 1024 * 1024,
'page', skip_download=True)
def verify_blob_upload_and_download(self, group, account, file_size_kb, blob_type,
skip_download=False):
container = self.create_random_name(prefix='cont', length=24)
local_dir = self.create_temp_dir()
local_file = self.create_temp_file(file_size_kb, full_random=True)
blob_name = self.create_random_name(prefix='blob', length=24)
account_key = self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv'
.format(account, group)).output
self.set_env('AZURE_STORAGE_ACCOUNT', account)
self.set_env('AZURE_STORAGE_KEY', account_key)
self.cmd('storage container create -n {}'.format(container))
self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
checks=JMESPathCheck('exists', False))
self.cmd('storage blob upload -c {} -f "{}" -n {} --type {}'
.format(container, local_file, blob_name, blob_type))
self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
checks=JMESPathCheck('exists', True))
self.cmd('storage blob show -n {} -c {}'.format(blob_name, container),
checks=[JMESPathCheck('properties.contentLength', file_size_kb * 1024),
JMESPathCheckExists('properties.pageRanges') if blob_type == 'page' else
JMESPathCheck('properties.pageRanges', None)])
if not skip_download:
downloaded = os.path.join(local_dir, 'test.file')
self.cmd('storage blob download -n {} -c {} --file "{}"'
.format(blob_name, container, downloaded))
self.assertTrue(os.path.isfile(downloaded), 'The file is not downloaded.')
self.assertEqual(file_size_kb * 1024, os.stat(downloaded).st_size,
'The download file size is not right.')
@ResourceGroupPreparer()
@StorageAccountPreparer()
def test_storage_blob_upload(self, resource_group, storage_account):
from azure.cli.core.azclierror import AzureResponseError
file_size_kb = 128
local_file = self.create_temp_file(file_size_kb, full_random=True)
container = self.create_random_name(prefix='cont', length=10)
blob_name = self.create_random_name(prefix='blob', length=24)
account_key = self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv'
.format(storage_account, resource_group)).output
self.set_env('AZURE_STORAGE_ACCOUNT', storage_account)
self.set_env('AZURE_STORAGE_KEY', account_key)
self.cmd('storage container create -n {}'.format(container))
# test upload through file path
self.cmd('storage blob upload -c {} -f "{}" -n {}'.format(container, local_file, blob_name))
self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container), checks=JMESPathCheck('exists', True))
self.cmd('storage blob show -n {} -c {}'.format(blob_name, container), checks=[
JMESPathCheck('properties.contentLength', file_size_kb * 1024),
JMESPathCheck('name', blob_name)])
# test upload from data
self.cmd('storage blob upload -c {} --data {} --length 4 -n {} --overwrite'.format(
container, "test", blob_name))
self.cmd('storage blob show -n {} -c {}'.format(blob_name, container), checks=[
JMESPathCheck('properties.contentLength', 4),
JMESPathCheck('name', blob_name)])
class StorageBlobURLScenarioTest(StorageScenarioMixin, LiveScenarioTest):
@ResourceGroupPreparer(name_prefix='clitest')
@StorageAccountPreparer(kind='StorageV2', name_prefix='clitest', location='eastus2euap')
def test_storage_blob_url_scenarios(self, resource_group, storage_account):
account_info = self.get_account_info(resource_group, storage_account)
container = self.create_container(account_info, prefix="con1")
local_file1 = self.create_temp_file(128)
local_file2 = self.create_temp_file(64)
blob_name1 = "/".join(["dir", self.create_random_name(prefix='blob', length=24)])
# set delete-policy to enable soft-delete
self.storage_cmd('storage blob service-properties delete-policy update --enable true --days-retained 2',
account_info)
self.storage_cmd('storage blob service-properties delete-policy show',
account_info).assert_with_checks(JMESPathCheck('enabled', True),
JMESPathCheck('days', 2))
# Prepare blob
self.storage_cmd('storage blob upload -c {} -f "{}" -n {} ', account_info,
container, local_file1, blob_name1)
expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
blob_uri = self.storage_cmd('storage blob generate-sas -n {} -c {} --expiry {} --permissions '
'rwad --https-only --full-uri -o tsv',
account_info, blob_name1, container, expiry).output.strip()
self.cmd('storage blob exists --blob-url {}'.format(blob_uri), checks=JMESPathCheck('exists', True))
self.cmd('storage blob show --blob-url {}'.format(blob_uri), checks=[
JMESPathCheck('name', blob_name1),
JMESPathCheck('properties.contentLength', 128 * 1024),
JMESPathCheck('properties.blobTier', 'Hot')])
self.cmd('storage blob upload -f "{}" --blob-url {} --overwrite'.format(local_file2, blob_uri))
self.cmd('storage blob show --blob-url {}'.format(blob_uri), checks=[
JMESPathCheck('name', blob_name1),
JMESPathCheck('properties.contentLength', 64 * 1024)])
local_dir = self.create_temp_dir()
downloaded = os.path.join(local_dir, 'test.file')
self.cmd('storage blob download --blob-url {} -f "{}"'.format(blob_uri, downloaded))
self.assertTrue(os.path.isfile(downloaded), 'The file is not downloaded.')
self.assertEqual(64 * 1024, os.stat(downloaded).st_size,
'The download file size is not right.')
self.cmd('storage blob set-tier --blob-url {} --tier Cool'.format(blob_uri))
self.cmd('storage blob show --blob-url {}'.format(blob_uri), checks=[
JMESPathCheck('name', blob_name1),
JMESPathCheck('properties.contentLength', 64 * 1024),
JMESPathCheck('properties.blobTier', "Cool")])
self.cmd('storage blob snapshot --blob-url {}'.format(blob_uri),
checks=JMESPathCheckExists('snapshot'))
self.storage_cmd('storage blob list -c {}', account_info, container)\
.assert_with_checks(JMESPathCheck('length(@)', 1))
self.cmd('storage blob delete --blob-url {} --delete-snapshots include '.format(blob_uri))
self.storage_cmd('storage blob list -c {}', account_info, container).assert_with_checks(
JMESPathCheck('length(@)', 0))
self.cmd('storage blob undelete --blob-url {} '.format(blob_uri))
self.storage_cmd('storage blob list -c {}', account_info, container).assert_with_checks(
JMESPathCheck('length(@)', 1))
| 54.107527
| 118
| 0.641494
|
e7a19b279b17bf722890f1c9d4e35480da127ad5
| 1,149
|
py
|
Python
|
setup.py
|
pannkotsky/graphene-field-permission
|
75306a09e89949915cc82ca32aa2eca88d3f7016
|
[
"MIT"
] | 9
|
2019-02-23T05:09:14.000Z
|
2022-03-11T00:22:36.000Z
|
setup.py
|
pannkotsky/graphene-field-permission
|
75306a09e89949915cc82ca32aa2eca88d3f7016
|
[
"MIT"
] | null | null | null |
setup.py
|
pannkotsky/graphene-field-permission
|
75306a09e89949915cc82ca32aa2eca88d3f7016
|
[
"MIT"
] | 2
|
2020-12-07T22:32:10.000Z
|
2022-01-25T15:36:03.000Z
|
import setuptools
from os import sys
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
with open('README.md') as readme:
long_description = readme.read()
setuptools.setup(
name="graphene-field-permission",
version="1.0.0",
author="Dave O'Connor",
author_email="github@dead-pixels.org",
description="A package to add field permission support for Graphene",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/daveoconnor/graphene-field-permission",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
tests_require=["pytest"],
setup_requires=[
"setuptools>=57.1.0",
"twine>=3.4.1",
] + pytest_runner,
)
| 31.916667
| 73
| 0.651871
|
d4818cf6e04e375a9dbddea8d4363e96e8a47060
| 927
|
py
|
Python
|
example/conftest.py
|
Euraxluo/dynamic_config
|
7c6e0881159e5334f44b70eb704eb179d03f9287
|
[
"MIT"
] | null | null | null |
example/conftest.py
|
Euraxluo/dynamic_config
|
7c6e0881159e5334f44b70eb704eb179d03f9287
|
[
"MIT"
] | null | null | null |
example/conftest.py
|
Euraxluo/dynamic_config
|
7c6e0881159e5334f44b70eb704eb179d03f9287
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Time: 2022-02-11 11:59
# Copyright (c) 2022
# author: Euraxluo
import redis # type:ignore
import json
from typing import Union, Callable
class RedisHelper(object):
def __init__(self, host='127.0.0.1', port='6379', db=1, password='redis', decode_responses=False):
redis.ConnectionPool()
self.pool = redis.ConnectionPool(host=host, port=port, db=db, password=password, decode_responses=decode_responses)
self.r = redis.Redis(connection_pool=self.pool)
def rdb(self) -> redis.Redis:
return self.r
@staticmethod
def encode(data: dict, default: dict = {}):
if data:
return json.dumps(data)
return json.dumps(default)
@staticmethod
def decode(data: Union[str, bytes], instance: Callable = str):
if data:
return json.loads(data)
return instance().__dict__()
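# Illustrative round-trip through the two static helpers above (added note):
#   RedisHelper.encode({"a": 1})    -> '{"a": 1}'
#   RedisHelper.decode('{"a": 1}')  -> {'a': 1}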
rdb = RedisHelper().rdb()
| 25.75
| 123
| 0.644013
|
78ac0640d93b59b3b4d5477cdf9990558d888a26
| 11,255
|
py
|
Python
|
src/de5000.py
|
4x1md/de5000_lcr_python
|
abfbe5d605f0e9d6928119f4be904a382d0c14a0
|
[
"MIT"
] | 8
|
2018-08-08T22:52:21.000Z
|
2021-06-10T07:29:40.000Z
|
src/de5000.py
|
4x1md/de5000_lcr_python
|
abfbe5d605f0e9d6928119f4be904a382d0c14a0
|
[
"MIT"
] | 1
|
2019-03-29T19:23:28.000Z
|
2019-03-29T19:23:28.000Z
|
src/de5000.py
|
4x1md/de5000_lcr_python
|
abfbe5d605f0e9d6928119f4be904a382d0c14a0
|
[
"MIT"
] | 2
|
2018-08-08T22:52:24.000Z
|
2019-03-31T00:26:16.000Z
|
'''
Created on Sep 15, 2017
@author: 4x1md
Serial port settings: 9600 8N1 DTR=1 RTS=0
'''
import serial
# Settings constants
BAUD_RATE = 9600
BITS = serial.EIGHTBITS
PARITY = serial.PARITY_NONE
STOP_BITS = serial.STOPBITS_ONE
TIMEOUT = 1
# Data packet ends with CR LF (\r \n) characters
EOL = b'\x0D\x0A'
RAW_DATA_LENGTH = 17
READ_RETRIES = 3
# Cyrustek ES51919 protocol constants
# Byte 0x02: flags
# bit 0 = hold enabled
HOLD = 0b00000001
# bit 1 = reference shown (in delta mode)
REF_SHOWN = 0b00000010
# bit 2 = delta mode
DELTA = 0b00000100
# bit 3 = calibration mode
CAL = 0b00001000
# bit 4 = sorting mode
SORTING = 0b00010000
# bit 5 = LCR mode
LCR_AUTO = 0b00100000
# bit 6 = auto mode
AUTO_RANGE = 0b01000000
# bit 7 = parallel measurement (vs. serial)
PARALLEL = 0b10000000
# Byte 0x03 bits 5-7: Frequency
FREQ = [
'100 Hz',
'120 Hz',
'1 KHz',
'10 KHz',
'100 KHz',
'DC'
]
# Byte 0x04: tolerance
TOLERANCE = [
None,
None, None,
'+-0.25%',
'+-0.5%',
'+-1%',
'+-2%',
'+-5%',
'+-10%',
'+-20%',
'-20+80%',
]
# Byte 0x05: primary measured quantity (serial and parallel mode)
MEAS_QUANTITY_SER = [None, 'Ls', 'Cs', 'Rs', 'DCR']
MEAS_QUANTITY_PAR = [None, 'Lp', 'Cp', 'Rp', 'DCR']
# Bytes 0x08, 0x0D bits 3-7: Units
MAIN_UNITS = [
'',
'Ohm',
'kOhm',
'MOhm',
None,
'uH',
'mH',
'H',
'kH',
'pF',
'nF',
'uF',
'mF',
'%',
'deg',
None, None, None, None, None, None
]
# Bytes 0x09, 0x0E bits 0-3: Measurement display status
STATUS = [
'normal',
'blank',
'----',
'OL',
None, None, None,
'PASS',
'FAIL',
'OPEn',
'Srt'
]
# Byte 0x0a: secondary measured quantity
SEC_QUANTITY = [
None,
'D',
'Q',
'ESR',
'Theta'
]
RP = 'RP'
# Output format
MEAS_RES = {
'main_quantity': None,
'main_val': None,
'main_units': None,
'main_status': None,
'main_norm_val': None,
'main_norm_units': None,
'sec_quantity': None,
'sec_val': None,
'sec_units': None,
'sec_status': None,
'sec_norm_val': None,
'sec_norm_units': None,
'freq': None,
'tolerance': None,
'ref_shown': False,
'delta_mode': False,
'cal_mode': False,
'sorting_mode': False,
'lcr_auto': False,
'auto_range': False,
'parallel': False,
'data_valid': False
}
# Normalization constants
# Each value contains multiplier and target value
NORMALIZE_RULES = {
'': (1, ''),
'Ohm': (1, 'Ohm'),
'kOhm': (1E3, 'Ohm'),
'MOhm': (1E6, 'Ohm'),
'uH': (1E-6, 'H'),
'mH': (1E-3, 'H'),
'H': (1, 'H'),
'kH': (1E3, 'H'),
'pF': (1E-12, 'F'),
'nF': (1E-9, 'F'),
'uF': (1E-6, 'F'),
'mF': (1E-3, 'F'),
'%': (1, '%'),
'deg': (1, 'deg')
}
class DE5000(object):
def __init__(self, port):
self._port = port
self._ser = serial.Serial(self._port, BAUD_RATE, BITS, PARITY, STOP_BITS, timeout=TIMEOUT)
self._ser.setDTR(True)
self._ser.setRTS(False)
def read_raw_data(self):
'''Reads a new data packet from serial port.
If the packet is valid, returns an array of integers.
If the packet is not valid, returns an empty array.
In order to get the last reading the input buffer is flushed
before reading any data.
If the first received packet contains less than 17 bytes, it is
not complete and the reading is done again. Maximum number of
retries is defined by READ_RETRIES value.
'''
self._ser.reset_input_buffer()
retries = 0
while retries < READ_RETRIES:
raw_data = self._ser.read_until(EOL, RAW_DATA_LENGTH)
# If 17 bytes were read, the packet is valid and the loop ends.
if len(raw_data) == RAW_DATA_LENGTH:
break
retries += 1
res = []
# Check data validity
if self.is_data_valid(raw_data):
res = [ord(c) for c in raw_data]
return res
def is_data_valid(self, raw_data):
'''Checks data validity:
1. 17 bytes long
2. Header bytes 0x00 0x0D
3. Footer bytes 0x0D 0x0A'''
# Data length
if len(raw_data) != RAW_DATA_LENGTH:
return False
# Start bits
if raw_data[0] != '\x00' or raw_data[1] != '\x0D':
return False
# End bits
if raw_data[15] != '\x0D' or raw_data[16] != '\x0A':
return False
return True
def read_hex_str_data(self):
'''Returns raw data represented as string with hexadecimal values.'''
data = self.read_raw_data()
codes = ["0x%02X" % c for c in data]
return " ".join(codes)
def get_meas(self):
'''Returns received measurement as dictionary'''
res = MEAS_RES.copy()
raw_data = self.read_raw_data()
# If raw data is empty, return
if len(raw_data) == 0:
res['data_valid'] = False
return res
# Frequency
val = raw_data[0x03]
val &= 0b11100000
val = val >> 5
res['freq'] = FREQ[val]
# Reference shown
val = raw_data[0x02]
val &= REF_SHOWN
res['ref_shown'] = True if val else False
# Delta mode
val = raw_data[0x02]
val &= DELTA
res['delta_mode'] = True if val else False
# Calibration mode
val = raw_data[0x02]
val &= CAL
res['cal_mode'] = True if val else False
# Sorting mode
val = raw_data[0x02]
val &= SORTING
res['sorting_mode'] = True if val else False
# LCR AUTO mode
val = raw_data[0x02]
val &= LCR_AUTO
res['lcr_auto'] = True if val else False
# Auto range
val = raw_data[0x02]
val &= AUTO_RANGE
res['auto_range'] = True if val else False
# Parallel measurement
val = raw_data[0x02]
val &= PARALLEL
res['parallel'] = True if val else False
# Main measurement
# Status
val = raw_data[0x09]
val &= 0b00001111
res['main_status'] = STATUS[val]
# Quantity
val = raw_data[0x05]
if res['parallel']:
res['main_quantity'] = MEAS_QUANTITY_PAR[val]
else:
res['main_quantity'] = MEAS_QUANTITY_SER[val]
# Value
val = raw_data[0x06] * 0x100 + raw_data[0x07]
mul = raw_data[0x08]
mul &= 0b00000111
val = val * 10**-mul
res['main_val'] = val
# Units
val = raw_data[0x08]
val &= 0b11111000
val = val >> 3
res['main_units'] = MAIN_UNITS[val]
# Normalize value
nval = self.normalize_val(res['main_val'], res['main_units'])
res['main_norm_val'] = nval[0]
res['main_norm_units'] = nval[1]
# Secondary measurement
# Status
val = raw_data[0x0E]
val &= 0b00000111
res['sec_status'] = STATUS[val]
# Quantity
val = raw_data[0x0A]
if res['parallel'] and val == 0x03:
res['sec_quantity'] = RP
else:
res['sec_quantity'] = SEC_QUANTITY[val]
# Units
val = raw_data[0x0D]
val &= 0b11111000
val = val >> 3
res['sec_units'] = MAIN_UNITS[val]
# Value
val = raw_data[0x0B] * 0x100 + raw_data[0x0C]
'''If units are % or deg, the value may be negative which is
represented in two's complement form.
In this case if the highest bit is 1, the value should be converted
to negative by subtracting it from 0x10000.'''
if res['sec_units'] in ('%', 'deg') and val & 0x1000:
val = val - 0x10000
mul = raw_data[0x0D]
mul &= 0b00000111
val = val * 10**-mul
res['sec_val'] = val
# Normalize value
nval = self.normalize_val(res['sec_val'], res['sec_units'])
res['sec_norm_val'] = nval[0]
res['sec_norm_units'] = nval[1]
# Tolerance
val = raw_data[0x04]
res['tolerance'] = TOLERANCE[val]
res['data_valid'] = True
return res
def normalize_val(self, val, units):
'''Normalizes measured value to standard units. Resistance
is normalized to Ohm, capacitance to Farad and inductance
to Henry. Other units are not changed.'''
val = val * NORMALIZE_RULES[units][0]
units = NORMALIZE_RULES[units][1]
return (val, units)
def pretty_print(self, disp_norm_val = False):
'''Prints measurement details in pretty print.
disp_norm_val: if True, normalized values will also be displayed.
'''
data = self.get_meas()
if data['data_valid'] == False:
print "DE-5000 is not connected."
return
# In calibration mode frequency is not displayed.
if data['cal_mode']:
print "Calibration"
else:
if data['sorting_mode']:
print "SORTING Tol %s" % data['tolerance']
print "Frequency: %s" % data['freq']
# LCR autodetection mode
if data['lcr_auto']:
print "LCR AUTO"
# Auto range
if data['auto_range']:
print "AUTO RNG"
# Delta mode parameters
if data['delta_mode']:
if data['ref_shown']:
print "DELTA Ref"
else:
print "DELTA"
# Main display
if data['main_status'] == 'normal':
print "%s = %s %s" % (data['main_quantity'], data['main_val'], data['main_units'])
elif data['main_status'] == 'blank':
print
else:
print data['main_status']
# Secondary display
if data['sec_status'] == 'normal':
if data['sec_quantity'] is not None:
print "%s = %s %s" % (data['sec_quantity'], data['sec_val'], data['sec_units'])
else:
print "%s %s" % (data['sec_val'], data['sec_units'])
elif data['sec_status'] == 'blank':
print
else:
print data['sec_status']
# Display normalized values
# If measurement status is not normal, ---- will be displayed.
if disp_norm_val:
if data['main_status'] == 'normal':
print "Primary: %s %s" % (data['main_norm_val'], data['main_norm_units'])
else:
print "Primary: ----"
if data['sec_status'] == 'normal':
print "Secondary: %s %s" % (data['sec_norm_val'], data['sec_norm_units'])
else:
print "Secondary: ----"
def __del__(self):
if hasattr(self, '_ser'):
self._ser.close()
if __name__ == '__main__':
pass
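# Illustrative usage only (the serial device name is an assumption and will
# differ between systems):
#
#   lcr = DE5000('/dev/ttyUSB0')
#   lcr.pretty_print(disp_norm_val=True)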
| 26.358314
| 99
| 0.525722
|
d693426256bdf2e623fcb3060e05c85caa7828d1
| 6,196
|
py
|
Python
|
parlai/core/testing_utils.py
|
cherie11/ParlAI
|
1c1e4b00b398278b652c24ed5cac072cff6a9c9a
|
[
"MIT"
] | 3
|
2020-12-04T07:29:18.000Z
|
2021-04-08T06:23:20.000Z
|
parlai/core/testing_utils.py
|
cherie11/ParlAI
|
1c1e4b00b398278b652c24ed5cac072cff6a9c9a
|
[
"MIT"
] | null | null | null |
parlai/core/testing_utils.py
|
cherie11/ParlAI
|
1c1e4b00b398278b652c24ed5cac072cff6a9c9a
|
[
"MIT"
] | 1
|
2020-12-04T07:29:04.000Z
|
2020-12-04T07:29:04.000Z
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
General utilities for helping writing ParlAI unit and integration tests.
"""
import os
import unittest
import contextlib
import tempfile
import shutil
import io
try:
import torch
TORCH_AVAILABLE = True
GPU_AVAILABLE = torch.cuda.device_count() > 0
except ImportError:
TORCH_AVAILABLE = False
GPU_AVAILABLE = False
try:
import git
git_ = git.Git()
GIT_AVAILABLE = True
except ImportError:
git_ = None
GIT_AVAILABLE = False
DEBUG = False # change this to true to print to stdout anyway
def skipUnlessTorch(testfn, reason='pytorch is not installed'):
"""Decorator for skipping a test if torch is not installed."""
return unittest.skipUnless(TORCH_AVAILABLE, reason)(testfn)
def skipIfGPU(testfn, reason='Test is CPU-only'):
"""
Decorator for skipping a test if a GPU is available.
Useful for disabling hogwild tests.
"""
return unittest.skipIf(GPU_AVAILABLE, reason)(testfn)
def skipUnlessGPU(testfn, reason='Test requires a GPU'):
"""Decorator for skipping a test if no GPU is available."""
return unittest.skipUnless(GPU_AVAILABLE, reason)(testfn)
def skipIfTravis(testfn, reason='Test disabled in Travis'):
"""Decorator for skipping a test if running on Travis."""
return unittest.skipIf(os.environ.get('TRAVIS'), reason)(testfn)
class retry(object):
"""
Decorator for flaky tests. Test is run up to ntries times, retrying on failure.
On the last time, the test will simply fail.
>>> @retry(ntries=10)
... def test_flaky(self):
... import random
... self.assertLess(0.5, random.random())
"""
def __init__(self, ntries=3):
self.ntries = ntries
def __call__(self, testfn):
from functools import wraps
@wraps(testfn)
def _wrapper(testself, *args, **kwargs):
for i in range(self.ntries - 1):
try:
return testfn(testself, *args, **kwargs)
except testself.failureException:
pass
# last time, actually throw any errors there may be
return testfn(testself, *args, **kwargs)
return _wrapper
def git_ls_files(root=None, skip_nonexisting=True):
"""
List all files tracked by git.
"""
filenames = git_.ls_files(root).split('\n')
if skip_nonexisting:
filenames = [fn for fn in filenames if os.path.exists(fn)]
return filenames
def git_ls_dirs(root=None):
"""
Lists all folders tracked by git.
"""
dirs = set()
for fn in git_ls_files(root):
dirs.add(os.path.dirname(fn))
return list(dirs)
def git_changed_files(skip_nonexisting=True):
"""
Lists all the changed files in the git repository.
"""
fork_point = git_.merge_base('--fork-point', 'origin/master').strip()
filenames = git_.diff('--name-only', fork_point).split('\n')
if skip_nonexisting:
filenames = [fn for fn in filenames if os.path.exists(fn)]
return filenames
@contextlib.contextmanager
def capture_output():
"""
Context manager which suppresses all stdout and stderr, and combines them
into a single io.StringIO.
:returns: the output
:rtype: io.StringIO
>>> with capture_output() as output:
... print('hello')
>>> output.getvalue()
'hello\n'
"""
if DEBUG:
yield
else:
sio = io.StringIO()
with contextlib.redirect_stdout(sio), contextlib.redirect_stderr(sio):
yield sio
@contextlib.contextmanager
def tempdir():
"""
Simple wrapper for creating a temporary directory.
>>> with tempdir() as tmpdir:
... print(tmpdir) # prints a folder like /tmp/randomname
"""
d = tempfile.mkdtemp()
yield d
shutil.rmtree(d)
def train_model(opt):
"""
Runs through a TrainLoop.
If model_file is not in opt, then this helper will create a temporary
directory to store the model, dict, etc.
:return: (stdout, stderr, valid_results, test_results)
:rtype: (str, str, dict, dict)
"""
import parlai.scripts.train_model as tms
with capture_output() as output:
with tempdir() as tmpdir:
if 'model_file' not in opt:
opt['model_file'] = os.path.join(tmpdir, 'model')
if 'dict_file' not in opt:
opt['dict_file'] = os.path.join(tmpdir, 'model.dict')
parser = tms.setup_args()
parser.set_params(**opt)
popt = parser.parse_args(print_args=False)
tl = tms.TrainLoop(popt)
valid, test = tl.train()
return (
output.getvalue(),
valid,
test,
)
def eval_model(opt):
"""
Runs through an evaluation loop.
:return: (stdout, stderr, valid_results, test_results)
:rtype: (str, str, dict, dict)
If model_file is not in opt, then this helper will create a temporary directory
to store the model files, and clean up afterwards. You can keep the directory
by disabling autocleanup
"""
import parlai.scripts.eval_model as ems
parser = ems.setup_args()
parser.set_params(**opt)
popt = parser.parse_args(print_args=False)
if popt.get('model_file') and not popt.get('dict_file'):
popt['dict_file'] = popt['model_file'] + '.dict'
with capture_output() as output:
popt['datatype'] = 'valid'
valid = ems.eval_model(popt)
popt['datatype'] = 'test'
test = ems.eval_model(popt)
return (
output.getvalue(),
valid,
test,
)
def download_unittest_models():
from parlai.core.params import ParlaiParser
from parlai.core.build_data import download_models
opt = ParlaiParser().parse_args(print_args=False)
model_filenames = [
'seq2seq.tar.gz',
'transformer_ranker.tar.gz',
'transformer_generator.tar.gz'
]
with capture_output() as _:
download_models(opt, model_filenames, 'unittest')
| 26.706897
| 83
| 0.641059
|
cabd876d0a79abfc7f8a1a79c68e05c0438d9b8a
| 2,230
|
py
|
Python
|
Data collection/scriptSf.py
|
PAMepi/PAMepi_scripts_datalake
|
3b268a09c69c5120fa71611983e23f224df0a5e3
|
[
"MIT"
] | 2
|
2021-12-07T09:00:47.000Z
|
2022-03-30T20:39:33.000Z
|
Data collection/scriptSf.py
|
PAMepi/PAMepi_scripts_datalake
|
3b268a09c69c5120fa71611983e23f224df0a5e3
|
[
"MIT"
] | null | null | null |
Data collection/scriptSf.py
|
PAMepi/PAMepi_scripts_datalake
|
3b268a09c69c5120fa71611983e23f224df0a5e3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import pathlib
from datetime import datetime
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
from config import DATALAKE
raw = 'raw_data_covid19_version-' + datetime.now().strftime('%Y-%m-%d')
output_sg = os.path.join(DATALAKE, raw, 'data-notificacao_sindrome_gripal')
url = 'https://opendatasus.saude.gov.br/dataset/'
list_endpoints = [
'notificacoes-de-sindrome-gripal-leve-2020',
'notificacoes-de-sindrome-gripal-leve-2021',
'notificacoes-de-sindrome-gripal-leve-2022',
]
ufs = [
'AC', 'AP', 'AM', 'PA', 'RO', 'RR', 'TO',
'AL', 'BA', 'CE', 'MA', 'PB', 'PE', 'PI',
'RN', 'SE', 'DF', 'GO', 'MT', 'MS', 'ES',
'MG', 'RJ', 'SP', 'PR', 'RS', 'SC'
]
for endpoint in list_endpoints:
part_parth = endpoint.replace('notificacoes-de-', '')
folder_dataset = os.path.join(output_sg, part_parth)
pathlib.Path(folder_dataset).mkdir(parents=True, exist_ok=True)
r = requests.get(os.path.join(url, endpoint))
soup = BeautifulSoup(r.text, 'html.parser')
tag_a = soup.findAll('a')
list_uf_text = list(map(lambda x: 'Dados ' + x, ufs))
data_url = {}
for tag in tag_a:
string = tag.text.lstrip('\n').rstrip('\n').lstrip(' ').rstrip(' ')[0:8]
for string_uf in list_uf_text:
if string_uf == string:
href = tag['href']
data_url[string] = href.lstrip('/dataset/')
for csv_url in data_url.values():
r = requests.get(os.path.join(url, csv_url))
soup = BeautifulSoup(r.text, 'html.parser')
tag_a = soup.findAll('a')
for tag in tag_a:
if tag['href'].endswith('.csv'):
file_csv = requests.get(tag['href'], stream=True)
with open(os.path.join(folder_dataset, tag.text), 'wb') as f, tqdm(
desc=tag.text,
total=int(file_csv.headers['Content-Length']),
unit='iB',
unit_scale=True,
unit_divisor=1024
) as bar:
for content in file_csv.iter_content(chunk_size=1024):
size = f.write(content)
bar.update(size)
| 31.857143
| 83
| 0.575785
|
021a0a73a077b0a4efe50c98eaca50da84c2e78f
| 2,067
|
py
|
Python
|
elixir_rems_proxy/config/jwks.py
|
CSCfi/elixir-rems-proxy
|
8f3f3e02880fedd22902a53c1ff650e753fb9c03
|
[
"MIT"
] | 1
|
2018-08-29T08:33:06.000Z
|
2018-08-29T08:33:06.000Z
|
elixir_rems_proxy/config/jwks.py
|
CSCfi/elixir-rems-proxy
|
8f3f3e02880fedd22902a53c1ff650e753fb9c03
|
[
"MIT"
] | 19
|
2018-09-21T08:11:24.000Z
|
2020-09-07T12:30:29.000Z
|
elixir_rems_proxy/config/jwks.py
|
CSCfi/elixir-rems-proxy
|
8f3f3e02880fedd22902a53c1ff650e753fb9c03
|
[
"MIT"
] | null | null | null |
"""JWK Generator."""
import json
import os
import secrets
from pathlib import Path
from typing import Tuple
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from authlib.jose import jwk
def print_jwks() -> None:
"""Write JWK set to file."""
print("Writing keys to file.")
public_data, pem = generate_jwks()
# Public data to public_key.json
# Double use of Path to get correct types (needed for testing)
with open(Path(os.environ.get("JWK_PUBLIC_KEY_FILE", Path(__file__).resolve().parent.joinpath("public_key.json"))), "w") as public_file:
public_file.write(json.dumps(public_data))
# Private data to private_key.json
# Double use of Path to get correct types (needed for testing)
with open(Path(os.environ.get("JWK_PRIVATE_KEY_FILE", Path(__file__).resolve().parent.joinpath("private_key.json"))), "w") as private_file:
private_file.write(json.dumps(pem))
print("Done. Keys saved to public_key.json and private_key.json")
def generate_jwks() -> Tuple[dict, dict]:
"""Generate JWK set."""
# Generate keys
print("Generating keys.")
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
public_key = private_key.public_key().public_bytes(encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo)
pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
)
# Public data to public_key.json
public_data = {"keys": [jwk.dumps(public_key, kty="RSA")]}
public_data["keys"][0].update({"kid": secrets.token_hex(4)})
public_data["keys"][0].update({"alg": "RS256"})
# Private data to private_key.json
return public_data, jwk.dumps(pem, kty="RSA")
if __name__ == "__main__":
"""Run script."""
print_jwks()
| 40.529412
| 147
| 0.726173
|
c3d8e30064cd8c63cede7f3244c6cee23272555b
| 325
|
py
|
Python
|
April 2021/Count Binary Substrings.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
April 2021/Count Binary Substrings.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
April 2021/Count Binary Substrings.py
|
parikshitgupta1/leetcode
|
eba6c11740dc7597204af127c0f4c2163376294f
|
[
"MIT"
] | null | null | null |
class Solution:
def countBinarySubstrings(self, s):
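# Explanatory note (added): prev holds the length of the previous run of
# identical characters and cur the current run; at every character flip,
# min(prev, cur) valid substrings (equal, contiguous blocks of 0s and 1s)
# end at that boundary. E.g. "00110011" has runs 2,2,2,2 -> 2+2+2 = 6.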
ans, prev, cur = 0, 0, 1
for i in range(1, len(s)):
if s[i] != s[i-1]:
ans += min(prev, cur)
prev, cur = cur, 1
else:
cur += 1
return ans + min(prev, cur)
| 25
| 39
| 0.396923
|
0616ccdae5aa312b65ea4688dc5efc06435f24b9
| 7,862
|
py
|
Python
|
userbot/plugins/filemanager.py
|
indianSammy07/Wolf
|
aba9ecce1860f86f81a52722062531590521ad7f
|
[
"MIT"
] | 1
|
2020-08-30T07:57:50.000Z
|
2020-08-30T07:57:50.000Z
|
userbot/plugins/filemanager.py
|
indianSammy07/Wolf
|
aba9ecce1860f86f81a52722062531590521ad7f
|
[
"MIT"
] | null | null | null |
userbot/plugins/filemanager.py
|
indianSammy07/Wolf
|
aba9ecce1860f86f81a52722062531590521ad7f
|
[
"MIT"
] | 3
|
2020-12-06T23:40:55.000Z
|
2020-12-18T17:38:36.000Z
|
"""Execute GNU/Linux commands inside Telegram
Syntax: .lsroot , .lslocal"""
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from telethon import events
import subprocess
from telethon.errors import MessageEmptyError, MessageTooLongError, MessageNotModifiedError
import io
import asyncio
import time
import os
if not os.path.isdir("./SAVED"):
os.makedirs("./SAVED")
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
@borg.on(events.NewMessage(pattern=r"\.lslocal", outgoing=True))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
# dirname = event.pattern_match.group(1)
# tempdir = "localdir"
cmd = "ls -lh ./DOWNLOADS/"
# if dirname == tempdir:
    reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
OUTPUT = f"**Files in [WOLFUSERBOT](tg://WolfUserbotOT/) DOWNLOADS Folder:**\n"
stdout, stderr = await process.communicate()
if len(stdout) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(stdout)) as out_file:
out_file.name = "exec.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=OUTPUT,
reply_to=reply_to_id
)
await event.delete()
if stderr.decode():
await event.edit(f"**{stderr.decode()}**")
return
await event.edit(f"{OUTPUT}`{stdout.decode()}`")
# else:
# await event.edit("Unknown Command")
@borg.on(events.NewMessage(pattern=r"\.lsroot", outgoing=True))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
cmd = "ls -lh"
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
OUTPUT = f"**Files in root directory:**\n"
stdout, stderr = await process.communicate()
if len(stdout) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(stdout)) as out_file:
out_file.name = "exec.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=OUTPUT,
reply_to=reply_to_id
)
await event.delete()
if stderr.decode():
await event.edit(f"**{stderr.decode()}**")
return
await event.edit(f"{OUTPUT}`{stdout.decode()}`")
@borg.on(events.NewMessage(pattern=r"\.lssaved", outgoing=True))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
cmd = "ls ./SAVED/"
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
OUTPUT = f"**Files in SAVED directory:**\n"
stdout, stderr = await process.communicate()
if len(stdout) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(stdout)) as out_file:
out_file.name = "exec.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=OUTPUT,
reply_to=reply_to_id
)
await event.delete()
if stderr.decode():
await event.edit(f"**{stderr.decode()}**")
return
await event.edit(f"{OUTPUT}`{stdout.decode()}`")
@borg.on(events.NewMessage(pattern=r"\.rnsaved ?(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
input_str = event.pattern_match.group(1)
if "|" in input_str:
src, dst = input_str.split("|")
src = src.strip()
dst = dst.strip()
cmd = f"mv ./SAVED/{src} ./SAVED/{dst}"
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
OUTPUT = f"**Files in root directory:**\n"
stdout, stderr = await process.communicate()
if len(stdout) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(stdout)) as out_file:
out_file.name = "exec.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=OUTPUT,
reply_to=reply_to_id
)
await event.delete()
if stderr.decode():
await event.edit(f"**{stderr.decode()}**")
return
await event.edit(f"File renamed `{src}` to `{dst}`")
@borg.on(events.NewMessage(pattern=r"\.rnlocal ?(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
DELAY_BETWEEN_EDITS = 0.3
PROCESS_RUN_TIME = 100
input_str = event.pattern_match.group(1)
if "|" in input_str:
src, dst = input_str.split("|")
src = src.strip()
dst = dst.strip()
cmd = f"mv ./DOWNLOADS/{src} ./DOWNLOADS/{dst}"
reply_to_id = event.message.id
if event.reply_to_msg_id:
reply_to_id = event.reply_to_msg_id
start_time = time.time() + PROCESS_RUN_TIME
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
OUTPUT = f"**Files in root directory:**\n"
stdout, stderr = await process.communicate()
if len(stdout) > Config.MAX_MESSAGE_SIZE_LIMIT:
with io.BytesIO(str.encode(stdout)) as out_file:
out_file.name = "exec.text"
await borg.send_file(
event.chat_id,
out_file,
force_document=True,
allow_cache=False,
caption=OUTPUT,
reply_to=reply_to_id
)
await event.delete()
if stderr.decode():
await event.edit(f"**{stderr.decode()}**")
return
await event.edit(f"File renamed `{src}` to `{dst}`")
@borg.on(events.NewMessage(pattern=r"\.delsave (.*)", outgoing=True))
async def handler(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
pathtofile = f"./SAVED/{input_str}"
if os.path.isfile(pathtofile):
os.remove(pathtofile)
await event.edit("✅ File Deleted 🗑")
else:
await event.edit("⛔️File Not Found സാധനം കയ്യിലില്ല😬")
@borg.on(events.NewMessage(pattern=r"\.delocal (.*)", outgoing=True))
async def handler(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
pathtofile = f"./BotHub/{input_str}"
if os.path.isfile(pathtofile):
os.remove(pathtofile)
await event.edit("✅ File Deleted 🗑")
else:
await event.edit("⛔️File Not Found സാധനം കയ്യിലില്ല😬")
| 33.887931
| 91
| 0.616764
|
a724f41118084f0fc1f47dae0dd809a445be3dca
| 276
|
py
|
Python
|
tests/basics/fun_name.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 53
|
2018-12-12T15:36:54.000Z
|
2021-12-06T18:31:12.000Z
|
tests/basics/fun_name.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 175
|
2018-05-30T03:06:15.000Z
|
2019-02-06T23:54:24.000Z
|
tests/basics/fun_name.py
|
learnforpractice/micropython-cpp
|
004bc8382f74899e7b876cc29bfa6a9cc976ba10
|
[
"MIT"
] | 16
|
2016-09-13T17:56:59.000Z
|
2022-01-02T09:19:59.000Z
|
def Fun():
pass
class A:
def __init__(self):
pass
def Fun(self):
pass
try:
print(Fun.__name__)
print(A.__init__.__name__)
print(A.Fun.__name__)
print(A().Fun.__name__)
except AttributeError:
print('SKIP')
raise SystemExit
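
On implementations that expose __name__ on functions and bound methods (CPython does), the expected output of the test above is:

# Fun
# __init__
# Fun
# Fun
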
| 15.333333
| 30
| 0.608696
|
3572a2cb07d95ffaec261bdc63492ade734ea8b9
| 596
|
py
|
Python
|
python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
|
shenchaohua/Paddle
|
9c5942db13308d53cc115708058c1e885f4b57a3
|
[
"Apache-2.0"
] | 3
|
2018-04-16T23:35:32.000Z
|
2019-08-12T01:01:07.000Z
|
python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
|
shenchaohua/Paddle
|
9c5942db13308d53cc115708058c1e885f4b57a3
|
[
"Apache-2.0"
] | 9
|
2017-09-13T07:39:31.000Z
|
2017-10-18T05:58:23.000Z
|
python/paddle/trainer_config_helpers/tests/configs/test_detection_output_layer.py
|
shenchaohua/Paddle
|
9c5942db13308d53cc115708058c1e885f4b57a3
|
[
"Apache-2.0"
] | 2
|
2020-11-04T08:07:46.000Z
|
2020-11-06T08:33:24.000Z
|
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-5)
input_loc = data_layer(name='input_loc', size=16, height=16, width=1)
input_conf = data_layer(name='input_conf', size=8, height=1, width=8)
priorbox = data_layer(name='priorbox', size=32, height=4, width=8)
detout = detection_output_layer(
input_loc=input_loc,
input_conf=input_conf,
priorbox=priorbox,
num_classes=21,
nms_threshold=0.45,
nms_top_k=400,
keep_top_k=200,
confidence_threshold=0.01,
background_id=0,
name='test_detection_output')
outputs(detout)
| 24.833333
| 69
| 0.738255
|
40fe350bfcb0fece3168bfced6c0b135fab3f5c9
| 771
|
py
|
Python
|
Kaggle-Expedia/test_read_csv.py
|
ppik/playdata
|
62612dbaf26ed139d0ced800d199b897a0f2ba01
|
[
"MIT"
] | null | null | null |
Kaggle-Expedia/test_read_csv.py
|
ppik/playdata
|
62612dbaf26ed139d0ced800d199b897a0f2ba01
|
[
"MIT"
] | null | null | null |
Kaggle-Expedia/test_read_csv.py
|
ppik/playdata
|
62612dbaf26ed139d0ced800d199b897a0f2ba01
|
[
"MIT"
] | 1
|
2021-06-19T19:27:28.000Z
|
2021-06-19T19:27:28.000Z
|
import os
import timeit
import sys
def time_read_csv(filename, numlines=10, numtries=10):
s = 'pd.read_csv("{}", nrows={:d})'.format(filename, numlines)
setup = 'import pandas as pd'
return timeit.timeit(stmt=s, setup=setup, number=numtries)
if __name__=='__main__':
filename = 'data/train.csv'
numlines = 1000000
numtries = 1
filenames = [filename, filename+'.gz']
times = []
for filename in filenames:
if os.path.exists(filename):
time = time_read_csv(filename, numlines, numtries)
print("{}: {:f} s".format(filename, time/numtries))
times.append(time)
else:
times.append(float('nan'))
print('Time increase: {:.0f}%'.format((times[-1]-times[0])/times[0]*100))
| 28.555556
| 77
| 0.61738
|
e5839ba8d36da2f4068ddc00efdf2a33ad90dded
| 9,931
|
py
|
Python
|
detect.py
|
Dishant-P/S-Extension-patch-Official-research-module
|
6f1c887874b9afd4a9765d04821cbcdfc1113271
|
[
"MIT"
] | 1
|
2021-09-07T16:25:04.000Z
|
2021-09-07T16:25:04.000Z
|
detect.py
|
Dishant-P/S-Extension-patch-Official-research-module
|
6f1c887874b9afd4a9765d04821cbcdfc1113271
|
[
"MIT"
] | null | null | null |
detect.py
|
Dishant-P/S-Extension-patch-Official-research-module
|
6f1c887874b9afd4a9765d04821cbcdfc1113271
|
[
"MIT"
] | null | null | null |
import argparse
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from PIL import Image
from torch.autograd import Variable
def predict_image(image, device, classifier):
test_transforms = transforms.Compose([transforms.Resize(224),
transforms.ToTensor(),
#transforms.Normalize([0.485, 0.456, 0.406],
# [0.229, 0.224, 0.225])
])
image_tensor = test_transforms(image).float()
image_tensor = image_tensor.unsqueeze_(0)
input = Variable(image_tensor)
input = input.to(device)
output = classifier(input)
index = output.data.cpu().numpy().argmax()
return index
def detect(save_img=False):
    vehicle_classifier = torch.load('bus_car_truck_van_200.pth')  # Model trained on 200 images of bus, car, truck and van; a four-class ResNet152 classifier. torch.load is used here because the YOLO 'model' variable is only created further down.
to_pil = transforms.ToPILImage()
source, weights, view_img, save_txt, imgsz = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size
save_img = not opt.nosave and not source.endswith('.txt') # save inference images
webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
('rtsp://', 'rtmp://', 'http://', 'https://'))
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Initialize
set_logging()
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
stride = int(model.stride.max()) # model stride
imgsz = check_img_size(imgsz, s=stride) # check img_size
if half:
model.half() # to FP16
# Second-stage classifier
classify = False
if classify:
modelc = load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = check_imshow()
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz, stride=stride)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
t0 = time.time()
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections per image
if webcam: # batch_size >= 1
p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
else:
p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # img.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
image_classify = to_pil(im0)
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format
with open(txt_path + '.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or view_img: # Add bbox to image
label = f'{names[int(cls)]} {conf:.2f}'
if(int(cls) in [1,3,7,80]): #Classes of Car, Bus and Truck
#Use the classifier over im0
                            classifier_result = predict_image(image_classify, 'cuda', vehicle_classifier)  # classify the PIL-converted frame prepared above
else:
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
# Print time (inference + NMS)
print(f'{s}Done. ({t2 - t1:.3f}s)')
# Stream results
if view_img:
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
if dataset.mode == 'image':
cv2.imwrite(save_path, im0)
else: # 'video' or 'stream'
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
if vid_cap: # video
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else: # stream
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path += '.mp4'
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--source', type=str, default='data/images', help='source') # file/folder, 0 for webcam
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--update', action='store_true', help='update all models')
parser.add_argument('--project', default='runs/detect', help='save results to project/name')
parser.add_argument('--name', default='exp', help='save results to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
print(opt)
#check_requirements(exclude=('pycocotools', 'thop'))
with torch.no_grad():
if opt.update: # update all models (to fix SourceChangeWarning)
for opt.weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']:
detect()
strip_optimizer(opt.weights)
else:
detect()
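
predict_image expects a PIL image, since its transform pipeline starts with Resize, which is why the detection loop prepares image_classify with to_pil before classifying. A standalone sketch of that second-stage step, reusing the predict_image helper defined above (classify_crop and the checkpoint path are illustrative, and a CUDA device is assumed):

import torch
from torchvision import transforms

def classify_crop(frame, classifier, device='cuda'):
    # frame: HxWx3 uint8 array (e.g. an OpenCV frame or a detection crop).
    pil_image = transforms.ToPILImage()(frame)
    return predict_image(pil_image, device, classifier)

# classifier = torch.load('bus_car_truck_van_200.pth').to('cuda').eval()
# label_index = classify_crop(im0, classifier)
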
| 48.208738
| 156
| 0.593596
|
e46e5b24da35dbf48dc9bf61f34bd8a29a2c0b37
| 12,695
|
py
|
Python
|
PostDiffMixture/simulations_folder/Old/checkpickle_simbased_bsprop_armprob_EFFECT_backup.py
|
SIGKDDanon/SIGKDD2021DeAnonV2
|
76f0373ec42ab55feefed3f4ce4bf4d532b51dd2
|
[
"Apache-2.0"
] | null | null | null |
PostDiffMixture/simulations_folder/Old/checkpickle_simbased_bsprop_armprob_EFFECT_backup.py
|
SIGKDDanon/SIGKDD2021DeAnonV2
|
76f0373ec42ab55feefed3f4ce4bf4d532b51dd2
|
[
"Apache-2.0"
] | null | null | null |
PostDiffMixture/simulations_folder/Old/checkpickle_simbased_bsprop_armprob_EFFECT_backup.py
|
SIGKDDanon/SIGKDD2021DeAnonV2
|
76f0373ec42ab55feefed3f4ce4bf4d532b51dd2
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
import glob
import numpy as np
#import explorE_delete as ed
#figure(num=None, figsize=(15, 15), dpi=60, facecolor='w', edgecolor='k')
#IPW https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/bar_stacked.html
to_check = '2019-08-08_13:19:56/bbUniform0.1BU0.1DfByTrial.pkl'
to_check = 'sim1-2sims/bb0.1BB0.1Df.pkl'
to_check = '2019-08-09_12:39:47/bbEqualMeansEqualPrior32BB0N32Df.pkl'
to_check = '2019-08-09_12:39:47/bbEqualMeansEqualPrior785BB0N785Df.pkl'
to_check = '2019-08-09_12:49:37-20sims_t1/bbEqualMeansEqualPrior785BB0N785Df.pkl' #10?
def hist_and_cutoffs(df = None, to_check = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
title = None, plot = True, to_check_ipw_mean1 = None, to_check_ipw_mean2 = None):
'''
TODO rename to_check_ipw to to_check_ipw_wald_stat
'''
if load_df == True:
with open(to_check, 'rb') as f:
df = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
#print(data)
step_sizes = df['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
if plot == True:
fig, ax = plt.subplots(2,2)
fig.set_size_inches(14.5, 10.5)
ax = ax.ravel()
i = 0
percenticle_dict_left = {}
percentile_dict_right = {}
for num_steps in step_sizes:
if to_check_ipw != None:
to_check_ipw_f = to_check_ipw.format(num_steps)
wald_ipw_per_sim = np.load(to_check_ipw_f)
ipw_mean1 = np.mean(np.load(to_check_ipw_mean1.format(num_steps))) #E[p_hat_mle]
ipw_mean2 = np.mean(np.load(to_check_ipw_mean2.format(num_steps)))
df_unif_for_num_steps = df_unif[df_unif['num_steps'] == num_steps]
df_unif_for_num_steps_wald = df_unif_for_num_steps['wald_type_stat']
df_for_num_steps = df[df['num_steps'] == num_steps]
mle_mean1 = np.mean(df_for_num_steps['mean_1'])
mle_mean2 = np.mean(df_for_num_steps['mean_2'])
unif_mean1 = np.mean(df_unif_for_num_steps['mean_1'])
unif_mean2 = np.mean(df_unif_for_num_steps['mean_2'])
df_wald_type_per_sim = df_for_num_steps['wald_type_stat']
# df_unif_for_num_steps = np.ma.masked_invalid(df_unif_for_num_steps)
#print(np.mean(df_unif_for_num_steps))
if plot == True:
#ax[i].hist(df_unif_for_num_steps, density = True)
ax[i].hist(df_unif_for_num_steps_wald, normed = True, alpha = 0.5, \
label = "Uniform: \n$\mu$ = {} \n $\sigma$ = {} \n bias($\hatp_1$) = {} \n bias($\hatp_2$) = {}".format(
np.round(np.mean(df_unif_for_num_steps_wald), 3),\
np.round(np.std(df_unif_for_num_steps_wald), 3), np.round(unif_mean1 - 0.5, 3), np.round(unif_mean2 - 0.5, 3)
)
)
if to_check_ipw != None:
ax[i].hist(wald_ipw_per_sim, \
normed = True, alpha = 0.5,\
label = "\n IPW: \n $\mu$ = {} \n$\sigma$ = {} \n bias($\hatp_1$) = {} \n bias($\hatp_2$) = {}".format(
np.round(np.mean(wald_ipw_per_sim), 3), \
np.round(np.std(wald_ipw_per_sim), 3), \
np.round(ipw_mean1 - 0.5,3), np.round(ipw_mean2 - 0.5,3)
)
)
ax[i].hist(df_wald_type_per_sim, \
normed = True, alpha = 0.5, \
label = "\n MLE: \n $\mu$ = {} \n $\sigma$ = {} \n bias($\hatp_1$) = {} \n bias($\hatp_2$) = {}".format(
np.round(np.mean(df_wald_type_per_sim), 3), \
np.round(np.std(df_wald_type_per_sim), 3), \
np.round(mle_mean1 - 0.5,3), np.round(mle_mean2 - 0.5,3)
)
)
ax[i].set_xlabel("number of participants = {} = {}".format(size_vars[i], num_steps))
ax[i].axvline(x = np.percentile(df_wald_type_per_sim, 2.5), linestyle = "--", color = "black")
ax[i].axvline(x = np.percentile(df_wald_type_per_sim, 97.5), linestyle = "--", color = "black")
# ax[i].text(0.85, 0.5,'Mean = {}, Std = {}'.format(np.mean(df_wald_type_per_sim), np.std(df_wald_type_per_sim)),
# horizontalalignment='center',
# verticalalignment='center',
# transform = ax[i].transAxes)
# ax[i]
mu = 0
variance = 1
sigma = np.sqrt(variance)
x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
ax[i].plot(x, stats.norm.pdf(x, mu, sigma))
ax[i].legend()
#print("mean, std", np.mean(df_wald_type_per_sim), np.std(df_wald_type_per_sim))
percenticle_dict_left[str(num_steps)] = np.percentile(df_wald_type_per_sim, 2.5)
percentile_dict_right[str(num_steps)] = np.percentile(df_wald_type_per_sim, 97.5)
i+=1
if plot == True:
fig.suptitle(title)
fig.tight_layout(rect=[0, 0.03, 1, 0.90])
if not os.path.isdir("plots"):
            os.mkdir("plots")
print("saving to ", "plots/{}.png".format(title))
fig.savefig("plots/{}.png".format(title))
plt.show()
plt.clf()
plt.close()
return percenticle_dict_left, percentile_dict_right
def stacked_bar_plot_with_cutoff(df = None, to_check = None, to_check_unif = None, to_check_ipw = None, n = None, num_sims = None, load_df = True, \
title = None, percentile_dict_left = None, \
percentile_dict_right = None, bs_prop = 0.0,\
ax = None, ax_idx = None, df_ts_curr = None):
if load_df == True:
with open(to_check, 'rb') as f:
df = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ipw != None:
ipw_t1_list = np.load(to_check_ipw)
#print(data)
step_sizes = df['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
t1_list = []
t1_wald_list = []
wald_stat_list = []
wald_pval_list = []
arm1_mean_list = []
arm2_mean_list = []
arm1_std_list = []
arm2_std_list = []
ratio_mean_list = []
ratio_std_list = []
t1_simbased_list = []
t1_list_unif = []
t1_wald_list_unif = []
df_ts_curr_list = np.array(list(df_ts_curr["Prior between"]))
for num_steps in step_sizes:
df_for_num_steps = df[df['num_steps'] == num_steps]
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps]
pval_for_num_steps = df_for_num_steps['pvalue'].mean()
num_replications = len(df_for_num_steps)
# if use_pval == True:
num_rejected = np.sum(df_for_num_steps['pvalue'] < .05)
num_rejected_unif = np.sum(df_for_num_steps_unif['pvalue'] < .05)
t1 =num_rejected / num_replications
t1_unif =num_rejected_unif / num_replications
t1_list_unif.append(t1_unif)
t1_list.append(t1)
ind = np.arange(2*len(step_sizes), step=2)
# print(ind)
# print(step_sizes)
ax.set_xticks(ind)
ax.set_xticklabels(step_sizes)
width = 0.5
capsize = width*5
width_total = 2*width
t1_list = np.array(t1_list)
t1_list_unif = np.array(t1_list_unif)#TODO REMOVE 0.2, for PLOTTING low sims
t1_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list*(1-t1_list)/num_sims) #TODO should be 95 CI for Proportion
df_ts_curr_list_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(df_ts_curr_list*(1-df_ts_curr_list)/num_sims)
t1_se_unif = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_unif*(1-t1_list_unif)/num_sims)
print(t1_se_unif) #note that power goes to 1.0 for unif, thus error bars
#print(t1_se_unif)
p1 = ax.bar(ind, t1_list, width = width, yerr = t1_se, \
ecolor='black', capsize=capsize, color = 'yellow', edgecolor='black')
p2 = ax.bar(ind-width, t1_list_unif, width = width,\
yerr = t1_se_unif, ecolor='black', \
capsize=capsize, color = 'red', \
edgecolor='black')
p3 = ax.bar(ind+width, df_ts_curr_list, width = width,\
yerr = df_ts_curr_list_se, ecolor='black', \
capsize=capsize, color = 'blue', \
edgecolor='black')
if ax_idx == 2:
leg1 = ax.legend((p1[0], p3[0], p2[0]), ('Epsilon Greedy Chi Squared', "Thompson Sampling (Prior Between)","Uniform Chi Squared"), bbox_to_anchor=(1.0, 1.6))
#leg2 = ax.legend(loc = 2)
ax.add_artist(leg1)
# plt.tight_layout()
# plt.title(title)
# if ax_idx == 6 or ax_idx == 7 or ax_idx == 8:
ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
ax.set_ylim(0, 1.03)
ax.axhline(y=0.80, linestyle='--')
return [t1_list_unif, t1_list] #returns [UR Eps Greedy], in this case, need to return for each step size, but only plotting for one bs, so save step size by model (4x2)
def parse_dir(root, root_cutoffs):
num_sims = 500
arm_prob= 0.5
arm_prob_list = [0.2, 0.5, 0.8]
es_list = [0.5, 0.3, 0.1]
#es_list = [0.5, 0.3]
n_list = [32, 88, 785]
epsilon = 0.1
#EpsilonGreedyIsEffect/num_sims=5armProb=0.5/es=0.3epsilon=0.1/
root_dir = root + "/num_sims={}armProb={}".format(num_sims, arm_prob)
fig, ax = plt.subplots(1,3)
#fig.set_size_inches(17.5, 13.5)
ax = ax.ravel()
i = 0
df_list_ts = pd.read_pickle("banditsGraphs/180114BinaryPower.pkl")
for es in es_list:
bs = 1
es_dir = root_dir + "/es={}epsilon={}/".format(es, epsilon)
to_check = glob.glob(es_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has uniform and TS, 34 in 348!!
assert(len(glob.glob(es_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_unif = glob.glob(es_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))[0]
assert(len(glob.glob(es_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))) == 1)
# to_check_cutoffs = glob.glob(outcome_dir_cutoffs + "/*Prior*{}*{}Df.pkl".format(bs, es))[0] #Has uniform and TS
# assert(len(glob.glob(outcome_dir_cutoffs + "/*Prior*{}*{}Df.pkl".format(bs, es))) == 1)
#title = "Power \n n = {} and {} sims \n Initial Batch Size {} and Batch Size {} \n Arm Prob. {}".format(n, num_sims, bs, bs, arm_prob)
#percentile_dict_left, percentile_dict_right = hist_and_cutoffs(to_check = to_check_cutoffs, to_check_unif = to_check_unif,\
# n = n, num_sims = num_sims, title = title, plot = False) #Note title not used here per say
df_ts_curr = df_list_ts[2-i]
next_df = stacked_bar_plot_with_cutoff(to_check = to_check,to_check_unif = to_check_unif,\
n = n_list[i], num_sims = num_sims,
ax = ax[i], ax_idx = i, df_ts_curr = df_ts_curr)
df = pd.DataFrame(next_df, columns = ["n/2","n","2n","4n"])
df.index = ["Uniform Random","Epsilon Greedy"]
df.to_csv("Tables/Power_n={}_es={}_numsims={}.csv".format(n_list[i], es,num_sims))
ax[i].set_title("Effect Size = {} \n n = {}".format(es, n_list[i]))
i += 1
title = "Power Across {} Simulations For Epsilon = {}".format(num_sims, epsilon)
#ax[i].set_title(title, fontsize = 55)
#i +=1
#fig.suptitle("Type One Error Rates Across {} Simulations".format(num_sims))
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.95])
#handles, labels = ax[i-1].get_legend_handles_labels()
#fig.legend(handles, labels, loc='upper right', prop={'size': 50})
#fig.tight_layout()
if not os.path.isdir("plots"):
os.mkdir("plots")
print("saving to ", "plots/{}.png".format(title))
fig.tight_layout()
fig.subplots_adjust(top=.8)
fig.savefig("plots/{}.svg".format(title), bbox_inches = 'tight')
plt.show()
plt.clf()
plt.close()
root = "EpsilonGreedyIsEffect"
#parse_dir(root, root_cutoffs)
parse_dir(root, root)
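
Both plotting helpers compute their error bars from the same normal-approximation confidence interval for a rejection proportion, scaled by a t quantile. Written out on its own (a sketch of the formula used above, no new dependencies):

import numpy as np
from scipy import stats

def proportion_ci_halfwidth(p_hat, num_sims, alpha=0.05):
    # Half-width of the approximate 95% CI used for the error bars:
    # t_{1 - alpha/2, num_sims} * sqrt(p_hat * (1 - p_hat) / num_sims)
    return stats.t.ppf(1 - alpha / 2, num_sims) * np.sqrt(p_hat * (1 - p_hat) / num_sims)

print(proportion_ci_halfwidth(0.8, 500))  # e.g. error bar half-width for power 0.8 over 500 sims
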
| 39.303406
| 172
| 0.578968
|
9b718c18612170b944fbad3d099770130031b6c3
| 6,734
|
py
|
Python
|
redisrpc/base.py
|
munisisazade/redis-pub-sub
|
782d556e2ebdd8839edaeef8678f94db0c4e6a54
|
[
"MIT"
] | 2
|
2020-03-16T07:14:32.000Z
|
2021-02-14T11:19:37.000Z
|
redisrpc/base.py
|
munisisazade/redis-pub-sub
|
782d556e2ebdd8839edaeef8678f94db0c4e6a54
|
[
"MIT"
] | null | null | null |
redisrpc/base.py
|
munisisazade/redis-pub-sub
|
782d556e2ebdd8839edaeef8678f94db0c4e6a54
|
[
"MIT"
] | 2
|
2020-01-26T15:07:56.000Z
|
2021-04-03T17:48:13.000Z
|
import os
import time
import base64
import json
import uuid
import platform
import itertools
import datetime
import traceback
from urllib.parse import urlparse
from redisrpc.version import VERSION
try:
import redis
except:
pass
class BasePubSub(object):
"""
    Base class implementing the publish–subscribe
    messaging pattern over Redis.
"""
def __init__(self, channel_name):
"""
Initialize and subscribe channel
"""
self.rdb = redis.StrictRedis.from_url(os.getenv("REDIS_URI"))
self.check_connection_redis()
self.channel = channel_name
self.pubsub = self.rdb.pubsub()
self.events = {}
self.token = str(uuid.uuid4())
def check_connection_redis(self):
try:
self.rdb.set("testing", 1)
assert int(self.rdb.get("testing")) == 1
self.rdb.delete("testing")
except:
raise PermissionError(
"Cannot read write redis database bellow credentials\n"
f"redis_uri: {os.getenv('REDIS_URI')}"
)
def listen(self):
try:
self.print_start()
self.log_print("Pubsub is listen...")
self.pubsub.subscribe(self.channel)
while True:
message = self.pubsub.get_message()
if message:
if message["type"] == "message":
data = self.__convert_to_python(message["data"])
if data["token"] != self.token:
self.log_print("new request", type="DEBUG")
event_name = data["event_name"]
response_data = data["data"]
self.event_handler(event_name, response_data)
time.sleep(0.3)
except KeyboardInterrupt:
self.log_print("Pubsub is stoping...")
time.sleep(1)
self.log_print("Shutdown")
def event_handler(self, event_name, data):
if self.events.get(event_name, False):
event = self.events.get(event_name)
try:
response = event(data)
if response:
self.log_print(f"Success response from {event_name}", "DEBUG")
self.__send_reponse({
"token": self.token,
"event_name": event_name,
"data": response
})
else:
self.log_print(f"Empty response from {event_name}", "WARNING")
except:
self.log_print(traceback.format_exc(), "FATAL")
else:
self.log_print(f"Can't find `{event_name}` event name", "ERROR")
return {
"error": f"Can't find `{event_name}` event name"
}
def print_start(self):
start_text = f"""
_._
_.-``__ ''-._
_.-`` `. `_. ''-._ Redis Publish–subscribe Remote Procedure Call system
.-`` .-```. ```\/ _.,_ ''-._ Connection: {self.connection_uri()}
( ' , .-` | `, ) Channel name: {self.channel}
|`-._`-...-` __...-.``-._|'` _.-'| Channel token: {self.token}
| `-._ `._ / _.-' | Hostname: {platform.node()}
`-._ `-._ `-./ _.-' _.-' Running
|`-._`-._ `-.__.-' _.-'_.-'| PID: {os.getpid()}
| `-._`-._ _.-'_.-' | Name: RedisPubSub {VERSION}v
`-._ `-._`-.__.-'_.-' _.-' https://github.com/munisisazade/redis-pub-sub
|`-._`-._ `-.__.-' _.-'_.-'|
| `-._`-._ _.-'_.-' |
`-._ `-._`-.__.-'_.-' _.-'
`-._ `-.__.-' _.-'
`-._ _.-'
`-.__.-'
"""
print(start_text)
print("[events]")
start_count = itertools.count(1)
for event_name in self.events.keys():
print(f"{next(start_count)})", event_name)
print("")
self.log_print("Starting...")
def log_print(self, text, type="INFO"):
now = datetime.datetime.today()
print(f"[{now.strftime('%Y-%m-%d %H:%M:%f')}: {type}] {text}")
def connection_uri(self):
uri = urlparse(os.getenv("REDIS_URI"))
host = uri.netloc
paswd = ""
if ":" in host and "@" in host:
paswd = host[host.index(":"):host.index("@")]
return os.getenv("REDIS_URI").replace(paswd, "****")
def __encode_base64(self, data):
return base64.b64encode(json.dumps(data).encode("utf-8"))
def __convert_to_python(self, byte):
if isinstance(byte, bytes):
response = base64.b64decode(byte).decode("utf-8")
return json.loads(response)
elif isinstance(byte, int):
return byte
else:
raise TypeError(
f"a bytes-like object is required, not '{type(byte).__name__}'"
)
def register(self, function, name=None):
name = name if name else function.__name__
if callable(function):
self.events[name] = function
else:
raise ValueError("Event function must be callable object")
def __send_reponse(self, data):
decode = self.__encode_base64(data)
self.rdb.publish(self.channel, decode)
def send(self, event_name, data, wait_response_time=2):
resp = {
"token": self.token,
"event_name": event_name,
"data": data
}
decode = self.__encode_base64(resp)
self.rdb.publish(self.channel, decode)
print("Send")
send_time = time.time()
self.pubsub.subscribe(self.channel)
while True:
message = self.pubsub.get_message()
if message:
if message["type"] == "message":
data = self.__convert_to_python(message["data"])
if data["token"] != self.token:
self.pubsub.unsubscribe(self.channel)
return data["data"]
response_time = time.time()
if int(response_time - send_time) > wait_response_time:
print("Cannot get response from server handler")
return None
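
A minimal usage sketch, assuming REDIS_URI points at a reachable Redis instance: a worker registers handlers and blocks in listen(), while a client on the same channel calls send() and waits for the matching response (channel and handler names below are illustrative):

import os
os.environ.setdefault("REDIS_URI", "redis://localhost:6379/0")  # assumed local Redis

def add(data):
    # Event handler: receives the decoded payload and returns the response payload.
    return {"sum": data["a"] + data["b"]}

server = BasePubSub("demo-channel")
server.register(add)            # exposed under the event name "add"
# server.listen()               # blocks; run this in the worker process

client = BasePubSub("demo-channel")
result = client.send("add", {"a": 1, "b": 2}, wait_response_time=2)
print(result)                   # {"sum": 3} once the worker has answered, else None
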
| 37.831461
| 120
| 0.472973
|
54686bcaea51ab1ca85cc3e6940a25c2dfa5b505
| 2,631
|
py
|
Python
|
benchmarks/report.py
|
nammingi/haste
|
459608cfdb4de4d28d2213df8e71f005be8d0f35
|
[
"Apache-2.0"
] | 291
|
2020-01-29T19:46:28.000Z
|
2022-03-31T22:41:27.000Z
|
benchmarks/report.py
|
nammingi/haste
|
459608cfdb4de4d28d2213df8e71f005be8d0f35
|
[
"Apache-2.0"
] | 43
|
2020-02-24T22:25:13.000Z
|
2022-03-07T20:08:43.000Z
|
benchmarks/report.py
|
nammingi/haste
|
459608cfdb4de4d28d2213df8e71f005be8d0f35
|
[
"Apache-2.0"
] | 28
|
2020-02-07T02:51:19.000Z
|
2022-01-12T08:44:15.000Z
|
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
def extract(x, predicate):
return np.array(list(filter(predicate, x)))
def main(args):
np.set_printoptions(suppress=True)
A = np.loadtxt(args.A, delimiter=',')
B = np.loadtxt(args.B, delimiter=',')
faster = 1.0 - A[:,-1] / B[:,-1]
print(f'A is faster than B by:')
print(f' mean: {np.mean(faster)*100:7.4}%')
print(f' std: {np.std(faster)*100:7.4}%')
print(f' median: {np.median(faster)*100:7.4}%')
print(f' min: {np.min(faster)*100:7.4}%')
print(f' max: {np.max(faster)*100:7.4}%')
for batch_size in np.unique(A[:,0]):
for input_size in np.unique(A[:,2]):
a = extract(A, lambda x: x[0] == batch_size and x[2] == input_size)
b = extract(B, lambda x: x[0] == batch_size and x[2] == input_size)
fig, ax = plt.subplots(dpi=200)
ax.set_xticks(a[:,1])
ax.set_xticklabels(a[:,1].astype(np.int32), rotation=60)
ax.tick_params(axis='y', which='both', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.title(f'batch size={int(batch_size)}, input size={int(input_size)}')
plt.plot(a[:,1], a[:,-1], color=args.color[0])
plt.plot(a[:,1], b[:,-1], color=args.color[1])
plt.xlabel('hidden size')
plt.ylabel('time (ms)')
plt.legend(args.name, frameon=False)
plt.tight_layout()
if args.save:
os.makedirs(args.save[0], exist_ok=True)
plt.savefig(f'{args.save[0]}/report_n={int(batch_size)}_c={int(input_size)}.png', dpi=200)
else:
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', nargs=2, default=['A', 'B'])
parser.add_argument('--color', nargs=2, default=['#1f77b4', '#2ca02c'])
parser.add_argument('--save', nargs=1, default=None)
parser.add_argument('A')
parser.add_argument('B')
main(parser.parse_args())
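
The headline numbers come from faster = 1 - A/B on the last CSV column (time), i.e. the fraction of B's runtime that A saves. A tiny numeric check of that interpretation, with made-up timings:

import numpy as np

a_times = np.array([8.0, 9.0])    # hypothetical times (ms) for A
b_times = np.array([10.0, 12.0])  # hypothetical times (ms) for B
print(1.0 - a_times / b_times)    # [0.2 0.25] -> A is 20% and 25% faster than B
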
| 36.041096
| 98
| 0.630179
|
2a0fef16f5bbc828fcb24e658d0454f825452eab
| 9,064
|
py
|
Python
|
edparser/transform/txt.py
|
attardi/iwpt-shared-task-2020
|
3a70c42d53716678776afcccf02d896655777353
|
[
"Apache-2.0"
] | 3
|
2020-06-16T12:58:57.000Z
|
2021-06-07T21:07:37.000Z
|
edparser/transform/txt.py
|
attardi/iwpt-shared-task-2020
|
3a70c42d53716678776afcccf02d896655777353
|
[
"Apache-2.0"
] | 6
|
2020-06-22T07:46:49.000Z
|
2022-02-10T02:22:14.000Z
|
edparser/transform/txt.py
|
attardi/iwpt-shared-task-2020
|
3a70c42d53716678776afcccf02d896655777353
|
[
"Apache-2.0"
] | 2
|
2020-06-27T07:32:43.000Z
|
2020-11-10T07:21:03.000Z
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-10-24 15:07
import functools
from abc import ABC
from typing import Tuple, Union, List, Iterable
import tensorflow as tf
from edparser.common.transform_tf import Transform
from edparser.common.vocab_tf import VocabTF
from edparser.utils.io_util import get_resource
from edparser.utils.lang.zh.char_table import CharTable
def generate_words_per_line(file_path):
with open(file_path, encoding='utf-8') as src:
for line in src:
cells = line.strip().split()
if not cells:
continue
yield cells
def words_to_bmes(words):
tags = []
for w in words:
if not w:
raise ValueError('{} contains None or zero-length word {}'.format(str(words), w))
if len(w) == 1:
tags.append('S')
else:
tags.extend(['B'] + ['M'] * (len(w) - 2) + ['E'])
return tags
def bmes_to_words(chars, tags):
result = []
if len(chars) == 0:
return result
word = chars[0]
for c, t in zip(chars[1:], tags[1:]):
if t == 'B' or t == 'S':
result.append(word)
word = ''
word += c
if len(word) != 0:
result.append(word)
return result
def extract_ngram_features_and_tags(sentence, bigram_only=False, window_size=4, segmented=True):
"""
Feature extraction for windowed approaches
See Also https://github.com/chqiwang/convseg/
Parameters
----------
sentence
bigram_only
window_size
segmented
Returns
-------
"""
chars, tags = bmes_of(sentence, segmented)
chars = CharTable.normalize_chars(chars)
ret = []
ret.append(chars)
# TODO: optimize ngram generation using https://www.tensorflow.org/api_docs/python/tf/strings/ngrams
ret.extend(extract_ngram_features(chars, bigram_only, window_size))
ret.append(tags)
return tuple(ret[:-1]), ret[-1] # x, y
def bmes_of(sentence, segmented):
if segmented:
chars = []
tags = []
words = sentence.split()
for w in words:
chars.extend(list(w))
if len(w) == 1:
tags.append('S')
else:
tags.extend(['B'] + ['M'] * (len(w) - 2) + ['E'])
else:
chars = list(sentence)
tags = ['S'] * len(chars)
return chars, tags
def extract_ngram_features(chars, bigram_only, window_size):
ret = []
if bigram_only:
chars = ['', ''] + chars + ['', '']
ret.append([a + b if a and b else '' for a, b in zip(chars[:-4], chars[1:])])
ret.append([a + b if a and b else '' for a, b in zip(chars[1:-3], chars[2:])])
ret.append([a + b if a and b else '' for a, b in zip(chars[2:-2], chars[3:])])
ret.append([a + b if a and b else '' for a, b in zip(chars[3:-1], chars[4:])])
elif window_size > 0:
chars = ['', '', ''] + chars + ['', '', '']
# single char
if window_size >= 1:
ret.append(chars[3:-3])
if window_size >= 2:
# bi chars
ret.append([a + b if a and b else '' for a, b in zip(chars[2:], chars[3:-3])])
ret.append([a + b if a and b else '' for a, b in zip(chars[3:-3], chars[4:])])
if window_size >= 3:
# tri chars
ret.append(
[a + b + c if a and b and c else '' for a, b, c in zip(chars[1:], chars[2:], chars[3:-3])])
ret.append(
[a + b + c if a and b and c else '' for a, b, c in zip(chars[2:], chars[3:-3], chars[4:])])
ret.append(
[a + b + c if a and b and c else '' for a, b, c in zip(chars[3:-3], chars[4:], chars[5:])])
if window_size >= 4:
# four chars
ret.append([a + b + c + d if a and b and c and d else '' for a, b, c, d in
zip(chars[0:], chars[1:], chars[2:], chars[3:-3])])
ret.append([a + b + c + d if a and b and c and d else '' for a, b, c, d in
zip(chars[1:], chars[2:], chars[3:-3], chars[4:])])
ret.append([a + b + c + d if a and b and c and d else '' for a, b, c, d in
zip(chars[2:], chars[3:-3], chars[4:], chars[5:])])
ret.append([a + b + c + d if a and b and c and d else '' for a, b, c, d in
zip(chars[3:-3], chars[4:], chars[5:], chars[6:])])
return ret
def generate_ngram_bmes(file_path, bigram_only=False, window_size=4, gold=True):
with open(file_path, encoding='utf-8') as src:
for line in src:
sentence = line.strip()
if not sentence:
continue
yield extract_ngram_features_and_tags(sentence, bigram_only, window_size, gold)
def vocab_from_txt(txt_file_path, bigram_only=False, window_size=4, **kwargs) -> Tuple[VocabTF, VocabTF, VocabTF]:
char_vocab, ngram_vocab, tag_vocab = VocabTF(), VocabTF(), VocabTF(pad_token=None, unk_token=None)
for X, Y in generate_ngram_bmes(txt_file_path, bigram_only, window_size, gold=True):
char_vocab.update(X[0])
for ngram in X[1:]:
ngram_vocab.update(filter(lambda x: x, ngram))
tag_vocab.update(Y)
return char_vocab, ngram_vocab, tag_vocab
def dataset_from_txt(txt_file_path: str, char_vocab: VocabTF, ngram_vocab: VocabTF, tag_vocab: VocabTF, bigram_only=False,
window_size=4, segmented=True, batch_size=32, shuffle=None, repeat=None, prefetch=1):
generator = functools.partial(generate_ngram_bmes, txt_file_path, bigram_only, window_size, segmented)
return dataset_from_generator(generator, char_vocab, ngram_vocab, tag_vocab, bigram_only, window_size, batch_size,
shuffle, repeat, prefetch)
def dataset_from_generator(generator, char_vocab, ngram_vocab, tag_vocab, bigram_only=False, window_size=4,
batch_size=32, shuffle=None, repeat=None, prefetch=1):
if bigram_only:
ngram_size = 4
else:
ngram_size = window_size * (window_size + 1) // 2
vec_dim = 2 + ngram_size
shapes = tuple([[None]] * (vec_dim - 1)), [None]
types = tuple([tf.string] * (vec_dim - 1)), tf.string
defaults = tuple([char_vocab.pad_token] + [
ngram_vocab.pad_token if ngram_vocab else char_vocab.pad_token] * ngram_size), (
tag_vocab.pad_token if tag_vocab.pad_token else tag_vocab.first_token)
dataset = tf.data.Dataset.from_generator(generator, output_shapes=shapes, output_types=types)
if shuffle:
if isinstance(shuffle, bool):
shuffle = 1024
dataset = dataset.shuffle(shuffle)
if repeat:
dataset = dataset.repeat(repeat)
dataset = dataset.padded_batch(batch_size, shapes, defaults).prefetch(prefetch)
return dataset
class TxtFormat(Transform, ABC):
def file_to_inputs(self, filepath: str, gold=True):
filepath = get_resource(filepath)
with open(filepath, encoding='utf-8') as src:
for line in src:
sentence = line.strip()
if not sentence:
continue
yield sentence
class TxtBMESFormat(TxtFormat, ABC):
def file_to_inputs(self, filepath: str, gold=True):
max_seq_len = self.config.get('max_seq_len', False)
if max_seq_len:
delimiter = set()
delimiter.update('。!?:;、,,;!?、,')
for text in super().file_to_inputs(filepath, gold):
chars, tags = bmes_of(text, gold)
if max_seq_len and len(chars) > max_seq_len:
short_chars, short_tags = [], []
for idx, (char, tag) in enumerate(zip(chars, tags)):
short_chars.append(char)
short_tags.append(tag)
if len(short_chars) >= max_seq_len and char in delimiter:
yield short_chars, short_tags
short_chars, short_tags = [], []
if short_chars:
yield short_chars, short_tags
else:
yield chars, tags
def input_is_single_sample(self, input: Union[List[str], List[List[str]]]) -> bool:
return isinstance(input, str)
def inputs_to_samples(self, inputs, gold=False):
for chars, tags in (inputs if gold else zip(inputs, [None] * len(inputs))):
if not gold:
tags = [self.tag_vocab.safe_pad_token] * len(chars)
chars = CharTable.normalize_chars(chars)
yield chars, tags
def Y_to_outputs(self, Y: Union[tf.Tensor, Tuple[tf.Tensor]], gold=False, inputs=None, X=None) -> Iterable:
yield from self.Y_to_tokens(self.tag_vocab, Y, gold, inputs)
@staticmethod
def Y_to_tokens(tag_vocab, Y, gold, inputs):
if not gold:
Y = tf.argmax(Y, axis=2)
for text, ys in zip(inputs, Y):
tags = [tag_vocab.idx_to_token[int(y)] for y in ys[:len(text)]]
yield bmes_to_words(list(text), tags)
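
words_to_bmes and bmes_to_words are inverses on well-formed input: the former emits one tag per character (S for single-character words, otherwise B/M/E), the latter rebuilds the segmentation from characters plus tags. A quick round-trip check using the functions defined above:

words = ["你好", "世界", "!"]
chars = [c for w in words for c in w]
tags = words_to_bmes(words)
print(tags)                        # ['B', 'E', 'B', 'E', 'S']
print(bmes_to_words(chars, tags))  # ['你好', '世界', '!']
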
| 38.570213
| 122
| 0.575905
|
74e3bce185f65bb181c055a03eae589a3e663533
| 6,322
|
py
|
Python
|
PythonClient/multirotor/wav_reader.py
|
altay13/AirSim
|
a42fb69e6a692ec154f25abd80c0b49ef45caac4
|
[
"MIT"
] | 6,115
|
2019-05-07T05:29:14.000Z
|
2022-03-31T12:46:36.000Z
|
PythonClient/multirotor/wav_reader.py
|
altay13/AirSim
|
a42fb69e6a692ec154f25abd80c0b49ef45caac4
|
[
"MIT"
] | 2,306
|
2019-05-07T00:17:31.000Z
|
2022-03-31T23:31:46.000Z
|
PythonClient/multirotor/wav_reader.py
|
altay13/AirSim
|
a42fb69e6a692ec154f25abd80c0b49ef45caac4
|
[
"MIT"
] | 2,059
|
2019-05-07T03:07:43.000Z
|
2022-03-31T06:31:19.000Z
|
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: wav_reader.py
# Authors: Chris Lovett
#
# Requires: Python 3.x
#
###################################################################################################
import audioop
import math
import wave
import numpy as np
import pyaudio
class WavReader:
def __init__(self, sample_rate=16000, channels=1, auto_scale=True):
""" Initialize the wav reader with the type of audio you want returned.
sample_rate Rate you want audio converted to (default 16 kHz)
channels Number of channels you want output (default 1)
auto_scale Whether to scale numbers to the range -1 to 1.
"""
self.input_stream = None
self.audio = pyaudio.PyAudio()
self.wav_file = None
self.requested_channels = int(channels)
self.requested_rate = int(sample_rate)
self.buffer_size = 0
self.sample_width = 0
self.read_size = None
self.dtype = None
self.auto_scale = auto_scale
self.audio_scale_factor = 1
self.tail = None
def open(self, filename, buffer_size, speaker=None):
""" open a wav file for reading
        buffer_size    Number of audio samples to return on each read() call
speaker Optional output speaker to send converted audio to so you can hear it.
"""
self.speaker = speaker
# open a stream on the audio input file.
self.wav_file = wave.open(filename, "rb")
self.cvstate = None
self.read_size = int(buffer_size)
self.actual_channels = self.wav_file.getnchannels()
self.actual_rate = self.wav_file.getframerate()
self.sample_width = self.wav_file.getsampwidth()
# assumes signed integer used in raw audio, so for example, the max for 16bit is 2^15 (32768)
if self.auto_scale:
self.audio_scale_factor = 1 / pow(2, (8 * self.sample_width) - 1)
if self.requested_rate == 0:
raise Exception("Requested rate cannot be zero")
self.buffer_size = int(math.ceil((self.read_size * self.actual_rate) / self.requested_rate))
# convert int16 data to scaled floats
if self.sample_width == 1:
self.dtype = np.int8
elif self.sample_width == 2:
self.dtype = np.int16
elif self.sample_width == 4:
self.dtype = np.int32
else:
msg = "Unexpected sample width {}, can only handle 1, 2 or 4 byte audio"
raise Exception(msg.format(self.sample_width))
if speaker:
# configure output stream to match what we are resampling to...
audio_format = self.audio.get_format_from_width(self.sample_width)
speaker.open(audio_format, self.requested_channels, self.requested_rate)
def read_raw(self):
""" Reads the next chunk of audio (returns buffer_size provided to open)
It returns the raw data buffers converted to the target rate without any scaling.
"""
if self.wav_file is None:
return None
data = self.wav_file.readframes(self.buffer_size)
if len(data) == 0:
return None
if self.actual_rate != self.requested_rate:
# convert the audio to the desired recording rate
data, self.cvstate = audioop.ratecv(data, self.sample_width, self.actual_channels, self.actual_rate,
self.requested_rate, self.cvstate)
return self.get_requested_channels(data)
def get_requested_channels(self, data):
if self.requested_channels > self.actual_channels:
raise Exception("Cannot add channels, actual is {}, requested is {}".format(
self.actual_channels, self.requested_channels))
if self.requested_channels < self.actual_channels:
data = np.frombuffer(data, dtype=np.int16)
channels = []
# split into separate channels
for i in range(self.actual_channels):
channels += [data[i::self.actual_channels]]
# drop the channels we don't want
channels = channels[0:self.requested_channels]
# zip the resulting channels back up.
data = np.array(list(zip(*channels))).flatten()
# convert back to packed bytes in PCM 16 format
data = bytes(np.array(data, dtype=np.int16))
return data
def read(self):
""" Reads the next chunk of audio (returns buffer_size provided to open)
It returns the data converted to floating point numbers between -1 and 1, scaled by the range of
values possible for the given audio format.
"""
# deal with any accumulation of tails, if the tail grows to a full
# buffer then return it!
if self.tail is not None and len(self.tail) >= self.read_size:
data = self.tail[0:self.read_size]
self.tail = self.tail[self.read_size:]
return data
data = self.read_raw()
if data is None:
return None
if self.speaker:
self.speaker.write(data)
data = np.frombuffer(data, dtype=self.dtype).astype(float)
if self.tail is not None:
# we have a tail from previous frame, so prepend it
data = np.concatenate((self.tail, data))
# now the caller needs us to stick to our sample_size contract, but when
# rate conversion happens we can't be sure that 'data' is exactly that size.
if len(data) > self.read_size:
# usually one byte extra so add this to our accumulating tail
self.tail = data[self.read_size:]
data = data[0:self.read_size]
if len(data) < self.read_size:
# might have reached the end of a file, so pad with zeros.
zeros = np.zeros(self.read_size - len(data))
data = np.concatenate((data, zeros))
return data * self.audio_scale_factor
def close(self):
if self.wav_file:
self.wav_file.close()
self.wav_file = None
def is_closed(self):
return self.wav_file is None
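
A minimal read loop, assuming pyaudio is installed, example.wav is a placeholder path to a PCM wav file, and no speaker output is wanted; read() returns float chunks scaled to [-1, 1] and None at end of file:

reader = WavReader(sample_rate=16000, channels=1)
reader.open("example.wav", buffer_size=512)   # 512 samples per read() call
total = 0
while True:
    chunk = reader.read()
    if chunk is None:
        break
    total += len(chunk)
reader.close()
print("read", total, "samples")
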
| 39.761006
| 112
| 0.600601
|