Dataset schema (one metadata record per source file; the `content` column holds the file text that follows each record):
hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 value: Python) |
max_stars_repo_path / max_issues_repo_path / max_forks_repo_path: string (4 to 209) |
max_stars_repo_name / max_issues_repo_name / max_forks_repo_name: string (5 to 121) |
max_stars_repo_head_hexsha / max_issues_repo_head_hexsha / max_forks_repo_head_hexsha: string (length 40) |
max_stars_repo_licenses / max_issues_repo_licenses / max_forks_repo_licenses: list (1 to 10) |
max_stars_count: int64 (1 to 191k, nullable) | max_issues_count: int64 (1 to 67k, nullable) | max_forks_count: int64 (1 to 105k, nullable) |
*_event_min_datetime / *_event_max_datetime: string (length 24, nullable) |
content: string (4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)
| hexsha: 68df28c169972c991af48446b9d7957487d0a266 | size: 15,000 | ext: py | lang: Python | repo_path: tensorflow_probability/python/distributions/beta.py | repo_name: cafeal/probability | repo_head_hexsha: f968a32d601d29ec31a10568ccfe30263cf91ef2 | licenses: ["Apache-2.0"] | max_stars_count: null | max_issues_count: null | max_forks_count: null |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Beta distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.util.seed_stream import SeedStream
from tensorflow.python.util import deprecation # pylint: disable=g-direct-tensorflow-import
__all__ = [
'Beta',
]
_beta_sample_note = """Note: `x` must have dtype `self.dtype` and be in
`[0, 1].` It must have a shape compatible with `self.batch_shape()`."""
class Beta(distribution.Distribution):
"""Beta distribution.
The Beta distribution is defined over the `(0, 1)` interval using parameters
`concentration1` (aka 'alpha') and `concentration0` (aka 'beta').
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta) = x**(alpha - 1) (1 - x)**(beta - 1) / Z
Z = Gamma(alpha) Gamma(beta) / Gamma(alpha + beta)
```
where:
* `concentration1 = alpha`,
* `concentration0 = beta`,
* `Z` is the normalization constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
The concentration parameters represent mean total counts of a `1` or a `0`,
i.e.,
```none
concentration1 = alpha = mean * total_concentration
concentration0 = beta = (1. - mean) * total_concentration
```
where `mean` in `(0, 1)` and `total_concentration` is a positive real number
representing a mean `total_count = concentration1 + concentration0`.
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Warning: The samples can be zero due to finite precision.
This happens more often when some of the concentrations are very small.
Make sure to round the samples up to `np.finfo(dtype).tiny` before computing the
density.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Create a batch of three Beta distributions.
alpha = [1, 2, 3]
beta = [1, 2, 3]
dist = tfd.Beta(alpha, beta)
dist.sample([4, 5]) # Shape [4, 5, 3]
# `x` has three batch entries, each with two samples.
x = [[.1, .4, .5],
[.2, .3, .5]]
# Calculate the probability of each pair of samples under the corresponding
# distribution in `dist`.
dist.prob(x) # Shape [2, 3]
```
```python
# Create batch_shape=[2, 3] via parameter broadcast:
alpha = [[1.], [2]] # Shape [2, 1]
beta = [3., 4, 5] # Shape [3]
dist = tfd.Beta(alpha, beta)
# alpha broadcast as: [[1., 1, 1],
#                      [2, 2, 2]]
# beta broadcast as:  [[3., 4, 5],
#                      [3, 4, 5]]
# batch_shape [2, 3]
dist.sample([4, 5]) # Shape [4, 5, 2, 3]
x = [.2, .3, .5]
# x will be broadcast as [[.2, .3, .5],
# [.2, .3, .5]],
# thus matching batch_shape [2, 3].
dist.prob(x) # Shape [2, 3]
```
Compute the gradients of samples w.r.t. the parameters:
```python
alpha = tf.constant(1.0)
beta = tf.constant(2.0)
dist = tfd.Beta(alpha, beta)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [alpha, beta])
```
"""
def __init__(self,
concentration1,
concentration0,
validate_args=False,
allow_nan_stats=True,
name='Beta'):
"""Initialize a batch of Beta distributions.
Args:
concentration1: Positive floating-point `Tensor` indicating mean
number of successes; aka 'alpha'. Implies `self.dtype` and
`self.batch_shape`, i.e.,
`concentration1.shape = [N1, N2, ..., Nm] = self.batch_shape`.
concentration0: Positive floating-point `Tensor` indicating mean
number of failures; aka 'beta'. Otherwise has same semantics as
`concentration1`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value '`NaN`' to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([concentration1, concentration0],
dtype_hint=tf.float32)
self._concentration1 = tensor_util.convert_nonref_to_tensor(
concentration1, dtype=dtype, name='concentration1')
self._concentration0 = tensor_util.convert_nonref_to_tensor(
concentration0, dtype=dtype, name='concentration0')
super(Beta, self).__init__(
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
s = tf.convert_to_tensor(sample_shape, dtype=tf.int32)
return dict(concentration1=s, concentration0=s)
@classmethod
def _params_event_ndims(cls):
return dict(concentration1=0, concentration0=0)
@property
def concentration1(self):
"""Concentration parameter associated with a `1` outcome."""
return self._concentration1
@property
def concentration0(self):
"""Concentration parameter associated with a `0` outcome."""
return self._concentration0
@property
@deprecation.deprecated(
'2019-10-01',
('The `total_concentration` property is deprecated; instead use '
'`dist.concentration1 + dist.concentration0`.'),
warn_once=True)
def total_concentration(self):
"""Sum of concentration parameters."""
with self._name_and_control_scope('total_concentration'):
return self.concentration1 + self.concentration0
def _batch_shape_tensor(self, concentration1=None, concentration0=None):
return prefer_static.broadcast_shape(
prefer_static.shape(
self.concentration1 if concentration1 is None else concentration1),
prefer_static.shape(
self.concentration0 if concentration0 is None else concentration0))
def _batch_shape(self):
return tf.broadcast_static_shape(
self.concentration1.shape, self.concentration0.shape)
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
def _sample_n(self, n, seed=None):
seed = SeedStream(seed, 'beta')
concentration1 = tf.convert_to_tensor(self.concentration1)
concentration0 = tf.convert_to_tensor(self.concentration0)
shape = self._batch_shape_tensor(concentration1, concentration0)
expanded_concentration1 = tf.broadcast_to(concentration1, shape)
expanded_concentration0 = tf.broadcast_to(concentration0, shape)
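# A Beta(a, b) variate can be constructed as Ga / (Ga + Gb), where
# Ga ~ Gamma(a, 1) and Gb ~ Gamma(b, 1); draw both gammas at the broadcasted
# batch shape and take the ratio below.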
gamma1_sample = tf.random.gamma(
shape=[n], alpha=expanded_concentration1, dtype=self.dtype, seed=seed())
gamma2_sample = tf.random.gamma(
shape=[n], alpha=expanded_concentration0, dtype=self.dtype, seed=seed())
beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
return beta_sample
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_prob(self, x):
concentration0 = tf.convert_to_tensor(self.concentration0)
concentration1 = tf.convert_to_tensor(self.concentration1)
return (self._log_unnormalized_prob(x, concentration1, concentration0) -
self._log_normalization(concentration1, concentration0))
@distribution_util.AppendDocstring(_beta_sample_note)
def _prob(self, x):
return tf.exp(self._log_prob(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _log_cdf(self, x):
return tf.math.log(self._cdf(x))
@distribution_util.AppendDocstring(_beta_sample_note)
def _cdf(self, x):
concentration1 = tf.convert_to_tensor(self.concentration1)
concentration0 = tf.convert_to_tensor(self.concentration0)
shape = self._batch_shape_tensor(concentration1, concentration0)
concentration1 = tf.broadcast_to(concentration1, shape)
concentration0 = tf.broadcast_to(concentration0, shape)
return tf.math.betainc(concentration1, concentration0, x)
def _log_unnormalized_prob(self, x, concentration1, concentration0):
return (tf.math.xlogy(concentration1 - 1., x) +
(concentration0 - 1.) * tf.math.log1p(-x))
def _log_normalization(self, concentration1, concentration0):
return (tf.math.lgamma(concentration1) + tf.math.lgamma(concentration0) -
tf.math.lgamma(concentration1 + concentration0))
def _entropy(self):
concentration1 = tf.convert_to_tensor(self.concentration1)
concentration0 = tf.convert_to_tensor(self.concentration0)
total_concentration = concentration1 + concentration0
return (self._log_normalization(concentration1, concentration0) -
(concentration1 - 1.) * tf.math.digamma(concentration1) -
(concentration0 - 1.) * tf.math.digamma(concentration0) +
(total_concentration - 2.) * tf.math.digamma(total_concentration))
def _mean(self):
concentration1 = tf.convert_to_tensor(self.concentration1)
return concentration1 / (concentration1 + self.concentration0)
def _variance(self):
concentration1 = tf.convert_to_tensor(self.concentration1)
concentration0 = tf.convert_to_tensor(self.concentration0)
total_concentration = concentration1 + concentration0
return (concentration1 * concentration0 /
((total_concentration)**2 * (total_concentration + 1.)))
@distribution_util.AppendDocstring(
"""Note: The mode is undefined when `concentration1 <= 1` or
`concentration0 <= 1`. If `self.allow_nan_stats` is `True`, `NaN`
is used for undefined modes. If `self.allow_nan_stats` is `False` an
exception is raised when one or more modes are undefined.""")
def _mode(self):
concentration1 = tf.convert_to_tensor(self.concentration1)
concentration0 = tf.convert_to_tensor(self.concentration0)
mode = (concentration1 - 1.) / (concentration1 + concentration0 - 2.)
with tf.control_dependencies([] if self.allow_nan_stats else [ # pylint: disable=g-long-ternary
assert_util.assert_less(
tf.ones([], dtype=self.dtype),
concentration1,
message='Mode undefined for concentration1 <= 1.'),
assert_util.assert_less(
tf.ones([], dtype=self.dtype),
concentration0,
message='Mode undefined for concentration0 <= 1.')
]):
return tf.where(
(concentration1 > 1.) & (concentration0 > 1.),
mode,
dtype_util.as_numpy_dtype(self.dtype)(np.nan))
def _sample_control_dependencies(self, x):
"""Checks the validity of a sample."""
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_negative(
x, message='Sample must be non-negative.'))
assertions.append(assert_util.assert_less_equal(
x, tf.ones([], x.dtype),
message='Sample must be less than or equal to `1`.'))
return assertions
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
for concentration in [self.concentration0, self.concentration1]:
if is_init != tensor_util.is_ref(concentration):
assertions.append(assert_util.assert_positive(
concentration,
message='Concentration parameter must be positive.'))
return assertions
@kullback_leibler.RegisterKL(Beta, Beta)
def _kl_beta_beta(d1, d2, name=None):
"""Calculate the batchwise KL divergence KL(d1 || d2) with d1 and d2 Beta.
Args:
d1: instance of a Beta distribution object.
d2: instance of a Beta distribution object.
name: (optional) Name to use for created operations.
default is 'kl_beta_beta'.
Returns:
Batchwise KL(d1 || d2)
"""
with tf.name_scope(name or 'kl_beta_beta'):
d1_concentration1 = tf.convert_to_tensor(d1.concentration1)
d1_concentration0 = tf.convert_to_tensor(d1.concentration0)
d2_concentration1 = tf.convert_to_tensor(d2.concentration1)
d2_concentration0 = tf.convert_to_tensor(d2.concentration0)
d1_total_concentration = d1_concentration1 + d1_concentration0
d2_total_concentration = d2_concentration1 + d2_concentration0
d1_log_normalization = d1._log_normalization( # pylint: disable=protected-access
d1_concentration1, d1_concentration0)
d2_log_normalization = d2._log_normalization( # pylint: disable=protected-access
d2_concentration1, d2_concentration0)
return ((d2_log_normalization - d1_log_normalization) -
(tf.math.digamma(d1_concentration1) *
(d2_concentration1 - d1_concentration1)) -
(tf.math.digamma(d1_concentration0) *
(d2_concentration0 - d1_concentration0)) +
(tf.math.digamma(d1_total_concentration) *
(d2_total_concentration - d1_total_concentration)))
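

# A minimal usage sketch (not part of the original module): the `RegisterKL`
# decorator above lets `tfd.kl_divergence` dispatch to `_kl_beta_beta` for two
# Beta instances. The concrete parameter values below are illustrative only.
if __name__ == '__main__':
  import tensorflow_probability as tfp
  tfd = tfp.distributions
  d1 = tfd.Beta(concentration1=1., concentration0=2.)
  d2 = tfd.Beta(concentration1=3., concentration0=4.)
  print(tfd.kl_divergence(d1, d2))  # analytic, batchwise KL(d1 || d2)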
| avg_line_length: 39.164491 | max_line_length: 100 | alphanum_fraction: 0.699667 |

| hexsha: 5d576e6c379f505a5b53ab5ed57598ea174a0d0c | size: 5,862 | ext: py | lang: Python | repo_path: refactorings/extract_class.py | repo_name: miladjobs/CodART | repo_head_hexsha: f37af3f36c380245b0ae883dda7401e2b9d65980 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2021-01-30T09:28:54.000Z to 2021-01-30T09:28:54.000Z) |
"""
The scripts implements different refactoring operations
"""
__version__ = '0.1.0'
__author__ = 'Morteza'
import networkx as nx
from antlr4 import *
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.java9.Java9_v2Parser import Java9_v2Parser
from gen.java9 import Java9_v2Listener
import visualization.graph_visualization
class ExtractClassRefactoringListener(Java9_v2Listener):
"""
Implements the Extract Class refactoring.
A stream of tokens is sent to the listener to build a token_stream_rewriter object;
class_identifier names the class whose fields and methods are analyzed for extraction.
"""
def __init__(self, common_token_stream: CommonTokenStream = None,
class_identifier: str = None):
"""
:param common_token_stream:
"""
self.enter_class = False
self.token_stream = common_token_stream
self.class_identifier = class_identifier
# Move all the tokens in the source code in a buffer, token_stream_rewriter.
if common_token_stream is not None:
self.token_stream_rewriter = TokenStreamRewriter(common_token_stream)
else:
raise TypeError('common_token_stream is None')
self.field_dict = {}
self.method_name = []
self.method_no = 0
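# Shapes of the accumulated state (illustrative values, not from the source):
#   field_dict  = {'radius': [['area', 1], ['perimeter', 2]]}
#     i.e. each field maps to the [method_name, method_no] pairs of methods that use it.
#   method_name = [['area', 1], ['perimeter', 2]]  # methods seen so far, in order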
# Groups methods in terms of their dependencies on the class attributes and one another
def split_class(self):
# 1- move the dictionary of fields into a new dictionary of methods operating on fields
method_dict = {}
for key, value in self.field_dict.items():
for method in value:
if not str(method) in method_dict:
method_dict[str(method)] = [key]
else:
method_dict[str(method)].append(key)
print("methods dic.", method_dict)
# 2- Group methods in terms of their dependencies on one another
method_group = dict()
# _____________________To be modified ________________________
# 3- Group methods in terms of their dependencies on the class attributes
for key, value in method_dict.items():
if not str(value) in method_group:
method_group[str(value)] = [key]
else:
method_group[str(value)].append(key)
print("methods group", method_group)
# --------------------------------------
# 4- Create graph
G = nx.DiGraph()
for field, methods in self.field_dict.items():
for method in methods:
print('add edge {0} --> {1}'.format(field, method))
G.add_node(method[1], method_name=method[0])
G.add_edge(field, method[1])
print('---------\nExtracted classes:')
visualization.graph_visualization.draw(g=G)
# CC = nx.connected_components(G)
S = [G.subgraph(c).copy() for c in nx.weakly_connected_components(G)]
for class_ in S:
# print('class_', class_.nodes.data())
class_fields = [node for node in class_.nodes if class_.in_degree(node) == 0]
class_methods = [(class_.nodes[node]['method_name'], node) for node in class_.nodes if class_.in_degree(node) > 0]
print('class_fields', class_fields)
print('class_methods', class_methods)
print('-'*10)
# Enter a parse tree produced by Java9_v2Parser#normalClassDeclaration.
def enterNormalClassDeclaration(self, ctx: Java9_v2Parser.NormalClassDeclarationContext):
if ctx.identifier().getText() != self.class_identifier:
return
self.enter_class = True
# Exit a parse tree produced by Java9_v2Parser#normalClassDeclaration.
def exitNormalClassDeclaration(self, ctx: Java9_v2Parser.NormalClassDeclarationContext):
self.enter_class = False
print("----------------------------")
print("Class attributes and methods using each attribute ")
print("field dictionary =", self.field_dict)
print("----------------------------")
self.split_class()
self.field_dict = {}
self.method_name = []
self.method_no = 0
# When entering a class attribute (field) declaration, this method is invoked.
# It adds the attributes of the target class to a dictionary.
def enterFieldDeclaration(self, ctx: Java9_v2Parser.FieldDeclarationContext):
if not self.enter_class:
return
field_id = ctx.variableDeclaratorList().variableDeclarator(i=0).variableDeclaratorId().identifier().getText()
self.field_dict[field_id] = []
# Enter a parse tree produced by Java9_v2Parser#methodDeclaration.
def enterMethodDeclaration(self, ctx: Java9_v2Parser.MethodDeclarationContext):
if not self.enter_class:
return
m = []
m_name = ctx.methodHeader().methodDeclarator().identifier().getText()
self.method_no = self.method_no + 1
m.append(m_name)
m.append(self.method_no)
self.method_name.append(m)
# Exit a parse tree produced by Java9_v2Parser#methodDeclaration.
def exitMethodDeclaration(self, ctx: Java9_v2Parser.MethodDeclarationContext):
if not self.enter_class:
return
# Exit a parse tree produced by Java9_v2Parser#identifier.
def exitIdentifier(self, ctx: Java9_v2Parser.IdentifierContext):
if not self.enter_class:
return
if self.method_no == 0:
return
current_method = self.method_name[-1]
variable_name = ctx.getText()
if variable_name not in self.field_dict:
return
if not current_method in self.field_dict[variable_name]:
self.field_dict[variable_name].append(current_method)
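

# A minimal driver sketch (not part of the original file) showing how this listener
# is typically wired up with the ANTLR runtime. The generated lexer module name
# (gen.java9.Java9_v2Lexer) and the entry rule (ordinaryCompilation) are assumptions
# about this repository's generated parser and may need adjusting.
def _run_extract_class_demo(java_file: str, class_name: str):
    from antlr4 import FileStream, ParseTreeWalker
    from gen.java9.Java9_v2Lexer import Java9_v2Lexer  # assumed generated module
    stream = FileStream(java_file, encoding='utf8')
    token_stream = CommonTokenStream(Java9_v2Lexer(stream))
    parser = Java9_v2Parser(token_stream)
    tree = parser.ordinaryCompilation()  # entry rule name is an assumption
    listener = ExtractClassRefactoringListener(common_token_stream=token_stream,
                                               class_identifier=class_name)
    ParseTreeWalker.DEFAULT.walk(listener, tree)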
| avg_line_length: 40.993007 | max_line_length: 126 | alphanum_fraction: 0.648243 |

| hexsha: 7405352c2afdd40bb91a09b5d47414289494e291 | size: 21,227 | ext: py | lang: Python | repo_path: venv1/Lib/site-packages/tensorflow/python/ops/embedding_ops.py | repo_name: Soum-Soum/Tensorflow_Face_Finder | repo_head_hexsha: fec6c15d2df7012608511ad87f4b55731bf99478 | licenses: ["Apache-2.0", "MIT"] | max_stars_count: null | max_issues_count: 1 (2021-05-20T00:58:04.000Z to 2021-05-20T00:58:04.000Z) | max_forks_count: null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
# Imports gradient definitions.
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
def _gather(params, ids, name=None):
"""Helper function for _embedding_lookup_and_transform.
This function gathers embeddings from a single tensor. The gather deals with
resource variables specially.
Args:
params: A `Tensor` of embeddings.
ids: A `Tensor` indexing the embeddings to be retrieved from `params`.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `params`.
"""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
else:
return array_ops.gather(params, ids, name=name)
def _clip(params, ids, max_norm):
"""Helper function for _embedding_lookup_and_transform.
This function optionally clips embeddings to an l2-norm of max_norm.
Args:
params: A `Tensor` of embeddings retrieved by `_gather`.
ids: The `ids` argument that was passed to `_gather`.
max_norm: If provided, the embeddings are l2-normalized to the value of
max_norm.
Returns:
A `Tensor` with the same type as `params`.
"""
def _rank(x):
"""Helper function to retrieve the rank of a tensor.
Args:
x: Something convertible to `Tensor`.
Returns:
Either a pair `(rank, True)` where `rank` is an integer or a pair
`(rank, False)` where `rank` is an integer `Tensor`. In either case,
`rank` is the rank of `x`.
"""
rank = ops.convert_to_tensor(x).get_shape().ndims
if rank:
return rank, True
else:
return array_ops.rank(x), False
if max_norm is None:
return params
ids_rank, ids_static = _rank(ids)
params_rank, params_static = _rank(params)
return clip_ops.clip_by_norm(
params,
max_norm,
axes=(list(range(ids_rank, params_rank))
if ids_static and params_static
else math_ops.range(ids_rank, params_rank)))
def _embedding_lookup_and_transform(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
transform_fn=None):
"""Helper function for embedding_lookup and _compute_sampled_logits.
This function is a generalization of embedding_lookup that optionally
applies a caller-specified transformation to each embedding. This is
done through the `transform_fn` argument. If provided, the function is
applied to each partitioned tensor of retrieved embeddings, colocated
with the embeddings. This function will be called with a single `Tensor`
argument of the same type as the `params` tensor and should return a
`Tensor`. The shape of the argument will be the same as `params` except
for the size of the first dimension. The first dimension of the result's
shape must be the same size as the argument's.
Args:
params: See embedding_lookup.
ids: See embedding_lookup.
partition_strategy: See embedding_lookup.
name: See embedding_lookup.
max_norm: See embedding_lookup.
transform_fn: An optional function to apply to each retrieved embedding.
If max_norm is provided, transform_fn is applied to the norm-limited
embeddings.
Returns:
See embedding_lookup for details.
Raises:
ValueError: If `params` is empty.
"""
if params is None or params in ((), []):
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
ids = ops.convert_to_tensor(ids, name="ids")
if np == 1 and (not transform_fn or ids.get_shape().ndims == 1):
with ops.colocate_with(params[0]):
result = _clip(_gather(params[0], ids, name=name), ids, max_norm)
if transform_fn:
result = transform_fn(result)
return result
else:
# Flatten the ids. There are two cases where we need to do this.
# - There is more than one params tensor.
# - There is a transform_fn and ids is not statically known to be 1-D.
# We must flatten in this case because transform_fn expects a flat
# tensor of embeddings.
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape()[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape()[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape()[0].value is not None:
dim_0_sizes.append(params[p].get_shape()[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(
flat_ids // (ids_per_partition + 1),
(flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
new_ids = array_ops.where(p_assignments < extras,
flat_ids % (ids_per_partition + 1),
(flat_ids - extras) % ids_per_partition)
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
pids = gather_ids[p]
with ops.colocate_with(params[p]):
result = _gather(params[p], pids)
if transform_fn:
# If transform_fn is provided, the clip_by_norm precedes
# the transform and hence must be co-located. See below
# for the counterpart if transform_fn is not provided.
result = transform_fn(_clip(result, pids, max_norm))
partitioned_result.append(result)
# Stitch these back together
ret = data_flow_ops.parallel_dynamic_stitch(
pindices, partitioned_result, name=name)
# Determine the static element shape.
if transform_fn is None:
element_shape_s = params[0].get_shape()[1:]
for p in params[1:]:
element_shape_s = element_shape_s.merge_with(p.get_shape()[1:])
else:
element_shape_s = ret.get_shape()[1:]
# Compute the dynamic element shape.
if element_shape_s.is_fully_defined():
element_shape_d = element_shape_s
elif transform_fn is None:
# It's important that we compute params[0].shape on the right device
# to avoid data motion.
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
element_shape_d = params_shape[1:]
else:
element_shape_d = array_ops.shape(ret)[1:]
# Reshape to reverse the flattening of ids.
ret = array_ops.reshape(ret,
array_ops.concat(
[array_ops.shape(ids), element_shape_d], 0))
# Normally the reshape is sufficient, but setting shape explicitly
# teaches shape inference that params[1:].get_shape() matters
# (in the case that transform_fn is None).
ret.set_shape(ids.get_shape().concatenate(element_shape_s))
if not transform_fn:
# If transform_fn was provided, the clip_by_norm was done above.
ret = _clip(ret, ids, max_norm)
return ret
@tf_export("nn.embedding_lookup")
def embedding_lookup(
params,
ids,
partition_strategy="mod",
name=None,
validate_indices=True, # pylint: disable=unused-argument
max_norm=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
@{tf.gather}, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for the operation (optional).
validate_indices: DEPRECATED. If this operation is assigned to CPU, values
in `indices` are always validated to be within range. If assigned to GPU,
out-of-bound indices result in safe but unspecified behavior, which may
include raising an error.
max_norm: If provided, embedding values are l2-normalized to the value of
max_norm.
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
return _embedding_lookup_and_transform(
params=params,
ids=ids,
partition_strategy=partition_strategy,
name=name,
max_norm=max_norm,
transform_fn=None)
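

# Illustrative sketch (not part of the original module) of how the two partition
# strategies documented in `embedding_lookup` split 13 ids across 5 partitions;
# plain Python only, no TensorFlow required.
def _partition_strategy_demo(num_ids=13, num_partitions=5):
  mod_split = [[i for i in range(num_ids) if i % num_partitions == p]
               for p in range(num_partitions)]
  ids_per_partition, extras = divmod(num_ids, num_partitions)
  def div_partition(i):
    # Mirrors the "div" assignment used above: the first `extras` partitions
    # each receive one extra id.
    return max(i // (ids_per_partition + 1), (i - extras) // ids_per_partition)
  div_split = [[i for i in range(num_ids) if div_partition(i) == p]
               for p in range(num_partitions)]
  # mod_split == [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]
  # div_split == [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]
  return mod_split, div_split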
@tf_export("nn.embedding_lookup_sparse")
def embedding_lookup_sparse(params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If provided, each embedding is normalized to have l2 norm equal
to max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
shape(combined params) = [p0, p1, ..., pm]
and
shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]
then
shape(output) = [d0, d1, ..., dn-1, p1, ..., pm].
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
with `combiner`="mean", then the output will be a 3x20 matrix where
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = params[0, :] * 1.0
output[2, :] = params[1, :] * 3.0
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
else:
assert False, "Unrecognized combiner"
else:
assert idx is not None
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(
embeddings, idx, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(
embeddings, idx, segment_ids, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(
embeddings, idx, segment_ids, name=name)
else:
assert False, "Unrecognized combiner"
return embeddings
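

# A minimal usage sketch (not part of the original module) mirroring the worked
# example in the docstring above: a 10x20 params matrix, sparse ids/weights, and
# combiner="mean". Run eagerly under TF 2.x or inside a session under TF 1.x.
if __name__ == "__main__":
  import numpy as np
  import tensorflow as tf
  params = tf.constant(np.random.rand(10, 20).astype(np.float32))
  indices = [[0, 0], [0, 1], [1, 0], [2, 3]]
  sp_ids = sparse_tensor.SparseTensor(
      indices=indices, values=[1, 3, 0, 1], dense_shape=[3, 4])
  sp_weights = sparse_tensor.SparseTensor(
      indices=indices, values=[2.0, 0.5, 1.0, 3.0], dense_shape=[3, 4])
  combined = embedding_lookup_sparse(params, sp_ids, sp_weights, combiner="mean")
  print(combined)  # shape [3, 20]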
| avg_line_length: 42.624498 | max_line_length: 82 | alphanum_fraction: 0.656145 |

| hexsha: 5591e952289f7c4b940a6238c5810936ed0373bc | size: 38 | ext: py | lang: Python | repo_path: Build folder/__init__.py | repo_name: techbliss/Windows_Screenrecorder | repo_head_hexsha: 7e0167b0cb907e790357f09e59ba0e43fe61e334 | licenses: ["MIT"] | max_stars_count: 17 (2016-09-30T21:26:05.000Z to 2021-06-14T13:08:20.000Z) | max_issues_count: null | max_forks_count: 7 (2016-10-06T10:24:37.000Z to 2020-07-13T16:57:31.000Z) |
# author Storm Shadow www.techbliss.org
| avg_line_length: 38 | max_line_length: 38 | alphanum_fraction: 0.842105 |

| hexsha: ce9d72bc89eeb35d70ac2c4616cc889865b71a43 | size: 3,452 | ext: py | lang: Python | repo_path: src/python/pants/backend/kotlin/lint/ktlint/rules.py | repo_name: wonlay/pants | repo_head_hexsha: 53c66503b6898e83c9c9596e56cde5ad9ed6a0d3 | licenses: ["Apache-2.0"] | max_stars_count: null | max_issues_count: null | max_forks_count: null |
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from dataclasses import dataclass
from pants.backend.kotlin.lint.ktlint.skip_field import SkipKtlintField
from pants.backend.kotlin.lint.ktlint.subsystem import KtlintSubsystem
from pants.backend.kotlin.target_types import KotlinSourceField
from pants.core.goals.fmt import FmtRequest, FmtResult
from pants.core.goals.generate_lockfiles import GenerateToolLockfileSentinel
from pants.engine.fs import Digest
from pants.engine.internals.native_engine import Snapshot
from pants.engine.internals.selectors import Get
from pants.engine.process import ProcessResult
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet, Target
from pants.engine.unions import UnionRule
from pants.jvm.jdk_rules import InternalJdk, JvmProcess
from pants.jvm.resolve import jvm_tool
from pants.jvm.resolve.coursier_fetch import ToolClasspath, ToolClasspathRequest
from pants.jvm.resolve.jvm_tool import GenerateJvmLockfileFromTool
from pants.util.logging import LogLevel
from pants.util.strutil import pluralize
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class KtlintFieldSet(FieldSet):
required_fields = (KotlinSourceField,)
source: KotlinSourceField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(SkipKtlintField).value
class KtlintRequest(FmtRequest):
field_set_type = KtlintFieldSet
name = KtlintSubsystem.options_scope
class KtlintToolLockfileSentinel(GenerateToolLockfileSentinel):
resolve_name = KtlintSubsystem.options_scope
@rule(desc="Format with Ktlint", level=LogLevel.DEBUG)
async def ktlint_fmt(request: KtlintRequest, tool: KtlintSubsystem, jdk: InternalJdk) -> FmtResult:
if tool.skip:
return FmtResult.skip(formatter_name=request.name)
lockfile_request = await Get(GenerateJvmLockfileFromTool, KtlintToolLockfileSentinel())
tool_classpath = await Get(ToolClasspath, ToolClasspathRequest(lockfile=lockfile_request))
toolcp_relpath = "__toolcp"
extra_immutable_input_digests = {
toolcp_relpath: tool_classpath.digest,
}
args = [
"com.pinterest.ktlint.Main",
"-F",
*request.snapshot.files,
]
result = await Get(
ProcessResult,
JvmProcess(
jdk=jdk,
argv=args,
classpath_entries=tool_classpath.classpath_entries(toolcp_relpath),
input_digest=request.snapshot.digest,
extra_immutable_input_digests=extra_immutable_input_digests,
extra_nailgun_keys=extra_immutable_input_digests,
output_files=request.snapshot.files,
description=f"Run Ktlint on {pluralize(len(request.field_sets), 'file')}.",
level=LogLevel.DEBUG,
),
)
output_snapshot = await Get(Snapshot, Digest, result.output_digest)
return FmtResult.create(request, result, output_snapshot, strip_chroot_path=True)
@rule
def generate_ktlint_lockfile_request(
_: KtlintToolLockfileSentinel, tool: KtlintSubsystem
) -> GenerateJvmLockfileFromTool:
return GenerateJvmLockfileFromTool.create(tool)
def rules():
return [
*collect_rules(),
*jvm_tool.rules(),
UnionRule(FmtRequest, KtlintRequest),
UnionRule(GenerateToolLockfileSentinel, KtlintToolLockfileSentinel),
]
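

# Hypothetical wiring sketch (not from this file): a Pants backend's register.py
# would normally re-export these rules so the Ktlint formatter becomes active, e.g.
#
#   from pants.backend.kotlin.lint.ktlint import rules as ktlint_rules
#
#   def rules():
#       return [*ktlint_rules.rules()]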
| avg_line_length: 34.52 | max_line_length: 99 | alphanum_fraction: 0.762457 |

| hexsha: a8fe044640830d0817ba8fe9d5268c6f7ba375db | size: 1,919 | ext: py | lang: Python | repo_path: gevent/select.py | repo_name: PythonCharmers/gevent-future | repo_head_hexsha: f0b8be81206b73c6f5fd5c66fab9fbd0c1eac85e | licenses: ["MIT"] | max_stars_count: 2 (2019-05-12T22:01:32.000Z to 2020-06-09T14:11:01.000Z) | max_issues_count: null | max_forks_count: 1 (2019-05-12T22:01:18.000Z to 2019-05-12T22:01:18.000Z) |
# Copyright (c) 2009-2011 Denis Bilenko. See LICENSE for details.
from __future__ import absolute_import
from gevent.event import Event
from gevent.hub import get_hub
__implements__ = ['select']
__all__ = ['error'] + __implements__
import select as __select__
error = __select__.error
def get_fileno(obj):
try:
fileno_f = obj.fileno
except AttributeError:
if not isinstance(obj, (int, long)):
raise TypeError('argument must be an int, or have a fileno() method: %r' % (obj, ))
return obj
else:
return fileno_f()
class SelectResult(object):
__slots__ = ['read', 'write', 'event']
def __init__(self):
self.read = []
self.write = []
self.event = Event()
def add_read(self, socket):
self.read.append(socket)
self.event.set()
def add_write(self, socket):
self.write.append(socket)
self.event.set()
def select(rlist, wlist, xlist, timeout=None):
"""An implementation of :meth:`select.select` that blocks only the current greenlet.
Note: *xlist* is ignored.
"""
watchers = []
loop = get_hub().loop
io = loop.io
MAXPRI = loop.MAXPRI
result = SelectResult()
try:
try:
for readfd in rlist:
watcher = io(get_fileno(readfd), 1)
watcher.priority = MAXPRI
watcher.start(result.add_read, readfd)
watchers.append(watcher)
for writefd in wlist:
watcher = io(get_fileno(writefd), 2)
watcher.priority = MAXPRI
watcher.start(result.add_write, writefd)
watchers.append(watcher)
except IOError as ex:
raise error(*ex.args)
result.event.wait(timeout=timeout)
return result.read, result.write, []
finally:
for watcher in watchers:
watcher.stop()
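

# Minimal usage sketch (not part of the original module): wait for a socket pair to
# become readable/writable without blocking other greenlets. Names are illustrative.
if __name__ == '__main__':
    import socket
    a, b = socket.socketpair()
    b.send(b'x')
    readable, writable, _ = select([a], [a], [], timeout=1)
    print(readable, writable)  # typically [a] in both lists: data pending, buffer writable
    a.close()
    b.close()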
| avg_line_length: 27.028169 | max_line_length: 95 | alphanum_fraction: 0.596144 |

| hexsha: 9b0e3cb037ed4c9212e3a5673318ea6e11386c18 | size: 916 | ext: py | lang: Python | repo_path: settings.py | repo_name: nikgun1984/ketolife_backend | repo_head_hexsha: d7c61791646a162061c454f8dedc2512d698cc53 | licenses: ["PostgreSQL", "Unlicense", "MIT"] | max_stars_count: 1 (2021-03-15T19:22:55.000Z to 2021-03-15T19:22:55.000Z) | max_issues_count: null | max_forks_count: null |
import os
from app import app
from forms import LoginForm, UserAddForm
from secrets import APP_KEY,APP_ID_RECIPE,APP_KEY_RECIPE
CURR_USER_KEY = "curr_user"
BASE_URL_SP = "https://api.spoonacular.com"
BASE_URL_ED = "https://api.edamam.com"
BASE_IMG_LINK = "https://spoonacular.com/cdn/ingredients_250x250"
APP_KEY = os.environ.get('APP_KEY',APP_KEY)
APP_ID_RECIPE = os.environ.get('APP_ID_RECIPE', APP_ID_RECIPE)
APP_KEY_RECIPE = os.environ.get('APP_KEY_RECIPE', APP_KEY_RECIPE)
@app.context_processor
def context_processor():
"""Now forms will be available globally across all jinja templates"""
login_form = LoginForm()
signup_form = UserAddForm()
classes = ["fa fa-user","fa fa-paper-plane","fa fa-lock","fa fa-check-circle"]
return dict(login_form=login_form,signup_form=signup_form,classes=classes)
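

# Illustrative template-side usage (not from this file): because of the context
# processor above, any Jinja template can reference these names directly without
# each view passing them in, e.g.
#
#   {{ login_form.username.label }} {{ login_form.username() }}
#   {% for icon in classes %}<i class="{{ icon }}"></i>{% endfor %}
#
# The exact field names on LoginForm are an assumption; adjust to forms.py.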
| avg_line_length: 35.230769 | max_line_length: 82 | alphanum_fraction: 0.7631 |

| hexsha: e6cea76e4d917d4f43a67d39d39ba97b072ea430 | size: 6,497 | ext: py | lang: Python | repo_path: baselines/convnets-keras/fcn/fabric_train.py | repo_name: leix28/ML-Fabri | repo_head_hexsha: 6776f1b93cc84ab40569af3052ffc30bee7f8910 | licenses: ["MIT"] | max_stars_count: null | max_issues_count: null | max_forks_count: null |
"""
adapted from keras example cifar10_cnn.py
Train ResNet-18 on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10.py
"""
from __future__ import print_function
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
import sys
import datetime
import os
import shutil
from keras.optimizers import Adam, Adadelta
from convnets import AlexNet_FCN
from datagenerator import data_gen
import keras.backend as K
import numpy as np
import dataloader
import datagenerator
from keras.backend.tensorflow_backend import set_session
from keras.metrics import top_k_categorical_accuracy
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
set_session(sess)
t = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
print(t)
batch_size = 32
nb_classes = 14
nb_epoch = 100
outs = 31
data_augmentation = True
# The data, shuffled and split between train and test sets:
dataset_fn = '../../../data_preprocessing/material_dataset.txt'
imgs_fn = '../../../../storage/center_227x227.npz'
weights_fn = '../../../../storage/alexnet_weights.h5'
#sz = 227
sz = 300
img_rows = sz
img_cols = sz
img_channels = 3
with tf.device('/gpu:0'):
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('alexnet.csv')
#model = resnet.ResnetBuilder.build_resnet_18((img_channels, img_rows, img_cols), nb_classes)
#model = resnet.ResnetBuilder.build_resnet_50((img_channels, img_rows, img_cols), nb_classes)
model, outs = AlexNet_FCN(nb_classes=nb_classes, sz=sz)
#model = AlexNet(weights_fn, nb_classes=nb_classes, sz=sz)
#model = AlexNet(weights_fn, nb_classes=nb_classes)
print("outs", outs)
#opt = Adadelta(lr=0.01, rho=0.95, epsilon=1e-08, decay=0.0)
#opt = Adadelta(lr=1, rho=0.95, epsilon=1e-08, decay=0.0)
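# Custom loss for the fully-convolutional head: predictions arrive as
# [batch_size, outs, outs, nb_classes], so every spatial location is flattened
# into its own row before applying categorical cross-entropy and averaging.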
def sum_loss(y_true, y_pred):
y_true = K.reshape(y_true, [batch_size*outs*outs, nb_classes])
y_pred = K.reshape(y_pred, [batch_size*outs*outs, nb_classes])
s = K.mean(K.categorical_crossentropy(y_true, y_pred))
return s
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(#loss='categorical_crossentropy',
loss=sum_loss,
optimizer=opt,
#metrics=['accuracy', top_3_accuracy])
metrics=['accuracy'])
if data_augmentation:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
r = 0.2
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=r*100,
width_shift_range=r,
height_shift_range=r,
shear_range=r,
zoom_range=r,
channel_shift_range=r,
fill_mode='nearest',
cval=0.,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
preprocessing_function=None)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
#datagen.fit(X_train)
def print_log(y_pred, Z, log_fn, k=5):
fout = open(log_fn, 'w')
acc1 = 0
acc3 = 0
cnt = 0
for i in range(0, len(y_pred), k):
img_fn = Z[i][0]
label = Z[i][1]
loc = Z[i][2]
print(img_fn, label, end=' ', file=fout)
y_sum = np.sum(y_pred[i:i+k], axis=0)
y_sum = np.sum(np.sum(y_sum, axis=0), axis=0)
y = [(j, y_sum[j]) for j in range(nb_classes)]
y_sorted = sorted(y, key=lambda d:d[1], reverse=True)
for j in y_sorted[:5]:
print(j[0], end=' ', file=fout)
print("", file=fout)
if y_sorted[0][0] == label:
acc1 += 1
if y_sorted[0][0] == label or y_sorted[1][0] == label or y_sorted[2][0] == label:
acc3 += 1
y_sum = np.zeros_like(y_pred[0])
cnt += 1
fout.close()
return acc1 * 1.0 / cnt, acc3 * 1.0 / cnt
def predict(model, val=True):
y_preds = []
Z = []
for (x, y, z) in datagenerator.test_generator(dataset_fn, imgs_fn, val=val, sz=img_rows):
y_pred = model.predict(x, batch_size=batch_size)
y_preds.append(y_pred)
Z = Z + z
y_preds = np.vstack(y_preds)
return y_preds, Z
log_dir = '../../../../result/alexnet/{}/'.format(t)
os.mkdir(log_dir)
shutil.copy('./fabric_train.py', log_dir+'fabric_train.py')
shutil.copy('./convnets.py', log_dir+'convnets.py')
G = data_gen('../../../data_preprocessing/material_dataset.txt', batch_size=batch_size, datagen=datagen, sz=sz, outs=outs)
# Fit the model on the batches generated by datagen.flow().
for epochs in range(nb_epoch):
model.fit_generator(#datagen.flow(X_train, Y_train, batch_size=batch_size),
#steps_per_epoch=X_train.shape[0] // batch_size,
G,
steps_per_epoch=500,
epochs=1, verbose=1, max_q_size=100)
#y_pred_valid = model.predict(X_valid, batch_size=batch_size)
#y_pred_test = model.predict(X_test, batch_size=batch_size)
y_pred_valid, Z_valid = predict(model, val=True)
y_pred_test, Z_test = predict(model, val=False)
k = 1
log_fn = log_dir + '.tmp.txt'
val_acc = print_log(y_pred_valid, Z_valid, log_fn, k=k)
test_acc = print_log(y_pred_test, Z_test, log_fn, k=k)
log_fn = log_dir + 'val_{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt'
print_log(y_pred_valid, Z_valid, log_fn, k=k)
log_fn = log_dir + '{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt'
print_log(y_pred_test, Z_test, log_fn, k=k)
print(epochs, val_acc, test_acc)
| avg_line_length: 34.375661 | max_line_length: 126 | alphanum_fraction: 0.647837 |

| hexsha: 77329a7454e67dc5c921506a54e18cba177b6346 | size: 26,577 | ext: py | lang: Python | repo_path: cmyui/web.py | repo_name: cmyui/cmyui_pkg | repo_head_hexsha: 6cef5af9f64763368c102487a19a039df67692e5 | licenses: ["MIT"] | max_stars_count: 9 (2020-11-30T11:46:53.000Z to 2021-09-03T13:07:38.000Z) | max_issues_count: 3 (2021-02-13T01:45:05.000Z to 2021-12-28T07:35:14.000Z) | max_forks_count: 8 (2020-08-20T01:33:31.000Z to 2021-09-21T20:23:21.000Z) |
# -*- coding: utf-8 -*-
# Domain-based asynchronous server implementation, written from
# sockets, mostly with https://github.com/cmyui/gulag in mind.
import asyncio
import gzip
import http
import importlib
import inspect
import os
import re
import select
import signal
import socket
import sys
import urllib.parse
from functools import wraps
from time import perf_counter as clock
from time import perf_counter_ns as clock_ns
from typing import Any
from typing import Callable
from typing import Coroutine
from typing import Iterable
from typing import Optional
from typing import Union
from .logging import Ansi
from .logging import log
from .logging import printc
from .logging import RGB
from .utils import magnitude_fmt_time
__all__ = (
'Address',
'STATUS_LINES',
'ratelimit',
'Connection',
'RouteMap',
'Domain',
'Server'
)
STATUS_LINES = {
c.value: f'HTTP/1.1 {c.value} {c.phrase.upper()}'
for c in http.HTTPStatus
}
def ratelimit(period: int, max_count: int,
default_return: Optional[Any] = None
) -> Callable:
"""Utility decorator for global ratelimiting."""
period = period
max_count = max_count
default_return = default_return
last_reset = 0
num_calls = 0
def decorate(f: Callable) -> Callable:
# TODO: not an extra 18 lines for 6 char change
if inspect.iscoroutinefunction(f):
async def wrapper(*args, **kwargs) -> Optional[Any]:
nonlocal period, max_count, last_reset, num_calls
elapsed = clock() - last_reset
period_remaining = period - elapsed
if period_remaining <= 0:
num_calls = 0
last_reset = clock()
num_calls += 1
if num_calls > max_count:
# call ratelimited.
return default_return
return await f(*args, **kwargs)
else:
def wrapper(*args, **kwargs) -> Optional[Any]:
nonlocal period, max_count, last_reset, num_calls
elapsed = clock() - last_reset
period_remaining = period - elapsed
if period_remaining <= 0:
num_calls = 0
last_reset = clock()
num_calls += 1
if num_calls > max_count:
# call ratelimited.
return default_return
return f(*args, **kwargs)
return wraps(f)(wrapper)
return decorate
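

# Minimal usage sketch (not part of the original module): allow at most 5 calls per
# 60-second window globally; once exceeded, calls return `default_return` until the
# window resets. Handler name and signature below are illustrative only.
#
#   @ratelimit(period=60, max_count=5, default_return=b'too many requests')
#   async def handler(conn: Connection) -> bytes:
#       ...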
# (host, port) for inet; sockpath for unix
Address = Union[tuple[str, int], str]
Hostname_Types = Union[str, Iterable[str], re.Pattern]
REQUEST_LINE_RGX = re.compile(
r'^(?P<cmd>GET|HEAD|POST|PUT|DELETE|PATCH|OPTIONS) '
r'(?P<path>/[^? ]*)(?P<args>\?[^ ]+)? ' # cursed?
r'HTTP/(?P<httpver>1\.0|1\.1|2\.0|3\.0)$'
)
class CaseInsensitiveDict(dict):
"""A dictionary with case insensitive keys."""
def __init__(self, *args, **kwargs) -> None:
self._keystore = {}
d = dict(*args, **kwargs)
for k in d.keys():
self._keystore[k.lower()] = k
return super().__init__(*args, **kwargs)
def __setitem__(self, k, v) -> None:
self._keystore[k.lower()] = k
return super().__setitem__(k, v)
def __getitem__(self, k) -> str:
k_lower = k.lower()
if k_lower in self._keystore:
k = self._keystore[k_lower]
return super().__getitem__(k)
def __contains__(self, k: str) -> bool:
k_lower = k.lower()
if k_lower in self._keystore:
k = self._keystore[k_lower]
return super().__contains__(k)
def get(self, k, failobj=None) -> Optional[str]:
k_lower = k.lower()
if k_lower in self._keystore:
k = self._keystore[k_lower]
return super().get(k, failobj)
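# Minimal sketch of the case-insensitive behaviour above (hypothetical helper,
# not used elsewhere in this module).
def _case_insensitive_dict_example() -> None:
    headers = CaseInsensitiveDict({'Content-Type': 'text/plain'})
    assert headers['content-type'] == 'text/plain'  # lookups ignore key case
    assert 'CONTENT-TYPE' in headers                # membership checks do too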
class Connection:
__slots__ = (
'client',
# Request params
'headers', 'body', 'cmd',
'path', 'raw_path', 'httpver',
'args', 'multipart_args', 'files',
'_buf',
# Response params
'resp_code', 'resp_headers'
)
def __init__(self, client: socket.socket) -> None:
self.client = client
# Request params
self.headers = CaseInsensitiveDict()
self.body: Optional[memoryview] = None
self.cmd = None
self.path = None
self.raw_path = None
self.httpver = 0.0
self.args: dict[str, str] = {}
self.multipart_args: dict[str, str] = {}
self.files: dict[str, bytes] = {}
# Response params
self.resp_code = 200
self.resp_headers = {}
self._buf = bytearray()
""" Request methods """
def _parse_urlencoded(self, data: str) -> None:
for a_pair in data.split('&'):
a_key, a_val = a_pair.split('=', 1)
self.args[a_key] = a_val
def _parse_headers(self, data: str) -> None:
"""Parse the http headers from the internal body."""
# split up headers into http line & header lines
http_line, *header_lines = data.split('\r\n')
# parse http line
self.cmd, self.raw_path, _httpver = http_line.split(' ', 2)
self.httpver = float(_httpver[5:])
# parse urlencoded args from raw_path
if (args_offs := self.raw_path.find('?')) != -1:
self._parse_urlencoded(self.raw_path[args_offs + 1:])
self.path = self.raw_path[:args_offs]
else:
self.path = self.raw_path
# parse header lines
for h_key, h_val in [h.split(': ', 1) for h in header_lines]:
self.headers[h_key] = h_val
def _parse_multipart(self) -> None:
"""Parse multipart/form-data from the internal body."""
boundary = self.headers['Content-Type'].split('boundary=', 1)[1]
for param in self.body.tobytes().split(f'--{boundary}'.encode())[1:-1]:
headers, _body = param.split(b'\r\n\r\n', 1)
body = _body[:-2] # remove \r\n
# find Content-Disposition
for header in headers.decode().split('\r\n')[1:]:
h_key, h_val = header.split(': ', 1)
if h_key == 'Content-Disposition':
# find 'name' or 'filename' attribute
attrs = {}
for attr in h_val.split('; ')[1:]:
a_key, _a_val = attr.split('=', 1)
attrs[a_key] = _a_val[1:-1] # remove ""
if 'filename' in attrs:
a_val = attrs['filename']
self.files[a_val] = body
break
elif 'name' in attrs:
a_val = attrs['name']
self.multipart_args[a_val] = body.decode()
break
break
    async def parse(self) -> None:
"""Receive & parse the http request from the client."""
loop = asyncio.get_running_loop()
while (body_delim_offs := self._buf.find(b'\r\n\r\n')) == -1:
self._buf += await loop.sock_recv(self.client, 1024)
# we have all headers, parse them
self._parse_headers(self._buf[:body_delim_offs].decode())
if 'Content-Length' not in self.headers:
# the request has no body to read.
return
content_length = int(self.headers['Content-Length'])
to_read = ((body_delim_offs + 4) + content_length) - len(self._buf)
if to_read:
# there's more to read; preallocate the space
# required and read into it from the socket.
self._buf += b"\x00" * to_read # is this rly the fastest way?
with memoryview(self._buf)[-to_read:] as read_view:
while to_read:
nbytes = await loop.sock_recv_into(self.client, read_view)
read_view = read_view[nbytes:]
to_read -= nbytes
# all data read from the socket, store a readonly view of the body.
self.body = memoryview(self._buf)[body_delim_offs + 4:].toreadonly()
if self.cmd == 'POST':
if 'Content-Type' in self.headers:
content_type = self.headers['Content-Type']
if content_type.startswith('multipart/form-data'):
self._parse_multipart()
elif content_type == 'application/x-www-form-urlencoded':
self._parse_urlencoded(
urllib.parse.unquote(self.body.tobytes().decode())
) # hmmm
""" Response methods """
async def send(self, status: int, body: bytes = b'') -> None:
"""Attach appropriate headers and send data back to the client."""
# Insert HTTP response line & content at the beginning of headers.
header_lines = [STATUS_LINES[status]]
if body: # Add content-length header if we are sending a body.
header_lines.append(f'Content-Length: {len(body)}')
# Add all user-specified response headers.
header_lines.extend(map(': '.join, self.resp_headers.items()))
# Create an encoded response from the headers.
resp = ('\r\n'.join(header_lines) + '\r\n\r\n').encode()
# Add body to response if we have one to send.
if body:
resp += body
# Send all data to the client.
loop = asyncio.get_running_loop()
try:
await loop.sock_sendall(self.client, resp)
except BrokenPipeError: # TODO: detect this earlier?
log('Connection closed by client.', Ansi.LRED)
class Route:
"""A single endpoint within of domain."""
__slots__ = ('path', 'methods', 'handler', 'cond')
def __init__(self, path: Union[str, Iterable, re.Pattern],
methods: list[str], handler: Callable) -> None:
self.methods = methods
self.handler = handler
if isinstance(path, str):
self.path = path
self.cond = lambda k: k == path
elif isinstance(path, Iterable):
if isinstance(next(iter(path)), re.Pattern):
self.cond = lambda k: any([p.match(k) for p in path])
self.path = str(type(path)([f'~{p.pattern}' for p in path]))
else:
self.cond = lambda k: k in path
self.path = str(path)
elif isinstance(path, re.Pattern):
self.cond = lambda k: path.match(k)
self.path = f'~{path.pattern}' # ~ for rgx
def __repr__(self) -> str:
return f'{"/".join(self.methods)} {self.path}'
def matches(self, path: str, method: str) -> bool:
"""Check if a given path & method match internals."""
return method in self.methods and self.cond(path)
class RouteMap:
"""A collection of endpoints of a domain."""
__slots__ = ('routes',)
def __init__(self) -> None:
self.routes = set() # {Route(), ...}
def route(self, path: Hostname_Types,
methods: list[str] = ['GET']) -> Callable:
"""Add a given route to the routemap."""
if not isinstance(path, (str, Iterable, re.Pattern)):
raise TypeError('Route should be str | Iterable[str] | re.Pattern')
def wrapper(handler: Coroutine) -> Coroutine:
self.routes.add(Route(path, methods, handler))
return handler
return wrapper
def find_route(self, path: str, method: str) -> Optional[Route]:
"""Find the first route matching a given path & method."""
for route in self.routes:
if route.matches(path, method):
return route
class Domain(RouteMap):
"""The main routemap, with a hostname.
Allows for merging of additional routemaps."""
__slots__ = ('hostname', 'cond',)
def __init__(self, hostname: Hostname_Types) -> None:
super().__init__()
self.hostname = hostname # for __repr__
if isinstance(hostname, str):
self.cond = lambda hn: hn == hostname
elif isinstance(hostname, Iterable):
self.cond = lambda hn: hn in hostname
self.hostname = str(hostname)
elif isinstance(hostname, re.Pattern):
self.cond = lambda hn: hostname.match(hn) is not None
self.hostname = f'~{hostname.pattern}' # ~ for rgx
else:
raise TypeError('Key should be str | Iterable[str] | re.Pattern')
def __repr__(self) -> str:
return self.hostname
def matches(self, hostname: str) -> bool:
"""Check if a given hostname matches our condition."""
return self.cond(hostname)
def add_map(self, rmap: RouteMap) -> None:
"""Add an existing routemap to our domain."""
self.routes |= rmap.routes
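# Hedged sketch of wiring a route into a domain with the classes above
# (the hostname, path and handler are illustrative placeholders).
_example_domain = Domain('example.com')

@_example_domain.route('/ping', methods=['GET'])
async def _example_ping(conn: Connection) -> bytes:
    # Returning bytes (or a (status, body) tuple) is what Server.dispatch
    # expects from a route handler.
    return b'pong'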
class Server:
"""An asynchronous multi-domain server."""
__slots__ = (
'name', 'max_conns', 'gzip', 'debug',
'sock_family', 'before_serving', 'after_serving',
'domains', 'exceptions',
'tasks', '_task_coros'
)
def __init__(self, **kwargs) -> None:
self.name = kwargs.get('name', 'Server')
self.max_conns = kwargs.get('max_conns', 5)
self.gzip = kwargs.get('gzip', 0) # 0-9 valid levels
self.debug = kwargs.get('debug', False)
self.sock_family: Optional[socket.AddressFamily] = None
self.before_serving: Optional[Callable] = None
self.after_serving: Optional[Callable] = None
self.domains = kwargs.get('domains', set())
self.exceptions = 0 # num of exceptions handled
self.tasks = kwargs.get('tasks', set())
self._task_coros = kwargs.get('pending_tasks', set()) # coros not yet run
def set_sock_mode(self, addr: Address) -> None:
"""Determine the type of socket from the address given."""
is_inet = (
isinstance(addr, tuple) and
len(addr) == 2 and
isinstance(addr[0], str) and
isinstance(addr[1], int)
)
if is_inet:
self.sock_family = socket.AF_INET
elif isinstance(addr, str):
self.sock_family = socket.AF_UNIX
else:
raise ValueError('Invalid address.')
@property
def using_unix_socket(self) -> bool:
return self.sock_family is socket.AF_UNIX
# Domain management
def add_domain(self, domain: Domain) -> None:
"""Add a domain to the server."""
self.domains.add(domain)
def add_domains(self, domains: set[Domain]) -> None:
"""Add multiple domains to the server."""
self.domains |= domains
def remove_domain(self, domain: Domain) -> None:
"""Remove a domain from the server."""
self.domains.remove(domain)
def remove_domains(self, domains: set[Domain]) -> None:
"""Remove multiple domains from the server."""
self.domains -= domains
def find_domain(self, hostname: str):
"""Find the first domain matching a given hostname."""
for domain in self.domains:
if domain.matches(hostname):
return domain
# Task management
def add_pending_task(self, coro: Coroutine) -> None:
"""Add a coroutine to be launched as a task at
        startup & shut down cleanly on shutdown."""
self._task_coros.add(coro)
def remove_pending_task(self, coro: Coroutine) -> None:
"""Remove a pending coroutine awaiting server launch."""
self._task_coros.remove(coro)
def add_task(self, task: asyncio.Task) -> None:
"""Add an existing task to be cleaned
up on shutdown."""
self.tasks.add(task)
def remove_task(self, task: asyncio.Task) -> None:
"""Remove an existing task from being
cleaned up on shutdown."""
self.tasks.remove(task)
# True Internals
async def dispatch(self, conn: Connection) -> int:
"""Dispatch the connection to any matching routes."""
host = conn.headers['Host']
path = conn.path
resp = None
if domain := self.find_domain(host):
if route := domain.find_route(path, conn.cmd):
resp = await route.handler(conn) or b''
if resp is not None:
if isinstance(resp, tuple):
# code explicitly given
code, resp = resp
else:
# use 200 as default
code, resp = (200, resp)
# gzip responses larger than a single ethernet frame
# if it's enabled server-side and client supports it
if (
self.gzip > 0 and
'Accept-Encoding' in conn.headers and
'gzip' in conn.headers['Accept-Encoding'] and
len(resp) > 1500 # ethernet frame size (minus headers)
):
# ignore files that're already compressed heavily
if not (
'Content-Type' in conn.resp_headers and
conn.resp_headers['Content-Type'] in (
# TODO: surely there's more i should be ignoring
'image/png', 'image/jpeg'
)
):
resp = gzip.compress(resp, self.gzip)
conn.resp_headers['Content-Encoding'] = 'gzip'
else:
code, resp = (404, b'Not Found.')
await conn.send(code, resp)
return code
async def handle(self, client: socket.socket) -> None:
"""Handle a single client socket from the server."""
if self.debug:
t1 = clock_ns()
# read & parse connection
conn = Connection(client)
await conn.parse()
if self.debug:
t2 = clock_ns()
if 'Host' not in conn.headers:
log('Connection missing Host header.', Ansi.LRED)
client.shutdown(socket.SHUT_RDWR)
client.close()
return
# dispatch the connection
# to the appropriate handler
code = await self.dispatch(conn)
if self.debug:
# event complete, stop timing, log result and cleanup
t3 = clock_ns()
t2_t1 = magnitude_fmt_time(t2 - t1)
t3_t2 = magnitude_fmt_time(t3 - t2)
col = (Ansi.LGREEN if 200 <= code < 300 else
Ansi.LYELLOW if 300 <= code < 400 else
Ansi.LRED)
uri = f'{conn.headers["Host"]}{conn.path}'
log(f'[{conn.cmd}] {code} {uri}', col, end=' | ')
printc(f'Parsing: {t2_t1}', Ansi.LBLUE, end=' | ')
printc(f'Handling: {t3_t2}', Ansi.LBLUE)
try:
client.shutdown(socket.SHUT_RDWR)
client.close()
except socket.error:
pass
def _default_cb(self, t: asyncio.Task) -> None:
"""A simple callback for tasks to log & call exc handler."""
if not t.cancelled():
exc = t.exception()
if exc and not isinstance(exc, (SystemExit, KeyboardInterrupt)):
self.exceptions += 1
loop = asyncio.get_running_loop()
loop.default_exception_handler({
'exception': exc
})
def run(
self, addr: Address,
loop: Optional[asyncio.AbstractEventLoop] = None,
handle_restart: bool = True, # using USR1 signal
) -> None:
"""Run the server indefinitely."""
if not loop:
# no event loop given, check if one's running
try:
loop = asyncio.get_running_loop()
except RuntimeError:
# no event loop running, we need to make our own
if spec := importlib.util.find_spec('uvloop'):
# use uvloop if it's already installed
# TODO: could make this configurable
                    # in case people want to disable it
# for their own use-cases?
uvloop = importlib.util.module_from_spec(spec)
spec.loader.exec_module(uvloop)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
loop = asyncio.new_event_loop()
self.set_sock_mode(addr) # figure out family (INET4/UNIX)
async def runner() -> None:
log(f'=== Starting up {self.name} ===', Ansi.LMAGENTA)
loop = asyncio.get_running_loop()
# Call our before_serving coroutine,
            # if there's one specified.
if self.before_serving:
await self.before_serving()
# Start pending coroutine tasks.
if self.debug and self._task_coros:
log(f'-> Starting {len(self._task_coros)} tasks.', Ansi.LMAGENTA)
for coro in self._task_coros:
task = loop.create_task(coro)
task.add_done_callback(self._default_cb) # XXX: never removed?
self.tasks.add(task)
self._task_coros.clear()
# Setup socket & begin listening
if self.using_unix_socket:
if os.path.exists(addr):
os.remove(addr)
# TODO: this whole section is implemented pretty horribly,
# there's almost certainly a way to use loop.add_reader rather
# than using select, and the rest is a result of using select.
# read/write signal listening socks
sig_rsock, sig_wsock = os.pipe()
os.set_blocking(sig_wsock, False)
signal.set_wakeup_fd(sig_wsock)
# connection listening sock
lsock = socket.socket(self.sock_family)
lsock.setblocking(False)
lsock.bind(addr)
if self.using_unix_socket:
os.chmod(addr, 0o777)
lsock.listen(self.max_conns)
log(f'-> Listening @ {addr}', RGB(0x00ff7f))
# TODO: terminal input support (tty, termios fuckery)
# though, tbh this should be moved into gulag as it's
# mostly a gulag-specific thing, and it'll be easier
# to manage all the printing stuff that way.
should_close = False
should_restart = False
while True:
await asyncio.sleep(0.01) # skip loop iteration
rlist, _, _ = select.select([lsock, sig_rsock], [], [], 0)
for reader in rlist:
if reader is lsock:
# new connection received for server
client, _ = await loop.sock_accept(lsock)
task = loop.create_task(self.handle(client))
task.add_done_callback(self._default_cb)
elif reader is sig_rsock:
# received a blocked signal, shutdown
sig_received = signal.Signals(os.read(sig_rsock, 1)[0])
if sig_received is signal.SIGINT:
print('\x1b[2K', end='\r') # clear ^C from console
elif sig_received is signal.SIGUSR1:
should_restart = True
log(f'Received {signal.strsignal(sig_received)}', Ansi.LRED)
should_close = True
else:
raise RuntimeError(f'Unknown reader {reader}')
if should_close:
break
# server closed, clean things up.
for sock_fd in {lsock.fileno(), sig_rsock, sig_wsock}:
os.close(sock_fd)
signal.set_wakeup_fd(-1)
if self.using_unix_socket:
os.remove(addr)
log('-> Cancelling tasks', Ansi.LMAGENTA)
for task in self.tasks:
task.cancel()
await asyncio.gather(*self.tasks, return_exceptions=True)
if in_progress := [t for t in asyncio.all_tasks()
if t is not asyncio.current_task()]:
                # allow up to 5 seconds for in-progress handlers
                # to finish their execution, just in case they're
                # in a half-complete state. we wouldn't want to
                # get any sql tables into a weird state, or alike.
                log(f'-> Awaiting {len(in_progress)} '
                    'in-progress handler(s).', Ansi.LMAGENTA)
                # NOTE: asyncio.wait() does not raise TimeoutError; it returns
                # the tasks that are still pending once the timeout elapses.
                _done, pending = await asyncio.wait(in_progress, timeout=5.0)
                if pending:
                    log('-> Timed out awaiting handlers, cancelling them.', Ansi.LMAGENTA)
                    for task in pending:
                        task.cancel()
                    await asyncio.gather(*pending, return_exceptions=True)
if self.after_serving:
await self.after_serving()
return should_restart
# ignore any signal events in the code, the selector loop will
# pick up on signals from the fd being awoken from set_wakeup_fd.
def _sighandler_noop(signum, frame):
pass
signals = {signal.SIGINT, signal.SIGTERM, signal.SIGHUP}
if handle_restart:
signals.add(signal.SIGUSR1)
for sig in signals:
signal.signal(sig, _sighandler_noop)
def _runner_cb(fut: asyncio.Future) -> None:
if not fut.cancelled():
exc = fut.exception()
if exc and not isinstance(exc, (SystemExit, KeyboardInterrupt)):
loop.default_exception_handler({
'exception': exc
})
loop.stop()
""" run the event loop """
future = asyncio.ensure_future(runner(), loop=loop)
future.add_done_callback(_runner_cb)
try:
loop.run_forever()
finally:
future.remove_done_callback(_runner_cb)
loop.close()
log(f'=== Shut down {self.name} ===', Ansi.LMAGENTA)
should_restart = future.result()
if should_restart:
log('=== Server restarting ===', Ansi.LMAGENTA)
os.execv(sys.executable, [sys.executable] + sys.argv)
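def _example_main() -> None:
    # Hedged usage sketch for the classes above; the name, hostname, address
    # and handler are illustrative and should be replaced in a real application.
    app = Server(name='example', gzip=4, debug=True)
    domain = Domain('example.com')

    @domain.route('/')
    async def index(conn: Connection) -> bytes:
        return b'Hello, world!'

    app.add_domain(domain)
    app.run(('127.0.0.1', 8080))  # or a unix socket path such as '/tmp/example.sock'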
| 34.877953
| 90
| 0.55311
|
04cad023afe8c9d57bcf8ba134bbe5389ac180ac
| 4,406
|
py
|
Python
|
cms/utils/__init__.py
|
DrMeers/django-cms-2.0
|
d563d912c99f0c138a66d99829d8d0133226894e
|
[
"BSD-3-Clause"
] | 4
|
2016-05-07T11:50:25.000Z
|
2017-12-04T10:30:20.000Z
|
cms/utils/__init__.py
|
DrMeers/django-cms-2.0
|
d563d912c99f0c138a66d99829d8d0133226894e
|
[
"BSD-3-Clause"
] | 1
|
2015-06-08T08:27:11.000Z
|
2015-06-08T10:40:27.000Z
|
cms/utils/__init__.py
|
DrMeers/django-cms-2.0
|
d563d912c99f0c138a66d99829d8d0133226894e
|
[
"BSD-3-Clause"
] | null | null | null |
# TODO: this is just stuff from utils.py - should be split up / moved
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from cms.utils.i18n import get_default_language
# !IMPORTANT: Page cant be imported here, because we will get cyclic import!!
def auto_render(func):
"""Decorator that put automaticaly the template path in the context dictionary
and call the render_to_response shortcut"""
def _dec(request, *args, **kwargs):
t = None
if kwargs.get('only_context', False):
# return only context dictionary
del(kwargs['only_context'])
response = func(request, *args, **kwargs)
if isinstance(response, HttpResponseRedirect):
raise Exception("cannot return context dictionary because a HttpResponseRedirect has been found")
(template_name, context) = response
return context
if "template_name" in kwargs:
t = kwargs['template_name']
del kwargs['template_name']
response = func(request, *args, **kwargs)
if isinstance(response, HttpResponse):
return response
(template_name, context) = response
if not t:
t = template_name
context['template_name'] = t
return render_to_response(t, context, context_instance=RequestContext(request))
return _dec
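# Hedged usage sketch for ``auto_render`` (the view name and template path
# below are illustrative only, not part of django-cms).
@auto_render
def _example_view(request, *args, **kwargs):
    # Returning (template_name, context) lets the decorator render it; callers
    # may pass only_context=True to get the context back, or template_name=...
    # to override the template.
    return 'myapp/example.html', {'title': 'Example'}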
def get_template_from_request(request, obj=None, no_current_page=False):
"""
Gets a valid template from different sources or falls back to the default
template.
"""
template = None
if len(settings.CMS_TEMPLATES) == 1:
return settings.CMS_TEMPLATES[0][0]
if "template" in request.REQUEST:
template = request.REQUEST['template']
if not template and obj is not None:
template = obj.get_template()
if not template and not no_current_page and hasattr(request, "current_page"):
current_page = request.current_page
if hasattr(current_page, "get_template"):
template = current_page.get_template()
if template is not None and template in dict(settings.CMS_TEMPLATES).keys():
if template == settings.CMS_TEMPLATE_INHERITANCE_MAGIC and obj:
# Happens on admin's request when changing the template for a page
# to "inherit".
return obj.get_template()
return template
return settings.CMS_TEMPLATES[0][0]
def get_language_from_request(request, current_page=None):
    """
    Return the most obvious language according to the request.
    """
    from cms.models import Page
if settings.CMS_DBGETTEXT:
return get_default_language()
language = request.REQUEST.get('language', None)
if language:
if not language in dict(settings.CMS_LANGUAGES).keys():
language = None
if language is None:
language = getattr(request, 'LANGUAGE_CODE', None)
if language:
if not language in dict(settings.CMS_LANGUAGES).keys():
language = None
# TODO: This smells like a refactoring oversight - was current_page ever a page object? It appears to be a string now
if language is None and isinstance(current_page, Page):
# in last resort, get the first language available in the page
languages = current_page.get_languages()
if len(languages) > 0:
language = languages[0]
if language is None:
# language must be defined in CMS_LANGUAGES, so check first if there
# is any language with LANGUAGE_CODE, otherwise try to split it and find
# best match
language = get_default_language()
return language
def get_page_from_request(request):
"""
    Tries to get a page from the request if it hasn't already been handled by the CMS urls.py.
"""
if hasattr(request, '_current_page_cache'):
return request._current_page_cache
else:
path = request.path
from cms.views import details
kw = {}
# TODO: very ugly - change required!
if path.startswith('/admin/'):
kw['page_id']=path.split("/")[0]
else:
kw['slug']=path[1:-1]
resp = details(request, no404=True, only_context=True, **kw)
return resp['current_page']
| 37.982759
| 121
| 0.660463
|
d61e50ff17af44ff7a412609a82fd8be0ec289c7
| 19
|
py
|
Python
|
version.py
|
alexfikl/PyWENO
|
224fe7459f00578728b151531367c67c62f57c2b
|
[
"BSD-3-Clause"
] | 26
|
2015-07-09T13:32:39.000Z
|
2021-10-13T06:55:07.000Z
|
version.py
|
alexfikl/PyWENO
|
224fe7459f00578728b151531367c67c62f57c2b
|
[
"BSD-3-Clause"
] | 4
|
2015-03-16T16:11:31.000Z
|
2021-03-08T17:33:41.000Z
|
version.py
|
alexfikl/PyWENO
|
224fe7459f00578728b151531367c67c62f57c2b
|
[
"BSD-3-Clause"
] | 12
|
2015-08-14T12:44:37.000Z
|
2022-01-09T12:03:13.000Z
|
version = '0.11.2'
| 9.5
| 18
| 0.578947
|
abf5473a56ccd111e09deec77cc5fa4ffcc19c81
| 2,548
|
py
|
Python
|
iota/multisig/crypto/addresses.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 347
|
2016-12-23T14:28:06.000Z
|
2019-09-30T13:46:30.000Z
|
iota/multisig/crypto/addresses.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 194
|
2016-12-22T21:22:47.000Z
|
2019-10-01T09:01:16.000Z
|
iota/multisig/crypto/addresses.py
|
EasonC13/iota.py
|
f596c1ac0d9bcbceda1cf6109cd921943a6599b3
|
[
"MIT"
] | 147
|
2017-01-08T13:14:47.000Z
|
2019-10-01T22:27:31.000Z
|
from typing import List, Optional
from iota.crypto import HASH_LENGTH
from iota.crypto.kerl import Kerl
from iota.crypto.types import Digest
from iota.multisig.types import MultisigAddress
__all__ = [
'MultisigAddressBuilder',
]
class MultisigAddressBuilder(object):
"""
Creates multisig addresses.
Note that this class generates a single address from multiple
inputs (digests), unlike
:py:class:`iota.crypto.addresses.AddressGenerator` which generates
multiple addresses from a single input (seed).
"""
def __init__(self) -> None:
super(MultisigAddressBuilder, self).__init__()
self._digests: List[Digest] = []
"""
Keeps track of digests that were added, so that we can attach
them to the final :py:class:`MultisigAddress` object.
"""
self._address: Optional[MultisigAddress] = None
"""
Caches the generated address.
Generating the address modifies the internal state of the curl
sponge, so each :py:class:`MultisigAddressBuilder` instance can
only generate a single address.
"""
self._sponge = Kerl()
def add_digest(self, digest: Digest) -> None:
"""
Absorbs a digest into the sponge.
.. important::
Keep track of the order that digests are added!
To spend inputs from a multisig address, you must provide
the private keys in the same order!
References:
- https://github.com/iotaledger/wiki/blob/master/multisigs.md#spending-inputs
"""
if self._address:
raise ValueError('Cannot add digests once an address is extracted.')
self._sponge.absorb(digest.as_trits())
self._digests.append(digest)
def get_address(self) -> MultisigAddress:
"""
Returns the new multisig address.
        Note that digests cannot be added once an address has been
        extracted; each ``MultisigAddressBuilder`` instance generates
        exactly one address.
"""
if not self._digests:
raise ValueError(
'Must call ``add_digest`` at least once '
'before calling ``get_address``.',
)
if not self._address:
address_trits = [0] * HASH_LENGTH
self._sponge.squeeze(address_trits)
self._address = MultisigAddress.from_trits(
address_trits,
digests=self._digests[:],
)
return self._address
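def _build_example_address(digests: List[Digest]) -> MultisigAddress:
    """
    Hedged usage sketch for the builder above; ``digests`` is a placeholder
    for real Digest objects produced elsewhere (e.g. during key generation).
    """
    builder = MultisigAddressBuilder()
    for digest in digests:
        # Order matters: private keys must later be supplied in the same
        # order to spend from the resulting address.
        builder.add_digest(digest)
    return builder.get_address()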
| 29.287356
| 85
| 0.624804
|
c85f9aafe0225abdd8337f26c5344509dd7f18f9
| 3,078
|
py
|
Python
|
app/core/management/utils/notification.py
|
OpenLXP/openlxp-xms
|
ad1a33c6e484a327382956b8541b973c0d020ed8
|
[
"Apache-2.0"
] | null | null | null |
app/core/management/utils/notification.py
|
OpenLXP/openlxp-xms
|
ad1a33c6e484a327382956b8541b973c0d020ed8
|
[
"Apache-2.0"
] | 6
|
2020-12-15T18:33:07.000Z
|
2022-03-18T13:43:22.000Z
|
app/core/management/utils/notification.py
|
OpenLXP/openlxp-xms
|
ad1a33c6e484a327382956b8541b973c0d020ed8
|
[
"Apache-2.0"
] | null | null | null |
import logging
from email.mime.application import MIMEApplication
import boto3
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.mail import EmailMessage
logger = logging.getLogger('dict_config_logger')
# Create SES client
def email_verification(email):
"""Function to send email verification"""
ses = boto3.client('ses')
check = check_if_email_verified(email)
if check:
logger.info("Email is sent for Verification")
response = ses.verify_email_identity(
EmailAddress=email
)
logger.info(response)
def check_if_email_verified(email):
"""Function to check if email id from user is verified """
list_emails = list_email_verified()
if email in list_emails:
logger.info("Email is already Verified")
return False
return True
def list_email_verified():
"""Function to return list of verified emails """
ses = boto3.client('ses')
response = ses.list_identities(
IdentityType='EmailAddress',
MaxItems=10
)
logger.info(response['Identities'])
return response['Identities']
def send_notifications(email, sender):
"""This function sends email of a log file """
logger.info('Sending email to recipients')
# Replace sender@example.com with your "From" address.
# This address must be verified with Amazon SES.
SENDER = sender
# Replace recipient@example.com with a "To" address. If your account
# is still in the sandbox, this address must be verified.
RECIPIENT = email
# The subject line for the email.
SUBJECT = "New Message From OpenLXP Portal"
logger.info(sender)
logger.info(email)
# The full path to the file that will be attached to the email.
ATTACHMENT = getattr(settings, "LOG_PATH", None)
# # The HTML body of the email.
BODY_HTML = """\
<html>
<head></head>
<body>
<h1>Hello!</h1>
<p>Please check the attached file for OpenLXP Notifications</p>
</body>
</html>
"""
# Define the attachment part and encode it using MIMEApplication.
att = MIMEApplication(open(ATTACHMENT, 'rb').read())
# Add a header to tell the email client to treat this part as an
# attachment, and to give the attachment a name.
att.add_header('Content-Disposition', 'attachment',
filename="OpenLXP notifications ")
for each_recipient in RECIPIENT:
try:
# Provide the contents of the email.
mail = EmailMessage(SUBJECT, BODY_HTML, SENDER,
[each_recipient])
mail.content_subtype = "html"
# Add the attachment to the parent container.
mail.attach(att)
mail.send()
logging.FileHandler(getattr(settings, "LOG_PATH", None),
mode='w')
# Display an error if something goes wrong.
except ClientError as e:
logger.error(e.response['Error']['Message'])
continue
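def _example_notification() -> None:
    # Hedged usage sketch (placeholder addresses; both must be verified in
    # Amazon SES, and settings.LOG_PATH must point at an existing log file).
    email_verification('recipient@example.com')
    send_notifications(['recipient@example.com'], 'sender@example.com')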
| 29.883495
| 72
| 0.64295
|
c1dc66e04e221cf1d5f4f98ab1187340395fe1cc
| 7,531
|
py
|
Python
|
trading_calendars/tests/test_asex_calendar.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 1
|
2020-07-25T06:18:30.000Z
|
2020-07-25T06:18:30.000Z
|
trading_calendars/tests/test_asex_calendar.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 13
|
2021-04-13T06:49:23.000Z
|
2022-03-31T00:08:10.000Z
|
trading_calendars/tests/test_asex_calendar.py
|
quantrocket-llc/trading-calendars
|
b72630cbcb288601c62e61ebe002a9043f9a3112
|
[
"Apache-2.0"
] | 3
|
2020-03-05T23:38:14.000Z
|
2021-12-12T00:31:36.000Z
|
from unittest import TestCase
import pandas as pd
from pytz import UTC
from trading_calendars.exchange_calendar_asex import ASEXExchangeCalendar
from .test_trading_calendar import ExchangeCalendarTestBase
class ASEXCalendarTestCase(ExchangeCalendarTestBase, TestCase):
answer_key_filename = "asex"
calendar_class = ASEXExchangeCalendar
    # The ASEX is open from 10:00 AM to 5:20 PM on its longest trading day
MAX_SESSION_HOURS = 7.33
HAVE_EARLY_CLOSES = False
DAYLIGHT_SAVINGS_DATES = ["2018-03-26", "2018-10-29"]
def test_regular_holidays(self):
all_sessions = self.calendar.all_sessions
expected_holidays = [
pd.Timestamp("2019-01-01", tz=UTC), # New Year's Day
pd.Timestamp("2017-01-06", tz=UTC), # Epiphany
pd.Timestamp("2019-03-11", tz=UTC), # Orthodox Ash Monday
pd.Timestamp("2019-03-25", tz=UTC), # National Holiday
pd.Timestamp("2019-04-19", tz=UTC), # Good Friday
pd.Timestamp("2019-04-22", tz=UTC), # Easter Monday
pd.Timestamp("2019-04-26", tz=UTC), # Orthodox Good Friday
pd.Timestamp("2019-04-29", tz=UTC), # Orthodox Easter Monday
pd.Timestamp("2019-05-01", tz=UTC), # Labour Day
pd.Timestamp("2019-06-17", tz=UTC), # Orthodox Whit Monday
pd.Timestamp("2019-08-15", tz=UTC), # Assumption Day
pd.Timestamp("2019-10-28", tz=UTC), # National Holiday
pd.Timestamp("2019-12-24", tz=UTC), # Christmas Eve
pd.Timestamp("2019-12-25", tz=UTC), # Christmas Day
pd.Timestamp("2019-12-26", tz=UTC), # Second Day of Christmas
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, all_sessions)
def test_holidays_fall_on_weekend(self):
all_sessions = self.calendar.all_sessions
# All holidays that fall on a weekend should not be made
# up, so ensure surrounding days are open market
expected_sessions = [
# New Years Day on Sunday, Jan 1st
pd.Timestamp("2011-12-30", tz=UTC),
pd.Timestamp("2012-01-02", tz=UTC),
# Epiphany on Sunday, Jan 6th
pd.Timestamp("2019-01-04", tz=UTC),
pd.Timestamp("2019-01-07", tz=UTC),
# National Holiday on Sunday, Mar 25th
pd.Timestamp("2018-03-23", tz=UTC),
pd.Timestamp("2018-03-26", tz=UTC),
# Labour Day on Sunday, May 1st
pd.Timestamp("2011-04-29", tz=UTC),
pd.Timestamp("2011-05-02", tz=UTC),
# Assumption Day on Saturday, Aug 15th
pd.Timestamp("2015-08-14", tz=UTC),
pd.Timestamp("2015-08-17", tz=UTC),
# National Holiday on Saturday, Oct 28
pd.Timestamp("2015-10-27", tz=UTC),
pd.Timestamp("2015-10-30", tz=UTC),
# Christmas Eve on a Sunday
# Note: 25th, 26th both holidays
pd.Timestamp("2017-12-22", tz=UTC),
pd.Timestamp("2017-12-27", tz=UTC),
# Christmas on a Sunday
# Note: 26th a holiday
pd.Timestamp("2016-12-23", tz=UTC),
pd.Timestamp("2016-12-27", tz=UTC),
# 2nd Day of Christmas on Saturday, Dec 26
# Note: 25th, 24th both holidays
pd.Timestamp("2015-12-23", tz=UTC),
pd.Timestamp("2015-12-28", tz=UTC),
]
for session_label in expected_sessions:
self.assertIn(session_label, all_sessions)
def test_orthodox_easter(self):
"""
The Athens Stock Exchange observes Orthodox (or Eastern) Easter,
as well as Western Easter. All holidays that are tethered to
Easter (i.e. Whit Monday, Good Friday, etc.), are relative to
Orthodox Easter. This test checks that Orthodox Easter and all
related holidays are correct.
"""
all_sessions = self.calendar.all_sessions
expected_holidays = [
# Some Orthodox Easter dates
pd.Timestamp("2005-05-01", tz=UTC),
pd.Timestamp("2006-04-23", tz=UTC),
pd.Timestamp("2009-04-19", tz=UTC),
pd.Timestamp("2013-05-05", tz=UTC),
pd.Timestamp("2015-04-12", tz=UTC),
pd.Timestamp("2018-04-08", tz=UTC),
# Some Orthodox Good Friday dates
pd.Timestamp("2002-05-03", tz=UTC),
pd.Timestamp("2005-04-29", tz=UTC),
pd.Timestamp("2008-04-25", tz=UTC),
pd.Timestamp("2009-04-17", tz=UTC),
pd.Timestamp("2016-04-29", tz=UTC),
pd.Timestamp("2017-04-14", tz=UTC),
# Some Orthodox Whit Monday dates
pd.Timestamp("2002-06-24", tz=UTC),
pd.Timestamp("2005-06-20", tz=UTC),
pd.Timestamp("2006-06-12", tz=UTC),
pd.Timestamp("2008-06-16", tz=UTC),
pd.Timestamp("2013-06-24", tz=UTC),
pd.Timestamp("2016-06-20", tz=UTC),
# Some Orthodox Ash Monday dates
pd.Timestamp("2002-03-18", tz=UTC),
pd.Timestamp("2005-03-14", tz=UTC),
pd.Timestamp("2007-02-19", tz=UTC),
pd.Timestamp("2011-03-07", tz=UTC),
pd.Timestamp("2014-03-03", tz=UTC),
pd.Timestamp("2018-02-19", tz=UTC),
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, all_sessions)
def test_debt_crisis_closure(self):
"""
In 2015, the debt crisis in Greece closed the markets for about
a month. This test makes sure there were no trading days during
that time.
"""
all_sessions = self.calendar.all_sessions
closed_dates = pd.date_range("2015-06-29", "2015-07-31")
for date in closed_dates:
self.assertNotIn(date, all_sessions)
def test_adhoc_holidays(self):
all_sessions = self.calendar.all_sessions
expected_holidays = [
pd.Timestamp("2002-05-07", tz=UTC), # Market Holiday
pd.Timestamp("2004-08-13", tz=UTC), # Assumption Day makeup
pd.Timestamp("2008-03-04", tz=UTC), # Worker strikes
pd.Timestamp("2008-03-05", tz=UTC), # Worker strikes
pd.Timestamp("2013-05-07", tz=UTC), # May Day strikes
pd.Timestamp("2014-12-31", tz=UTC), # New Year's Eve
pd.Timestamp("2016-05-03", tz=UTC), # Labour Day makeup
]
for holiday_label in expected_holidays:
self.assertNotIn(holiday_label, all_sessions)
def test_close_time_change(self):
"""
On Sept 29, 2008, the ASEX decided to push its close time back
from 5:00PM to 5:20PM to close the time gap with Wall Street.
"""
self.assertEqual(
self.calendar.session_close(pd.Timestamp("2006-09-29", tz=UTC)),
pd.Timestamp("2006-09-29 17:00", tz="Europe/Athens"),
)
self.assertEqual(
self.calendar.session_close(pd.Timestamp("2008-09-26", tz=UTC)),
pd.Timestamp("2008-09-26 17:00", tz="Europe/Athens"),
)
self.assertEqual(
self.calendar.session_close(pd.Timestamp("2008-09-29", tz=UTC)),
pd.Timestamp("2008-09-29 17:20", tz="Europe/Athens"),
)
self.assertEqual(
self.calendar.session_close(pd.Timestamp("2008-09-30", tz=UTC)),
pd.Timestamp("2008-09-30 17:20", tz="Europe/Athens"),
)
| 41.607735
| 76
| 0.583721
|
ddac0540196f118c78055da87e0325db4a9bffcc
| 133
|
py
|
Python
|
Python/bloombox/__init__.py
|
sgammon/bloombox-client
|
61720ab677a577992f84ed73e0d7faebda38d164
|
[
"Apache-2.0"
] | null | null | null |
Python/bloombox/__init__.py
|
sgammon/bloombox-client
|
61720ab677a577992f84ed73e0d7faebda38d164
|
[
"Apache-2.0"
] | null | null | null |
Python/bloombox/__init__.py
|
sgammon/bloombox-client
|
61720ab677a577992f84ed73e0d7faebda38d164
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
__doc__ = """
Bloombox API Client for Python
"""
from .tool import Bloombox
__version__ = (0, 0, 1)
| 9.5
| 32
| 0.593985
|
08fa0ae611479cbc0414a8eece26a08f791be531
| 928
|
py
|
Python
|
src/exporter/management/commands/test_export.py
|
xmdy/h9eNi8F5Ut
|
4128d7cbc6105ec0fe69157bd88ef8e30415d6ca
|
[
"Unlicense"
] | null | null | null |
src/exporter/management/commands/test_export.py
|
xmdy/h9eNi8F5Ut
|
4128d7cbc6105ec0fe69157bd88ef8e30415d6ca
|
[
"Unlicense"
] | null | null | null |
src/exporter/management/commands/test_export.py
|
xmdy/h9eNi8F5Ut
|
4128d7cbc6105ec0fe69157bd88ef8e30415d6ca
|
[
"Unlicense"
] | null | null | null |
from django.core.management import BaseCommand
import logging
# These two lines enable debugging at httplib level (requests->urllib3->http.client)
# You will see the REQUEST, including HEADERS and DATA, and RESPONSE with HEADERS but without DATA.
# The only thing missing will be the response.body which is not logged.
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
class Command(BaseCommand):
def handle(self, *args, **options):
from exporter.tasks import GenerateModelExportTask
gmet = GenerateModelExportTask()
gmet.run(1)
| 37.12
| 99
| 0.774784
|
caac875e456eff59a6499c7f96d12616452af438
| 6,367
|
py
|
Python
|
skfem/element/element_global.py
|
ahhuhtal/scikit-fem
|
84ad97bfd2a92d28694f54f6897d97966bda31df
|
[
"BSD-3-Clause"
] | 1
|
2020-05-31T14:07:14.000Z
|
2020-05-31T14:07:14.000Z
|
skfem/element/element_global.py
|
ahhuhtal/scikit-fem
|
84ad97bfd2a92d28694f54f6897d97966bda31df
|
[
"BSD-3-Clause"
] | 1
|
2020-06-01T05:39:31.000Z
|
2020-06-01T05:39:31.000Z
|
skfem/element/element_global.py
|
ahhuhtal/scikit-fem
|
84ad97bfd2a92d28694f54f6897d97966bda31df
|
[
"BSD-3-Clause"
] | null | null | null |
import itertools
import numpy as np
from .element import Element
from .discrete_field import DiscreteField
class ElementGlobal(Element):
"""Elements defined implicitly through global degrees-of-freedom."""
V = None # For caching inverse Vandermonde matrix
derivatives = 2 # By default, include first and second derivatives
tensorial_basis = False
def gbasis(self, mapping, X, i, tind=None):
if tind is None:
tind = np.arange(mapping.mesh.t.shape[1])
if self.V is None:
# initialize power basis
self._pbasis_init(self.maxdeg,
self.dim,
self.derivatives,
self.tensorial_basis)
# construct Vandermonde matrix and invert it
self.V = np.linalg.inv(self._eval_dofs(mapping.mesh))
V = self.V[tind]
x = mapping.F(X, tind=tind)
U = [np.zeros((self.dim,) * k + x[0].shape)
for k in range(self.derivatives + 1)]
N = len(self._pbasis[()])
# loop over new basis
for k in range(self.derivatives + 1):
diffs = list(itertools.product(*((list(range(self.dim)),) * k)))
for itr in range(N):
for diff in diffs:
U[k][diff] += (V[:, itr, i][:, None]
* self._pbasis[diff][itr](*x))
# put higher order derivatives into a single array
hod = np.empty((self.derivatives - 2,), dtype=object)
for k in range(self.derivatives - 2):
hod[k] = U[k + 3]
return (
DiscreteField(
value=U[0],
grad=U[1],
hess=U[2],
hod=hod,
),
)
def _pbasis_create(self, i, j=None, k=None, dx=0, dy=0, dz=0):
"""Return a single power basis function."""
if j is None and k is None: # 1d
cx = 1
if dx > 0:
for l in np.arange(dx, 0, -1):
cx *= i - dx + l
return eval(("lambda x: {}*x**{}"
.format(cx, np.max([i - dx, 0]))))
elif k is None: # 2d
cx = 1
cy = 1
if dx > 0:
for l in np.arange(dx, 0, -1):
cx *= i - dx + l
if dy > 0:
for l in np.arange(dy, 0, -1):
cy *= j - dy + l
return eval(("lambda x, y: {}*x**{}*y**{}"
.format(cx * cy,
np.max([i - dx, 0]),
np.max([j - dy, 0]))))
else: # 3d
cx = 1
cy = 1
cz = 1
if dx > 0:
for l in np.arange(dx, 0, -1):
cx *= i - dx + l
if dy > 0:
for l in np.arange(dy, 0, -1):
cy *= j - dy + l
if dz > 0:
for l in np.arange(dz, 0, -1):
cz *= k - dz + l
return eval(("lambda x, y, z: {}*x**{}*y**{}*z**{}"
.format(cx * cy * cz,
np.max([i - dx, 0]),
np.max([j - dy, 0]),
np.max([k - dz, 0]),)))
def _pbasis_init(self, maxdeg, dim, Ndiff, is_tensorial=False):
"""Define power bases.
Parameters
----------
maxdeg
Maximum degree of the basis
dim
            Dimension of the domain.
        Ndiff
            Number of derivatives to include.
        is_tensorial
            Whether to build a tensor-product basis instead of a
            total-degree basis.
        """
if is_tensorial:
maxdeg = int(maxdeg / 2)
self._pbasis = {}
for k in range(Ndiff + 1):
diffs = list(itertools.product(*((list(range(dim)),) * k)))
for diff in diffs:
# desc = ''.join([str(d) for d in diff])
dx = sum([1 for d in diff if d == 0])
dy = sum([1 for d in diff if d == 1]) if dim == 2 else None
dz = sum([1 for d in diff if d == 2]) if dim == 3 else None
if dim == 1:
self._pbasis[diff] = [
self._pbasis_create(i=i, dx=dx)
for i in range(maxdeg + 1)
if i <= maxdeg
]
elif dim == 2:
self._pbasis[diff] = [
self._pbasis_create(i=i, j=j, dx=dx, dy=dy)
for i in range(maxdeg + 1)
for j in range(maxdeg + 1)
if is_tensorial or i + j <= maxdeg
]
elif dim == 3:
self._pbasis[diff] = [
self._pbasis_create(i=i, j=j, k=k, dx=dx, dy=dy, dz=dz)
for i in range(maxdeg + 1)
for j in range(maxdeg + 1)
for k in range(maxdeg + 1)
if is_tensorial or i + j + k <= maxdeg
]
def _eval_dofs(self, mesh, tind=None):
if tind is None:
tind = np.arange(mesh.t.shape[1])
N = len(self._pbasis[()])
V = np.zeros((len(tind), N, N))
w = {
'v': np.array([mesh.p[:, mesh.t[itr, tind]]
for itr in range(mesh.t.shape[0])]),
}
if mesh.p.shape[0] >= 2:
w['e'] = np.array([
.5 * (w['v'][itr] + w['v'][(itr + 1) % mesh.t.shape[0]])
for itr in range(mesh.t.shape[0])
])
w['n'] = np.array([
w['v'][itr] - w['v'][(itr + 1) % mesh.t.shape[0]]
for itr in range(mesh.t.shape[0])
])
w['n'][2] = -w['n'][2] # direction swapped due to mesh numbering
for itr in range(3):
w['n'][itr] = np.array([w['n'][itr, 1, :],
-w['n'][itr, 0, :]])
w['n'][itr] /= np.linalg.norm(w['n'][itr], axis=0)
# evaluate dofs, gdof implemented in subclasses
for itr in range(N):
for jtr in range(N):
F = {k: self._pbasis[k][itr] for k in self._pbasis}
V[:, jtr, itr] = self.gdof(F, w, jtr)
return V
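def _pbasis_create_example() -> None:
    """A hedged sketch of ``_pbasis_create`` above (the method ignores
    ``self``, so it is called unbound here): in 1D, ``i=3`` with ``dx=1``
    builds d/dx x**3 = 3*x**2, so evaluating at x = 2 gives 12."""
    f = ElementGlobal._pbasis_create(None, i=3, dx=1)
    assert f(2.0) == 12.0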
| 36.176136
| 79
| 0.406
|
e77b04b6fe39679ccc0d5155727901c8018dd5b0
| 1,693
|
py
|
Python
|
nescient/crypto/tools.py
|
airilyan/nescient
|
a32e671e92a378a194c4ca010bcab37aa75d2096
|
[
"MIT"
] | 4
|
2018-01-23T05:30:15.000Z
|
2020-07-23T13:08:24.000Z
|
nescient/crypto/tools.py
|
airilyan/nescient
|
a32e671e92a378a194c4ca010bcab37aa75d2096
|
[
"MIT"
] | 4
|
2018-01-16T18:56:17.000Z
|
2018-03-13T06:48:24.000Z
|
nescient/crypto/tools.py
|
airilyan/nescient
|
a32e671e92a378a194c4ca010bcab37aa75d2096
|
[
"MIT"
] | 1
|
2020-07-23T13:08:27.000Z
|
2020-07-23T13:08:27.000Z
|
# Nescient: A Python program for packing/unpacking encrypted, salted, and authenticated file containers.
# Copyright (C) 2018 Ariel Antonitis. Licensed under the MIT license.
#
# nescient/crypto/tools.py
""" Various functions and tools for general cryptographic purposes, like secure randomness, padding, etc. """
try: # Define a Python-version-independent source of securely random bytes
import secrets
except ImportError:
import os
def get_random_bytes(n):
return bytes(os.urandom(n))
def randbits(k):
return int.from_bytes(get_random_bytes(k//8), byteorder='big')
else:
def get_random_bytes(n):
return secrets.token_bytes(n)
def randbits(k):
return secrets.randbits(k)
def pad(data, block_size):
""" Pads data to a multiple of the block size in a reversible way, by adding n bytes with value n to the data, where
n is the number of bytes needed to reach a multiple of the block size.
Args:
data: The data to pad to a multiple of the block size. Must be a `bytearray`.
block_size (int): The desired block size. Must be between 1 and 255 (inclusive).
"""
assert(1 <= block_size <= 255)
# Calculate the number of bytes to append
n = block_size - (len(data) % block_size)
# Note that, if the data is already a multiple of the block size, a total of block_size bytes will be appended
data.extend(bytes([n]*n))
def unpad(data):
""" Unpads data previously padded with `pad`.
Args:
data: The data to remove padding from. Must be a `bytearray`. If not previously padded, data may be lost.
"""
n = data[-1] # The number of bytes to remove
del data[-n:]
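def _pad_round_trip_example() -> None:
    """ Hedged sketch of the padding scheme above: 5 bytes padded to a 16-byte
    block gain 11 bytes each holding the value 11; unpad strips them again. """
    data = bytearray(b'hello')
    pad(data, 16)
    assert len(data) == 16 and data[-1] == 11
    unpad(data)
    assert data == bytearray(b'hello')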
| 36.021277
| 120
| 0.688128
|
8d04b657a87938c30937484ab984bd07a5d6edc6
| 6,460
|
py
|
Python
|
tests/test/lua/unicode.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 6
|
2019-01-09T11:55:15.000Z
|
2021-06-25T19:52:42.000Z
|
tests/test/lua/unicode.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 65
|
2018-12-12T08:40:38.000Z
|
2022-02-28T09:19:45.000Z
|
tests/test/lua/unicode.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 9
|
2018-11-23T08:59:09.000Z
|
2020-02-04T12:56:35.000Z
|
#!/usr/bin/env python2.7
import locale
import os
import subprocess
import sys
import unicodedata
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
import udf
from udf import skip
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
@skip('csv data for table fn2.unicodedata is currently not available')
class LuaUnicode(udf.TestCase):
@classmethod
def setUpClass(cls):
sql = udf.fixindent('''
DROP SCHEMA FN2 CASCADE;
CREATE SCHEMA FN2;
CREATE LUA SCALAR SCRIPT
lua_match(c CHAR(1))
RETURNS CHAR(2) AS
magic_set = {}
for c in string.gmatch("()%.+-*?[^$", ".") do
magic_set[c] = true
end
function run(ctx)
local c = ctx.c
if (c ~= null) and (not magic_set[c]) then
local txt = "x" .. c .. "x"
return unicode.utf8.match(txt, c)
end
end
/
CREATE LUA SCALAR SCRIPT
lua_gmatch(text VARCHAR(350))
EMITS (w CHAR(1), c DOUBLE) AS
function run(ctx)
local txt = ctx.text
if txt ~= null then
for c in unicode.utf8.gmatch(txt, ".") do
ctx.emit(c, 1)
end
end
end
/
CREATE LUA SCALAR SCRIPT
lua_gsub(text VARCHAR(350))
EMITS (w CHAR(1), c DOUBLE) AS
function run(ctx)
if ctx.text ~= null
then
unicode.utf8.gsub(ctx.text, '.', function(w) ctx.emit(w,1) end)
end
end
/
CREATE TABLE unicodedata (
codepoint INT NOT NULL,
name VARCHAR(100) ASCII,
uchar VARCHAR(1) UTF8,
to_upper VARCHAR(1) UTF8,
to_lower VARCHAR(1) UTF8,
decimal_value INT,
numeric_value INT,
category VARCHAR(3) ASCII,
bidirectional VARCHAR(3) ASCII,
combining VARCHAR(10) ASCII,
east_asian_width VARCHAR(2) ASCII,
mirrored BOOLEAN,
decomposition VARCHAR(100) ASCII,
NFC VARCHAR(10) UTF8,
NFD VARCHAR(10) UTF8,
NFKC VARCHAR(20) UTF8,
NFKD VARCHAR(20) UTF8
);
IMPORT INTO fn2.unicodedata
FROM LOCAL CSV FILE '/share/fs8/Databases/UDF/unicode.csv'
ROW SEPARATOR = 'CRLF'
REJECT LIMIT 0;''')
cmd = '''%(exaplus)s -c %(conn)s -u sys -P exasol
-no-config -autocommit ON -L -pipe''' % {
'exaplus': os.environ.get('EXAPLUS',
'/usr/opt/EXASuite-4/EXASolution-4.2.9/bin/Console/exaplus'),
'conn': udf.opts.server
}
env = os.environ.copy()
env['PATH'] = '/usr/opt/jdk1.8.0_latest/bin:' + env['PATH']
exaplus = subprocess.Popen(
cmd.split(),
env=env,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
out, _err = exaplus.communicate(sql)
if exaplus.returncode != 0:
cls.log.critical('EXAplus error: %d', exaplus.returncode)
cls.log.error(out)
else:
cls.log.debug(out)
def setUp(self):
        '''Mixing different connections (JDBC/ODBC) may result in
        the ODBC connection not seeing the commits made via JDBC.
        Therefore, force a new transaction.'''
self.commit()
self.query('OPEN SCHEMA fn2')
def test_unicode_match(self):
rows = self.query('''
SELECT * FROM (
SELECT codepoint,
uchar,
fn2.lua_match(uchar),
uchar = fn2.lua_match(uchar) AS m
FROM fn2.unicodedata
)
WHERE (m IS NULL OR m = FALSE)
AND codepoint not in (0,36,37,40,41,42,43,45,46,63,91,94)''')
self.assertRowsEqual([], rows)
@skip('manual test for DWA-13860, DWA-17091')
def test_unicode_gmatch(self):
nrows = self.query('''
SELECT count(uchar) * 3
FROM fn2.unicodedata
WHERE codepoint BETWEEN 382976 AND 385152''')[0][0]
for _ in range(25):
self.query('''
SELECT fn2.lua_gmatch(uchar)
FROM (
SELECT 'x'||uchar||'x' AS uchar
FROM fn2.unicodedata
WHERE codepoint BETWEEN 382976 AND 385152
)''')
self.assertEqual(nrows, self.rowcount())
def test_unicode_gsub(self):
rows = self.query('''
SELECT unicode(w) FROM (
SELECT fn2.lua_gsub(uchar) FROM fn2.unicodedata
) ORDER BY 1 ASC''')
self.assertEqual(1114111, self.rowcount())
s1 = set(range(1,1114112))
s2 = set(x[0] for x in rows)
self.assertEqual(s1 - s2, s2 - s1)
class LuaUnicodePattern(udf.TestCase):
def setUp(self):
self.query('DROP SCHEMA fn3 CASCADE', ignore_errors=True)
self.query('CREATE SCHEMA fn3')
@skip('manual test for DWA-13860')
def test_unicode_gmatch_classes(self):
self.query(udf.fixindent('''
CREATE lua SCALAR SCRIPT
gmatch_pattern(w VARCHAR(1000))
EMITS (w VARCHAR(1000)) AS
function run(ctx)
local word = ctx.w
if word ~= null then
for i in unicode.utf8.gmatch(word, '([%w%p]+)') do
ctx.emit(i)
end
end
end
/
'''))
prefix = 0x1eba
for u in range(sys.maxunicode):
try:
self.query('''
SELECT gmatch_pattern(
unicodechr(%d) || unicodechr(?))
FROM DUAL''' % prefix, u)
#print u
except:
print 'U+%04X' %u, unicodedata.name(unichr(u), 'U+%04X' % u)
if __name__ == '__main__':
udf.main()
# vim: ts=4:sts=4:sw=4:et:fdm=indent
| 32.626263
| 82
| 0.482353
|
d024814b84e06934465f276762d8af06a7a6170a
| 474
|
py
|
Python
|
06_magicgui.py
|
BiAPoL/online_image_processing_napari
|
680d9ceeef5ae188541a96c7125f0fca07f28af5
|
[
"Unlicense"
] | 2
|
2021-05-10T13:44:15.000Z
|
2022-03-16T20:20:39.000Z
|
06_magicgui.py
|
BiAPoL/online_image_processing_napari
|
680d9ceeef5ae188541a96c7125f0fca07f28af5
|
[
"Unlicense"
] | 1
|
2021-05-17T16:11:54.000Z
|
2021-05-19T19:38:50.000Z
|
06_magicgui.py
|
BiAPoL/online_image_processing_napari
|
680d9ceeef5ae188541a96c7125f0fca07f28af5
|
[
"Unlicense"
] | 2
|
2021-05-17T16:36:12.000Z
|
2022-03-18T15:07:14.000Z
|
import napari
from magicgui import magicgui
from napari.types import ImageData
from skimage.filters import gaussian
from skimage.io import imread
viewer = napari.Viewer()
blobs = imread('blobs.tif')
viewer.add_image(blobs)
@magicgui(call_button='Run')
def gaussian_blur(image : ImageData, sigma : float = 2) -> ImageData:
"""
Apply a gaussian blur to an image.
"""
return gaussian(image, sigma)
viewer.window.add_dock_widget(gaussian_blur)
napari.run()
| 21.545455
| 69
| 0.746835
|
0f2fb8d946747b279c6ca69b58c8eb332aae7d0d
| 1,848
|
py
|
Python
|
tests/queryset_pickle/models.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/queryset_pickle/models.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/queryset_pickle/models.py
|
Yoann-Vie/esgi-hearthstone
|
115d03426c7e8e80d89883b78ac72114c29bed12
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.db import DJANGO_VERSION_PICKLE_KEY, models
from django.utils.translation import gettext_lazy as _
def standalone_number():
return 1
class Numbers:
@staticmethod
def get_static_number():
return 2
class PreviousDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super().__getstate__()
state[DJANGO_VERSION_PICKLE_KEY] = '1.0'
return state
class MissingDjangoVersionQuerySet(models.QuerySet):
def __getstate__(self):
state = super().__getstate__()
del state[DJANGO_VERSION_PICKLE_KEY]
return state
class Group(models.Model):
name = models.CharField(_('name'), max_length=100)
objects = models.Manager()
previous_django_version_objects = PreviousDjangoVersionQuerySet.as_manager()
missing_django_version_objects = MissingDjangoVersionQuerySet.as_manager()
class Event(models.Model):
title = models.CharField(max_length=100)
group = models.ForeignKey(Group, models.CASCADE)
class Happening(models.Model):
when = models.DateTimeField(blank=True, default=datetime.datetime.now)
name = models.CharField(blank=True, max_length=100, default="test")
number1 = models.IntegerField(blank=True, default=standalone_number)
number2 = models.IntegerField(blank=True, default=Numbers.get_static_number)
event = models.OneToOneField(Event, models.CASCADE, null=True)
class Container:
# To test pickling we need a class that isn't defined on module, but
# is still available from app-cache. So, the Container class moves
# SomeModel outside of module level
class SomeModel(models.Model):
somefield = models.IntegerField()
class M2MModel(models.Model):
groups = models.ManyToManyField(Group)
| 30.295082
| 81
| 0.71645
|
de4abca3aa73f7d5f55a97b231f3f7d075fae41e
| 3,052
|
py
|
Python
|
trainer/custom_methods/inference.py
|
SuijkerbuijkP/Detectron2-GCP
|
927ac14dc584cb4c257444421911aa9d0b6bc5a2
|
[
"MIT"
] | 3
|
2021-05-10T09:26:47.000Z
|
2022-03-25T12:38:31.000Z
|
trainer/custom_methods/inference.py
|
SuijkerbuijkP/Detectron2-GCP
|
927ac14dc584cb4c257444421911aa9d0b6bc5a2
|
[
"MIT"
] | null | null | null |
trainer/custom_methods/inference.py
|
SuijkerbuijkP/Detectron2-GCP
|
927ac14dc584cb4c257444421911aa9d0b6bc5a2
|
[
"MIT"
] | 1
|
2022-03-03T10:39:10.000Z
|
2022-03-03T10:39:10.000Z
|
import os
import cv2
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from custom_methods import load_checkpoint
def inference(cfg, args):
"""
    This function is used to perform inference. It loads the config file and, using the corresponding
    eval_run parameter, looks for the folder in which the model to be evaluated is saved. It then loads
    a predictor and the latest model file and performs inference. Pictures are saved to the "predictions"
    folder inside the corresponding run.
"""
checkpoint_iteration, bucket = load_checkpoint(cfg, args)
# set prediction threshold and model weights
cfg.defrost()
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_" + checkpoint_iteration + ".pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7
# cfg.MODEL.FCOS.INFERENCE_TH_TEST = 0.3
cfg.freeze()
# inference part
predictor = DefaultPredictor(cfg)
# save images in predictions folder, not required with pushing directly to GCP but cleaner
if not os.path.isdir(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration)):
os.mkdir(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration))
# for d in DatasetCatalog.get("car_damage_test"):
for d in DatasetCatalog.get("car_damage_val"):
im = cv2.imread(d["file_name"])
# save original image for easy comparison
image_id = str(d["file_name"]).split("/")[-1].split(".")[0]
image_extension = "." + str(d["file_name"]).split("/")[-1].split(".")[1]
cv2.imwrite(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration) + "/" + image_id + image_extension, im)
# produce predictions
outputs = predictor(im)
v = Visualizer(im[:, :, ::-1],
metadata=MetadataCatalog.get("car_damage_val"),
scale=0.8,
instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels
)
# .to("cpu"), which could be changed for large inference datasets, and save predictions
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
cv2.imwrite(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration) + "/" + image_id + "_pred.jpeg",
v.get_image()[:, :, ::-1])
# save to GCP
blob = bucket.blob(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration)
+ "/" + image_id + image_extension)
blob.upload_from_filename(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration)
+ "/" + image_id + image_extension)
blob = bucket.blob(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration)
+ "/" + image_id + "_pred.jpeg")
blob.upload_from_filename(cfg.OUTPUT_DIR + '/predictions' + str(checkpoint_iteration)
+ "/" + image_id + "_pred.jpeg")
| 47.6875
| 119
| 0.643185
|
ff574466e1c5e4b076dfd186ab00acaeaa526178
| 3,545
|
py
|
Python
|
mars/tensor/tests/test_utils.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | 2
|
2019-03-29T04:11:10.000Z
|
2020-07-08T10:19:54.000Z
|
mars/tensor/tests/test_utils.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/tests/test_utils.py
|
HarshCasper/mars
|
4c12c968414d666c7a10f497bc22de90376b1932
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from mars.lib.mmh3 import hash_from_buffer as mmh3_hash_from_buffer
from mars.session import new_session
from mars import tensor as mt
from mars.tensor.utils import hash_on_axis, normalize_axis_tuple, fetch_corner_data
class Test(unittest.TestCase):
def testHashOnAxis(self):
hash_from_buffer = lambda x: mmh3_hash_from_buffer(memoryview(x))
a = np.random.rand(10)
result = hash_on_axis(a, 0, 3)
expected = np.array([mmh3_hash_from_buffer(element) % 3 for element in a])
np.testing.assert_array_equal(result, expected)
result = hash_on_axis(a, 0, 1)
expected = np.array([0 for _ in a])
np.testing.assert_array_equal(result, expected)
a = np.random.rand(10, 5)
result = hash_on_axis(a, 0, 3)
expected = np.array([hash_from_buffer(a[i, :]) % 3 for i in range(a.shape[0])])
np.testing.assert_array_equal(result, expected)
result = hash_on_axis(a, 1, 3)
expected = np.array([hash_from_buffer(a[:, i]) % 3 for i in range(a.shape[1])])
np.testing.assert_array_equal(result, expected)
a = np.random.rand(10, 5, 4)
result = hash_on_axis(a, 2, 3)
expected = np.array([hash_from_buffer(a[:, :, i]) % 3 for i in range(a.shape[2])])
np.testing.assert_array_equal(result, expected)
def testNormalizeAxisTuple(self):
self.assertEqual(normalize_axis_tuple(-1, 3), (2,))
self.assertEqual(normalize_axis_tuple([0, -2], 3), (0, 1))
self.assertEqual(sorted(normalize_axis_tuple({0, -2}, 3)), [0, 1])
with self.assertRaises(ValueError) as cm:
normalize_axis_tuple((1, -2), 3, argname='axes')
self.assertIn('axes', str(cm.exception))
with self.assertRaises(ValueError):
normalize_axis_tuple((1, -2), 3)
def testFetchTensorCornerData(self):
sess = new_session()
print_options = np.get_printoptions()
        # make sure numpy is using its default print options
self.assertEqual(print_options['edgeitems'], 3)
self.assertEqual(print_options['threshold'], 1000)
size = 12
for i in (2, 4, size - 3, size, size + 3):
arr = np.random.rand(i, i, i)
t = mt.tensor(arr, chunk_size=size // 2)
sess.run(t, fetch=False)
corner_data = fetch_corner_data(t, session=sess)
corner_threshold = 1000 if t.size < 1000 else corner_data.size - 1
with np.printoptions(threshold=corner_threshold, suppress=True):
                # when we repr the corner data, we need to limit the threshold
                # so that it is strictly less than the data's size
repr_corner_data = repr(corner_data)
with np.printoptions(suppress=True):
repr_result = repr(arr)
self.assertEqual(repr_corner_data, repr_result,
f'failed when size == {i}')
| 36.927083
| 90
| 0.649083
|
cfa3e5be6fe7903efd0c0784b614836d7b6acb73
| 3,264
|
py
|
Python
|
matrixdemos/scripts/DigitalClock.py
|
nachomonkey/MatrixDemos
|
b1cdf1c6d5b6ba946a9e5de2c1430d6ee1bf163e
|
[
"MIT"
] | 3
|
2021-04-07T03:16:17.000Z
|
2021-09-08T04:03:22.000Z
|
matrixdemos/scripts/DigitalClock.py
|
nachomonkey/MatrixDemos
|
b1cdf1c6d5b6ba946a9e5de2c1430d6ee1bf163e
|
[
"MIT"
] | null | null | null |
matrixdemos/scripts/DigitalClock.py
|
nachomonkey/MatrixDemos
|
b1cdf1c6d5b6ba946a9e5de2c1430d6ee1bf163e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Display the time and date on the matrix"""
import sys
import time
from optparse import OptionParser
from rgbmatrix import RGBMatrix, RGBMatrixOptions
from matrixdemos.scripts.utils import *
parser = OptionParser()
parser.set_description("""Display the time and date on a matrix.
By default, this will dim to 1% brightness from 10 PM to 7 AM
and cease to display the date""")
parser.add_option("--24h", action="store_true", dest="twentyfour_hr",
default=False, help="make this a 24-hour clock instead of 12-hour")
parser.add_option("-a", "--always-on", action="store_false", dest="dimming",
default=True, help="disables the dimming at night")
parser.add_option("-s", "--remove-seconds", action="store_false", dest="secs",
default=True, help="removes the seconds bar at the top of the matrix")
parser.add_option("-d", "--remove-date", action="store_false", dest="date",
default=True, help="stops displaying the date")
parser.add_option("-n", "--no-flash", action="store_false", dest="flash",
default=True, help="makes the colon in the middle stop flashing")
(options, args) = parser.parse_args()
# True for 24 hour clock
CLK_24_HOUR = options.twentyfour_hr
ENABLE_DIMMING = options.dimming
DISPLAY_SECONDS = options.secs
DISPLAY_DATE = options.date
COLON_FLASH = options.flash
# Configuration for the matrix
_options = RGBMatrixOptions()
_options.drop_privileges = False
_options.rows = 32
_options.chain_length = 1
_options.parallel = 1
_options.hardware_mapping = 'adafruit-hat' # If you have an Adafruit HAT: 'adafruit-hat'
matrix = RGBMatrix(options=_options)
SMALL_SIZE = 10
BIG_SIZE = 13
def run():
colon = True
while True:
image, canvas = new_canvas()
t = time.localtime()
dim = False
        if (t.tm_hour < 7 or t.tm_hour > 21) and ENABLE_DIMMING:  # group the hour checks so --always-on really disables dimming
matrix.brightness = 1
dim = True
else:
matrix.brightness = 100
am_pm = "AM"
if t.tm_hour > 11:
am_pm = "PM"
hr = t.tm_hour
if not CLK_24_HOUR:
hr = hr % 12
if hr == 0:
hr = 12
if COLON_FLASH:
colon = not colon
text = f"{hr if not CLK_24_HOUR else t.tm_hour}{':' if colon else ' '}{str(t.tm_min).zfill(2)}"
size = BIG_SIZE
if len(text) >= 5:
size = SMALL_SIZE
if DISPLAY_SECONDS:
for x in range(0, t.tm_sec * 32 // 60):
canvas.point((x, 0), fill=(int(x / 32 * 255) if not dim else 230, 0, 0))
DrawText(canvas, (1 if len(text) >= 5 else 0, 0 if len(text) >= 5 else -3), size=size, text=text, color="red")
if not CLK_24_HOUR:
DrawText(canvas, (0, 10), size=8, text=am_pm, color="RED")
if not dim and DISPLAY_DATE:
DrawText(canvas, (11, 7), size=12, text=time.asctime()[4:7], font="cambriab", color="GREEN")
DrawText(canvas, (0, 20), size=8, text=time.asctime()[:3], color="LIME")
DrawText(canvas, (15, 18), size=12, text=str(t.tm_mday), color="GREEN")
time.sleep(1)
matrix.SetImage(image.convert("RGB"))
def main():
try:
run()
except KeyboardInterrupt:
print()
if __name__ == "__main__":
main()
| 32.969697
| 118
| 0.634498
|
73db823a1b26c6bea9bd690057abf27930711eda
| 9,389
|
py
|
Python
|
object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py
|
paulchou0309/obj
|
d7ae404fa73db60a6fe539d613e48f478b81dbef
|
[
"MIT"
] | 3
|
2019-03-02T09:09:01.000Z
|
2019-11-15T02:09:50.000Z
|
object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py
|
paulchou0309/obj
|
d7ae404fa73db60a6fe539d613e48f478b81dbef
|
[
"MIT"
] | null | null | null |
object_detection/models/faster_rcnn_resnet_v1_feature_extractor.py
|
paulchou0309/obj
|
d7ae404fa73db60a6fe539d613e48f478b81dbef
|
[
"MIT"
] | 1
|
2018-04-03T01:33:47.000Z
|
2018-04-03T01:33:47.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet V1 Faster R-CNN implementation.
See "Deep Residual Learning for Image Recognition" by He et al., 2015.
https://arxiv.org/abs/1512.03385
Note: this implementation assumes that the classification checkpoint used
to finetune this model is trained using the same configuration as that of
the MSRA provided checkpoints
(see https://github.com/KaimingHe/deep-residual-networks), e.g., with
same preprocessing, batch norm scaling, etc.
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from object_detection.models.nets import resnet_utils
from object_detection.models.nets import resnet_v1
slim = tf.contrib.slim
class FasterRCNNResnetV1FeatureExtractor(
faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
"""Faster R-CNN Resnet V1 feature extractor implementation."""
def __init__(self,
architecture,
resnet_model,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
architecture: Architecture name of the Resnet V1 model.
resnet_model: Definition of the Resnet V1 model.
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16.
"""
if first_stage_features_stride != 8 and first_stage_features_stride != 16:
raise ValueError('`first_stage_features_stride` must be 8 or 16.')
self._architecture = architecture
self._resnet_model = resnet_model
super(FasterRCNNResnetV1FeatureExtractor, self).__init__(
is_training, first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
def preprocess(self, resized_inputs):
"""Faster R-CNN Resnet V1 preprocessing.
VGG style channel mean subtraction as described here:
https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-readme-md
Args:
resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
representing a batch of images with values between 0 and 255.0.
Returns:
preprocessed_inputs: A [batch, height_out, width_out, channels] float32
tensor representing a batch of images.
"""
channel_means = [123.68, 116.779, 103.939]
return resized_inputs - [[channel_means]]
def _extract_proposal_features(self, preprocessed_inputs, scope):
"""Extracts first stage RPN features.
Args:
preprocessed_inputs: A [batch, height, width, channels] float32 tensor
representing a batch of images.
scope: A scope name.
Returns:
rpn_feature_map: A tensor with shape [batch, height, width, depth]
Raises:
InvalidArgumentError: If the spatial size of `preprocessed_inputs`
(height or width) is less than 33.
ValueError: If the created network is missing the required activation.
"""
if len(preprocessed_inputs.get_shape().as_list()) != 4:
raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
'tensor of shape %s' % preprocessed_inputs.get_shape())
shape_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
['image size must at least be 33 in both height and width.'])
with tf.control_dependencies([shape_assert]):
# Disables batchnorm for fine-tuning with smaller batch sizes.
# TODO: Figure out if it is needed when image batch size is bigger.
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
weight_decay=self._weight_decay)):
with tf.variable_scope(
self._architecture, reuse=self._reuse_weights) as var_scope:
_, activations = self._resnet_model(
preprocessed_inputs,
num_classes=None,
is_training=self._train_batch_norm,
global_pool=False,
output_stride=self._first_stage_features_stride,
spatial_squeeze=False,
scope=var_scope)
handle = scope + '/%s/block3' % self._architecture
return activations[handle]
def _extract_box_classifier_features(self, proposal_feature_maps, scope):
"""Extracts second stage box classifier features.
Args:
proposal_feature_maps: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, crop_height, crop_width, depth]
representing the feature map cropped to each proposal.
scope: A scope name (unused).
Returns:
proposal_classifier_features: A 4-D float tensor with shape
[batch_size * self.max_num_proposals, height, width, depth]
representing box classifier features for each proposal.
"""
with tf.variable_scope(self._architecture, reuse=self._reuse_weights):
with slim.arg_scope(
resnet_utils.resnet_arg_scope(
batch_norm_epsilon=1e-5,
batch_norm_scale=True,
weight_decay=self._weight_decay)):
with slim.arg_scope([slim.batch_norm],
is_training=self._train_batch_norm):
blocks = [
resnet_utils.Block('block4', resnet_v1.bottleneck, [{
'depth': 2048,
'depth_bottleneck': 512,
'stride': 1
}] * 3)
]
proposal_classifier_features = resnet_utils.stack_blocks_dense(
proposal_feature_maps, blocks)
return proposal_classifier_features
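# --- Hedged sketch (editor's addition, not part of the object_detection code) ---
# The VGG-style channel-mean subtraction performed by `preprocess` above, shown
# with plain numpy on a dummy batch. Shapes and pixel values are made up.
def _demo_channel_mean_subtraction():
  import numpy as np
  images = np.full((1, 2, 2, 3), 255.0, dtype=np.float32)  # [batch, h, w, 3]
  channel_means = np.array([123.68, 116.779, 103.939], dtype=np.float32)
  # Broadcasting subtracts the per-channel mean from every pixel.
  return images - channel_means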
class FasterRCNNResnet50FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 50 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet50FeatureExtractor, self).__init__(
'resnet_v1_50', resnet_v1.resnet_v1_50, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
class FasterRCNNResnet101FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 101 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet101FeatureExtractor, self).__init__(
'resnet_v1_101', resnet_v1.resnet_v1_101, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
class FasterRCNNResnet152FeatureExtractor(FasterRCNNResnetV1FeatureExtractor):
"""Faster R-CNN Resnet 152 feature extractor implementation."""
def __init__(self,
is_training,
first_stage_features_stride,
batch_norm_trainable=False,
reuse_weights=None,
weight_decay=0.0):
"""Constructor.
Args:
is_training: See base class.
first_stage_features_stride: See base class.
batch_norm_trainable: See base class.
reuse_weights: See base class.
weight_decay: See base class.
Raises:
ValueError: If `first_stage_features_stride` is not 8 or 16,
or if `architecture` is not supported.
"""
super(FasterRCNNResnet152FeatureExtractor, self).__init__(
'resnet_v1_152', resnet_v1.resnet_v1_152, is_training,
first_stage_features_stride, batch_norm_trainable,
reuse_weights, weight_decay)
| 37.556
| 80
| 0.681329
|
2b18f53c2745131b925093e7b81aeee823f650ff
| 2,123
|
py
|
Python
|
src/discord_notifier.py
|
danielloera/custom-scrape
|
b3a3286d26348f2066d9333c5bbb548f080c8417
|
[
"MIT"
] | null | null | null |
src/discord_notifier.py
|
danielloera/custom-scrape
|
b3a3286d26348f2066d9333c5bbb548f080c8417
|
[
"MIT"
] | null | null | null |
src/discord_notifier.py
|
danielloera/custom-scrape
|
b3a3286d26348f2066d9333c5bbb548f080c8417
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
import os
token = "DISCORD_TOKEN"
max_files_per_message = 10
def send_scrape_result_messages(scrape_results, channel_name):
client = discord.Client()
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
text_channel = [c for c in client.get_all_channels()
if c.name == channel_name][0]
print('Purging all bot messages')
await delete_all_bot_messages(text_channel)
print('Sending new messages')
await send_messages(text_channel)
async def delete_all_bot_messages(text_channel):
await text_channel.purge(
limit=1000, check=lambda m: m.author == client.user)
async def send_messages(text_channel):
for scrape_result in scrape_results:
await text_channel.send(content=f'{scrape_result.name}:')
result_items = scrape_result.url_to_screenshots_map.items()
if result_items:
for url, screenshots in result_items:
if screenshots:
screenshot_files = [discord.File(
open(s, 'rb')) for s in screenshots]
# Discord only allows 10 files per message.
                        chunked_files = [
                            screenshot_files[i:i + max_files_per_message]
                            for i in range(0, len(screenshot_files),
                                           max_files_per_message)]
await text_channel.send(
content=f'{len(screenshots)} results from:\n{url}',
files=chunked_files[0])
# Send the rest of the chunks, if present.
if len(chunked_files) > 1:
for chunk in chunked_files[1:]:
await text_channel.send(files=chunk)
for f in screenshot_files:
f.close()
else:
await text_channel.send('Nothing found :(')
await client.close()
client.run(os.getenv(token))
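# --- Hedged illustration (editor's addition, not part of the original bot) ---
# The list-chunking idiom used in send_messages above, shown on plain integers;
# the helper name is made up for demonstration.
def _chunk(items, size=max_files_per_message):
    return [items[i:i + size] for i in range(0, len(items), size)]

# _chunk(list(range(23))) -> three chunks of lengths 10, 10 and 3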
| 40.056604
| 79
| 0.541215
|
e27cfc092093b535d85f470025591ab1e367ac6e
| 4,634
|
py
|
Python
|
nipype/interfaces/ants/base.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | null | null | null |
nipype/interfaces/ants/base.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 2
|
2018-04-26T12:09:32.000Z
|
2018-04-27T06:36:49.000Z
|
nipype/interfaces/ants/base.py
|
PAmcconnell/nipype
|
39fbd5411a844ce7c023964d3295eb7643b95af5
|
[
"Apache-2.0"
] | 1
|
2019-11-14T14:16:57.000Z
|
2019-11-14T14:16:57.000Z
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""The ants module provides basic functions for interfacing with ANTS tools."""
import os
# Local imports
from ... import logging, LooseVersion
from ..base import (CommandLine, CommandLineInputSpec, traits, isdefined,
PackageInfo)
iflogger = logging.getLogger('nipype.interface')
# -Using -1 gives primary responsibility to ITKv4 to do the correct
# thread limiting.
# -Using 1 takes a very conservative approach to avoid overloading
# the computer (when running MultiProc) by forcing everything to
# single threaded. This can be a severe penalty for registration
# performance.
LOCAL_DEFAULT_NUMBER_OF_THREADS = 1
# -Using NSLOTS has the same behavior as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
# as long as ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS is not set. Otherwise
# ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS takes precedence.
# This behavior means that if the user explicitly specifies
# num_threads, that value is respected no matter what SGE tries to limit.
PREFERED_ITKv4_THREAD_LIMIT_VARIABLE = 'NSLOTS'
ALT_ITKv4_THREAD_LIMIT_VARIABLE = 'ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS'
class Info(PackageInfo):
version_cmd = os.path.join(os.getenv('ANTSPATH', ''),
'antsRegistration') + ' --version'
@staticmethod
def parse_version(raw_info):
for line in raw_info.splitlines():
if line.startswith('ANTs Version: '):
v_string = line.split()[2]
break
else:
return None
# -githash may or may not be appended
v_string = v_string.split('-')[0]
# 2.2.0-equivalent version string
if 'post' in v_string and \
LooseVersion(v_string) >= LooseVersion('2.1.0.post789'):
return '2.2.0'
else:
return '.'.join(v_string.split('.')[:3])
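# --- Hedged illustration (editor's addition, not part of nipype) ---
# What parse_version is expected to return for two made-up
# `antsRegistration --version` outputs; the strings are examples only.
def _parse_version_example():
    modern = 'ANTs Version: 2.3.1.dev68-g2574c\nCompiled: Jan  1 2020'
    legacy = 'ANTs Version: 2.1.0.post789-g0740f\nCompiled: Jan  1 2017'
    # -> ('2.3.1', '2.2.0')
    return Info.parse_version(modern), Info.parse_version(legacy)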
class ANTSCommandInputSpec(CommandLineInputSpec):
"""Base Input Specification for all ANTS Commands
"""
num_threads = traits.Int(
LOCAL_DEFAULT_NUMBER_OF_THREADS,
usedefault=True,
nohash=True,
desc="Number of ITK threads to use")
class ANTSCommand(CommandLine):
"""Base class for ANTS interfaces
"""
input_spec = ANTSCommandInputSpec
_num_threads = LOCAL_DEFAULT_NUMBER_OF_THREADS
def __init__(self, **inputs):
super(ANTSCommand, self).__init__(**inputs)
self.inputs.on_trait_change(self._num_threads_update, 'num_threads')
if not isdefined(self.inputs.num_threads):
self.inputs.num_threads = self._num_threads
else:
self._num_threads_update()
def _num_threads_update(self):
self._num_threads = self.inputs.num_threads
# ONLY SET THE ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS if requested
# by the end user. The default setting did not allow for
# overwriting the default values.
# In ITKv4 (the version used for all ANTS programs), ITK respects
# the SGE controlled $NSLOTS environmental variable.
# If user specifies -1, then that indicates that the system
# default behavior should be the one specified by ITKv4 rules
        # (i.e. respect the SGE $NSLOTS variable, other thread-related
        # environment variables, or the user's environment settings)
if (self.inputs.num_threads == -1):
if (ALT_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ):
del self.inputs.environ[ALT_ITKv4_THREAD_LIMIT_VARIABLE]
if (PREFERED_ITKv4_THREAD_LIMIT_VARIABLE in self.inputs.environ):
del self.inputs.environ[PREFERED_ITKv4_THREAD_LIMIT_VARIABLE]
else:
self.inputs.environ.update({
PREFERED_ITKv4_THREAD_LIMIT_VARIABLE:
'%s' % self.inputs.num_threads
})
@staticmethod
def _format_xarray(val):
""" Convenience method for converting input arrays [1,2,3] to
commandline format '1x2x3' """
return 'x'.join([str(x) for x in val])
@classmethod
def set_default_num_threads(cls, num_threads):
"""Set the default number of threads for ITK calls
This method is used to set the default number of ITK threads for all
        the ANTS interfaces. However, setting this will not update the number
        of threads for any existing instances. For these, assign
        <instance>.inputs.num_threads directly.
"""
cls._num_threads = num_threads
@property
def version(self):
return Info.version()
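# --- Hedged usage sketch (editor's addition, not part of nipype) ---
# How the num_threads rules documented above surface on a concrete interface;
# `interface_cls` stands in for any ANTSCommand subclass (e.g. Registration).
# Nothing here is executed on import.
def _num_threads_example(interface_cls):
    node = interface_cls()
    node.inputs.num_threads = 4    # exported to the child process as NSLOTS=4
    node.inputs.num_threads = -1   # removes the limits so ITKv4/SGE decide
    return node.inputs.environ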
| 37.983607
| 79
| 0.671558
|
18c7ee09b22cebf68ce61c9c5b401eccb5c7c7ab
| 3,419
|
py
|
Python
|
relaax/algorithms/da3c/da3c_config.py
|
deeplearninc/relaax
|
a0cf280486dc74dca3857c85ec0e4c34e88d6b2b
|
[
"MIT"
] | 71
|
2017-01-25T00:26:20.000Z
|
2021-02-17T12:39:20.000Z
|
relaax/algorithms/da3c/da3c_config.py
|
deeplearninc/relaax
|
a0cf280486dc74dca3857c85ec0e4c34e88d6b2b
|
[
"MIT"
] | 69
|
2017-01-23T19:29:23.000Z
|
2018-08-21T13:26:39.000Z
|
relaax/algorithms/da3c/da3c_config.py
|
deeplearninc/relaax
|
a0cf280486dc74dca3857c85ec0e4c34e88d6b2b
|
[
"MIT"
] | 13
|
2017-01-23T21:18:09.000Z
|
2019-01-29T23:48:30.000Z
|
from relaax.common.python.config.loaded_config import options
from argparse import Namespace
import random
config = options.get('algorithm')
config.seed = options.get('algorithm/seed', random.randrange(1000000))
config.avg_in_num_batches = options.get('algorithm/avg_in_num_batches', 10)
for key, value in [('use_convolutions', [])]:
if not hasattr(config, key):
setattr(config, key, value)
config.input.history = options.get('algorithm/input/history', 1)
config.input.universe = options.get('algorithm/input/universe', True)
config.activation = options.get('algorithm/activation', 'relu')
config.lstm_type = options.get('algorithm/lstm_type', 'Basic') # Basic | Dilated
config.lstm_num_cores = options.get('algorithm/lstm_num_cores', 8)
config.max_global_step = options.get('algorithm/max_global_step', 5e7)
config.use_linear_schedule = options.get('algorithm/use_linear_schedule', False)
config.initial_learning_rate = options.get('algorithm/initial_learning_rate', 1e-4)
config.learning_rate_end = options.get('algorithm/learning_rate_end', 0.0)
config.optimizer = options.get('algorithm/optimizer', 'Adam') # Adam | RMSProp
# RMSProp default parameters
if not hasattr(config, 'RMSProp'):
config.RMSProp = options.get('algorithm/RMSProp', Namespace())
config.RMSProp.decay = options.get('algorithm/RMSProp/decay', 0.99)
config.RMSProp.epsilon = options.get('algorithm/RMSProp/epsilon', 0.1)
config.policy_clip = options.get('algorithm/policy_clip', False)
config.critic_clip = options.get('algorithm/critic_clip', False)
config.norm_adv = options.get('algorithm/normalize_advantage', False)
config.output.loss_type = options.get('algorithm/output/loss_type', 'Normal') # Normal | Expanded | Extended
config.gradients_norm_clipping = options.get('algorithm/gradients_norm_clipping', False)
config.entropy_beta = options.get('algorithm/entropy_beta', 0.01)
config.entropy_type = options.get('algorithm/entropy_type', 'Gauss') # Gauss | Origin
config.gae_lambda = options.get('algorithm/gae_lambda', 1.00)
config.use_filter = options.get('algorithm/use_filter', False)
config.hogwild = options.get('algorithm/hogwild', False)
config.use_icm = options.get('algorithm/use_icm', False)
config.output.scale = options.get('algorithm/output/scale', 1.0)
config.critic_scale = options.get('algorithm/critic_scale', 1.0)
config.output.action_high = options.get('algorithm/output/action_high', [])
config.output.action_low = options.get('algorithm/output/action_low', [])
config.combine_gradients = options.get('algorithm/combine_gradients', 'fifo')
config.num_gradients = options.get('algorithm/num_gradients', 4)
config.dc_lambda = options.get('algorithm/dc_lambda', 0.05)
config.dc_history = options.get('algorithm/dc_history', 20)
# ICM default parameters
if not hasattr(config, 'icm'):
config.icm = options.get('algorithm/icm', Namespace())
config.icm.nu = options.get('algorithm/icm/nu', 0.8)
config.icm.beta = options.get('algorithm/icm/beta', 0.2)
config.icm.lr = options.get('algorithm/icm/lr', 1e-3)
# KAF default parameters
if not hasattr(config, 'KAF'):
config.KAF = options.get('algorithm/KAF', Namespace())
config.KAF.boundary = options.get('algorithm/KAF/boundary', 2.0)
config.KAF.size = options.get('algorithm/KAF/size', 20)
config.KAF.kernel = options.get('algorithm/KAF/kernel', 'rbf') # rbf | rbf2d
config.KAF.gamma = options.get('algorithm/KAF/gamma', 1.0)
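# --- Hedged sketch (editor's addition, not part of RELAAX) ---
# The '/'-separated lookup-with-default pattern used throughout this file,
# emulated with a plain dict so the fallback behaviour is easy to see. The real
# `options.get` reads the loaded config; this stand-in is illustrative only.
def _get(cfg, path, default=None):
    node = cfg
    for key in path.split('/'):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

# _get({'algorithm': {'entropy_beta': 0.02}}, 'algorithm/entropy_beta', 0.01) -> 0.02
# _get({'algorithm': {}}, 'algorithm/entropy_beta', 0.01)                     -> 0.01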
| 44.986842
| 110
| 0.762211
|
3cca1f95a026311998b540b53182de2a8d3efab9
| 640
|
py
|
Python
|
manage.py
|
craiga/music-from-my-tshirt
|
d6f051771c8d2c16fadde78df35a09fd9d8818aa
|
[
"MIT"
] | null | null | null |
manage.py
|
craiga/music-from-my-tshirt
|
d6f051771c8d2c16fadde78df35a09fd9d8818aa
|
[
"MIT"
] | 9
|
2020-03-01T16:48:16.000Z
|
2020-04-29T16:20:26.000Z
|
manage.py
|
craiga/music-from-my-tshirt
|
d6f051771c8d2c16fadde78df35a09fd9d8818aa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "music_from_my_tshirt.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| 29.090909
| 84
| 0.689063
|
ddd3c632cf43ab044ded93d05504354388163487
| 814
|
py
|
Python
|
tests/language_understanding/attribute_extraction/extractor_test.py
|
hamano-takashi/ochiAI-bot
|
16cf2057ada741c08df6ce471049b9da4caeb3ed
|
[
"MIT"
] | 2
|
2016-09-01T17:35:45.000Z
|
2016-09-04T07:09:26.000Z
|
tests/language_understanding/attribute_extraction/extractor_test.py
|
okuribito/GiftConcierge
|
b8b9128303f46965d2a0ac582c120980b9536f2f
|
[
"MIT"
] | null | null | null |
tests/language_understanding/attribute_extraction/extractor_test.py
|
okuribito/GiftConcierge
|
b8b9128303f46965d2a0ac582c120980b9536f2f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
from dialogue_system.language_understanding.attribute_extraction.rule_based_extractor import RuleBasedAttributeExtractor
class AttributeExtractorTest(unittest.TestCase):
def setUp(self):
self.extractor = RuleBasedAttributeExtractor()
def tearDown(self):
pass
def test_extract(self):
attribute = self.extractor.extract(text='ラーメンを食べたい')
self.assertEqual(attribute, {'LOCATION': '', 'GENRE': 'ラーメン', 'MAXIMUM_AMOUNT': ''})
attribute = self.extractor.extract(text='西新宿のあたり')
self.assertEqual(attribute, {'LOCATION': '西新宿', 'GENRE': '', 'MAXIMUM_AMOUNT': ''})
attribute = self.extractor.extract(text='1000円以下で')
self.assertEqual(attribute, {'LOCATION': '', 'GENRE': '', 'MAXIMUM_AMOUNT': '1000'})
| 38.761905
| 120
| 0.685504
|
6bafa5307a1d773ea4fcec3d41ed6f65ba39b0e3
| 42,870
|
py
|
Python
|
web400-10/db.py
|
mehrdad-shokri/CTF_web
|
206529603af3824fc8117166ff978af3495f5a58
|
[
"MIT"
] | 664
|
2016-08-23T01:03:00.000Z
|
2022-03-20T17:02:45.000Z
|
web/db.py
|
0xlen/learn2web
|
19cf250b0515992863ea0f7f0cfeaf59918bbb9d
|
[
"MIT"
] | 12
|
2016-09-09T07:25:12.000Z
|
2021-10-05T21:11:48.000Z
|
web/db.py
|
0xlen/learn2web
|
19cf250b0515992863ea0f7f0cfeaf59918bbb9d
|
[
"MIT"
] | 203
|
2016-10-17T02:15:33.000Z
|
2021-10-17T06:36:37.000Z
|
"""
Database API
(part of web.py)
"""
__all__ = [
"UnknownParamstyle", "UnknownDB", "TransactionError",
"sqllist", "sqlors", "reparam", "sqlquote",
"SQLQuery", "SQLParam", "sqlparam",
"SQLLiteral", "sqlliteral",
"database", 'DB',
]
import time, os, urllib, urlparse
try:
import datetime
except ImportError:
datetime = None
try: set
except NameError:
from sets import Set as set
from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
try:
# db module can work independent of web.py
from webapi import debug, config
except:
import sys
debug = sys.stderr
config = storage()
class UnknownDB(Exception):
"""raised for unsupported dbms"""
pass
class _ItplError(ValueError):
def __init__(self, text, pos):
ValueError.__init__(self)
self.text = text
self.pos = pos
def __str__(self):
return "unfinished expression in %s at char %d" % (
repr(self.text), self.pos)
class TransactionError(Exception): pass
class UnknownParamstyle(Exception):
"""
raised for unsupported db paramstyles
(currently supported: qmark, numeric, format, pyformat)
"""
pass
class SQLParam(object):
"""
Parameter in SQLQuery.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, paramstyle
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
r"""Creates a new SQLQuery.
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, basestring):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, basestring):
items = [other]
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (basestring, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, skip escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0:
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
class SQLLiteral:
"""
Protects a string from `sqlquote`.
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
dictionary = dictionary.copy() # eval mucks with it
    # disable builtins to avoid the risk of remote code execution.
dictionary['__builtins__'] = object()
vals = []
result = []
for live, chunk in _interpolate(string_):
if live:
v = eval(chunk, dictionary)
result.append(sqlquote(v))
else:
result.append(chunk)
return SQLQuery.join(result, '')
def sqlify(obj):
"""
converts `obj` to its proper SQL version
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, long):
return str(obj)
elif datetime and isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if isinstance(obj, unicode): obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
>>> sqllist(u'abc')
u'abc'
"""
if isinstance(lst, basestring):
return lst
else:
return ', '.join(lst)
def sqlors(left, lst):
"""
    `left` is a SQL clause like `tablename.arg = `
and `lst` is a list of values. Returns a reparam-style
pair featuring the SQL that ORs together the clause
for each item in the lst.
>>> sqlors('foo = ', [])
<sql: '1=2'>
>>> sqlors('foo = ', [1])
<sql: 'foo = 1'>
>>> sqlors('foo = ', 1)
<sql: 'foo = 1'>
>>> sqlors('foo = ', [1,2,3])
<sql: '(foo = 1 OR foo = 2 OR foo = 3 OR 1=2)'>
"""
if isinstance(lst, iters):
lst = list(lst)
ln = len(lst)
if ln == 0:
return SQLQuery("1=2")
if ln == 1:
lst = lst[0]
if isinstance(lst, iters):
return SQLQuery(['('] +
sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
['1=2)']
)
else:
return left + sqlparam(lst)
def sqlwhere(dictionary, grouping=' AND '):
"""
Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
>>> sqlwhere({'cust_id': 2, 'order_id':3})
<sql: 'order_id = 3 AND cust_id = 2'>
>>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere({'a': 'a', 'b': 'b'}).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
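# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Transaction objects are normally obtained through DB.transaction(); `db` is
# assumed to be a handle created with database(...), and the table and column
# names below are made up. The function is not called anywhere; it only
# documents the two usage styles.
def _transaction_usage_example(db):
    with db.transaction():
        db.insert('todo', title='write docs')   # committed on clean exit, rolled back on error
    t = db.transaction()
    try:
        db.insert('todo', title='try again')
        t.commit()
    except:
        t.rollback()
        raise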
class DB:
"""Database"""
def __init__(self, db_module, keywords):
"""Creates a database.
"""
        # some DB implementations take an optional parameter `driver` to use a specific driver module
# but it should not be passed to connect
keywords.pop('driver', None)
self.db_module = db_module
self.keywords = keywords
self._ctx = threadeddict()
# flag to enable/disable printing queries
self.printing = config.get('debug_sql', config.get('debug', False))
self.supports_multiple_insert = False
try:
import DBUtils
# enable pooling if DBUtils module is available.
self.has_pooling = True
except ImportError:
self.has_pooling = False
# Pooling can be disabled by passing pooling=False in the keywords.
self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
if self.has_pooling:
ctx.db = self._connect_with_pooling(self.keywords)
else:
ctx.db = self._connect(self.keywords)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit(unload=True):
# do db commit and release the connection if pooling is enabled.
ctx.db.commit()
if unload and self.has_pooling:
self._unload_context(self._ctx)
def rollback():
# do db rollback and release the connection if pooling is enabled.
ctx.db.rollback()
if self.has_pooling:
self._unload_context(self._ctx)
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, keywords):
return self.db_module.connect(**keywords)
def _connect_with_pooling(self, keywords):
def get_pooled_db():
from DBUtils import PooledDB
# In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
# see Bug#122112
if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
else:
return PooledDB.PooledDB(creator=self.db_module, **keywords)
if getattr(self, '_pooleddb', None) is None:
self._pooleddb = get_pooled_db()
return self._pooleddb.connection()
def _db_cursor(self):
return self.ctx.db.cursor()
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute if this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle, style
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
try:
a = time.time()
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
b = time.time()
except:
if self.printing:
print >> debug, 'ERR:', str(sql_query)
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
raise
if self.printing:
print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def _where(self, where, vars):
if isinstance(where, (int, long)):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, dict):
where = self._where_dict(where)
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def _where_dict(self, where):
where_clauses = []
for k, v in where.iteritems():
where_clauses.append(k + ' = ' + sqlquote(v))
if where_clauses:
return SQLQuery.join(where_clauses, " AND ")
else:
return None
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
>>> db = DB(None, {})
>>> db.query("SELECT * FROM foo", _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
>>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
<sql: "SELECT * FROM foo WHERE x = 'f'">
"""
if vars is None: vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test: return sql_query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
def iterwrapper():
row = db_cursor.fetchone()
while row:
yield storage(dict(zip(names, row)))
row = db_cursor.fetchone()
out = iterbetter(iterwrapper())
out.__len__ = lambda: int(db_cursor.rowcount)
out.list = lambda: [storage(dict(zip(names, x))) \
for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.ctx.transactions:
self.ctx.commit()
return out
def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
limit=None, offset=None, _test=False):
"""
Selects `what` from `tables` with clauses `where`, `order`,
`group`, `limit`, and `offset`. Uses vars to interpolate.
Otherwise, each clause can be a SQLQuery.
>>> db = DB(None, {})
>>> db.select('foo', _test=True)
<sql: 'SELECT * FROM foo'>
>>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
<sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
>>> db.select('foo', where={'id': 5}, _test=True)
<sql: 'SELECT * FROM foo WHERE id = 5'>
"""
if vars is None: vars = {}
sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
qout = SQLQuery.join(clauses)
if _test: return qout
return self.query(qout, processed=True)
def where(self, table, what='*', order=None, group=None, limit=None,
offset=None, _test=False, **kwargs):
"""
Selects from `table` where keys are equal to values in `kwargs`.
>>> db = DB(None, {})
>>> db.where('foo', bar_id=3, _test=True)
<sql: 'SELECT * FROM foo WHERE bar_id = 3'>
>>> db.where('foo', source=2, crust='dewey', _test=True)
<sql: "SELECT * FROM foo WHERE source = 2 AND crust = 'dewey'">
>>> db.where('foo', _test=True)
<sql: 'SELECT * FROM foo'>
"""
where = self._where_dict(kwargs)
return self.select(table, what=what, order=order,
group=group, limit=limit, offset=offset, _test=_test,
where=where)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('LIMIT', limit),
('OFFSET', offset))
def gen_clause(self, sql, val, vars):
if isinstance(val, (int, long)):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif sql == 'WHERE' and isinstance(val, dict):
nout = self._where_dict(val)
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b: return a + ' ' + b
else: return a or b
return xjoin(sql, nout)
def insert(self, tablename, seqname=None, _test=False, **values):
"""
Inserts `values` into `tablename`. Returns current sequence ID.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
>>> q
<sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
>>> q.query()
'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
>>> q.values()
[2, 'bob']
"""
def q(x): return "(" + x + ")"
if values:
_keys = SQLQuery.join(values.keys(), ', ')
_values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
else:
sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s DEFAULT VALUES" % table
def multiple_insert(self, tablename, values, seqname=None, _test=False):
"""
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
Returns the list of ids of the inserted rows.
Set `seqname` to the ID if it's not the default, or to `False`
if there isn't one.
>>> db = DB(None, {})
>>> db.supports_multiple_insert = True
>>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
>>> db.multiple_insert('person', values=values, _test=True)
<sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')">
"""
if not values:
return []
if not self.supports_multiple_insert:
out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
if seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError, 'Not all rows have the same keys'
sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
if _test: return sql_query
db_cursor = self._db_cursor()
if seqname is not False:
sql_query = self._process_insert_query(sql_query, tablename, seqname)
if isinstance(sql_query, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql_query
self._db_execute(db_cursor, q1)
self._db_execute(db_cursor, q2)
else:
self._db_execute(db_cursor, sql_query)
try:
out = db_cursor.fetchone()[0]
out = range(out-len(values)+1, out+1)
except Exception:
out = None
if not self.ctx.transactions:
self.ctx.commit()
return out
def update(self, tables, where, vars=None, _test=False, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
>>> db = DB(None, {})
>>> name = 'Joseph'
>>> q = db.update('foo', where='name = $name', name='bob', age=2,
... created=SQLLiteral('NOW()'), vars=locals(), _test=True)
>>> q
<sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
>>> q.query()
'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
>>> q.values()
[2, 'bob', 'Joseph']
"""
if vars is None: vars = {}
where = self._where(where, vars)
query = (
"UPDATE " + sqllist(tables) +
" SET " + sqlwhere(values, ', ') +
" WHERE " + where)
if _test: return query
db_cursor = self._db_cursor()
self._db_execute(db_cursor, query)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def delete(self, table, where, using=None, vars=None, _test=False):
"""
Deletes from `table` with clauses `where` and `using`.
>>> db = DB(None, {})
>>> name = 'Joe'
>>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
<sql: "DELETE FROM foo WHERE name = 'Joe'">
"""
if vars is None: vars = {}
where = self._where(where, vars)
q = 'DELETE FROM ' + table
if using: q += ' USING ' + sqllist(using)
if where: q += ' WHERE ' + where
if _test: return q
db_cursor = self._db_cursor()
self._db_execute(db_cursor, q)
if not self.ctx.transactions:
self.ctx.commit()
return db_cursor.rowcount
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
class PostgresDB(DB):
"""Postgres driver."""
def __init__(self, **keywords):
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
if db_module.__name__ == "psycopg2":
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
if db_module.__name__ == "pgdb" and 'port' in keywords:
keywords["host"] += ":" + str(keywords.pop('port'))
# if db is not provided postgres driver will take it from PGDATABASE environment variable
if 'db' in keywords:
keywords['database'] = keywords.pop('db')
self.dbname = "postgres"
self.paramstyle = db_module.paramstyle
DB.__init__(self, db_module, keywords)
self.supports_multiple_insert = True
self._sequences = None
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# when seqname is not provided guess the seqname and make sure it exists
seqname = tablename + "_id_seq"
if seqname not in self._get_all_sequences():
seqname = None
if seqname:
query += "; SELECT currval('%s')" % seqname
return query
def _get_all_sequences(self):
"""Query postgres to find names of all sequences used in this database."""
if self._sequences is None:
q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
self._sequences = set([c.relname for c in self.query(q)])
return self._sequences
def _connect(self, keywords):
conn = DB._connect(self, keywords)
try:
conn.set_client_encoding('UTF8')
except AttributeError:
# fallback for pgdb driver
conn.cursor().execute("set client_encoding to 'UTF-8'")
return conn
def _connect_with_pooling(self, keywords):
conn = DB._connect_with_pooling(self, keywords)
conn._con._con.set_client_encoding('UTF8')
return conn
class MySQLDB(DB):
def __init__(self, **keywords):
import MySQLdb as db
if 'pw' in keywords:
keywords['passwd'] = keywords['pw']
del keywords['pw']
if 'charset' not in keywords:
keywords['charset'] = 'utf8'
elif keywords['charset'] is None:
del keywords['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self, db, keywords)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
class SqliteDB(DB):
def __init__(self, **keywords):
db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
db.paramstyle = 'qmark'
            # sqlite driver doesn't create datetime objects for timestamp columns unless the `detect_types` option is passed.
            # It seems to be supported in sqlite3 and pysqlite2 drivers, not sure about sqlite.
keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
self.paramstyle = db.paramstyle
keywords['database'] = keywords.pop('db')
        keywords['pooling'] = False # sqlite doesn't allow connections to be shared between threads
self.dbname = "sqlite"
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_rowid();')
def query(self, *a, **kw):
out = DB.query(self, *a, **kw)
if isinstance(out, iterbetter):
del out.__len__
return out
class FirebirdDB(DB):
"""Firebird Database.
"""
def __init__(self, **keywords):
try:
import kinterbasdb as db
except Exception:
db = None
pass
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.paramstyle = db.paramstyle
DB.__init__(self, db, keywords)
def delete(self, table, where=None, using=None, vars=None, _test=False):
# firebird doesn't support using clause
using=None
return DB.delete(self, table, where, using, vars, _test)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', ''),
('FIRST', limit),
('SKIP', offset),
('', what),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order)
)
class MSSQLDB(DB):
def __init__(self, **keywords):
import pymssql as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
keywords['database'] = keywords.pop('db')
self.dbname = "mssql"
DB.__init__(self, db, keywords)
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
# MSSQLDB expects params to be a tuple.
# Overwriting the default implementation to convert params to tuple.
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, tuple(params)
def sql_clauses(self, what, tables, where, group, order, limit, offset):
return (
('SELECT', what),
('TOP', limit),
('FROM', sqllist(tables)),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
('OFFSET', offset))
def _test(self):
"""Test LIMIT.
Fake presence of pymssql module for running tests.
>>> import sys
>>> sys.modules['pymssql'] = sys.modules['sys']
MSSQL has TOP clause instead of LIMIT clause.
>>> db = MSSQLDB(db='test', user='joe', pw='secret')
>>> db.select('foo', limit=4, _test=True)
<sql: 'SELECT * TOP 4 FROM foo'>
"""
pass
class OracleDB(DB):
def __init__(self, **keywords):
import cx_Oracle as db
if 'pw' in keywords:
keywords['password'] = keywords.pop('pw')
#@@ TODO: use db.makedsn if host, port is specified
keywords['dsn'] = keywords.pop('db')
self.dbname = 'oracle'
db.paramstyle = 'numeric'
self.paramstyle = db.paramstyle
# oracle doesn't support pooling
keywords.pop('pooling', None)
DB.__init__(self, db, keywords)
def _process_insert_query(self, query, tablename, seqname):
if seqname is None:
# It is not possible to get seq name from table name in Oracle
return query
else:
return query + "; SELECT %s.currval FROM dual" % seqname
def dburl2dict(url):
"""
Takes a URL to a database and parses it into an equivalent dictionary.
>>> dburl2dict('postgres:///mygreatdb')
{'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
>>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb')
{'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
>>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb')
{'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
>>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb')
{'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
>>> dburl2dict('mysql://james:d%40y@serverfarm.example.net/mygreatdb')
{'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
"""
parts = urlparse.urlparse(urllib.unquote(url))
return {'dbn': parts.scheme,
'user': parts.username,
'pw': parts.password,
'db': parts.path[1:],
'host': parts.hostname,
'port': parts.port}
_databases = {}
def database(dburl=None, **params):
"""Creates appropriate database using params.
Pooling will be enabled if DBUtils module is available.
Pooling can be disabled by passing pooling=False in params.
"""
if not dburl and not params:
dburl = os.environ['DATABASE_URL']
if dburl:
params = dburl2dict(dburl)
dbn = params.pop('dbn')
if dbn in _databases:
return _databases[dbn](**params)
else:
raise UnknownDB, dbn
def register_database(name, clazz):
"""
Register a database.
>>> class LegacyDB(DB):
... def __init__(self, **params):
... pass
...
>>> register_database('legacy', LegacyDB)
>>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
"""
_databases[name] = clazz
register_database('mysql', MySQLDB)
register_database('postgres', PostgresDB)
register_database('sqlite', SqliteDB)
register_database('firebird', FirebirdDB)
register_database('mssql', MSSQLDB)
register_database('oracle', OracleDB)
def _interpolate(format):
"""
Takes a format string and returns a list of 2-tuples of the form
(boolean, string) where boolean says whether string should be evaled
or not.
from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
from tokenize import tokenprog
def matchorfail(text, pos):
match = tokenprog.match(text, pos)
if match is None:
raise _ItplError(text, pos)
return match, match.end()
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
chunks = []
pos = 0
while 1:
dollar = format.find("$", pos)
if dollar < 0:
break
nextchar = format[dollar + 1]
if nextchar == "{":
chunks.append((0, format[pos:dollar]))
pos, level = dollar + 2, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token == "{":
level = level + 1
elif token == "}":
level = level - 1
chunks.append((1, format[dollar + 2:pos - 1]))
elif nextchar in namechars:
chunks.append((0, format[pos:dollar]))
match, pos = matchorfail(format, dollar + 1)
while pos < len(format):
if format[pos] == "." and \
pos + 1 < len(format) and format[pos + 1] in namechars:
match, pos = matchorfail(format, pos + 1)
elif format[pos] in "([":
pos, level = pos + 1, 1
while level:
match, pos = matchorfail(format, pos)
tstart, tend = match.regs[3]
token = format[tstart:tend]
if token[0] in "([":
level = level + 1
elif token[0] in ")]":
level = level - 1
else:
break
chunks.append((1, format[dollar + 1:pos]))
else:
chunks.append((0, format[pos:dollar + 1]))
pos = dollar + 1 + (nextchar == "$")
if pos < len(format):
chunks.append((0, format[pos:]))
return chunks
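# --- Editor's sketch (not part of the original web.py source) ---
# A hedged illustration of the chunk format _interpolate() produces: literal text
# is tagged 0, while "$name" and "${expr}" fragments are tagged 1.  The expected
# result in the comment is inferred from the tokenizer above; treat it as a sketch.
def _interpolate_example():
    # expected shape:
    # [(0, 'Hello '), (1, 'name'), (0, ', you have '), (1, 'n'), (0, ' messages')]
    return _interpolate("Hello $name, you have ${n} messages")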
if __name__ == "__main__":
import doctest
doctest.testmod()
| 33.518374
| 124
| 0.540448
|
89d81fac464384046db5804df4b171df506faecf
| 1,089
|
py
|
Python
|
LeetCode_Solutions/206. Reverse Linked List.py
|
foxfromworld/Coding-Interview-Preparation-with-LeetCode-and-An-Algorithm-Book
|
e0c704d196fe0d8452ea639a92f2a75c3b46a9b0
|
[
"BSD-2-Clause"
] | null | null | null |
LeetCode_Solutions/206. Reverse Linked List.py
|
foxfromworld/Coding-Interview-Preparation-with-LeetCode-and-An-Algorithm-Book
|
e0c704d196fe0d8452ea639a92f2a75c3b46a9b0
|
[
"BSD-2-Clause"
] | null | null | null |
LeetCode_Solutions/206. Reverse Linked List.py
|
foxfromworld/Coding-Interview-Preparation-with-LeetCode-and-An-Algorithm-Book
|
e0c704d196fe0d8452ea639a92f2a75c3b46a9b0
|
[
"BSD-2-Clause"
] | null | null | null |
# Source : https://leetcode.com/problems/reverse-linked-list/
# Author : foxfromworld
# Date : 02/02/2021
# Second attempt
class Solution:
def reverseList(self, head): # Recursive
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return head
if not head.next:
return head
p = self.reverseList(head.next)
head.next.next = head
head.next = None
return p
# Date : 02/02/2021
# First attempt (iterative)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head): # Iterative
"""
:type head: ListNode
:rtype: ListNode
"""
# curr curr.next
# ↓ ↓
# prev 1 -> 2 -> None
# prev curr curr.next
# ↓ ↓ ↓
# None <- 1 -> 2 -> None
prev, curr = None, head
while curr:
#curr.next, prev, curr = prev, curr, curr.next
prev, curr.next, curr = curr, prev, curr.next
return prev
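# --- Editor's sketch (not part of the original solution file) ---
# Minimal usage example for the iterative solution above.  The tiny ListNode class
# defined here is an assumption standing in for LeetCode's definition, which the
# original file only includes as a comment.
def _reverse_list_example():
    class ListNode:
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    # build 1 -> 2 -> 3, reverse it, then collect the values back into a list
    head = ListNode(1, ListNode(2, ListNode(3)))
    node = Solution().reverseList(head)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    return values  # expected: [3, 2, 1]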
| 23.170213
| 61
| 0.55831
|
4128e4eb07a7469218931ef28ab94b81e497c809
| 15,025
|
py
|
Python
|
talib/StopStrategy.py
|
isaacdlp/pyalgosamples
|
e1a6504c6cd0d906ef082c81037b59b137a99105
|
[
"Apache-2.0"
] | 3
|
2017-12-01T16:21:54.000Z
|
2020-03-17T01:55:21.000Z
|
talib/StopStrategy.py
|
isaacdlp/pyalgosamples
|
e1a6504c6cd0d906ef082c81037b59b137a99105
|
[
"Apache-2.0"
] | 1
|
2019-04-17T06:17:09.000Z
|
2020-05-26T04:00:16.000Z
|
talib/StopStrategy.py
|
isaacdlp/pyalgosamples
|
e1a6504c6cd0d906ef082c81037b59b137a99105
|
[
"Apache-2.0"
] | 2
|
2017-11-23T10:01:04.000Z
|
2019-11-06T15:35:41.000Z
|
# PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
SCENARIO = 2
DBFEED = False
TRAILING = False
from pyalgotrade import strategy, plotter, dataseries
from pyalgotrade.technical import ma, macd, cross
from pyalgotrade.stratanalyzer import drawdown, returns, sharpe
from pyalgotrade.utils import stats
import datetime
import pyalgotrade.logger as logger
import math
from pyalgotrade import broker
from pyalgotrade.broker import backtesting
from pyalgoext import dbfeed
from pyalgotrade.barfeed import yahoofeed
from pyalgotrade.talibext import indicator
class SessionOpenStopOrder(backtesting.StopOrder):
def __init__(self, action, position, pricePer, quantity, instrumentTraits):
if action == broker.Order.Action.SELL:
pricePer = 1 - pricePer
else:
pricePer = 1 + pricePer
self._dateTime = position.getStrategy().getCurrentDateTime()
self._pricePer = pricePer
self._position = position
backtesting.StopOrder.__init__(self, action, position.getInstrument(), position.getLastPrice() * pricePer, quantity, instrumentTraits)
position.getStrategy().getBarsProcessedEvent().subscribe(self.__onBars)
def __onBars(self, strategy, bars):
instrument = self._position.getInstrument()
        if self._dateTime.date() != bars.getDateTime().date():
if instrument in bars:
self._StopOrder__stopPrice = bars[instrument].getPrice() * self._pricePer
self._dateTime = bars.getDateTime()
class TrailingStopOrder(backtesting.StopOrder):
def __init__(self, action, position, pricePer, quantity, instrumentTraits):
backtesting.StopOrder.__init__(self, action, position.getInstrument(), 0, quantity, instrumentTraits)
if action == broker.Order.Action.SELL:
pricePer = 1 - pricePer
else:
pricePer = 1 + pricePer
self._pricePer = pricePer
self._position = position
self._refPrice = 0
def getStopPrice(self):
lastPrice = self._position.getLastPrice()
if self.getAction() == broker.Order.Action.SELL:
if self._refPrice < lastPrice:
self._refPrice = lastPrice
else:
if self._refPrice > lastPrice:
self._refPrice = lastPrice
return self._refPrice * self._pricePer
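# --- Editor's sketch (not part of the original strategy) ---
# The trailing stop above keeps the best price seen so far (self._refPrice) and
# multiplies it by (1 - stopPer) on the SELL side.  A standalone numeric check of
# that arithmetic, with made-up prices:
def _trailing_stop_sketch(prices=(100.0, 105.0, 103.0), stop_per=0.15):
    ref_price = 0.0
    for last_price in prices:
        if ref_price < last_price:  # SELL-side logic: ratchet up on new highs
            ref_price = last_price
    return ref_price * (1 - stop_per)  # 105.0 * 0.85 == 89.25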
class MyBenchmark(strategy.BacktestingStrategy):
def __init__(self, feed, stopPer, stopTrailing, delay):
myBroker = backtesting.Broker(1000000, feed, backtesting.TradePercentage(0.002))
myBroker.setAllowNegativeCash(True)
myBroker.getFillStrategy().setVolumeLimit(None)
super(MyBenchmark, self).__init__(feed, myBroker)
self._delay = delay
self._feed = feed
self._session = 0
self._liquidity = 0.05
self._posMax = len(feed.getRegisteredInstruments())
self._posLong = {}
self._posShort = {}
self.startDateTime = None
self.endDateTime = None
self._stopPer = stopPer
self._stopTrailing = stopTrailing
self.setUseAdjustedValues(True)
def onEnterOk(self, position):
order = position.getEntryOrder()
if order.getAction() == broker.Order.Action.BUY:
self.logOp("COMPRA", order)
if self._stopTrailing:
stopOrder = TrailingStopOrder(broker.Order.Action.SELL, position, self._stopPer, position.getShares(), order.getInstrumentTraits())
stopOrder.setGoodTillCanceled(True)
position._Position__submitAndRegisterOrder(stopOrder)
position._Position__exitOrder = stopOrder
else:
position.exitStop(order.getExecutionInfo().getPrice() * (1 - self._stopPer), True)
else:
self.logOp("VENTA CORTA", order)
if self._stopTrailing:
stopOrder = TrailingStopOrder(broker.Order.Action.BUY_TO_COVER, position, self._stopPer, math.fabs(position.getShares()), order.getInstrumentTraits())
stopOrder.setGoodTillCanceled(True)
position._Position__submitAndRegisterOrder(stopOrder)
position._Position__exitOrder = stopOrder
else:
position.exitStop(order.getExecutionInfo().getPrice() * (1 + self._stopPer), True)
def onEnterCanceled(self, position):
order = position.getEntryOrder()
if order.getAction() == broker.Order.Action.BUY:
del self._posLong[position.getInstrument()]
self.logOp("COMPRA CANCELADA", order)
else:
del self._posShort[position.getInstrument()]
self.logOp("VENTA CORTA CANCELADA", order)
def onExitOk(self, position):
order = position.getExitOrder()
if order.getAction() == broker.Order.Action.SELL:
del self._posLong[position.getInstrument()]
self.logOp("VENTA", order)
else:
del self._posShort[position.getInstrument()]
self.logOp("COMPRA PARA CUBRIR", order)
def onExitCanceled(self, position):
# If the exit was canceled, re-submit it.
position.exitMarket()
order = position.getExitOrder()
if order.getAction() == broker.Order.Action.SELL:
self.logOp("VENTA CANCELADA en %s" % (self.getCurrentDateTime().date()), order)
else:
self.logOp("COMPRA PARA CUBRIR CANCELADA en %s" % (self.getCurrentDateTime().date()), order)
def onBars(self, bars):
# Wait for the same bar than the strategy
if self._delay > 0:
self._delay -= 1
return
for instrument, bar in bars.items():
if instrument not in self._posLong:
self.prepareEnter(instrument, bars)
def prepareEnter(self, instrument, bars, action=broker.Order.Action.BUY):
if (len(self._posLong) + len(self._posShort)) < self._posMax:
bar = bars[instrument]
perInstrument = self.getBroker().getEquity() / self._posMax
cash = self.getBroker().getCash()
if perInstrument > cash:
perInstrument = cash
perInstrument *= (1 - self._liquidity)
amount = int(perInstrument / bar.getPrice())
if amount > 0:
if (action == broker.Order.Action.BUY):
self._posLong[instrument] = self.enterLong(instrument, amount, True)
else:
self._posShort[instrument] = self.enterShort(instrument, amount, True)
def prepareExit(self, position):
order = position.getExitOrder()
if not order:
position.exitMarket()
elif isinstance(order, broker.StopOrder):
# order._Order__state = broker.Order.State.CANCELED
# position.exitMarket()
position.cancelExit()
def onStart(self):
startDateTime = self.getCurrentDateTime()
# Problem at Yahoo Feeds start with None
if not startDateTime:
startDateTime = self.getFeed().peekDateTime()
self.startDateTime = startDateTime
def onFinish(self, bars):
self.endDateTime = bars.getDateTime()
def logOp(self, type, order):
self.info("%s %s %s" % (type, order.getInstrument(), order.getExecutionInfo()))
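# --- Editor's sketch (not part of the original strategy) ---
# prepareEnter() above sizes a position as equity / max positions, capped by the
# available cash, reduced by a liquidity buffer, then floored to whole shares.
# A standalone version of that arithmetic with illustrative numbers:
def _position_size_sketch(equity=1000000.0, cash=400000.0, pos_max=5,
                          liquidity=0.05, price=25.0):
    per_instrument = equity / pos_max
    if per_instrument > cash:
        per_instrument = cash
    per_instrument *= (1 - liquidity)
    return int(per_instrument / price)  # int(190000.0 / 25.0) == 7600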
class MyBasicStrategy(MyBenchmark):
def __init__(self, feed, stopPer, stopTrailing, smaShort, smaLong):
MyBenchmark.__init__(self, feed, stopPer, stopTrailing, smaLong)
self._smaShort = {}
self._smaLong = {}
self._macdPrice = {}
self._macdVol = {}
for instrument in feed.getRegisteredInstruments():
self._smaShort[instrument] = ma.SMA(self._feed[instrument].getPriceDataSeries(), smaShort)
self._smaLong[instrument] = ma.SMA(self._feed[instrument].getPriceDataSeries(), smaLong)
self._macdPrice[instrument] = macd.MACD(self._feed[instrument].getPriceDataSeries(), 12, 26, 9)
self._macdVol[instrument] = macd.MACD(self._feed[instrument].getVolumeDataSeries(), 12, 26, 9)
def getSMAShorts(self):
return self._smaShort
def getSMALongs(self):
return self._smaLong
def onBars(self, bars):
for instrument, bar in bars.items():
# Wait for enough bars to be available to calculate a SMA.
if not self._smaLong[instrument] or self._smaLong[instrument][-1] is None:
return
pri = self._macdPrice[instrument]
vol = self._macdVol[instrument]
if instrument in self._posLong:
if cross.cross_below(self._smaShort[instrument], self._smaLong[instrument]):
position = self._posLong[instrument]
self.prepareExit(position)
elif instrument in self._posShort:
if cross.cross_above(pri.getSignal(), pri):
position = self._posShort[instrument]
self.prepareExit(position)
if cross.cross_above(self._smaShort[instrument], self._smaLong[instrument]):
self.prepareEnter(instrument, bars)
elif cross.cross_below(pri.getSignal(), pri) and cross.cross_above(vol.getSignal(), vol):
self.prepareEnter(instrument, bars, broker.Order.Action.SELL_SHORT)
class MyTaLibStrategy(MyBasicStrategy):
def __init__(self, feed, stopPer, stopTrailing, smaShort, smaLong, aroonPeriod):
MyBasicStrategy.__init__(self, feed, stopPer, stopTrailing, smaShort, smaLong)
self._aroon = {}
self._aroonPeriod = aroonPeriod
for instrument in feed.getRegisteredInstruments():
self._aroon[instrument] = dataseries.SequenceDataSeries()
def onBars(self, bars):
for instrument, bar in bars.items():
# Wait for enough bars to be available to calculate a SMA.
if not self._smaLong[instrument] or self._smaLong[instrument][-1] is None:
return
barDs = self.getFeed().getDataSeries(instrument)
#closeDs = barDs.getCloseDataSeries()
aroon = indicator.AROONOSC(barDs, self._aroonPeriod + 1, self._aroonPeriod)
self._aroon[instrument].appendWithDateTime(self.getCurrentDateTime(), aroon[-1])
pri = self._macdPrice[instrument]
vol = self._macdVol[instrument]
if instrument in self._posLong:
if cross.cross_below(self._smaShort[instrument], self._smaLong[instrument]) and aroon[-1] < -25:
position = self._posLong[instrument]
self.prepareExit(position)
elif instrument in self._posShort:
if cross.cross_above(pri.getSignal(), pri) and aroon[-1] > 25:
position = self._posShort[instrument]
self.prepareExit(position)
if cross.cross_above(self._smaShort[instrument], self._smaLong[instrument]) and aroon[-1] > 25:
self.prepareEnter(instrument, bars)
elif cross.cross_below(pri.getSignal(), pri) and cross.cross_above(vol.getSignal(), vol) and aroon[-1] < -25:
self.prepareEnter(instrument, bars, broker.Order.Action.SELL_SHORT)
if __name__ == "__main__":
config = {
'user': 'root',
'password': '',
'host': '127.0.0.1',
'database': 'ibex35',
'raise_on_warnings': True,
}
logger.log_format = "[%(levelname)s] %(message)s"
stopPer = 0.15
smaShort = 50
smaLong = 200
aroonPeriod = 25
startDate = datetime.date(2001, 05, 24)
endDate = datetime.date(2015, 12, 21)
instrument = 'GAS.MC'
feed = None
if DBFEED:
feed = dbfeed.DbFeed(config, [], 100, startDate, endDate)
feed.registerInstrument(instrument)
else:
feed = yahoofeed.Feed()
feed.sanitizeBars(True)
feed.addBarsFromCSV(instrument, instrument + ".csv")
myStrategy = None
if SCENARIO == 2:
myStrategy = MyBasicStrategy(feed, stopPer, TRAILING, smaShort, smaLong)
elif SCENARIO == 3:
myStrategy = MyTaLibStrategy(feed, stopPer, TRAILING, smaShort, smaLong, aroonPeriod)
else:
myStrategy = MyBenchmark(feed, stopPer, TRAILING, smaLong)
# Strategy
returnsAnalyzer = returns.Returns()
myStrategy.attachAnalyzer(returnsAnalyzer)
returnsAnalyzer.getReturns().setMaxLen(1000000)
sharpeAnalyzer = sharpe.SharpeRatio()
myStrategy.attachAnalyzer(sharpeAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
myStrategy.attachAnalyzer(drawDownAnalyzer)
# Attach a plotter to the strategy
plt = plotter.StrategyPlotter(myStrategy, True)
if hasattr(myStrategy, '_aroon'):
subPlot = plt.getOrCreateSubplot("Aroon")
subPlot.addDataSeries("Aroon", myStrategy._aroon[instrument])
if hasattr(myStrategy, '_smaShort'):
subPlot = plt.getOrCreateSubplot("SMA")
subPlot.addDataSeries("SMALong", myStrategy._smaLong[instrument])
subPlot.addDataSeries("SMAShort", myStrategy._smaShort[instrument])
subPlot = plt.getOrCreateSubplot("MACD")
subPlot.addDataSeries("MACDPrice", myStrategy._macdPrice[instrument])
subPlot.addDataSeries("MACDVolume", myStrategy._macdVol[instrument])
capStart = myStrategy.getBroker().getEquity()
myStrategy.info("CAPITAL INICIAL: $%.4f" % capStart)
# Run the strategy
myStrategy.run()
# Show basic information
allRet = returnsAnalyzer.getReturns()
capEnd = myStrategy.getBroker().getEquity()
myStrategy.info("CAPITAL FINAL: $%.4f" % capEnd)
myStrategy.info(" ")
myStrategy.info("Rentabilidad: %.4f%%" % (100 * (capEnd - capStart) / capStart))
myStrategy.info("Rentabilidad Anualizada: %.4f%%" % (100 * (math.pow((capEnd / capStart), (365.0 / ((myStrategy.endDateTime - myStrategy.startDateTime).days))) - 1)))
myStrategy.info("Volatilidad Anualizada: %.4f%%" % (100 * stats.stddev(allRet, 1) * math.sqrt(252)))
myStrategy.info("Ratio de Sharpe Anualizado: %.4f" % (100 * sharpeAnalyzer.getSharpeRatio(0.0036, True)))
myStrategy.info("DrawDown Maximo: %.4f%%" % (100 * drawDownAnalyzer.getMaxDrawDown()))
myStrategy.info("DrawDown Mas Largo: %s dias" % (drawDownAnalyzer.getLongestDrawDownDuration().days))
# Plot the strategy.
plt.plot()
| 40.389785
| 170
| 0.651514
|
89617eb32366723ef73b8d3df1f49c4c6ea58cc5
| 12,751
|
py
|
Python
|
active_code_base/lok_scraper_1.py
|
justin-napolitano/USSupremeCourtMetaDataGraph
|
d2d53915cdba33848bdd81f661d3c496ca518966
|
[
"Apache-2.0"
] | null | null | null |
active_code_base/lok_scraper_1.py
|
justin-napolitano/USSupremeCourtMetaDataGraph
|
d2d53915cdba33848bdd81f661d3c496ca518966
|
[
"Apache-2.0"
] | null | null | null |
active_code_base/lok_scraper_1.py
|
justin-napolitano/USSupremeCourtMetaDataGraph
|
d2d53915cdba33848bdd81f661d3c496ca518966
|
[
"Apache-2.0"
] | null | null | null |
#library_of_congress_scraper.py
from __future__ import print_function
from bs4 import BeautifulSoup
import requests
import lxml.etree as etree
import xml.etree.ElementTree as ET
import json
import pandas as pd
import os
import time
import random
import math
from pprint import pprint
#import load_vars as lv
import html
import yaml
from yaml import Loader, Dumper
import glob
import datetime
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google.oauth2 import service_account
from googleapiclient.http import MediaIoBaseDownload, MediaFileUpload
from flatten_json import flatten
import networkx as nx
class node_json():
    def __init__(self, data):
        self.out = flatten(data)
        self.graph = nx.Graph()
    def create_node(self, key):
        testing = "do_something"
    def node_generator(self, data, name=''):
        output = {}
        keys_at_level = []
        # If the nested key-value
        # pair is of dict type
        if type(data) is dict:
            for k, v in data.items():
                self.create_node(k)
                self.node_generator(v)
        elif type(data) is list:
            for item in data:
                self.node_generator(item)
        else:
            # this item is no longer a dictionary or list
            self.create_node(data)
        # flatten(hierarchical_dict)
        return output
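# --- Editor's sketch (not part of the original scraper) ---
# The class above leans on flatten_json.flatten(), which collapses nested dicts
# into a single level by joining keys (underscore-separated by default, to this
# editor's recollection -- verify against the installed flatten_json version).
def _flatten_sketch():
    # expected shape: {'item_title': 'x', 'item_ids_0': 1, 'item_ids_1': 2}
    return flatten({'item': {'title': 'x', 'ids': [1, 2]}})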
class search_result():
def __init__(self,dict_item,num_columns,colnum_string):
self.key = dict_item.key()
self.value = dict_item.value()
self.column_string = colnum_string
self.index = num_columns
self.range = self.create_column_range_string()
self.request_body = self.create_column_request()
def create_column_request(self):
request_body = {
'range': self.range,
"majorDimension": "COLUMNS",
"values": [self.value]
}
return request_body
def create_column_range_string(self):
rnge = "'Sheet1'" + "!" + self.column_string + str(1)
return rnge
def colnum_string(self, num_columns):
string = ""
while num_columns > 0:
num_columns, remainder = divmod(num_columns - 1, 26)
string = chr(65 + remainder) + string
return string
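# --- Editor's sketch (not part of the original scraper) ---
# A standalone check of the base-26 column-letter conversion used above:
# 1 -> "A", 26 -> "Z", 27 -> "AA", 28 -> "AB", matching spreadsheet column names.
def _colnum_string_sketch(num_columns=28):
    string = ""
    while num_columns > 0:
        num_columns, remainder = divmod(num_columns - 1, 26)
        string = chr(65 + remainder) + string
    return string  # "AB" for 28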
class google_drive:
def __init__(self,creds):
self.service = self.get_drive_service(creds)
def test(self):
pprint("hello I exist")
def get_drive_service(self, creds):
"""Shows basic usage of the Drive v3 API.
Prints the names and ids of the first 10 files the user has access to.
"""
SCOPES = []
#creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
service = build('drive', 'v3', credentials=creds)
# Call the Drive v3 API
results = service.files().list(
pageSize=10, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
print('No files found.')
else:
print('Files:')
for item in items:
print(u'{0} ({1})'.format(item['name'], item['id']))
return service
def create_folder(self,title):
drive_service = self.service
file_metadata = {
'name': '{}'.format(title),
'mimeType': 'application/vnd.google-apps.folder'
}
file = drive_service.files().create(body=file_metadata,
fields='id').execute()
print('Folder ID: %s' % file.get('id'))
def add_spreadsheet_to_folder(self ,folder_id,title):
drive_service = self.service
file_metadata = {
'name': '{}'.format(title),
'parents': [folder_id],
'mimeType': 'application/vnd.google-apps.spreadsheet',
}
res = drive_service.files().create(body=file_metadata).execute()
#print(res)
return res
class google_sheet():
def __init__(self,creds):
self.service =self.get_sheet_service(creds)
def get_sheet_service(self,creds):
service = build('sheets', 'v4', credentials=creds)
return service.spreadsheets()
class google_creds():
def __init__(self,creds_path):
self.creds = self.get_creds(creds_path)
def get_creds(self,creds_path):
creds = None
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
print("no creds")
else:
creds = service_account.Credentials.from_service_account_file(creds_path)
#creds = ServiceAccountCredentials.from_json_keyfile_name('add_json_file_here.json', SCOPES)
#flow = InstalledAppFlow.from_client_secrets_file(
# 'credentials.json', SCOPES)
#creds = flow.run_local_server(port=0)
# Save the credentials for the next run
#with open('token.json', 'w') as token:
# token.write(creds.to_json())
return creds
class config():
def __init__(self,file_path):
#self.yaml_stream = file("config.yaml", 'r')
self.data = self.load_config(file_path)
def load_config(self,file_path):
#print("test")
stream = open(file_path, 'r')
data = yaml.load(stream,Loader = Loader)
#pprint(data)
return data
class search_results_page():
def __init__(self,base_url = "https://www.loc.gov/collections",collection = "united-states-reports",json_parameter = "fo=json",results_per_page = "c=150",query_param = "?",page_param ="sp=",page_num = 1,column_lookup_table = {},num_columns = 0):
self.search_url = self.create_search_url(base_url,collection,json_parameter,results_per_page,query_param,page_param,page_num)
self.response = self.request_data()
self.response_json = self.response_to_json()
#self.soup_html = self.html_parse()
self.next_url = self.get_next_url()
self.page_num = page_num
self.response_json_flat = self.flatten_result()
self.num_columns = num_columns
self.column_lookup_table = column_lookup_table
self.map_columns_to_lookup_table()
def create_search_result_node(self):
for item in self.response_json_flat:
for k,v in item.items():
if k not in self.column_lookup_table:
                    self.column_lookup_table[k] = self.colnum_string()
                    self.num_columns += 1
else:
continue
def append_to_data_list(self,rnge,d,data_list):#rename to _data_list
request_body_tmp = {
'range': rnge,
"majorDimension": "COLUMNS",
"values": [d]
}
data_list.append(request_body_tmp)
def map_columns_to_batch_request_list(self):
for item in self.response_json_flat:
for k,v in item.items():
mapped_column_key = self.column_lookup_table[k]
                rnge = "'Sheet1'" + "!" + mapped_column_key + str(1)
    def colnum_string(self):
        string = ""
        num_columns = self.num_columns
        while num_columns > 0:
            num_columns, remainder = divmod(num_columns - 1, 26)
            string = chr(65 + remainder) + string
        return string
def map_columns_to_lookup_table(self):
for item in self.response_json_flat:
for k in item.keys():
if k not in self.column_lookup_table:
self.column_lookup_table[k] = self.colnum_string()
self.num_columns += 1
else:
continue
#return column_lookup_table
def get_next_url(self):
return (self.response_json['pagination']['next'])
def create_search_url(self,base_url,collection,json_parameter,results_per_page,query_param,page_param,page_num):
url_sep ="/"
page_param = page_param +(str(page_num))
query = "&".join([json_parameter,results_per_page,page_param])
query = query_param + query
search_url = url_sep.join([base_url,collection,query])
#pprint(search_url)
return search_url
def say_hello(self):
pprint(self.base_url)
def request_data(self):
headers = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.11 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',
'Accept-Encoding': 'identity'
}
return requests.get(self.search_url,headers=headers)
def response_to_json(self):
return self.response.json()
def html_parse(self):
soup=BeautifulSoup(self.response.content,'lxml')
#pprint(soup)
return soup
def flatten_result(self):
flat_result_list = []
for item in self.response_json['results']:
flat_json = flatten(item)
flat_result_list.append(flat_json)
return flat_result_list
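# --- Editor's sketch (not part of the original scraper) ---
# With the defaults above, create_search_url() should assemble a URL of the shape
# shown in the comment; the exact string is inferred from the string joins in the
# method and is offered only as a sketch.
def _search_url_sketch():
    # 'https://www.loc.gov/collections/united-states-reports/?fo=json&c=150&sp=1'
    page = search_results_page.__new__(search_results_page)  # skip __init__ (it hits the network)
    return page.create_search_url("https://www.loc.gov/collections",
                                  "united-states-reports", "fo=json", "c=150",
                                  "?", "sp=", 1)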
def create_google_credentials_object(creds_path = 'credentials.json'):
google_credentials_object = google_creds(creds_path)
return google_credentials_object
def create_config_object(file_path = 'config.yaml'):
config_object = config(file_path)
return config_object
def search_result_generator(base_url = "https://www.loc.gov/collections",collection = "united-states-reports",json_parameter = "fo=json",results_per_page = "c=150",query_param = "?",page_param ="sp=",page_num = 1,condition =True):
while condition ==True:
search_results_page_object = create_search_results_page_object(base_url,collection,json_parameter,results_per_page,query_param,page_param,page_num)
if search_results_page_object.next_url != None:
condition =True
page_num = page_num + 1
yield search_results_page_object
else:
condition = False
yield search_results_page_object
def create_search_results_page_object(base_url = "https://www.loc.gov/collections",collection = "united-states-reports",json_parameter = "fo=json",results_per_page = "c=150",query_param = "?",page_param ="sp=",page_num = 1,column_lookup_table = {}):
#search = search_results(base_url,collection,json_parameter,results_per_page,query_param,page_param,page_num)
#pprint(search.search_url)
return search_results_page(base_url,collection,json_parameter,results_per_page,query_param,page_param,page_num)
def create_google_drive_object(google_creds):
drive_service_object = google_drive(google_creds)
return drive_service_object
def create_google_sheet_object(google_creds):
sheet_service_object = google_sheet(google_creds)
return sheet_service_object
def create_new_google_sheet(google_drive_object,folder_id,title):
sheet_meta_data = google_drive_object.add_spreadsheet_to_folder(folder_id, title)
return sheet_meta_data
def flatten_result(result_json):
flat_json = flatten(result_json)
return flat_json
def main():
search_result = create_search_results_page_object()
pprint(search_result.response_json['results'][0])
#flatten_result(search_result.response_json['results'][0])
#pprint(search_result.response_json['results'][0])
#config = create_config_object()
#google_credentials_object = create_google_credentials_object()
#drive_service_object = create_google_drive_object(google_credentials_object.creds)
#sheets_service_object = create_google_sheet_object(google_credentials_object.creds)
#drive_service_object.test()
#sheet_meta_data = create_new_google_sheet(drive_service_object,config.data['google']['output_folder_id'],'testing')
#pprint(search_url.base_url)
if __name__ == "__main__":
main()
| 33.555263
| 249
| 0.643479
|
3690d63070a5c31e459fb7abc66d13c03767835f
| 1,715
|
py
|
Python
|
lte/gateway/python/integ_tests/s1aptests/test_attach_dl_udp_data.py
|
remo5000/magma
|
1d1dd9a23800a8e07b1ce016776d93e12430ec15
|
[
"BSD-3-Clause"
] | 3
|
2019-08-16T17:03:09.000Z
|
2019-08-23T21:57:48.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_dl_udp_data.py
|
remo5000/magma
|
1d1dd9a23800a8e07b1ce016776d93e12430ec15
|
[
"BSD-3-Clause"
] | 14
|
2019-11-15T12:01:18.000Z
|
2019-12-12T14:37:42.000Z
|
lte/gateway/python/integ_tests/s1aptests/test_attach_dl_udp_data.py
|
119Vik/magma-1
|
107a7b374466a837fc0a49b283ba9d6ff1d702e3
|
[
"BSD-3-Clause"
] | 3
|
2019-11-15T15:56:25.000Z
|
2019-11-21T10:34:59.000Z
|
"""
Copyright (c) 2017-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import unittest
import s1ap_types
import s1ap_wrapper
class TestAttachDlUdpData(unittest.TestCase):
def setUp(self):
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
self._s1ap_wrapper.cleanup()
def test_attach_dl_udp_data(self):
""" Attach and send DL UDP data """
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
print("************************* Running End to End attach for ",
"UE id ", req.ue_id)
# Now actually complete the attach
self._s1ap_wrapper._s1_util.attach(
req.ue_id, s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t)
# Wait on EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
print("************************* Running UE downlink (UDP) for UE id ",
req.ue_id)
with self._s1ap_wrapper.configDownlinkTest(
req, duration=1, is_udp=True) as test:
test.verify()
print("************************* Running UE detach for UE id ",
req.ue_id)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
req.ue_id, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value,
True)
if __name__ == "__main__":
unittest.main()
| 31.181818
| 79
| 0.631487
|
dafdab436a1a7a71bf8966cda11df784acbba9b6
| 285
|
py
|
Python
|
ipymaterialui/_version.py
|
trungleduc/ipymaterialui
|
9ced90551d0ee007e4a968d1f35156bdb1a56805
|
[
"MIT"
] | 81
|
2018-11-23T22:03:11.000Z
|
2022-03-14T11:04:04.000Z
|
ipymaterialui/_version.py
|
trungleduc/ipymaterialui
|
9ced90551d0ee007e4a968d1f35156bdb1a56805
|
[
"MIT"
] | 16
|
2018-11-26T18:23:46.000Z
|
2022-01-22T03:12:11.000Z
|
ipymaterialui/_version.py
|
trungleduc/ipymaterialui
|
9ced90551d0ee007e4a968d1f35156bdb1a56805
|
[
"MIT"
] | 17
|
2019-01-16T15:19:54.000Z
|
2022-01-02T18:32:09.000Z
|
version_info = (0, 1, 4, 'final')
_specifier_ = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc', 'final': ''}
__version__ = '%s.%s.%s%s' % (
version_info[0], version_info[1], version_info[2],
'' if version_info[3] == 'final' else _specifier_[version_info[3]]+str(version_info[4]))
| 35.625
| 90
| 0.617544
|
f3aca903aec347756c32ee5e9796a2917a798134
| 2,130
|
py
|
Python
|
alipay/aop/api/response/AlipayMarketingActivityDeliveryCreateResponse.py
|
Anning01/alipay-sdk-python-all
|
2adff354483ab7c2e615d6ad646d0b75c9c7803d
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/response/AlipayMarketingActivityDeliveryCreateResponse.py
|
Anning01/alipay-sdk-python-all
|
2adff354483ab7c2e615d6ad646d0b75c9c7803d
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/response/AlipayMarketingActivityDeliveryCreateResponse.py
|
Anning01/alipay-sdk-python-all
|
2adff354483ab7c2e615d6ad646d0b75c9c7803d
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ErrorDeliveryConfig import ErrorDeliveryConfig
from alipay.aop.api.domain.SuccessDeliveryConfig import SuccessDeliveryConfig
class AlipayMarketingActivityDeliveryCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayMarketingActivityDeliveryCreateResponse, self).__init__()
self._error_delivery_config_list = None
self._success_delivery_config_list = None
@property
def error_delivery_config_list(self):
return self._error_delivery_config_list
@error_delivery_config_list.setter
def error_delivery_config_list(self, value):
if isinstance(value, list):
self._error_delivery_config_list = list()
for i in value:
if isinstance(i, ErrorDeliveryConfig):
self._error_delivery_config_list.append(i)
else:
self._error_delivery_config_list.append(ErrorDeliveryConfig.from_alipay_dict(i))
@property
def success_delivery_config_list(self):
return self._success_delivery_config_list
@success_delivery_config_list.setter
def success_delivery_config_list(self, value):
if isinstance(value, list):
self._success_delivery_config_list = list()
for i in value:
if isinstance(i, SuccessDeliveryConfig):
self._success_delivery_config_list.append(i)
else:
self._success_delivery_config_list.append(SuccessDeliveryConfig.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(AlipayMarketingActivityDeliveryCreateResponse, self).parse_response_content(response_content)
if 'error_delivery_config_list' in response:
self.error_delivery_config_list = response['error_delivery_config_list']
if 'success_delivery_config_list' in response:
self.success_delivery_config_list = response['success_delivery_config_list']
| 42.6
| 118
| 0.722535
|
0cb926b27c1f1fe0b0a98abc4dce32bddb95c12d
| 17,619
|
py
|
Python
|
scripts/Sorts.py
|
blackb0x3/Edward
|
fae35c2bad13a19c7bfb2f0d5d9810762169a086
|
[
"MIT"
] | 2
|
2019-01-08T00:32:10.000Z
|
2019-06-24T14:29:24.000Z
|
scripts/Sorts.py
|
lukebarker3/Edward
|
fae35c2bad13a19c7bfb2f0d5d9810762169a086
|
[
"MIT"
] | 5
|
2021-03-09T01:31:15.000Z
|
2022-02-17T20:41:27.000Z
|
scripts/Sorts.py
|
lukebarker3/Edward
|
fae35c2bad13a19c7bfb2f0d5d9810762169a086
|
[
"MIT"
] | null | null | null |
from scripts.Algorithm import Algorithm, AlgorithmError
from models.Stack import Stack
import numpy as np
class Sort(Algorithm):
"""
Base class for algorithms which sort 1-dimensional lists.
"""
def generate_collection(self, *args, **kwargs):
"""
Generates a list for a sorting algorithm.
:param args: Ordered list of args.
:param kwargs: Keyword args.
:return: The generated collection.
"""
min = kwargs.get('min', 1)
max = kwargs.get('max', 1000)
size = kwargs.get('size', 10)
coll = [int(v) for v in np.random.choice(range(min, max + 1), size)]
shuffles = 5
# shuffle collection 5 times using fisher yates
while shuffles > 0:
s = size
while (s > 0):
s = s - 1
i = int(np.floor(np.random.random() * s) - 1)
if i < 0:
i = 0
temp = coll[s]
coll[s] = coll[i]
coll[i] = temp
shuffles -= 1
self.oldcollection = list(coll)
def collection_is_valid(self):
"""
Determines if the collection is valid for this algorithm.
In this case, a list.
:return: True if the collection is a list, False otherwise.
"""
return isinstance(self.oldcollection, list)
def has_worked(self):
"""
Determines if the sorting algorithm worked correctly as intended.
:raise: AlgorithmError if the collection wasn't sorted correctly.
:return: True if the collection was sorted correctly.
"""
if self._is_sorted() is False:
raise AlgorithmError("The algorithm did not sort the collection correctly.")
return True
def _is_sorted(self, desc=False):
"""
Determines if a collection has been sorted. Default is ascending order.
:param desc: Checks collection is sorted in descending order.
:return: True if collection is sorted in the specified order, false otherwise.
"""
if desc is True:
return all(self.newcollection[i] >= self.newcollection[i + 1] for i in range(len(self.newcollection) - 1))
else:
return all(self.newcollection[i] <= self.newcollection[i + 1] for i in range(len(self.newcollection) - 1))
def execute(self):
"""
Executes this algorithm's steps on the provided collection.
"""
raise NotImplementedError("Please use a specific sort algorithm's execute() function.")
@staticmethod
def metadata():
"""
Returns the algorithm's metadata - space complexity, time complexity, algorithm description etc.
"""
raise NotImplementedError("Please use a specific sort algorithm's metadata() function.")
class InsertionSort(Sort):
name = "Insertion Sort"
description = """An in-place, comparison-based sorting algorithm. It sorts array by shifting elements one by one and inserting the right element at the right position."""
steps = ["First Step", "Second Step", "Finally...", "Done"]
best_case = "O(n) comparisons, O(1) swaps"
average_case = "O(n<sup>2</sup>) comparisons, O(n<sup>2</sup>) swaps"
worst_case = "O(n<sup>2</sup>) comparisons, O(n<sup>2</sup>) swaps"
def execute(self):
"""
Sorts a collection by using the insertion sort algorithm.
"""
length = len(self.newcollection)
for i in range(1, length):
key = self.newcollection[i]
j = i - 1
while j >= 0 and key < self.newcollection[j]:
self.newcollection[j + 1] = self.newcollection[j]
j = j - 1
self.newcollection[j + 1] = key
@staticmethod
def metadata():
return {
"name" : InsertionSort.name,
"description" : InsertionSort.description,
"steps" : InsertionSort.steps,
"best_case" : InsertionSort.best_case,
"average_case": InsertionSort.average_case,
"worst_case" : InsertionSort.worst_case
}
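# --- Editor's sketch (not part of the original module) ---
# A standalone version of the insertion sort loop above, operating on a plain
# list, so the shift-and-insert step can be checked in isolation:
def _insertion_sort_sketch(values):
    values = list(values)
    for i in range(1, len(values)):
        key = values[i]
        j = i - 1
        while j >= 0 and key < values[j]:   # shift larger elements right
            values[j + 1] = values[j]
            j -= 1
        values[j + 1] = key                 # drop the key into its slot
    return values  # _insertion_sort_sketch([3, 1, 2]) == [1, 2, 3]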
class TraditionalBubbleSort(Sort):
def execute(self):
"""
Sorts a collection by using the traditional bubble sort algorithm.
"""
length = len(self.newcollection)
for i in range(length):
for j in range(length - i - 1):
if self.newcollection[j] > self.newcollection[j + 1]:
temp = self.newcollection[j]
self.newcollection[j] = self.newcollection[j + 1]
self.newcollection[j + 1] = temp
@staticmethod
def metadata():
return {}
class OptimisedBubbleSort(Sort):
def execute(self):
"""
Sorts a collection by using the optimised bubble sort algorithm.
"""
length = len(self.newcollection)
for i in range(length):
swapped = False
for j in range(0, length - i - 1):
if self.newcollection[j] > self.newcollection[j + 1]:
temp = self.newcollection[j]
self.newcollection[j] = self.newcollection[j + 1]
self.newcollection[j + 1] = temp
swapped = True
if not swapped:
break
@staticmethod
def metadata():
return {}
class SelectionSort(Sort):
def execute(self):
"""
Sorts a collection using the selection sort algorithm.
"""
length = len(self.newcollection)
for i in range(length):
first = i
for j in range(i + 1, length):
if self.newcollection[first] > self.newcollection[j]:
first = j
temp = self.newcollection[i]
self.newcollection[i] = self.newcollection[first]
self.newcollection[first] = temp
@staticmethod
def metadata():
return {}
class QuickSort(Sort):
def partition(self, low, high):
"""
Sorts current partition
:param low: lowest index of current partition
:param high: highest index of current partition
        :return: index of the pivot after partitioning
"""
index = low - 1
pivot = self.newcollection[high]
        for i in range(low, high):
            if self.newcollection[i] <= pivot:
                # smaller element found - advance the partition boundary
                index += 1
                temp = self.newcollection[index]
                self.newcollection[index] = self.newcollection[i]
                self.newcollection[i] = temp
temp = self.newcollection[index + 1]
self.newcollection[index + 1] = self.newcollection[high]
self.newcollection[high] = temp
return index + 1
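# --- Editor's sketch (not part of the original module) ---
# A standalone Lomuto partition over a plain list, mirroring QuickSort.partition()
# above: it returns the pivot's final index and leaves smaller elements to its left.
def _partition_sketch(values, low, high):
    index = low - 1
    pivot = values[high]
    for i in range(low, high):
        if values[i] <= pivot:
            index += 1
            values[index], values[i] = values[i], values[index]
    values[index + 1], values[high] = values[high], values[index + 1]
    return index + 1  # _partition_sketch([3, 1, 2], 0, 2) -> 1, list becomes [1, 2, 3]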
class RecursiveQuickSort(QuickSort):
def execute(self):
"""
Sorts a collection using the recursive version of the quicksort algorithm.
"""
self.doIt(0, len(self.newcollection) - 1)
def doIt(self, low, high):
"""
Actually sorts the collection.
:param low: low index of current partition
:param high: high index of current partition
:return: sorted array
"""
if low < high:
pivot = self.partition(low, high)
self.doIt(low, pivot - 1)
self.doIt(pivot + 1, high)
@staticmethod
def metadata():
return {}
class IterativeQuickSort(QuickSort):
def execute(self):
"""
Sorts a collection using the iterative version of the quicksort algorithm.
"""
# Create alternate stack
size = len(self.newcollection)
stack = Stack()
# push initial values
stack.push(0, size - 1)
# keep popping from stack if it is not empty
while stack.pointer >= 0:
# pop first and last index of partition
high = stack.pop()
low = stack.pop()
# set pivot to it's correct position in order to sort array
p = self.partition(low, high)
if p - 1 > low:
stack.push(low, p - 1)
if p + 1 < high:
stack.push(p + 1, high)
return None
@staticmethod
def metadata():
return {}
class MergeSort(Sort):
description = """"""
steps = []
best_case = "O(n log n)"
average_case = "O(n log n)"
worst_case = "O(n log n)"
@staticmethod
def metadata():
return {
"description": MergeSort.description,
"steps": dict(list(enumerate(MergeSort.steps, start=1))),
"best_case": MergeSort.best_case,
"worst_case": MergeSort.worst_case,
"average_case": MergeSort.average_case
}
class TopDownMergeSort(MergeSort):
def execute(self):
"""
Sorts a collection using the top-down implementation (i.e. using recursion) of the merge sort.
:return: The sorted collection.
"""
self.newcollection = self.perform_sort(self.newcollection)
def perform_sort(self, collection):
"""
Actually performs the sorting algorithm on the provided collection.
:param collection: The collection to be sorted.
:return: The sorted collection, after merging is completed.
"""
        size = len(collection)
        if size <= 1:
            return collection
        else:
            left = list()
            right = list()
            for i, x in enumerate(collection):
                if i < size / 2:
                    left.append(x)
                else:
                    right.append(x)
            left = self.perform_sort(left)
            right = self.perform_sort(right)
            return self.merge(left, right)
def merge(self, left, right):
"""
Merges two sublists together and returns the ordered union of the two lists.
:param left: The first sublist.
:param right: The second sublist.
:return: The merged collection.
"""
        result = list()
        while len(left) > 0 and len(right) > 0:
            if left[0] <= right[0]:
                result.append(left[0])
                left.pop(0)
            else:
                result.append(right[0])
                right.pop(0)
        # append whatever remains in either sublist
        result.extend(left)
        result.extend(right)
        return result
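# --- Editor's sketch (not part of the original module) ---
# A compact standalone top-down merge sort over plain lists, following the same
# split / recurse / merge shape as the class above:
def _merge_sort_sketch(values):
    if len(values) <= 1:
        return list(values)
    mid = len(values) // 2
    left = _merge_sort_sketch(values[:mid])
    right = _merge_sort_sketch(values[mid:])
    merged, i, j = [], 0, 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    return merged + left[i:] + right[j:]  # e.g. [5, 1, 4] -> [1, 4, 5]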
class BottomUpMergeSort(MergeSort):
def execute(self):
"""
Sorts a collection using the bottom-up implementation (i.e. using iteration) of the merge sort.
:return: The sorted list.
"""
current_size = 1
        while current_size < len(self.newcollection):
left = 0
while left < len(self.newcollection) - 1:
                mid = min(left + current_size - 1, len(self.newcollection) - 1)
                right = min(2 * current_size + left - 1, len(self.newcollection) - 1)
self.merge(left, mid, right)
left = left + (current_size * 2)
current_size *= 2
def merge(self, left, mid, right):
"""
Merges all sublists together to form the sorted array.
:param left: Lower bound.
:param mid: Middle value.
:param right: Upper bound.
:return: A merged collection.
"""
n1 = mid - left + 1
n2 = right - mid
L = [0] * n1
R = [0] * n2
for i in range(0, n1):
L[i] = self.newcollection[left + i]
for i in range(0, n2):
R[i] = self.newcollection[mid + i + 1]
i, j, k = 0, 0, left
while i < n1 and j < n2:
if L[i] > R[j]:
self.newcollection[k] = R[j]
j += 1
else:
self.newcollection[k] = L[i]
i += 1
k += 1
while i < n1:
self.newcollection[k] = L[i]
i += 1
k += 1
while j < n2:
self.newcollection[k] = R[j]
j += 1
k += 1
class HeapSort(Sort):
description = """"""
steps = []
best_case = ""
average_case = ""
worst_case = ""
@staticmethod
def metadata():
return {
"description": HeapSort.description,
"steps": dict(list(enumerate(HeapSort.steps, start=1))),
"best_case": HeapSort.best_case,
"worst_case": HeapSort.worst_case,
"average_case": HeapSort.average_case
}
def execute(self):
"""
Executes the heap sort algorithm on the provided collection.
:return: The sorted collection.
"""
size = len(self.newcollection)
for i in range(size, -1, -1):
self.heapify(self.newcollection, size, i)
for i in range(size - 1, 0, -1):
self.newcollection[i], self.newcollection[0] = self.newcollection[0], self.newcollection[i]
self.heapify(self.newcollection, i, 0)
def heapify(self, collection, heap_size, root_index):
"""
Creates a max-heap from the provided collection.
:param collection: The collection to transform into a max-heap.
:param heap_size: The size of the heap.
:param root_index: The root index of the subtree to be heapified.
:return: The generated max-heap.
"""
largest = root_index
left = 2 * root_index + 1
right = 2 * root_index + 2
if left < heap_size and collection[root_index] < collection[left]:
largest = left
if right < heap_size and collection[largest] < collection[right]:
largest = right
if largest != root_index:
collection[root_index], collection[largest] = collection[largest], collection[root_index] # swap
self.heapify(collection, heap_size, largest)
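# --- Editor's sketch (not part of the original module) ---
# A standalone heap sort over a plain list using the same sift-down ("heapify")
# idea as the class above:
def _heap_sort_sketch(values):
    values = list(values)
    def sift_down(size, root):
        largest = root
        left, right = 2 * root + 1, 2 * root + 2
        if left < size and values[left] > values[largest]:
            largest = left
        if right < size and values[right] > values[largest]:
            largest = right
        if largest != root:
            values[root], values[largest] = values[largest], values[root]
            sift_down(size, largest)
    n = len(values)
    for i in range(n // 2 - 1, -1, -1):     # build the max-heap
        sift_down(n, i)
    for end in range(n - 1, 0, -1):         # pop the max to the back
        values[0], values[end] = values[end], values[0]
        sift_down(end, 0)
    return values  # _heap_sort_sketch([4, 1, 3]) == [1, 3, 4]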
class ShellSort(Sort):
description = """"""
steps = []
best_case = ""
average_case = ""
worst_case = ""
@staticmethod
def metadata():
return {
"description": ShellSort.description,
"steps": dict(list(enumerate(ShellSort.steps, start=1))),
"best_case": ShellSort.best_case,
"worst_case": ShellSort.worst_case,
"average_case": ShellSort.average_case
}
def execute(self):
"""
Executes the shell sort algorithm on the provided collection.
:return: The sorted collection.
"""
size = len(self.newcollection)
        gap = size // 2
while gap > 0:
for i in range(gap, size):
temp = self.newcollection[i]
j = i
# shift earlier gap-sorted elements up until the correct
# location is found
while j >= gap and self.newcollection[j - gap] > temp:
self.newcollection[j] = self.newcollection[j - gap]
j -= gap
# original element is now in its correct location
self.newcollection[j] = temp
            gap //= 2
class CountingSort(Sort):
description = """"""
steps = []
best_case = ""
average_case = ""
worst_case = ""
@staticmethod
def metadata():
return {
"description": CountingSort.description,
"steps": dict(list(enumerate(CountingSort.steps, start=1))),
"best_case": CountingSort.best_case,
"worst_case": CountingSort.worst_case,
"average_case": CountingSort.average_case
}
def execute(self):
"""
Executes the counting sort algorithm on the provided collection.
:return: The sorted collection.
"""
size = len(self.newcollection)
output = [0] * size
        k = max(self.newcollection)
        count = [0] * (k + 1)
        for item in self.newcollection:
            count[item] += 1
        total = 0
        for i in range(k + 1):
old_count = count[i]
count[i] = total
total += old_count
for item in self.newcollection:
output[count[item]] = item
count[item] += 1
self.newcollection = output
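# --- Editor's sketch (not part of the original module) ---
# A standalone counting sort over small non-negative integers, using the same
# prefix-sum placement scheme as the method above:
def _counting_sort_sketch(values):
    if not values:
        return []
    k = max(values)
    count = [0] * (k + 1)
    for item in values:                  # histogram of values
        count[item] += 1
    total = 0
    for i in range(k + 1):               # exclusive prefix sums = output positions
        count[i], total = total, total + count[i]
    output = [0] * len(values)
    for item in values:
        output[count[item]] = item
        count[item] += 1
    return output  # _counting_sort_sketch([3, 0, 2, 2]) == [0, 2, 2, 3]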
class BucketSort(Sort):
description = """"""
steps = []
best_case = ""
average_case = ""
worst_case = ""
@staticmethod
def metadata():
return {
"description": BucketSort.description,
"steps": dict(list(enumerate(BucketSort.steps, start=1))),
"best_case": BucketSort.best_case,
"worst_case": BucketSort.worst_case,
"average_case": BucketSort.average_case
}
def execute(self):
"""
Executes the bucket sort algorithm on the provided collection.
:return: The sorted collection.
"""
buckets = list()
max_val = max(self.newcollection)
size = len(self.newcollection)
spread = max_val / size
current_min = 0
current_max = spread
        while current_min <= max_val:
            bucket = list()
            for item in self.newcollection.copy():
                if item >= current_min and item <= current_max:
                    bucket.append(item)
                    self.newcollection.remove(item)
            current_min = current_max
            current_max += spread
buckets.append(bucket)
for bucket in buckets:
bucket.sort() # can't be arsed to recurse over every bucket
self.newcollection += bucket
| 28.602273
| 174
| 0.548783
|
667fb44da65f1ed89e8088be797743dc6c224cf7
| 652
|
py
|
Python
|
sql/exec.py
|
a-was/flask-template
|
42403b5099a6b54c2f0c0884998b89bdd67258ce
|
[
"MIT"
] | null | null | null |
sql/exec.py
|
a-was/flask-template
|
42403b5099a6b54c2f0c0884998b89bdd67258ce
|
[
"MIT"
] | null | null | null |
sql/exec.py
|
a-was/flask-template
|
42403b5099a6b54c2f0c0884998b89bdd67258ce
|
[
"MIT"
] | null | null | null |
import os
import sqlite3
DB_FILE = '../database.sqlite3'
db = sqlite3.connect(DB_FILE)
sql_files = [f for f in os.listdir('.') if f.endswith('.sql')]
for sql_file in sql_files:
with open(sql_file) as f:
for command in f.read().split(';'):
try:
db.execute(command)
except sqlite3.IntegrityError:
print('WARNING - This command cannot be executed due to IntegrityError - {}'.format(command.strip()))
except sqlite3.OperationalError:
print('ERROR - This command raised OperationalError - {}'.format(command.strip()))
db.commit()
db.close()
| 34.315789
| 118
| 0.605828
|
7cc56b545086a456e747d71b894d38001d92b6e1
| 13,912
|
py
|
Python
|
tests/test_uri_templates.py
|
astonm/falcon
|
90d0cd6c69b527a666912e0d0752cf303c134a0c
|
[
"Apache-2.0"
] | 1
|
2018-10-07T21:06:10.000Z
|
2018-10-07T21:06:10.000Z
|
tests/test_uri_templates.py
|
astonm/falcon
|
90d0cd6c69b527a666912e0d0752cf303c134a0c
|
[
"Apache-2.0"
] | null | null | null |
tests/test_uri_templates.py
|
astonm/falcon
|
90d0cd6c69b527a666912e0d0752cf303c134a0c
|
[
"Apache-2.0"
] | null | null | null |
"""Application tests for URI templates using simulate_get().
These tests differ from those in test_default_router in that they are
a collection of sanity-checks that exercise the full framework code
path via simulate_get(), vs. probing the router directly.
"""
from datetime import datetime
import uuid
import pytest
import six
import falcon
from falcon import testing
from falcon.routing.util import SuffixedMethodNotFoundError
_TEST_UUID = uuid.uuid4()
_TEST_UUID_2 = uuid.uuid4()
_TEST_UUID_STR = str(_TEST_UUID)
_TEST_UUID_STR_2 = str(_TEST_UUID_2)
_TEST_UUID_STR_SANS_HYPHENS = _TEST_UUID_STR.replace('-', '')
class IDResource(object):
def __init__(self):
self.id = None
self.name = None
self.called = False
def on_get(self, req, resp, id):
self.id = id
self.called = True
self.req = req
class NameResource(object):
def __init__(self):
self.id = None
self.name = None
self.called = False
def on_get(self, req, resp, id, name):
self.id = id
self.name = name
self.called = True
class NameAndDigitResource(object):
def __init__(self):
self.id = None
self.name51 = None
self.called = False
def on_get(self, req, resp, id, name51):
self.id = id
self.name51 = name51
self.called = True
class FileResource(object):
def __init__(self):
self.file_id = None
self.called = False
def on_get(self, req, resp, file_id):
self.file_id = file_id
self.called = True
class FileDetailsResource(object):
def __init__(self):
self.file_id = None
self.ext = None
self.called = False
def on_get(self, req, resp, file_id, ext):
self.file_id = file_id
self.ext = ext
self.called = True
class ResourceWithSuffixRoutes(object):
def __init__(self):
self.get_called = False
self.post_called = False
self.put_called = False
def on_get(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.get_called = True
def on_post(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.post_called = True
def on_put(self, req, resp, collection_id, item_id):
self.collection_id = collection_id
self.item_id = item_id
self.put_called = True
def on_get_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.get_called = True
def on_post_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.post_called = True
def on_put_collection(self, req, resp, collection_id):
self.collection_id = collection_id
self.put_called = True
@pytest.fixture
def resource():
return testing.SimpleTestResource()
@pytest.fixture
def client():
return testing.TestClient(falcon.API())
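# --- Editor's sketch (not part of the original test module) ---
# The pattern every test below follows, in one place: add a route with a URI
# template, drive it through simulate_get(), and inspect the captured kwargs.
# Kept as a plain helper (not a test) so it does not change the collected suite.
def _simulate_get_sketch():
    resource = testing.SimpleTestResource()
    client = testing.TestClient(falcon.API())
    client.app.add_route('/widgets/{widget_id}', resource)
    client.simulate_get('/widgets/123')
    return resource.captured_kwargs  # expected: {'widget_id': '123'}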
def test_root_path(client, resource):
client.app.add_route('/', resource)
client.simulate_get('/')
assert resource.called
def test_no_vars(client, resource):
client.app.add_route('/hello/world', resource)
client.simulate_get('/hello/world')
assert resource.called
@pytest.mark.skipif(six.PY3, reason='Test only applies to Python 2')
def test_unicode_literal_routes(client, resource):
client.app.add_route(u'/hello/world', resource)
client.simulate_get('/hello/world')
assert resource.called
def test_special_chars(client, resource):
client.app.add_route('/hello/world.json', resource)
client.app.add_route('/hello(world)', resource)
client.simulate_get('/hello/world_json')
assert not resource.called
client.simulate_get('/helloworld')
assert not resource.called
client.simulate_get('/hello/world.json')
assert resource.called
client.simulate_get('/hello(world)')
assert resource.called
@pytest.mark.parametrize('field_name', [
'id',
'id123',
'widget_id',
])
def test_single(client, resource, field_name):
template = '/widgets/{{{}}}'.format(field_name)
client.app.add_route(template, resource)
client.simulate_get('/widgets/123')
assert resource.called
assert resource.captured_kwargs[field_name] == '123'
@pytest.mark.parametrize('uri_template,', [
'/{id:int}',
'/{id:int(3)}',
'/{id:int(min=123)}',
'/{id:int(min=123, max=123)}',
])
def test_int_converter(client, uri_template):
resource1 = IDResource()
client.app.add_route(uri_template, resource1)
result = client.simulate_get('/123')
assert result.status_code == 200
assert resource1.called
assert resource1.id == 123
assert resource1.req.path == '/123'
@pytest.mark.parametrize('uri_template,', [
'/{id:int(2)}',
'/{id:int(min=124)}',
'/{id:int(num_digits=3, max=100)}',
])
def test_int_converter_rejections(client, uri_template):
resource1 = IDResource()
client.app.add_route(uri_template, resource1)
result = client.simulate_get('/123')
assert result.status_code == 404
assert not resource1.called
@pytest.mark.parametrize('uri_template, path, dt_expected', [
(
'/{start_year:int}-to-{timestamp:dt}',
'/1961-to-1969-07-21T02:56:00Z',
datetime(1969, 7, 21, 2, 56, 0)
),
(
'/{start_year:int}-to-{timestamp:dt("%Y-%m-%d")}',
'/1961-to-1969-07-21',
datetime(1969, 7, 21)
),
(
'/{start_year:int}/{timestamp:dt("%Y-%m-%d %H:%M")}',
'/1961/1969-07-21 14:30',
datetime(1969, 7, 21, 14, 30)
),
(
'/{start_year:int}-to-{timestamp:dt("%Y-%m")}',
'/1961-to-1969-07-21',
None
),
])
def test_datetime_converter(client, resource, uri_template, path, dt_expected):
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
if dt_expected is None:
assert result.status_code == 404
assert not resource.called
else:
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs['start_year'] == 1961
assert resource.captured_kwargs['timestamp'] == dt_expected
@pytest.mark.parametrize('uri_template, path, expected', [
(
'/widgets/{widget_id:uuid}',
'/widgets/' + _TEST_UUID_STR,
{'widget_id': _TEST_UUID}
),
(
'/widgets/{widget_id:uuid}/orders',
'/widgets/' + _TEST_UUID_STR_SANS_HYPHENS + '/orders',
{'widget_id': _TEST_UUID}
),
(
'/versions/diff/{left:uuid()}...{right:uuid()}',
'/versions/diff/{}...{}'.format(_TEST_UUID_STR, _TEST_UUID_STR_2),
{'left': _TEST_UUID, 'right': _TEST_UUID_2, }
),
(
'/versions/diff/{left:uuid}...{right:uuid()}',
'/versions/diff/{}...{}'.format(_TEST_UUID_STR, _TEST_UUID_STR_2),
{'left': _TEST_UUID, 'right': _TEST_UUID_2, }
),
(
'/versions/diff/{left:uuid()}...{right:uuid}',
'/versions/diff/{}...{}'.format(_TEST_UUID_STR, _TEST_UUID_STR_2),
{'left': _TEST_UUID, 'right': _TEST_UUID_2, }
),
(
'/widgets/{widget_id:uuid}/orders',
'/widgets/' + _TEST_UUID_STR_SANS_HYPHENS[:-1] + '/orders',
None
),
])
def test_uuid_converter(client, resource, uri_template, path, expected):
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
if expected is None:
assert result.status_code == 404
assert not resource.called
else:
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs == expected
def test_uuid_converter_complex_segment(client, resource):
client.app.add_route('/pages/{first:uuid}...{last:uuid}', resource)
first_uuid = uuid.uuid4()
last_uuid = uuid.uuid4()
result = client.simulate_get('/pages/{}...{}'.format(
first_uuid,
last_uuid
))
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs['first'] == first_uuid
assert resource.captured_kwargs['last'] == last_uuid
@pytest.mark.parametrize('uri_template, path, expected', [
(
'/{food:spam}',
'/something',
{'food': 'spam!'}
),
(
'/{food:spam(")")}:{food_too:spam("()")}',
'/bacon:eggs',
{'food': 'spam!', 'food_too': 'spam!'}
),
(
'/({food:spam()}){food_too:spam("()")}',
'/(bacon)eggs',
{'food': 'spam!', 'food_too': 'spam!'}
),
])
def test_converter_custom(client, resource, uri_template, path, expected):
class SpamConverter(object):
def __init__(self, useless_text=None):
pass
def convert(self, fragment):
return 'spam!'
client.app.router_options.converters['spam'] = SpamConverter
client.app.add_route(uri_template, resource)
result = client.simulate_get(path)
assert result.status_code == 200
assert resource.called
assert resource.captured_kwargs == expected
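# --- Editor's sketch (not part of the original test module) ---
# Registering a converter outside the test above, in isolation.  The converter
# name ('upper') and its behaviour are illustrative assumptions, not Falcon
# built-ins; only the registration mechanism mirrors the test.
def _custom_converter_sketch():
    class UpperConverter(object):
        def convert(self, fragment):
            return fragment.upper()
    app = falcon.API()
    app.router_options.converters['upper'] = UpperConverter
    resource = testing.SimpleTestResource()
    app.add_route('/shout/{word:upper}', resource)
    client = testing.TestClient(app)
    client.simulate_get('/shout/hi')
    return resource.captured_kwargs  # expected: {'word': 'HI'}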
def test_single_trailing_slash(client):
resource1 = IDResource()
client.app.add_route('/1/{id}/', resource1)
result = client.simulate_get('/1/123')
assert result.status == falcon.HTTP_200
assert resource1.called
assert resource1.id == '123'
assert resource1.req.path == '/1/123'
resource2 = IDResource()
client.app.add_route('/2/{id}/', resource2)
result = client.simulate_get('/2/123/')
assert result.status == falcon.HTTP_404
assert not resource2.called
assert resource2.id is None
resource3 = IDResource()
client.app.add_route('/3/{id}/', resource3)
client.app.req_options.strip_url_path_trailing_slash = True
result = client.simulate_get('/3/123/')
assert result.status == falcon.HTTP_200
assert resource3.called
assert resource3.id == '123'
assert resource3.req.path == '/3/123'
resource4 = IDResource()
client.app.add_route('/4/{id}', resource4)
client.app.req_options.strip_url_path_trailing_slash = False
result = client.simulate_get('/4/123/')
assert result.status == falcon.HTTP_404
assert not resource4.called
assert resource4.id is None
def test_multiple(client):
resource = NameResource()
client.app.add_route('/messages/{id}/names/{name}', resource)
test_id = 'bfb54d43-219b-4336-a623-6172f920592e'
test_name = '758e3922-dd6d-4007-a589-50fba0789365'
path = '/messages/' + test_id + '/names/' + test_name
client.simulate_get(path)
assert resource.called
assert resource.id == test_id
assert resource.name == test_name
@pytest.mark.parametrize('uri_template', [
'//',
'//begin',
'/end//',
'/in//side',
])
def test_empty_path_component(client, resource, uri_template):
with pytest.raises(ValueError):
client.app.add_route(uri_template, resource)
@pytest.mark.parametrize('uri_template', [
'',
'no',
'no/leading_slash',
])
def test_relative_path(client, resource, uri_template):
with pytest.raises(ValueError):
client.app.add_route(uri_template, resource)
@pytest.mark.parametrize('reverse', [True, False])
def test_same_level_complex_var(client, reverse):
file_resource = FileResource()
details_resource = FileDetailsResource()
routes = [
('/files/{file_id}', file_resource),
('/files/{file_id}.{ext}', details_resource)
]
if reverse:
routes.reverse()
for uri_template, resource in routes:
client.app.add_route(uri_template, resource)
file_id_1 = 'bc6b201d-b449-4290-a061-8eeb9f7b1450'
file_id_2 = '33b7f34c-6ee6-40e6-89a3-742a69b59de0'
ext = 'a4581b95-bc36-4c08-a3c2-23ba266abdf2'
path_1 = '/files/' + file_id_1
path_2 = '/files/' + file_id_2 + '.' + ext
client.simulate_get(path_1)
assert file_resource.called
assert file_resource.file_id == file_id_1
client.simulate_get(path_2)
assert details_resource.called
assert details_resource.file_id == file_id_2
assert details_resource.ext == ext
def test_adding_suffix_routes(client):
resource_with_suffix_routes = ResourceWithSuffixRoutes()
client.app.add_route(
'/collections/{collection_id}/items/{item_id}', resource_with_suffix_routes)
client.app.add_route(
'/collections/{collection_id}/items', resource_with_suffix_routes, suffix='collection')
# GET
client.simulate_get('/collections/123/items/456')
assert resource_with_suffix_routes.collection_id == '123'
assert resource_with_suffix_routes.item_id == '456'
assert resource_with_suffix_routes.get_called
client.simulate_get('/collections/foo/items')
assert resource_with_suffix_routes.collection_id == 'foo'
# POST
client.simulate_post('/collections/foo234/items/foo456')
assert resource_with_suffix_routes.collection_id == 'foo234'
assert resource_with_suffix_routes.item_id == 'foo456'
assert resource_with_suffix_routes.post_called
client.simulate_post('/collections/foo123/items')
assert resource_with_suffix_routes.collection_id == 'foo123'
# PUT
client.simulate_put('/collections/foo345/items/foo567')
assert resource_with_suffix_routes.collection_id == 'foo345'
assert resource_with_suffix_routes.item_id == 'foo567'
assert resource_with_suffix_routes.put_called
client.simulate_put('/collections/foo321/items')
assert resource_with_suffix_routes.collection_id == 'foo321'
def test_custom_error_on_suffix_route_not_found(client):
resource_with_suffix_routes = ResourceWithSuffixRoutes()
with pytest.raises(SuffixedMethodNotFoundError):
client.app.add_route(
'/collections/{collection_id}/items', resource_with_suffix_routes, suffix='bad-alt')
| 28.923077
| 96
| 0.663959
|
8892a312c25a77c510e2f5bcdda5dfccb726a542
| 808
|
py
|
Python
|
samples/findings/create_occurrence_with_context.py
|
prince737/security-advisor-sdk-python
|
a06f6fe8180377a6ca8291ba74cff326cb56b539
|
[
"Apache-2.0"
] | null | null | null |
samples/findings/create_occurrence_with_context.py
|
prince737/security-advisor-sdk-python
|
a06f6fe8180377a6ca8291ba74cff326cb56b539
|
[
"Apache-2.0"
] | 17
|
2020-05-30T11:21:06.000Z
|
2021-04-20T10:01:09.000Z
|
samples/findings/create_occurrence_with_context.py
|
prince737/security-advisor-sdk-python
|
a06f6fe8180377a6ca8291ba74cff326cb56b539
|
[
"Apache-2.0"
] | 4
|
2020-05-18T12:38:03.000Z
|
2021-04-20T07:13:47.000Z
|
from ibm_cloud_security_advisor import FindingsApiV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
authenticator = IAMAuthenticator(apikey='abc')
findings_service = FindingsApiV1(authenticator=authenticator)
findings_service.set_service_url("https://us-south.secadvisor.cloud.ibm.com/findings")
response = findings_service.create_occurrence(
account_id="abc123",
provider_id="sdktest",
note_name="abc123/providers/sdktest/notes/sdk_note_id1",
kind="FINDING",
id="sdk_occ_id1",
context = {
"region": "us-south",
"resource_type": "Cluster",
"service_name": "Kubernetes Cluster",
"account_id": "abc123"
},
finding={
"severity": "LOW",
"next_steps": [
{
"title": "string",
"url": "string"
}
]
}
)
print(response)
| 25.25
| 86
| 0.699257
|
15677836385fd481bf78ef94bf5eb5c540aa0f25
| 4,886
|
py
|
Python
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Codec/RawYUVFramer.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Codec/RawYUVFramer.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Codec/RawYUVFramer.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=========================
Raw YUV video data framer
=========================
This component takes a raw stream of YUV video data and breaks it into
individual frames. It sends them out one at a time, tagged with relevant data
such as the frame size.
Many components that expect uncompressed video require it to be structured into
frames in this way, rather than as a raw stream of continuous data. This
component fulfills that requirement.
Example Usage
-------------
Reading and encoding raw video::
imagesize = (352, 288) # "CIF" size video
Pipeline(ReadFileAdapter("raw352x288video.yuv", ...other args...),
RawYUVFramer(imagesize),
DiracEncoder(preset="CIF"),
).activate()
More Detail
-----------
Receives raw yuv video data, as strings on its "inbox" inbox.
Sends out individual frames packaged in a dictionary::
{
"yuv" : (y_data, u_data, v_data), # a tuple of strings
"size" : (width, height), # in pixels
"pixformat" : "YUV420_planar", # raw video data format
}
The component will terminate if it receives a shutdownMicroprocess or
producerFinished message on its "control" inbox. The message is passed on out of
the "signal" outbox.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdownMicroprocess
class RawYUVFramer(component):
"""
RawYUVFramer(size,pixformat) -> raw yuv video data framing component
Creates a component that frames a raw stream of YUV video data into frames.
Keyword arguments:
- size -- (width,height) size of a video frame in pixels
- pixformat -- raw video data format (default="YUV420_Planar")
"""
def __init__(self, size, pixformat = "YUV420_planar"):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(RawYUVFramer, self).__init__()
self.size = size
self.pixformat = pixformat
# if pixformat != YUV420_planar
# raise ValueError("Can't handle anything except YUV420_planar at the mo. Sorry!")
ysize = size[0]*size[1]
usize = ysize / 4
vsize = usize
self.planes = { "y":"", "u":"", "v":"" }
self.sizes = { "y":ysize, "u":usize, "v":vsize }
def main(self):
"""Main loop"""
done = False
while not done:
while self.dataReady("inbox"):
raw = self.recv("inbox")
self.packAndSend(raw)
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdownMicroprocess):
self.send(msg, "signal")
done=True
if not done:
self.pause()
yield 1
def flushFrame(self):
"""Send out a frame, flushing buffers"""
frame = { "pixformat":self.pixformat,
"size":self.size,
"yuv":(self.planes['y'], self.planes['u'], self.planes['v'])
}
self.send( frame, "outbox" )
self.planes['y'] = ""
self.planes['u'] = ""
self.planes['v'] = ""
def packAndSend(self, raw):
"""
packAndSend(raw) -> None
        Packs incoming raw data into y, u, v planes and triggers a flush when
        all planes are full.
"""
while raw:
filled = False
for plane in ['y','u','v']:
remainder = self.sizes[plane] - len(self.planes[plane])
filled = len(raw) >= remainder
topupsize = min( len(raw), remainder )
if topupsize:
self.planes[plane] += raw[:topupsize]
raw = raw[topupsize:]
if filled:
self.flushFrame()
__kamaelia_components__ = ( RawYUVFramer, )
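# Illustrative note (added for clarity; not part of the original module): for
# YUV420 planar data the chroma planes are quarter-sized, so a 352x288 (CIF)
# frame is buffered as
#   y     = 352 * 288  = 101376 bytes
#   u = v = 101376 / 4 =  25344 bytes each
# i.e. 152064 bytes in total before flushFrame() emits the frame dictionary.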
| 31.522581
| 94
| 0.58289
|
6435733434f639fb8ac729cbe13f584e3d89269e
| 204
|
py
|
Python
|
alfpy/version.py
|
hasibaasma/alfpy
|
c8c0c1300108015746320cede2207ac57e630d3e
|
[
"MIT"
] | 19
|
2017-02-20T17:42:02.000Z
|
2021-12-16T19:07:17.000Z
|
alfpy/version.py
|
eggleader/alfpy
|
e0782e9551458ef17ab29df8af13fc0f8925e894
|
[
"MIT"
] | 3
|
2018-03-12T23:54:27.000Z
|
2020-12-09T21:53:19.000Z
|
alfpy/version.py
|
eggleader/alfpy
|
e0782e9551458ef17ab29df8af13fc0f8925e894
|
[
"MIT"
] | 6
|
2016-12-06T09:12:04.000Z
|
2021-09-24T14:40:47.000Z
|
# I store the version here so:
# 1) I don't load dependencies by storing it in __init__.py
# 2) I can import it in setup.py for the same reason.
# 3) I can import it into any module.
__version__ = '1.0.6'
| 40.8
| 59
| 0.710784
|
ba12e9a710d19ab220b292d6595884a556c7f01d
| 2,156
|
py
|
Python
|
myapp/ules_ops.py
|
CaoJohn/myproject
|
31474a2cecf4694d50acd9c594f26f241fd41048
|
[
"Apache-2.0"
] | null | null | null |
myapp/ules_ops.py
|
CaoJohn/myproject
|
31474a2cecf4694d50acd9c594f26f241fd41048
|
[
"Apache-2.0"
] | null | null | null |
myapp/ules_ops.py
|
CaoJohn/myproject
|
31474a2cecf4694d50acd9c594f26f241fd41048
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from django.core import serializers
import requests
import json
import sys
import os
import time
import random
from random import choice
import rstr
from selenium import webdriver
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from .ules.operate import transfromList, Factory
from .ules.dbresource import DBsource
from .ules.page import WorkLoginPage
from .models import Book,TestCase,TestCaseStep
# Create your views here.
@require_http_methods(["GET"])
def run_testcase(request):
response = {}
try:
testcasestep = TestCaseStep.objects.filter(case_name=request.GET.get('case_name')).order_by('step_order')
testcase = TestCase.objects.filter(case_name=request.GET.get('case_name'))
response['msg'] = 'success'
response['error_num'] = 0
except Exception as e:
response['msg'] = str(e)
response['error_num'] = 1
print(len(testcasestep))
for i in testcasestep.values():
print(i)
driver = webdriver.Chrome("/usr/local/bin/chromedriver")
# driver.get("https://work-test.1688.com")
driver.get("https://work.1688.com")
time.sleep(3)
print(driver.current_url)
time.sleep(2)
workloginpage = WorkLoginPage(driver)
workloginpage.login()
time.sleep(3)
driver.get(testcase.values()[0]["url"])
args_dics = transfromList(testcasestep.values())
print(args_dics)
time.sleep(2)
factory = Factory()
for args_dic in args_dics:
print(args_dic)
time.sleep(3)
a = factory.getOps(driver, args_dic)
a.operate(args_dic)
        # Convert the timestamp to another date format, e.g. "%Y%m%d_%H_%M_%S"
driver.get_screenshot_as_file('/Users/jiangwen/Documents/John/work/projects/python/django/pics/'+args_dic["step_name"]+time.strftime("%Y%m%d_%H_%M_%S", time.localtime(int(time.time())))+'.png')
time.sleep(10)
driver.close()
return JsonResponse(response)
| 34.774194
| 201
| 0.702226
|
8b80a641d4fc8c353c87017881b3902f01cf852e
| 2,978
|
py
|
Python
|
consumers/consumer.py
|
estarguars113/udacity-kafka-project
|
a74a074a044d9e5a2b8d2efbf13bbe89248f7dfa
|
[
"MIT"
] | null | null | null |
consumers/consumer.py
|
estarguars113/udacity-kafka-project
|
a74a074a044d9e5a2b8d2efbf13bbe89248f7dfa
|
[
"MIT"
] | null | null | null |
consumers/consumer.py
|
estarguars113/udacity-kafka-project
|
a74a074a044d9e5a2b8d2efbf13bbe89248f7dfa
|
[
"MIT"
] | 1
|
2021-09-09T06:00:47.000Z
|
2021-09-09T06:00:47.000Z
|
"""Defines core consumer functionality"""
import logging
from confluent_kafka import Consumer, OFFSET_BEGINNING
from confluent_kafka.avro import AvroConsumer, CachedSchemaRegistryClient
from confluent_kafka.avro.serializer import SerializerError
from tornado import gen
logger = logging.getLogger(__name__)
SCHEMA_REGISTRY_URL = "http://localhost:8081"
BROKER_URL = "localhost:9092"
class KafkaConsumer:
"""Defines the base kafka consumer class"""
def __init__(
self,
topic_name_pattern,
message_handler,
is_avro=True,
offset_earliest=False,
sleep_secs=1.0,
consume_timeout=0.1,
):
"""Creates a consumer object for asynchronous use"""
self.topic_name_pattern = topic_name_pattern
self.message_handler = message_handler
self.sleep_secs = sleep_secs
self.consume_timeout = consume_timeout
self.offset_earliest = offset_earliest
self.broker_properties = {
"bootstrap.servers": BROKER_URL,
"group.id": "0",
"auto.offset.reset": "earliest" if offset_earliest else "latest"
}
print("consumer initialized", topic_name_pattern)
if is_avro is True:
self.broker_properties["schema.registry.url"] = SCHEMA_REGISTRY_URL
self.consumer = AvroConsumer(
self.broker_properties
)
else:
self.consumer = Consumer(self.broker_properties)
self.consumer.subscribe(
[self.topic_name_pattern],
on_assign=self.on_assign
)
def on_assign(self, consumer, partitions):
"""Callback for when topic assignment takes place"""
print("on assign", self.topic_name_pattern)
        for partition in partitions:
            if self.offset_earliest:
                partition.offset = OFFSET_BEGINNING
logger.info("partitions assigned for %s", self.topic_name_pattern)
consumer.assign(partitions)
async def consume(self):
"""Asynchronously consumes data from kafka topic"""
while True:
num_results = 1
while num_results > 0:
num_results = self._consume()
await gen.sleep(self.sleep_secs)
def _consume(self):
"""Polls for a message. Returns 1 if a message was received, 0 otherwise"""
message = self.consumer.poll(1.0)
print("message", self.topic_name_pattern, message)
if message is None:
return 0
elif message.error() is not None:
print(f"error from consumer {message.error()}")
return 0
else:
try:
print("consume", self.topic_name_pattern, message.value())
return 1
except KeyError as e:
print(f"Failed to unpack message {e}")
return 0
def close(self):
"""Cleans up any open kafka consumers"""
self.consumer.close()
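# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal example of driving the consumer from a Tornado IOLoop, assuming a
# broker is reachable at BROKER_URL; the topic name and callback are hypothetical.
if __name__ == "__main__":
    from tornado import ioloop
    def print_message(message):
        """Hypothetical callback passed as message_handler."""
        print(message.topic(), message.value())
    example_consumer = KafkaConsumer(
        "org.example.purchases",  # hypothetical topic pattern
        print_message,
        is_avro=False,
    )
    try:
        # run_sync drives the consume() coroutine until the process is interrupted
        ioloop.IOLoop.current().run_sync(example_consumer.consume)
    finally:
        example_consumer.close()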
| 32.021505
| 83
| 0.619879
|
6b11409ba9acc3e95fdc223e5b009418fe06c989
| 450
|
py
|
Python
|
Exercicios-Python/081.py
|
LuizHenriqudesouza419/Exercicios-de-Python3-main
|
af53cc1eea1e22a304e206a453c4b24bf67615a8
|
[
"MIT"
] | 1
|
2021-11-08T22:59:33.000Z
|
2021-11-08T22:59:33.000Z
|
Exercicios-Python/081.py
|
LuizHenriqudesouza419/Exercicios-de-Python3-main
|
af53cc1eea1e22a304e206a453c4b24bf67615a8
|
[
"MIT"
] | null | null | null |
Exercicios-Python/081.py
|
LuizHenriqudesouza419/Exercicios-de-Python3-main
|
af53cc1eea1e22a304e206a453c4b24bf67615a8
|
[
"MIT"
] | null | null | null |
# Extracting data from a list
valores = []
while True:
    valores.append(int(input('Enter a value: ')))
    resp = str(input('Do you want to continue? [Y/N] '))
    if resp in 'Nn':
        break
print('=-' * 30)
print(f'You entered {len(valores)} elements.')
valores.sort(reverse=True)
print(f'The values in descending order are {valores}')
if 5 in valores:
    print('The value 5 is in the list')
else:
    print('The value 5 is not in the list')
| 30
| 55
| 0.66
|
ae9d204ff5a7bac18384a99ec1e50ed1ab0903e4
| 119
|
py
|
Python
|
notifications/admin.py
|
NSP-Community/NSP-Production
|
329af474892b911f2025a920fdb3ab390bb6de81
|
[
"MIT"
] | null | null | null |
notifications/admin.py
|
NSP-Community/NSP-Production
|
329af474892b911f2025a920fdb3ab390bb6de81
|
[
"MIT"
] | 8
|
2020-06-05T18:39:09.000Z
|
2022-03-11T23:26:32.000Z
|
notifications/admin.py
|
NSP-Community/NSP-Production
|
329af474892b911f2025a920fdb3ab390bb6de81
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Notification)
| 19.833333
| 33
| 0.798319
|
890cddc05af768e5bc35ca59882eae1c61a3fb87
| 1,552
|
py
|
Python
|
django/authority/urls.py
|
cmu-lib/authority
|
9b8d5f2f0b6b5ae50ca1de4f85fde5a3aa003167
|
[
"MIT"
] | null | null | null |
django/authority/urls.py
|
cmu-lib/authority
|
9b8d5f2f0b6b5ae50ca1de4f85fde5a3aa003167
|
[
"MIT"
] | null | null | null |
django/authority/urls.py
|
cmu-lib/authority
|
9b8d5f2f0b6b5ae50ca1de4f85fde5a3aa003167
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import include, path, re_path
from rest_framework import routers
import authority.views
import entity.views
router = routers.DefaultRouter()
urlpatterns = [
path("", authority.views.HomeView.as_view(), name="home"),
path("api/", include(router.urls)),
path("person/<int:pk>/populate_viaf/", entity.views.PopulateVIAF.as_view(), name="person-populate-viaf"),
path("person/<int:pk>/populate_lcnaf/", entity.views.PopulateLCNAF.as_view(), name="person-populate-lcnaf"),
path(
"reconcile/",
authority.views.ReconciliationEndpoint.as_view(),
name="reconciliation_endpoint",
),
path(
"reconcile/extend/",
authority.views.DataExtensionEndpoint.as_view(),
name="reconcile-extend",
),
path(
"reconcile/suggest/",
authority.views.SuggestEndpoint.as_view(),
name="reconcile-suggest",
),
path(
"reconcile/suggest/flyout/<int:pk>/",
entity.views.FlyoutView.as_view(),
name="flyout",
),
path(
"reconcile/preview/<int:pk>/",
entity.views.PreviewView.as_view(),
name="preview",
),
path(
"current_user/",
authority.views.CurrentUserView.as_view(),
name="current_user",
),
path("auth/", include("rest_framework.urls", namespace="rest_framework")),
path("admin/", admin.site.urls),
path("accounts/", include("django.contrib.auth.urls")),
path("silk/", include("silk.urls", namespace="silk")),
]
| 31.673469
| 112
| 0.640464
|
6dfcc397ca1f7c6668c9cb145eff1d5efff94498
| 1,224
|
py
|
Python
|
python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/html5lib/filters/whitespace.py
|
GinaJame/Portfolio_MLH
|
541709dcf034ddca885a8b08f9922dc352c113f8
|
[
"MIT"
] | 1
|
2022-03-06T13:06:33.000Z
|
2022-03-06T13:06:33.000Z
|
python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/html5lib/filters/whitespace.py
|
GinaJame/Portfolio_MLH
|
541709dcf034ddca885a8b08f9922dc352c113f8
|
[
"MIT"
] | 1
|
2020-05-16T02:22:36.000Z
|
2020-05-16T02:22:36.000Z
|
python3-virtualenv/lib/python3.6/site-packages/pip/_vendor/html5lib/filters/whitespace.py
|
GinaJame/Portfolio_MLH
|
541709dcf034ddca885a8b08f9922dc352c113f8
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, unicode_literals
import re
from . import base
from ..constants import rcdataElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
SPACES_REGEX = re.compile("[%s]+" % spaceCharacters)
class Filter(base.Filter):
"""Collapses whitespace except in pre, textarea, and script elements"""
spacePreserveElements = frozenset(["pre", "textarea"] + list(rcdataElements))
def __iter__(self):
preserve = 0
for token in base.Filter.__iter__(self):
type = token["type"]
if type == "StartTag" and (
preserve or token["name"] in self.spacePreserveElements
):
preserve += 1
elif type == "EndTag" and preserve:
preserve -= 1
elif not preserve and type == "SpaceCharacters" and token["data"]:
# Test on token["data"] above to not introduce spaces where there were not
token["data"] = " "
elif not preserve and type == "Characters":
token["data"] = collapse_spaces(token["data"])
yield token
def collapse_spaces(text):
return SPACES_REGEX.sub(" ", text)
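# Illustrative note (added): SPACES_REGEX collapses any run of HTML space
# characters, e.g. collapse_spaces("a \t\n b") == "a b"; text inside the
# spacePreserveElements (pre, textarea and the RCDATA elements) passes through untouched.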
| 29.142857
| 90
| 0.610294
|
32025083588ea20b5d526464330b3afb8f14ed28
| 4,109
|
py
|
Python
|
code-merge/ant.py
|
scrapingredditboys/AI-Fundamentals-TSP
|
dfafd8e0d9adb87997886ef9a835be5f49b02871
|
[
"BSD-3-Clause"
] | null | null | null |
code-merge/ant.py
|
scrapingredditboys/AI-Fundamentals-TSP
|
dfafd8e0d9adb87997886ef9a835be5f49b02871
|
[
"BSD-3-Clause"
] | null | null | null |
code-merge/ant.py
|
scrapingredditboys/AI-Fundamentals-TSP
|
dfafd8e0d9adb87997886ef9a835be5f49b02871
|
[
"BSD-3-Clause"
] | null | null | null |
import math
import random
import sys
from threading import *
class Ant(Thread):
def __init__(self, ID, start_node, colony, beta, q0, rho):
Thread.__init__(self)
self.ID = ID
self.start_node = start_node
self.colony = colony
self.curr_node = self.start_node
self.graph = self.colony.graph
self.path_vec = []
self.path_vec.append(self.start_node)
self.path_cost = 0
# same meaning as in standard equations
self.Beta = beta
#self.Q0 = 1 # Q0 = 1 works just fine for 10 city case (no explore)
self.Q0 = q0
self.Rho = rho
# store the nodes remaining to be explored here
self.nodes_to_visit = {}
for i in range(0, self.graph.num_nodes):
if i != self.start_node:
self.nodes_to_visit[i] = i
# create n X n matrix 0'd out to start
self.path_mat = []
for i in range(0, self.graph.num_nodes):
self.path_mat.append([0]*self.graph.num_nodes)
    # override Thread's run()
def run(self):
graph = self.colony.graph
while not self.end():
# we need exclusive access to the graph
graph.lock.acquire()
new_node = self.state_transition_rule(self.curr_node)
self.path_cost += graph.delta(self.curr_node, new_node)
self.path_vec.append(new_node)
self.path_mat[self.curr_node][new_node] = 1 #adjacency matrix representing path
#print ("Ant %s : %s, %s" % (self.ID, self.path_vec, self.path_cost,))
self.local_updating_rule(self.curr_node, new_node)
graph.lock.release()
self.curr_node = new_node
# don't forget to close the tour
self.path_cost += graph.delta(self.path_vec[-1], self.path_vec[0])
# send our results to the colony
self.colony.update(self)
print ("Ant thread %s terminating." % (self.ID,))
# allows thread to be restarted (calls Thread.__init__)
self.__init__(ID=self.ID, start_node=self.start_node, colony=self.colony, beta=self.Beta, q0=self.Q0, rho=self.Rho)
def end(self):
return not self.nodes_to_visit
# described in report -- determines next node to visit after curr_node
def state_transition_rule(self, curr_node):
graph = self.colony.graph
q = random.random()
max_node = -1
if q < self.Q0:
max_val = -1
val = None
for node in self.nodes_to_visit.values():
if graph.tau(curr_node, node) == 0:
raise Exception("tau = 0")
val = graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
if val > max_val:
max_val = val
max_node = node
else:
sum = 0
node = -1
for node in self.nodes_to_visit.values():
if graph.tau(curr_node, node) == 0:
raise Exception("tau = 0")
sum += graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
if sum == 0:
raise Exception("sum = 0")
avg = sum / len(self.nodes_to_visit)
#print ("avg = %s" % (avg,))
for node in self.nodes_to_visit.values():
p = graph.tau(curr_node, node) * math.pow(graph.etha(curr_node, node), self.Beta)
if p > avg:
#print ("p = %s" % (p,))
max_node = node
if max_node == -1:
max_node = node
if max_node < 0:
raise Exception("max_node < 0")
del self.nodes_to_visit[max_node]
return max_node
    # pheromone update rule for individual ants
def local_updating_rule(self, curr_node, next_node):
graph = self.colony.graph
val = (1 - self.Rho) * graph.tau(curr_node, next_node) + (self.Rho * graph.tau0)
graph.update_tau(curr_node, next_node, val)
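# Notes added for clarity (not in the original source): state_transition_rule
# implements the ACS pseudo-random proportional rule -- with probability Q0 the
# ant exploits, moving to the city j that maximises
#     tau(curr, j) * etha(curr, j) ** Beta
# and otherwise it explores, favouring cities whose value exceeds the average of
# that quantity. local_updating_rule applies the ACS local pheromone update
#     tau(i, j) <- (1 - Rho) * tau(i, j) + Rho * tau0
# which evaporates a little pheromone on each traversed edge so that later ants
# are encouraged to try different edges.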
| 32.872
| 123
| 0.558043
|
d7c73f759265caf4926a950d7d40034177299a03
| 1,893
|
py
|
Python
|
compiler.py
|
h3ko-ctrl/Demeter
|
7e725bca45128c783e1fc236d702dd258e3ee95d
|
[
"MIT"
] | null | null | null |
compiler.py
|
h3ko-ctrl/Demeter
|
7e725bca45128c783e1fc236d702dd258e3ee95d
|
[
"MIT"
] | null | null | null |
compiler.py
|
h3ko-ctrl/Demeter
|
7e725bca45128c783e1fc236d702dd258e3ee95d
|
[
"MIT"
] | null | null | null |
# Compiler for the Demeter language
import os
import sys
from src.option_parser import *
from src.lexer.lexer import *
import src.parser.parser as analyzer
from src.error import FileNotFound
class Compiler:
DIRNAME, FILENAME = os.path.split(os.path.abspath(__file__))
def __init__(self, options, source=None):
# Verify the existence of the source file and open it
self.path = os.path.join(self.DIRNAME, source)
if os.path.exists(self.path):
self.source = open(self.path, mode="rb")
else:
raise FileNotFound(self.path)
self.filename = os.path.basename(self.source.name)
self.verbose = False
if "verbose" in options.keys():
self.verbose = options["verbose"]
self.tokens = None
self.ast = None
return
def log(self, alert):
if self.verbose:
sys.stdout.write("%s \n" % alert)
def run(self):
self.log("Compiling: %s" % self.filename)
# Compiling process
# Lexing
lexer.input(self.source.read())
# while True:
# token = lexer.token()
# if not token:
# break
# self.log('(%s, %r, %d, %d)' % (token.type, token.value, token.lineno, token.lexpos))
# Parsing : outputs AST
self.ast = analyzer.parser.parse(lexer=lexer)
for _ in self.ast:
self.log(_)
# : outputs AT
# Code Generator
# Compiling
self.source.close()
return
if __name__ == "__main__":
(k_options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
compiling_options = {}
if k_options.verbose:
compiling_options = {"verbose": k_options.verbose}
app = Compiler(compiling_options, source=args[0])
app.run()
| 24.269231
| 98
| 0.582673
|
b61a2b2a1a57a2f5ec08888ee665a73fdead769d
| 64,742
|
py
|
Python
|
public/Python27/Lib/cookielib.py
|
NingrumFadillah/cekmutasi
|
1fccb6cafb874c2a80ece9b71d7c682fd44dbd48
|
[
"MIT"
] | 1
|
2020-11-26T18:53:46.000Z
|
2020-11-26T18:53:46.000Z
|
public/Python27/Lib/cookielib.py
|
NingrumFadillah/cekmutasi
|
1fccb6cafb874c2a80ece9b71d7c682fd44dbd48
|
[
"MIT"
] | null | null | null |
public/Python27/Lib/cookielib.py
|
NingrumFadillah/cekmutasi
|
1fccb6cafb874c2a80ece9b71d7c682fd44dbd48
|
[
"MIT"
] | 3
|
2017-04-07T12:02:22.000Z
|
2020-03-23T12:11:55.000Z
|
"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'lwp_cookie_str', 'LoadError',
'MozillaCookieJar']
import re, urlparse, copy, time, urllib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import httplib # only for the default HTTP port
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*args):
if not debug:
return
global logger
if not logger:
import logging
logger = logging.getLogger("cookielib")
return logger.debug(*args)
DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways. Warn if any
# exceptions are caught there.
import warnings, traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("cookielib bug!\n%s" % msg, stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHS_LOWER = []
for month in MONTHS: MONTHS_LOWER.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if tz in UTC_ZONES:
offset = 0
else:
m = TIMEZONE_RE.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = MONTHS_LOWER.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = _timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
STRICT_DATE_RE = re.compile(
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
WEEKDAY_RE = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
LOOSE_HTTP_DATE_RE = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = STRICT_DATE_RE.search(text)
if m:
g = m.groups()
mon = MONTHS_LOWER.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return _timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = LOOSE_HTTP_DATE_RE.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
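# Illustrative note (added): for example
#     http2time("Wed, 09 Feb 1994 22:23:32 GMT") == 760832612
# (seconds since the epoch), while a string in none of the supported formats
# yields None.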
ISO_DATE_RE = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = ISO_DATE_RE.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
"""Return unmatched part of re.Match object."""
start, end = match.span(0)
return match.string[:start]+match.string[end:]
HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert not isinstance(header_values, basestring)
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = HEADER_TOKEN_RE.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = HEADER_QUOTED_VALUE_RE.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = HEADER_ESCAPE_RE.sub(r"\1", value)
else:
m = HEADER_VALUE_RE.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
def join_header_words(lists):
"""Do the inverse (almost) of the conversion done by split_header_words.
Takes a list of lists of (key, value) pairs and produces a single header
value. Attribute values are quoted if needed.
>>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
'text/plain; charset="iso-8859/1"'
>>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
'text/plain, charset="iso-8859/1"'
"""
headers = []
for pairs in lists:
attr = []
for k, v in pairs:
if v is not None:
if not re.search(r"^\w+$", v):
v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \
v = '"%s"' % v
k = "%s=%s" % (k, v)
attr.append(k)
if attr: headers.append("; ".join(attr))
return ", ".join(headers)
def _strip_quotes(text):
if text.startswith('"'):
text = text[1:]
if text.endswith('"'):
text = text[:-1]
return text
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
for ii, param in enumerate(re.split(r";\s*", ns_header)):
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
# This is an RFC 2109 cookie.
v = _strip_quotes(v)
version_set = True
if k == "expires":
# convert expires date to seconds since epoch
v = http2time(_strip_quotes(v)) # None if invalid
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
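# Illustrative note (added): for example
#     parse_ns_headers(['foo=bar; path=/; version="1"'])
# returns [[('foo', 'bar'), ('path', '/'), ('version', '1')]]; a header without a
# version attribute would have ('version', '0') appended.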
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
    # at other uses of IPV4_RE also, if changing this.
if IPV4_RE.search(text):
return False
if text == "":
return False
if text[0] == "." or text[-1] == ".":
return False
return True
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
if i == -1 or i == 0:
# A does not have form NB, or N is the empty string
return False
if not B.startswith("."):
return False
if not is_HDN(B[1:]):
return False
return True
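# Illustrative note (added): per the rules above,
#     domain_match("x.y.com", ".y.com") -> True    (A has the form N + ".y.com")
#     domain_match("x.y.com", "y.com")  -> False   (B lacks the leading dot)
#     domain_match("y.com", "y.com")    -> True    (string-compare equal)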
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
if IPV4_RE.search(text):
return False
return True
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
def request_path(request):
"""request-URI, as defined by RFC 2965."""
url = request.get_full_url()
#scheme, netloc, path, parameters, query, frag = urlparse.urlparse(url)
#req_path = escape_path("".join(urlparse.urlparse(url)[2:]))
path, parameters, query, frag = urlparse.urlparse(url)[2:]
if parameters:
path = "%s;%s" % (path, parameters)
path = escape_path(path)
req_path = urlparse.urlunparse(("", "", path, "", query, frag))
if not req_path.startswith("/"):
# fix bad RFC 2396 absoluteURI
req_path = "/"+req_path
return req_path
def request_port(request):
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
_debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, unicode):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
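# Illustrative note (added): e.g. escape_path("/acme/rocket sled") returns
# "/acme/rocket%20sled", and existing escapes are normalised to upper case, so
# escape_path("/a%2fb") returns "/a%2Fb".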
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request)
if not domain_match(req_host, reach(request.get_origin_req_host())):
return True
else:
return False
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return name in self._rest
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def is_expired(self, now=None):
if now is None: now = time.time()
if (self.expires is not None) and (self.expires <= now):
return True
return False
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ("version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
):
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies, though this is probably a bad idea.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server."""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies."""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""Constructor arguments should be passed as keyword arguments only."""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
_debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
_debug(" domain %s is not in user allow-list", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if self.strict_domain and (domain.count(".") >= 2):
# XXX This should probably be compared with the Konqueror
# (kcookiejar.cpp) and Mozilla implementations, but it's a
# losing battle.
i = domain.rfind(".")
j = domain.rfind(".", 0, i)
if j == 0: # domain like .foo.bar
tld = domain[i+1:]
sld = domain[j+1:i]
if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
"gov", "mil", "int", "aero", "biz", "cat", "coop",
"info", "jobs", "mobi", "museum", "name", "pro",
"travel", "eu") and len(tld) == 2:
# domain like .co.uk
_debug(" country-code second level domain %s", domain)
return False
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
_debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
_debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
_debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
_debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
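    # Illustrative subclass pattern (hypothetical class name and domain), following
    # the contract described in the return_ok() docstring above: call the base
    # method first, and only then apply stricter rules of your own.
    #
    #   class BlockTrackersPolicy(DefaultCookiePolicy):
    #       def return_ok(self, cookie, request):
    #           if not DefaultCookiePolicy.return_ok(self, cookie, request):
    #               return False
    #           return not cookie.domain.endswith(".tracker.example")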
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
_debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
        # Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
req_host, erhn = eff_request_host(request)
if not req_host.startswith("."):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
if not (req_host.endswith(domain) or erhn.endswith(domain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
_debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
_debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
_debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
def deepvalues(mapping):
"""Iterates over nested mapping, depth-first, in sorted order by key."""
values = vals_sorted_by_key(mapping)
for obj in values:
mapping = False
try:
obj.items
except AttributeError:
pass
else:
mapping = True
for subobj in deepvalues(obj):
yield subobj
if not mapping:
yield obj
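# A minimal illustration of deepvalues(), using plain dicts (hypothetical values)
# to stand in for the CookieJar's domain -> path -> name nesting:
#   jar = {"example.com": {"/": {"sid": "abc"}}, "foo.org": {"/bar": {"tok": "xyz"}}}
#   list(deepvalues(jar))  # -> ["abc", "xyz"]: leaf values, depth-first, sorted by key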
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try
urllib2.build_opener(HTTPCookieProcessor).open(url).
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
def __init__(self, policy=None):
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies_lock = _threading.RLock()
self._cookies = {}
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
_debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
_debug(" not returning cookie")
continue
_debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda arg: len(arg.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
                    # age-calculation rules.  Remember that zero Max-Age is a
                    # request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
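    # Illustrative calls (hypothetical domain/path/name values):
    #   jar.clear()                                  # drop every cookie
    #   jar.clear("example.com")                     # drop one domain
    #   jar.clear("example.com", "/", "sessionid")   # drop a single named cookie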
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def __iter__(self):
return deepvalues(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
# derives from IOError for backwards-compatibility with Python 2.4.0
class LoadError(IOError): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file."""
def __init__(self, filename=None, delayload=False, policy=None):
"""
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None:
try:
filename+""
except:
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file."""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
self._cookies_lock.acquire()
try:
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
finally:
self._cookies_lock.release()
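# Illustrative round-trip with a concrete FileCookieJar subclass (hypothetical
# filename; LWPCookieJar is imported just below):
#   jar = LWPCookieJar("cookies.lwp")
#   jar.save(ignore_discard=True)   # persist, including session cookies
#   jar.revert()                    # reload from disk; state rolls back on failure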
from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
from _MozillaCookieJar import MozillaCookieJar
| 35.967778
| 83
| 0.559328
|
adf9ce4aead61526e9c4734619b639637db18744
| 10,995
|
py
|
Python
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 8
|
2016-08-15T07:02:27.000Z
|
2016-08-24T09:34:00.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 1
|
2020-09-08T01:45:28.000Z
|
2020-09-08T01:45:28.000Z
|
python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 5
|
2021-12-10T11:20:06.000Z
|
2022-02-18T05:18:12.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
from quant_dequant_test import QuantDequantTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
def setUp(self):
self.set_params()
def network():
self.data = fluid.data(
name='data', shape=[1, 28, 28], dtype='float32')
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
matmul_out = fluid.layers.matmul(
x=self.data,
y=self.data,
transpose_x=self.transpose_x,
transpose_y=self.transpose_y,
alpha=self.alpha)
fc_out = fluid.layers.fc(input=matmul_out,
size=10,
num_flatten_dims=1,
bias_attr=False,
act=None)
result = fluid.layers.relu(fc_out)
loss = fluid.layers.cross_entropy(input=result, label=self.label)
avg_loss = fluid.layers.mean(loss)
return avg_loss, result
self.main_program.random_seed = 2
self.startup_program.random_seed = 2
self.test_main_program.random_seed = 2
#self.test_startup_program.random_seed = 2
with fluid.unique_name.guard():
with fluid.program_guard(self.main_program, self.startup_program):
self.loss, result = network()
opt = fluid.optimizer.Adam(learning_rate=0.0001)
opt.minimize(self.loss)
with fluid.unique_name.guard():
with fluid.program_guard(self.test_main_program,
self.startup_program):
network()
self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
self.fetch_list = [result]
self.enable_trt = True
self.trt_parameters = TensorRTMatMulQuantDequantDims3Test.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False)
self.activation_quantize_type = 'moving_average_abs_max'
self.weight_quantize_type = 'channel_wise_abs_max'
def set_params(self):
self.transpose_x = False
self.transpose_y = False
self.alpha = 1.0
def test_check_output(self):
#self.quant_dequant()
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(
use_gpu, atol=1, flatten=False, rtol=1e-1)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTMatMulQuantDequantDims3TransposeXTest(
TensorRTMatMulQuantDequantDims3Test):
def set_params(self):
self.transpose_x = True
self.transpose_y = False
self.alpha = 2.1
class TensorRTMatMulQuantDequantDims3TransposeYTest(
TensorRTMatMulQuantDequantDims3Test):
def set_params(self):
self.transpose_x = False
self.transpose_y = True
self.alpha = 3.9
class TensorRTMatMulQuantDequantDims3TransposeXYTest(
TensorRTMatMulQuantDequantDims3Test):
def set_params(self):
self.transpose_x = True
self.transpose_y = True
self.alpha = 8.4
class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
def setUp(self):
self.set_params()
def network():
self.data = fluid.data(
name='data', shape=[1, 28, 28], dtype='float32')
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
reshape_out = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14])
matmul_out = fluid.layers.matmul(
x=reshape_out,
y=reshape_out,
transpose_x=self.transpose_x,
transpose_y=self.transpose_y,
alpha=self.alpha)
out = fluid.layers.batch_norm(matmul_out, is_test=True)
fc_out = fluid.layers.fc(input=matmul_out,
size=10,
num_flatten_dims=1,
bias_attr=False,
act=None)
result = fluid.layers.relu(fc_out)
loss = fluid.layers.cross_entropy(input=result, label=self.label)
avg_loss = fluid.layers.mean(loss)
return avg_loss, result
self.main_program.random_seed = 2
self.startup_program.random_seed = 2
self.test_main_program.random_seed = 2
#self.test_startup_program.random_seed = 2
with fluid.unique_name.guard():
with fluid.program_guard(self.main_program, self.startup_program):
self.loss, result = network()
opt = fluid.optimizer.Adam(learning_rate=0.0001)
opt.minimize(self.loss)
with fluid.unique_name.guard():
with fluid.program_guard(self.test_main_program,
self.startup_program):
network()
self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
self.fetch_list = [result]
self.enable_trt = True
self.trt_parameters = TensorRTMatMulQuantDequantDims4Test.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False)
self.activation_quantize_type = 'moving_average_abs_max'
self.weight_quantize_type = 'channel_wise_abs_max'
def set_params(self):
self.transpose_x = False
self.transpose_y = False
self.alpha = 1.0
def test_check_output(self):
#self.quant_dequant()
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(
use_gpu, atol=1, flatten=False, rtol=1e-1)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTMatMulQuantDequantDims4TransposeXTest(
TensorRTMatMulQuantDequantDims4Test):
def set_params(self):
self.transpose_x = True
self.transpose_y = False
self.alpha = 3.2
class TensorRTMatMulQuantDequantDims4TransposeYTest(
TensorRTMatMulQuantDequantDims4Test):
def set_params(self):
self.transpose_x = False
self.transpose_y = True
self.alpha = 7.5
class TensorRTMatMulQuantDequantDims4TransposeXYTest(
TensorRTMatMulQuantDequantDims4Test):
def set_params(self):
self.transpose_x = True
self.transpose_y = True
self.alpha = 11.2
class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest):
def setUp(self):
self.set_params()
def network():
self.data = fluid.data(
name='data', shape=[-1, 28, 28], dtype='float32')
self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
matmul_out = fluid.layers.matmul(
x=self.data,
y=self.data,
transpose_x=self.transpose_x,
transpose_y=self.transpose_y,
alpha=self.alpha)
out = fluid.layers.batch_norm(matmul_out, is_test=True)
fc_out = fluid.layers.fc(input=matmul_out,
size=10,
num_flatten_dims=1,
bias_attr=False,
act=None)
result = fluid.layers.relu(fc_out)
loss = fluid.layers.cross_entropy(input=result, label=self.label)
avg_loss = fluid.layers.mean(loss)
return avg_loss, result
self.main_program.random_seed = 2
self.startup_program.random_seed = 2
self.test_main_program.random_seed = 2
#self.test_startup_program.random_seed = 2
with fluid.unique_name.guard():
with fluid.program_guard(self.main_program, self.startup_program):
self.loss, result = network()
opt = fluid.optimizer.Adam(learning_rate=0.0001)
opt.minimize(self.loss)
with fluid.unique_name.guard():
with fluid.program_guard(self.test_main_program,
self.startup_program):
network()
self.feeds = {"data": np.random.random([3, 28, 28]).astype("float32")}
self.fetch_list = [result]
self.enable_trt = True
self.trt_parameters = TensorRTMatMulQuantDequantDims3DynamicTest.TensorRTParam(
1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False)
self.dynamic_shape_params = TensorRTMatMulQuantDequantDims3DynamicTest.DynamicShapeParam(
{
'data': [1, 28, 28]
}, {'data': [4, 28, 28]}, {'data': [3, 28, 28]}, False)
self.activation_quantize_type = 'moving_average_abs_max'
self.weight_quantize_type = 'channel_wise_abs_max'
def set_params(self):
self.transpose_x = False
self.transpose_y = False
self.alpha = 1.0
def test_check_output(self):
#self.quant_dequant()
if core.is_compiled_with_cuda():
use_gpu = True
self.check_output_with_option(
use_gpu, atol=1, flatten=False, rtol=1e-1)
self.assertTrue(
PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTMatMulQuantDequantDims4TransposeXDynamicTest(
TensorRTMatMulQuantDequantDims3DynamicTest):
def set_params(self):
self.transpose_x = True
self.transpose_y = False
self.alpha = 2.0
class TensorRTMatMulQuantDequantDims4TransposeYDynamicTest(
TensorRTMatMulQuantDequantDims3DynamicTest):
def set_params(self):
self.transpose_x = False
self.transpose_y = True
self.alpha = 2.2
class TensorRTMatMulQuantDequantDims4TransposeXYDynamicTest(
TensorRTMatMulQuantDequantDims3DynamicTest):
def set_params(self):
self.transpose_x = True
self.transpose_y = True
self.alpha = 7.8
if __name__ == "__main__":
unittest.main()
| 38.444056
| 97
| 0.614097
|
7aa9b6189235d5029b4cd82b6f7aefaf395a296a
| 4,155
|
py
|
Python
|
influxdb_service_sdk/model/monitor_config/app_health_config_layer_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
influxdb_service_sdk/model/monitor_config/app_health_config_layer_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
influxdb_service_sdk/model/monitor_config/app_health_config_layer_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: app_health_config_layer.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from influxdb_service_sdk.model.monitor_config import app_health_config_metrics_pb2 as influxdb__service__sdk_dot_model_dot_monitor__config_dot_app__health__config__metrics__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='app_health_config_layer.proto',
package='monitor_config',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/monitor_config'),
serialized_pb=_b('\n\x1d\x61pp_health_config_layer.proto\x12\x0emonitor_config\x1aIinfluxdb_service_sdk/model/monitor_config/app_health_config_metrics.proto\"y\n\x14\x41ppHealthConfigLayer\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06weight\x18\x03 \x01(\x05\x12\x37\n\x07metrics\x18\x04 \x03(\x0b\x32&.monitor_config.AppHealthConfigMetricsBJZHgo.easyops.local/contracts/protorepo-models/easyops/model/monitor_configb\x06proto3')
,
dependencies=[influxdb__service__sdk_dot_model_dot_monitor__config_dot_app__health__config__metrics__pb2.DESCRIPTOR,])
_APPHEALTHCONFIGLAYER = _descriptor.Descriptor(
name='AppHealthConfigLayer',
full_name='monitor_config.AppHealthConfigLayer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='monitor_config.AppHealthConfigLayer.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='monitor_config.AppHealthConfigLayer.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='monitor_config.AppHealthConfigLayer.weight', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metrics', full_name='monitor_config.AppHealthConfigLayer.metrics', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=124,
serialized_end=245,
)
_APPHEALTHCONFIGLAYER.fields_by_name['metrics'].message_type = influxdb__service__sdk_dot_model_dot_monitor__config_dot_app__health__config__metrics__pb2._APPHEALTHCONFIGMETRICS
DESCRIPTOR.message_types_by_name['AppHealthConfigLayer'] = _APPHEALTHCONFIGLAYER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AppHealthConfigLayer = _reflection.GeneratedProtocolMessageType('AppHealthConfigLayer', (_message.Message,), {
'DESCRIPTOR' : _APPHEALTHCONFIGLAYER,
'__module__' : 'app_health_config_layer_pb2'
# @@protoc_insertion_point(class_scope:monitor_config.AppHealthConfigLayer)
})
_sym_db.RegisterMessage(AppHealthConfigLayer)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 43.28125
| 463
| 0.787485
|
2f770405365d5f98807e2397c0eb50ec905bcf1b
| 2,120
|
py
|
Python
|
tests/cli/test_cli.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 9,701
|
2019-04-16T15:46:27.000Z
|
2022-03-31T11:52:18.000Z
|
tests/cli/test_cli.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 6,420
|
2019-04-16T15:58:22.000Z
|
2022-03-31T17:54:35.000Z
|
tests/cli/test_cli.py
|
fintzd/rasa
|
6359be5509c7d87cd29c2ab5149bc45e843fea85
|
[
"Apache-2.0"
] | 3,063
|
2019-04-16T15:23:52.000Z
|
2022-03-31T00:01:12.000Z
|
from pathlib import Path
from typing import Callable
from _pytest.pytester import RunResult, Testdir
import pytest
import sys
def test_cli_start_is_fast(testdir: Testdir):
"""
    Checks that a call to ``rasa --help`` does not trigger any slow imports.
    If this test is failing, it means that a simple "rasa --help" command imports
    `tensorflow`, which makes our CLI extremely slow. In that case you've very
    likely added a global import of "tensorflow", which should be avoided.
    Consider making this import (or the import of its parent module) a local
    import.
If you are clueless where that import happens, you can run
```
python -X importtime -m rasa.__main__ --help 2> import.log
tuna import.log
```
to get the import chain.
    (make sure to run with python >= 3.7, and install tuna (pip install tuna))
"""
rasa_path = str(
(Path(__file__).parent / ".." / ".." / "rasa" / "__main__.py").absolute()
)
args = [sys.executable, "-X", "importtime", rasa_path, "--help"]
result = testdir.run(*args)
assert result.ret == 0
# tensorflow is slow -> can't get imported when running basic CLI commands
result.stderr.no_fnmatch_line("*tensorflow.python.eager")
def test_data_convert_help(run: Callable[..., RunResult]):
output = run("--help")
help_text = """usage: rasa [-h] [--version]
{init,run,shell,train,interactive,telemetry,test,visualize,data,export,x,evaluate}
..."""
lines = help_text.split("\n")
# expected help text lines should appear somewhere in the output
printed_help = set(output.outlines)
for line in lines:
assert line in printed_help
@pytest.mark.xfail(
sys.platform == "win32", reason="--version doesn't print anything on Windows"
)
def test_version_print_lines(run: Callable[..., RunResult]):
output = run("--version")
output_text = "".join(output.outlines)
assert "Rasa Version" in output_text
assert "Python Version" in output_text
assert "Operating System" in output_text
assert "Python Path" in output_text
| 33.650794
| 94
| 0.679245
|
780d8077ed4d0aa066d88f8ebd44a0e5d3e4aa03
| 5,256
|
py
|
Python
|
torchfilter/filters/_extended_information_filter.py
|
brentyi/torchfilter
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 84
|
2020-09-08T07:33:04.000Z
|
2022-03-30T17:25:00.000Z
|
torchfilter/filters/_extended_information_filter.py
|
brentyi/diffbayes
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 4
|
2020-11-03T14:32:11.000Z
|
2021-05-12T02:49:49.000Z
|
torchfilter/filters/_extended_information_filter.py
|
brentyi/diffbayes
|
da0250baf2197f59b6e67f37cafdd63015380cbb
|
[
"MIT"
] | 18
|
2020-11-04T22:20:55.000Z
|
2021-12-21T10:23:26.000Z
|
"""Private module; avoid importing from directly.
"""
from typing import cast
import fannypack
import torch
from overrides import overrides
from .. import types
from ..base import DynamicsModel, KalmanFilterBase, KalmanFilterMeasurementModel
class ExtendedInformationFilter(KalmanFilterBase):
"""Information form of a Kalman filter; generally equivalent to an EKF but
internally parameterizes uncertainties with the inverse covariance matrix.
For building estimators with more complex observation spaces (eg images), see
`VirtualSensorExtendedInformationFilter`.
"""
def __init__(
self,
*,
dynamics_model: DynamicsModel,
measurement_model: KalmanFilterMeasurementModel,
):
super().__init__(
dynamics_model=dynamics_model, measurement_model=measurement_model
)
# Parameterize posterior uncertainty with inverse covariance
self.information_vector: torch.Tensor
"""torch.Tensor: Information vector of our posterior; shape should be
`(N, state_dim)`."""
self.information_matrix: torch.Tensor
"""torch.Tensor: Information matrix of our posterior; shape should be
`(N, state_dim, state_dim)`."""
# overrides
@property
def belief_covariance(self) -> types.CovarianceTorch:
"""Posterior covariance. Shape should be `(N, state_dim, state_dim)`."""
return fannypack.utils.cholesky_inverse(torch.cholesky(self.information_matrix))
# overrides
@belief_covariance.setter
def belief_covariance(self, covariance: types.CovarianceTorch):
self.information_matrix = fannypack.utils.cholesky_inverse(
torch.cholesky(covariance)
)
@overrides
def _predict_step(self, *, controls: types.ControlsTorch) -> None:
# Get previous belief
prev_mean = self._belief_mean
prev_covariance = self.belief_covariance
N, state_dim = prev_mean.shape
# Compute mu_{t+1|t}, covariance, and Jacobian
pred_mean, dynamics_tril = self.dynamics_model(
initial_states=prev_mean, controls=controls
)
dynamics_covariance = dynamics_tril @ dynamics_tril.transpose(-1, -2)
dynamics_A_matrix = self.dynamics_model.jacobian(
initial_states=prev_mean, controls=controls
)
assert dynamics_covariance.shape == (N, state_dim, state_dim)
assert dynamics_A_matrix.shape == (N, state_dim, state_dim)
        # Calculate Sigma_{t+1|t} = A Sigma_{t|t} A^T + Q, then invert it to obtain
        # the predicted information matrix
pred_information_matrix = fannypack.utils.cholesky_inverse(
torch.cholesky(
dynamics_A_matrix
@ prev_covariance
@ dynamics_A_matrix.transpose(-1, -2)
+ dynamics_covariance
)
)
pred_information_vector = (
pred_information_matrix @ pred_mean[:, :, None]
).squeeze(-1)
# Update internal state
self._belief_mean = pred_mean
self.information_matrix = pred_information_matrix
self.information_vector = pred_information_vector
@overrides
def _update_step(self, *, observations: types.ObservationsTorch) -> None:
# Extract/validate inputs
assert isinstance(
observations, types.ObservationsNoDictTorch
), "For standard EKF, observations must be tensor!"
observations = cast(types.ObservationsNoDictTorch, observations)
pred_mean = self._belief_mean
pred_information_matrix = self.information_matrix
pred_information_vector = self.information_vector
# Measurement model forward pass, Jacobian
observations_mean = observations
pred_observations, observations_tril = self.measurement_model(states=pred_mean)
observations_information = fannypack.utils.cholesky_inverse(observations_tril)
C_matrix = self.measurement_model.jacobian(states=pred_mean)
C_matrix_transpose = C_matrix.transpose(-1, -2)
assert observations_mean.shape == pred_observations.shape
# Check shapes
N, observation_dim = observations_mean.shape
assert observations_information.shape == (N, observation_dim, observation_dim)
assert observations_mean.shape == (N, observation_dim)
# Compute update
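        # (Standard information-filter update, matching the code below:
        #    eta'    = eta_pred + C^T R^-1 (z - h(mu_pred) + C mu_pred)
        #    Lambda' = Lambda_pred + C^T R^-1 C
        #  where R^-1 is `observations_information` and C is the measurement Jacobian.)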
information_vector = pred_information_vector + (
C_matrix_transpose
@ observations_information
@ (
observations_mean[:, :, None]
- pred_observations[:, :, None]
+ C_matrix @ pred_mean[:, :, None]
)
).squeeze(-1)
assert information_vector.shape == (N, self.state_dim)
information_matrix = (
pred_information_matrix
+ C_matrix_transpose @ observations_information @ C_matrix
)
assert information_matrix.shape == (N, self.state_dim, self.state_dim)
# Update internal state
self.information_matrix = information_matrix
self.information_vector = information_vector
self._belief_mean = (
fannypack.utils.cholesky_inverse(torch.cholesky(information_matrix))
@ information_vector[:, :, None]
).squeeze(-1)
| 38.086957
| 88
| 0.670852
|
3700c9fc100ae36b7da2b47eba2b021b7259562b
| 38,637
|
py
|
Python
|
pythran/tests/test_numpy_func0.py
|
wizardxz/pythran
|
9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/test_numpy_func0.py
|
wizardxz/pythran
|
9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/test_numpy_func0.py
|
wizardxz/pythran
|
9a1b1c08cf9d3478be3b6313ac8ebca9e5b88e65
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
from pythran.tests import TestEnv
import numpy
import sys
from pythran.typing import NDArray, List, Tuple
@TestEnv.module
class TestNumpyFunc0(TestEnv):
def test_extended_sum0(self):
self.run_test("def numpy_extended_sum0(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum0=[NDArray[int,:,:,:,:]])
def test_extended_sum1(self):
self.run_test("def numpy_extended_sum1(a): import numpy ; return numpy.sum(a[1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum1=[NDArray[int,:,:,:,:]])
def test_extended_sum2(self):
self.run_test("def numpy_extended_sum2(a): import numpy ; return numpy.sum(a[1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum2=[NDArray[int,:,:,:,:]])
def test_extended_sum3(self):
self.run_test("def numpy_extended_sum3(a): import numpy ; return numpy.sum(a[1:-1])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum3=[NDArray[int,:,:,:,:]])
def test_extended_sum4(self):
self.run_test("def numpy_extended_sum4(a): import numpy ; return numpy.sum(a[1:-1,0])",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum4=[NDArray[int,:,:,:,:]])
def test_extended_sum5(self):
self.run_test("def numpy_extended_sum5(a): import numpy ; return numpy.sum(a)",
numpy.arange(120).reshape((3,5,4,2)),
numpy_extended_sum5=[NDArray[int,:,:,:,:]])
def test_out_sum0(self):
self.run_test("def numpy_out_sum0(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(2, dtype=int),
numpy_out_sum0=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum1(self):
self.run_test("def numpy_out_sum1(a, b): import numpy ; return numpy.sum(a, axis=0, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.ones(2, dtype=int),
numpy_out_sum1=[NDArray[int,:,:], NDArray[int,:]])
def test_out_sum2(self):
self.run_test("def numpy_out_sum2(a, b): import numpy ; return numpy.sum(a, axis=1, out=b)",
numpy.arange(10).reshape((5,2)),
numpy.zeros(5, dtype=int),
numpy_out_sum2=[NDArray[int,:,:], NDArray[int,:]])
def test_numpy_shape_as_function(self):
self.run_test("def numpy_shape_as_function(a): import numpy ; return numpy.shape(a)",
numpy.ones(3, numpy.int16),
numpy_shape_as_function=[NDArray[numpy.int16,:]])
def test_numpy_size_as_function(self):
self.run_test("def numpy_size_as_function(a): import numpy ; return numpy.size(a)",
numpy.ones(3, numpy.int16),
numpy_size_as_function=[NDArray[numpy.int16,:]])
def test_numpy_ndim_as_function(self):
self.run_test("def numpy_ndim_as_function(a): import numpy ; return numpy.ndim(a)",
numpy.ones(3, numpy.int16),
numpy_ndim_as_function=[NDArray[numpy.int16,:]])
def test_frexp0(self):
self.run_test("def np_frexp0(a): import numpy as np ; return np.frexp(a)", 1.5, np_frexp0=[float])
def test_frexp1(self):
self.run_test("def np_frexp1(a): import numpy as np ; return np.frexp(a)", numpy.array([1.1,2.2,3.3]), np_frexp1=[NDArray[float,:]])
def test_frexp2(self):
self.run_test("def np_frexp2(a): import numpy as np ; return np.frexp(a+a)", numpy.array([1.1,2.2,3.3]), np_frexp2=[NDArray[float,:]])
def test_ndindex0(self):
self.run_test("def np_ndindex0(): import numpy as np ; return [x for x in np.ndindex(5,6)]",
np_ndindex0=[])
def test_ndindex1(self):
self.run_test("def np_ndindex1(a): import numpy as np ; return [x for x in np.ndindex(a)]", 3, np_ndindex1=[int])
def test_ndindex2(self):
self.run_test("def np_ndindex2(n): import numpy as np ; return [x for x in np.ndindex((n,n))]", 3, np_ndindex2=[int])
def test_ndenumerate0(self):
self.run_test("def np_ndenumerate0(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([[1, 2], [3, 4]]), np_ndenumerate0=[NDArray[int,:,:]])
def test_ndenumerate1(self):
self.run_test("def np_ndenumerate1(a): import numpy as np ; return [x for x in np.ndenumerate(a)]", numpy.array([1, 2, 3, 4]), np_ndenumerate1=[NDArray[int,:]])
def test_nansum0(self):
self.run_test("def np_nansum0(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nansum0=[NDArray[float,:,:]])
def test_nansum1(self):
self.run_test("def np_nansum1(a): import numpy as np ; return np.nansum(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nansum1=[NDArray[float,:,:]])
def test_nansum2(self):
self.run_test("def np_nansum2(a): import numpy as np ; return np.nansum(a)", [1., numpy.nan], np_nansum2=[List[float]])
def test_nanmin0(self):
self.run_test("def np_nanmin0(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmin0=[NDArray[float,:,:]])
def test_nanmin1(self):
self.run_test("def np_nanmin1(a): import numpy as np ; return np.nanmin(a)" , numpy.array([[1, 2], [numpy.NINF, numpy.nan]]), np_nanmin1=[NDArray[float,:,:]])
def test_nanmax0(self):
self.run_test("def np_nanmax0(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [3, numpy.nan]]), np_nanmax0=[NDArray[float,:,:]])
def test_nanmax1(self):
self.run_test("def np_nanmax1(a): import numpy as np ; return np.nanmax(a)" , numpy.array([[1, 2], [numpy.inf, numpy.nan]]) , np_nanmax1=[NDArray[float,:,:]])
def test_np_residual(self):
self.run_test("""import numpy as np
def np_residual():
nx, ny, nz= 75, 75, 100
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
P = np.ones((nx, ny, nz), np.float64)
d2x = np.zeros_like(P)
d2y = np.zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y + 5*np.cosh(P).mean()**2
""", np_residual=[])
def test_np_func2(self):
self.run_test("""import numpy as np
def np_func2(x):
f = [x[0] * np.cos(x[1]) - 4,
x[1]*x[0] - x[1] - 5]
df = np.array([[np.cos(x[1]), -x[0] * np.sin(x[1])],
[x[1], x[0] - 1]])
return f, df
""", [1.0, 2.0, 3.0], np_func2=[List[float]])
def test_np_peval(self):
self.run_test("""import numpy
def np_peval(x, p):
return p[0]*numpy.sin(2*numpy.pi*p[1]*x+p[2])
""", 12., [1.0, 2.0, 3.0], np_peval=[float, List[float]])
def test_np_residuals(self):
self.run_test("""import numpy
def np_residuals():
x = numpy.arange(0,6e-2,6e-2/30)
A,k,theta = 10, 1.0/3e-2, numpy.pi/6
return A*numpy.sin(2*numpy.pi*k*x+theta)
""", np_residuals=[])
def test_np_func_deriv(self):
self.run_test("""import numpy
def np_func_deriv(x, sign=1.0):
dfdx0 = sign*(-2*x[0] + 2*x[1] + 2)
dfdx1 = sign*(2*x[0] - 4*x[1])
return numpy.array([ dfdx0, dfdx1 ])
""", [-1.0, 1.0], -1.0, np_func_deriv=[List[float], float])
def test_np_func(self):
self.run_test("""import numpy
def np_func(x, sign=1.0):
return sign*(2*x[0]*x[1] + 2*x[0] - x[0]**2 - 2*x[1]**2)
""", [-1.0, 1.0], -1.0, np_func=[List[float], float])
def test_rosen_hess_p(self):
self.run_test("""import numpy
def np_rosen_hess_p(x, p):
x = numpy.asarray(x)
Hp = numpy.zeros_like(x)
Hp[0] = (1200*x[0]**2 - 400*x[1] + 2)*p[0] - 400*x[0]*p[1]
Hp[1:-1] = -400*x[:-2]*p[:-2]+(202+1200*x[1:-1]**2-400*x[2:])*p[1:-1] \
-400*x[1:-1]*p[2:]
Hp[-1] = -400*x[-2]*p[-2] + 200*p[-1]
return Hp
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
numpy.array([2.3, 1.7, 1.8, 2.9, 2.2]),
np_rosen_hess_p=[NDArray[float,:], NDArray[float,:]])
def test_rosen_hess(self):
self.run_test("""import numpy
def np_rosen_hess(x):
x = numpy.asarray(x)
H = numpy.diag(-400*x[:-1],1) - numpy.diag(400*x[:-1],-1)
diagonal = numpy.zeros_like(x)
diagonal[0] = 1200*x[0]**2-400*x[1]+2
diagonal[-1] = 200
diagonal[1:-1] = 202 + 1200*x[1:-1]**2 - 400*x[2:]
H = H + numpy.diag(diagonal)
return H
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_hess=[NDArray[float,:]])
def test_rosen_der(self):
self.run_test("""import numpy
def np_rosen_der(x):
xm = x[1:-1]
xm_m1 = x[:-2]
xm_p1 = x[2:]
der = numpy.zeros_like(x)
der[1:-1] = 200*(xm-xm_m1**2) - 400*(xm_p1 - xm**2)*xm - 2*(1-xm)
der[0] = -400*x[0]*(x[1]-x[0]**2) - 2*(1-x[0])
der[-1] = 200*(x[-1]-x[-2]**2)
return der
""",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen_der=[NDArray[float,:]])
def test_rosen(self):
self.run_test("import numpy\ndef np_rosen(x): return sum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)",
numpy.array([1.3, 0.7, 0.8, 1.9, 1.2]),
np_rosen=[NDArray[float,:]])
def test_nanargmax0(self):
self.run_test("def np_nanargmax0(a): from numpy import nanargmax; return nanargmax(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmax0=[NDArray[float,:,:]])
def test_nanargmin0(self):
self.run_test("def np_nanargmin0(a): from numpy import nanargmin ; return nanargmin(a)", numpy.array([[numpy.nan, 4], [2, 3]]), np_nanargmin0=[NDArray[float,:,:]])
def test_nan_to_num0(self):
self.run_test("def np_nan_to_num0(a): import numpy as np ; return np.nan_to_num(a)", numpy.array([numpy.inf, -numpy.inf, numpy.nan, -128, 128]), np_nan_to_num0=[NDArray[float,:]])
def test_median0(self):
self.run_test("def np_median0(a): from numpy import median ; return median(a)", numpy.array([[1, 2], [3, 4]]), np_median0=[NDArray[int,:,:]])
def test_median1(self):
self.run_test("def np_median1(a): from numpy import median ; return median(a)", numpy.array([1, 2, 3, 4,5]), np_median1=[NDArray[int,:]])
def test_mean0(self):
self.run_test("def np_mean0(a): from numpy import mean ; return mean(a)", numpy.array([[1, 2], [3, 4]]), np_mean0=[NDArray[int,:,:]])
def test_mean1(self):
self.run_test("def np_mean1(a): from numpy import mean ; return mean(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_mean1=[NDArray[float,:,:]])
def test_mean2(self):
self.run_test("def np_mean2(a): from numpy import mean ; return mean(a)", numpy.array([[[1, 2], [3, 4.]]]), np_mean2=[NDArray[float,:,:,:]])
def test_mean3(self):
self.run_test("def np_mean3(a): from numpy import mean ; return mean(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_mean3=[NDArray[float,:,:,:]])
def test_mean4(self):
self.run_test("def np_mean4(a): from numpy import mean ; return mean(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_mean4=[NDArray[float,:,:,:]])
def test_mean5(self):
self.run_test("def np_mean5(a): from numpy import mean ; return mean(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_mean5=[NDArray[float,:,:,:]])
def test_var0(self):
self.run_test("def np_var0(a): return a.var()", numpy.array([[1, 2], [3, 4]], dtype=float), np_var0=[NDArray[float,:,:]])
def test_var1(self):
self.run_test("def np_var1(a): from numpy import var ; return var(a, 1)", numpy.array([[1, 2], [3, 4.]]), np_var1=[NDArray[float,:,:]])
def test_var2(self):
self.run_test("def np_var2(a): from numpy import var ; return var(a)", numpy.array([[[1, 2], [3, 4.]]]), np_var2=[NDArray[float,:,:,:]])
def test_var3(self):
self.run_test("def np_var3(a): from numpy import var ; return var(a, 0)", numpy.array([[[1, 2], [3, 4.]]]), np_var3=[NDArray[float,:,:,:]])
def test_var4(self):
self.run_test("def np_var4(a): from numpy import var ; return var(a, 1)", numpy.array([[[1, 2], [3, 4.]]]), np_var4=[NDArray[float,:,:,:]])
def test_var5(self):
self.run_test("def np_var5(a): from numpy import var ; return var(a, 2)", numpy.array([[[1, 2], [3, 4.]]]), np_var5=[NDArray[float,:,:,:]])
def test_std0(self):
self.run_test("def np_std0(a): from numpy import std ; return std(a)", numpy.array([[[1, 2], [3, 4]]]), np_std0=[NDArray[int, :, :, :]])
def test_std1(self):
self.run_test("def np_std1(a): from numpy import std ; return std(a, 0)", numpy.array([[[1, 2], [3, 4]]]), np_std1=[NDArray[int, :, :, :]])
def test_std2(self):
self.run_test("def np_std2(a): from numpy import std ; return std(a, 1)", numpy.array([[[1, 2], [3, 4]]]), np_std2=[NDArray[int, :, :, :]])
def test_logspace0(self):
self.run_test("def np_logspace0(start, stop): from numpy import logspace ; start, stop = 3., 4. ; return logspace(start, stop, 4)", 3., 4., np_logspace0=[float, float])
def test_logspace1(self):
self.run_test("def np_logspace1(start, stop): from numpy import logspace ; return logspace(start, stop, 4, False)", 3., 4., np_logspace1=[float, float])
def test_logspace2(self):
self.run_test("def np_logspace2(start, stop): from numpy import logspace ; return logspace(start, stop, 4, True, 2.0)", 3., 4., np_logspace2=[float, float])
def test_lexsort0(self):
self.run_test("def np_lexsort0(surnames): from numpy import lexsort ; first_names = ('Heinrich', 'Galileo', 'Gustav') ; return lexsort((first_names, surnames))", ('Hertz', 'Galilei', 'Hertz'), np_lexsort0=[Tuple[str, str, str]])
def test_lexsort1(self):
self.run_test("def np_lexsort1(a): from numpy import lexsort ; b = [1,5,1,4,3,4,4] ; return lexsort((a,b))", [9,4,0,4,0,2,1], np_lexsort1=[List[int]])
def test_lexsort2(self):
self.run_test("def np_lexsort2(a): from numpy import lexsort ; return lexsort((a+1,a-1))", numpy.array([1,5,1,4,3,4,4]), np_lexsort2=[NDArray[int,:]])
def test_issctype0(self):
self.run_test("def np_issctype0(): from numpy import issctype, int32 ; a = int32 ; return issctype(a)", np_issctype0=[])
def test_issctype1(self):
self.run_test("def np_issctype1(): from numpy import issctype ; a = list ; return issctype(a)", np_issctype1=[])
def test_issctype2(self):
self.run_test("def np_issctype2(a): from numpy import issctype ; return issctype(a)", 3.1, np_issctype2=[float])
def test_isscalar0(self):
self.run_test("def np_isscalar0(a): from numpy import isscalar ; return isscalar(a)", 3.1, np_isscalar0=[float])
def test_isscalar1(self):
self.run_test("def np_isscalar1(a): from numpy import isscalar ; return isscalar(a)", [3.1], np_isscalar1=[List[float]])
def test_isscalar2(self):
self.run_test("def np_isscalar2(a): from numpy import isscalar ; return isscalar(a)", '3.1', np_isscalar2=[str])
def test_isrealobj0(self):
self.run_test("def np_isrealobj0(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.]), np_isrealobj0=[NDArray[float,:]])
def test_isrealobj1(self):
self.run_test("def np_isrealobj1(a): from numpy import isrealobj ; return isrealobj(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isrealobj1=[NDArray[complex,:,:]])
def test_isreal0(self):
self.run_test("def np_isreal0(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.]), np_isreal0=[NDArray[float,:]])
def test_isreal1(self):
self.run_test("def np_isreal1(a): from numpy import isreal ; return isreal(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_isreal1=[NDArray[complex,:,:]])
def test_iscomplex0(self):
self.run_test("def np_iscomplex0(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1, 2, 3.]), np_iscomplex0=[NDArray[float,:]])
def test_iscomplex1(self):
self.run_test("def np_iscomplex1(a): from numpy import iscomplex ; return iscomplex(a)", numpy.array([1,2,3.,4 + 1j]).reshape((2,2)), np_iscomplex1=[NDArray[complex,:,:]])
def test_intersect1d0(self):
self.run_test("def np_intersect1d0(a): from numpy import intersect1d ; b = [3, 1, 2, 1] ; return intersect1d(a,b)", [1, 3, 4, 3], np_intersect1d0=[List[int]])
def test_insert0(self):
self.run_test("def np_insert0(a): from numpy import insert ; return insert(a, 1, 5)", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert0=[NDArray[int,:,:]])
def test_insert1(self):
self.run_test("def np_insert1(a): from numpy import insert ; return insert(a, [1,2], [5,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert1=[NDArray[int,:,:]])
def test_insert2(self):
self.run_test("def np_insert2(a): from numpy import insert ; return insert(a, [1,1], [5.2,6])", numpy.array([[1, 1], [2, 2], [3, 3]]), np_insert2=[NDArray[int,:,:]])
def test_inner0(self):
self.run_test("def np_inner0(x): from numpy import inner ; y = 3 ; return inner(x,y)", 2, np_inner0=[int])
def test_inner1(self):
self.run_test("def np_inner1(x): from numpy import inner ; y = [2, 3] ; return inner(x,y)", [2, 3], np_inner1=[List[int]])
def test_indices0(self):
self.run_test("def np_indices0(s): from numpy import indices ; return indices(s)", (2, 3), np_indices0=[Tuple[int, int]])
def test_identity0(self):
self.run_test("def np_identity0(a): from numpy import identity ; return identity(a)", 3, np_identity0=[int])
def test_identity1(self):
self.run_test("def np_identity1(a): from numpy import identity ;return identity(a)", 4, np_identity1=[int])
def test_fromstring0(self):
self.run_test("def np_fromstring0(a): from numpy import fromstring, uint8 ; return fromstring(a, uint8)", '\x01\x02', np_fromstring0=[str])
def test_fromstring1(self):
self.run_test("def np_fromstring1(a): from numpy import fromstring, uint8 ; a = '\x01\x02\x03\x04' ; return fromstring(a, uint8,3)", '\x01\x02\x03\x04', np_fromstring1=[str])
def test_fromstring2(self):
self.run_test("def np_fromstring2(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,-1, ' ')", '1 2 3 4', np_fromstring2=[str])
def test_fromstring3(self):
self.run_test("def np_fromstring3(a): from numpy import fromstring, uint32 ; return fromstring(a, uint32,2, ',')", '1,2, 3, 4', np_fromstring3=[str])
def test_outer0(self):
self.run_test("def np_outer0(x): from numpy import outer ; return outer(x, x+2)", numpy.arange(6).reshape(2,3), np_outer0=[NDArray[int,:,:]])
def test_outer1(self):
self.run_test("def np_outer1(x): from numpy import outer; return outer(x, range(6))", numpy.arange(6).reshape((2,3)), np_outer1=[NDArray[int,:,:]])
def test_place0(self):
self.run_test("def np_place0(x): from numpy import place, ravel ; place(x, x>1, ravel(x**2)); return x", numpy.arange(6).reshape((2,3)), np_place0=[NDArray[int,:,:]])
def test_place1(self):
self.run_test("def np_place1(x): from numpy import place ; place(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_place1=[NDArray[int,:,:]])
def test_product(self):
self.run_test("def np_product(x):\n from numpy import product\n return product(x)", numpy.arange(1, 10), np_product=[NDArray[int,:]])
def test_ptp0(self):
self.run_test("def np_ptp0(x): return x.ptp()", numpy.arange(4).reshape((2,2)), np_ptp0=[NDArray[int,:,:]])
def test_ptp1(self):
self.run_test("def np_ptp1(x): from numpy import ptp ; return ptp(x,0)", numpy.arange(4).reshape((2,2)), np_ptp1=[NDArray[int,:,:]])
def test_ptp2(self):
self.run_test("def np_ptp2(x): from numpy import ptp ; return ptp(x,1)", numpy.arange(4).reshape((2,2)), np_ptp2=[NDArray[int,:,:]])
def test_put0(self):
self.run_test("def np_put0(x): x.put([0,2], [-44, -55]); return x", numpy.arange(5), np_put0=[NDArray[int,:]])
def test_put1(self):
self.run_test("def np_put1(x): from numpy import put ; put(x, [0,2,3], [57, 58]); return x", numpy.arange(6).reshape((2, 3)), np_put1=[NDArray[int,:,:]])
def test_put2(self):
self.run_test("def np_put2(x): from numpy import put ; put(x, 2, 57); return x", numpy.arange(6).reshape((2,3)), np_put2=[NDArray[int,:,:]])
def test_putmask0(self):
self.run_test("def np_putmask0(x): from numpy import putmask ; putmask(x, x>1, x**2); return x", numpy.arange(6).reshape((2,3)), np_putmask0=[NDArray[int,:,:]])
def test_putmask1(self):
self.run_test("def np_putmask1(x): from numpy import putmask; putmask(x, x>1, [57, 58]); return x", numpy.arange(6).reshape((2,3)), np_putmask1=[NDArray[int,:,:]])
def test_ravel0(self):
self.run_test("def np_ravel0(x): from numpy import ravel ; return ravel(x)", numpy.arange(6).reshape((2,3)), np_ravel0=[NDArray[int,:,:]])
def test_ravel1(self):
self.run_test("def np_ravel1(x): return x.ravel()", numpy.arange(6).reshape((2,3)), np_ravel1=[NDArray[int,:,:]])
def test_repeat0(self):
self.run_test("def np_repeat0(x): from numpy import repeat; return repeat(x, 3)", numpy.arange(3), np_repeat0=[NDArray[int,:]])
def test_repeat1(self):
self.run_test("def np_repeat1(x): return x.repeat(3)", numpy.arange(6).reshape(2,3), np_repeat1=[NDArray[int,:,:]])
def test_repeat2(self):
self.run_test("def np_repeat2(x): from numpy import repeat; return repeat(x, 4, axis=0)", numpy.arange(6).reshape(2,3), np_repeat2=[NDArray[int,:,:]])
def test_repeat3(self):
self.run_test("def np_repeat3(x): from numpy import repeat; return repeat(x, 4, axis=1)", numpy.arange(6).reshape(2,3), np_repeat3=[NDArray[int,:,:]])
def test_resize4(self):
self.run_test("def np_resize4(x): from numpy import resize ; return resize(x, (6,7))", numpy.arange(24).reshape((2,3,4)), np_resize4=[NDArray[int, :, :, :]])
def test_resize3(self):
self.run_test("def np_resize3(x): from numpy import resize; return resize(x, (6,6))", numpy.arange(24).reshape((2,3,4)), np_resize3=[NDArray[int, :, :, :]])
def test_resize2(self):
self.run_test("def np_resize2(x): from numpy import resize; return resize(x, (3,3))", numpy.arange(24).reshape((2,3,4)), np_resize2=[NDArray[int, :, :, :]])
def test_resize1(self):
self.run_test("def np_resize1(x): from numpy import resize; return resize(x, 32)", numpy.arange(24), np_resize1=[NDArray[int,:]])
def test_resize0(self):
self.run_test("def np_resize0(x): from numpy import resize; return resize(x, 12)", numpy.arange(24), np_resize0=[NDArray[int,:]])
def test_rollaxis3(self):
self.run_test("def np_rollaxis3(x): from numpy import rollaxis; return rollaxis(x, 0, 3)", numpy.arange(24).reshape((2,3,4)), np_rollaxis3=[NDArray[int, :, :, :]])
def test_rollaxis2(self):
self.run_test("def np_rollaxis2(x): from numpy import rollaxis; return rollaxis(x, 2)", numpy.arange(24).reshape((2,3,4)), np_rollaxis2=[NDArray[int, :, :, :]])
def test_rollaxis1(self):
self.run_test("def np_rollaxis1(x): from numpy import rollaxis; return rollaxis(x, 1, 2)", numpy.arange(24).reshape(2,3,4), np_rollaxis1=[NDArray[int, :, :, :]])
def test_rollaxis0(self):
self.run_test("def np_rollaxis0(x): from numpy import rollaxis; return rollaxis(x, 1)", numpy.arange(24).reshape(2,3,4), np_rollaxis0=[NDArray[int, :, :, :]])
def test_roll6(self):
self.run_test("def np_roll6(x): from numpy import roll; return roll(x[:,:,:-1], -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll6=[NDArray[int, :, :, :]])
def test_roll5(self):
self.run_test("def np_roll5(x): from numpy import roll; return roll(x, -1, 2)", numpy.arange(24).reshape(2,3,4), np_roll5=[NDArray[int, :, :, :]])
def test_roll4(self):
self.run_test("def np_roll4(x): from numpy import roll; return roll(x, 1, 1)", numpy.arange(24).reshape(2,3,4), np_roll4=[NDArray[int, :, :, :]])
def test_roll3(self):
self.run_test("def np_roll3(x): from numpy import roll; return roll(x, -1, 0)", numpy.arange(24).reshape(2,3,4), np_roll3=[NDArray[int, :, :, :]])
def test_roll2(self):
self.run_test("def np_roll2(x): from numpy import roll; return roll(x, -1)", numpy.arange(24).reshape(2,3,4), np_roll2=[NDArray[int, :, :, :]])
def test_roll1(self):
self.run_test("def np_roll1(x): from numpy import roll; return roll(x, 10)", numpy.arange(24).reshape(2,3,4), np_roll1=[NDArray[int, :, :, :]])
def test_roll0(self):
self.run_test("def np_roll0(x): from numpy import roll; return roll(x, 3)", numpy.arange(24).reshape(2,3,4), np_roll0=[NDArray[int, :, :, :]])
def test_searchsorted3(self):
self.run_test("def np_searchsorted3(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]])", numpy.arange(6), np_searchsorted3=[NDArray[int,:]])
def test_searchsorted2(self):
self.run_test("def np_searchsorted2(x): from numpy import searchsorted; return searchsorted(x, [[3,4],[1,87]], 'right')", numpy.arange(6), np_searchsorted2=[NDArray[int,:]])
def test_searchsorted1(self):
self.run_test("def np_searchsorted1(x): from numpy import searchsorted; return searchsorted(x, 3)", numpy.arange(6), np_searchsorted1=[NDArray[int,:]])
def test_searchsorted0(self):
self.run_test("def np_searchsorted0(x): from numpy import searchsorted; return searchsorted(x, 3, 'right')", numpy.arange(6), np_searchsorted0=[NDArray[int,:]])
def test_rank1(self):
self.run_test("def np_rank1(x): from numpy import rank; return rank(x)", numpy.arange(24).reshape(2,3,4), np_rank1=[NDArray[int, :, :, :]])
def test_rank0(self):
self.run_test("def np_rank0(x): from numpy import rank; return rank(x)", numpy.arange(6), np_rank0=[NDArray[int,:]])
def test_rot904(self):
self.run_test("def np_rot904(x): from numpy import rot90; return rot90(x, 4)", numpy.arange(24).reshape(2,3,4), np_rot904=[NDArray[int, :, :, :]])
def test_rot903(self):
self.run_test("def np_rot903(x): from numpy import rot90; return rot90(x, 2)", numpy.arange(24).reshape(2,3,4), np_rot903=[NDArray[int, :, :, :]])
def test_rot902(self):
self.run_test("def np_rot902(x): from numpy import rot90; return rot90(x, 3)", numpy.arange(24).reshape(2,3,4), np_rot902=[NDArray[int, :, :, :]])
def test_rot900(self):
self.run_test("def np_rot900(x): from numpy import rot90; return rot90(x)", numpy.arange(24).reshape(2,3,4), np_rot900=[NDArray[int, :, :, :]])
def test_rot901(self):
self.run_test("def np_rot901(x): from numpy import rot90; return rot90(x)", numpy.arange(4).reshape(2,2), np_rot901=[NDArray[int,:,:]])
def test_select2(self):
self.run_test("def np_select2(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x**3, x**2]; return select(condlist, choicelist)", numpy.arange(10).reshape(2,5), np_select2=[NDArray[int,:,:]])
def test_select1(self):
self.run_test("def np_select1(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x+3, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select1=[NDArray[int,:]])
def test_select0(self):
self.run_test("def np_select0(x): from numpy import select; condlist = [x<3, x>5]; choicelist = [x, x**2]; return select(condlist, choicelist)", numpy.arange(10), np_select0=[NDArray[int,:]])
def test_sometrue0(self):
self.run_test("def np_sometrue0(a): from numpy import sometrue ; return sometrue(a)", numpy.array([[True, False], [True, True]]), np_sometrue0=[NDArray[bool,:,:]])
def test_sometrue1(self):
self.run_test("def np_sometrue1(a): from numpy import sometrue ; return sometrue(a, 0)", numpy.array([[True, False], [False, False]]), np_sometrue1=[NDArray[bool,:,:]])
def test_sometrue2(self):
self.run_test("def np_sometrue2(a): from numpy import sometrue ; return sometrue(a)", [-1, 0, 5], np_sometrue2=[List[int]])
def test_sort0(self):
self.run_test("def np_sort0(a): from numpy import sort ; return sort(a)", numpy.array([[1,6],[7,5]]), np_sort0=[NDArray[int,:,:]])
def test_sort1(self):
self.run_test("def np_sort1(a): from numpy import sort ; return sort(a)", numpy.array([2, 1, 6, 3, 5]), np_sort1=[NDArray[int,:]])
def test_sort2(self):
self.run_test("def np_sort2(a): from numpy import sort ; return sort(a)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort2=[NDArray[int, :, :, :]])
def test_sort3(self):
self.run_test("def np_sort3(a): from numpy import sort ; return sort(a, 0)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort3=[NDArray[int, :, :, :]])
def test_sort4(self):
self.run_test("def np_sort4(a): from numpy import sort ; return sort(a, 1)", numpy.arange(2*3*4, 0, -1).reshape(2,3,4), np_sort4=[NDArray[int, :, :, :]])
def test_sort_complex0(self):
self.run_test("def np_sort_complex0(a): from numpy import sort_complex ; return sort_complex(a)", numpy.array([[1,6],[7,5]]), np_sort_complex0=[NDArray[int,:,:]])
def test_sort_complex1(self):
self.run_test("def np_sort_complex1(a): from numpy import sort_complex ; return sort_complex(a)", numpy.array([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]), np_sort_complex1=[NDArray[complex,:]])
def test_split0(self):
self.run_test("def np_split0(a): from numpy import split,array2string ; return map(array2string,split(a, 3))", numpy.arange(12), np_split0=[NDArray[int,:]])
def test_split1(self):
self.run_test("def np_split1(a):\n from numpy import split\n try:\n print(split(a, 5))\n return False\n except ValueError:\n return True", numpy.arange(12), np_split1=[NDArray[int,:]])
def test_split2(self):
self.run_test("def np_split2(a): from numpy import split, array2string; return map(array2string,split(a, [0,1,5]))", numpy.arange(12).reshape(6,2), np_split2=[NDArray[int,:,:]])
@unittest.skip("Require numpy_fexpr for multidim array")
def test_take0(self):
self.run_test("def np_take0(a):\n from numpy import take\n return take(a, [0,1])", numpy.arange(24).reshape(2,3,4), np_take0=[NDArray[int, :, :, :]])
@unittest.skip("Require numpy_fexpr for multidim array")
def test_take1(self):
self.run_test("def np_take1(a):\n from numpy import take\n return take(a, [[0,0,2,2],[1,0,1,2]])", numpy.arange(24).reshape(2,3,4), np_take1=[NDArray[int, :, :, :]])
@unittest.skip("Require numpy_fexpr with indices")
def test_take2(self):
self.run_test("def np_take2(a):\n from numpy import take\n return take(a, [1,0,1,2])", numpy.arange(24), np_take2=[NDArray[int,:]])
def test_swapaxes_(self):
self.run_test("def np_swapaxes_(a):\n from numpy import swapaxes\n return swapaxes(a, 1, 2)", numpy.arange(24).reshape(2,3,4), np_swapaxes_=[NDArray[int, :, :, :]])
def test_tile0(self):
self.run_test("def np_tile0(a): from numpy import tile ; return tile(a, 3)", numpy.arange(4), np_tile0=[NDArray[int,:]])
def test_tile1(self):
self.run_test("def np_tile1(a): from numpy import tile ; return tile(a, (3, 2))", numpy.arange(4), np_tile1=[NDArray[int,:]])
def test_tolist0(self):
self.run_test("def np_tolist0(a): return a.tolist()", numpy.arange(12), np_tolist0=[NDArray[int,:]])
def test_tolist1(self):
self.run_test("def np_tolist1(a): return a.tolist()", numpy.arange(12).reshape(3,4), np_tolist1=[NDArray[int,:,:]])
def test_tolist2(self):
self.run_test("def np_tolist2(a): return a.tolist()", numpy.arange(2*3*4*5).reshape(2,3,4,5), np_tolist2=[NDArray[int, :, :, :, :]])
@unittest.skipIf(sys.version_info.major == 3, "Not supported in Pythran3")
def test_tostring0(self):
self.run_test("def np_tostring0(a): return a.tostring()", numpy.arange(80, 100), np_tostring0=[NDArray[int,:]])
@unittest.skipIf(sys.version_info.major == 3, "Not supported in Pythran3")
def test_tostring1(self):
self.run_test("def np_tostring1(a): return a.tostring()", numpy.arange(500, 600), np_tostring1=[NDArray[int,:]])
def test_fromiter0(self):
self.run_test("def g(): yield 1 ; yield 2\ndef np_fromiter0(): from numpy import fromiter, float32 ; iterable = g() ; return fromiter(iterable, float32)", np_fromiter0=[])
def test_fromiter1(self):
self.run_test("def np_fromiter1(): from numpy import fromiter, float32 ; iterable = (x*x for x in range(5)) ; return fromiter(iterable, float32, 5)", np_fromiter1=[])
def test_fromiter2(self):
self.run_test("def np_fromiter2(): from numpy import fromiter, float64 ; iterable = (x-x for x in range(5)) ; return fromiter(iterable, count=2, dtype=float64)", np_fromiter2=[])
def test_fromfunction0(self):
self.run_test("def np_fromfunction0(s): from numpy import fromfunction ; return fromfunction(lambda i: i == 1, s)", (3,), np_fromfunction0=[Tuple[int]])
def test_fromfunction1(self):
self.run_test("def np_fromfunction1(s): from numpy import fromfunction; return fromfunction(lambda i, j: i + j, s)", (3, 3), np_fromfunction1=[Tuple[int, int]])
def test_flipud0(self):
self.run_test("def np_flipud0(x): from numpy import flipud ; return flipud(x)", numpy.arange(9).reshape(3,3), np_flipud0=[NDArray[int,:,:]])
def test_fliplr0(self):
self.run_test("def np_fliplr0(x): from numpy import fliplr ; return fliplr(x)", numpy.arange(9).reshape(3,3), np_fliplr0=[NDArray[int,:,:]])
def test_flip3(self):
self.run_test("def np_flip3(x): from numpy import flip; return flip(x[:,:,:-1], 2)", numpy.arange(24).reshape(2,3,4), np_flip3=[NDArray[int, :, :, :]])
def test_flip2(self):
self.run_test("def np_flip2(x): from numpy import flip; return flip(x, 2)", numpy.arange(24).reshape(2,3,4), np_flip2=[NDArray[int, :, :, :]])
def test_flip1(self):
self.run_test("def np_flip1(x): from numpy import flip; return flip(x, 1)", numpy.arange(24).reshape(2,3,4), np_flip1=[NDArray[int, :, :, :]])
def test_flip0(self):
self.run_test("def np_flip0(x): from numpy import flip; return flip(x, 0)", numpy.arange(24).reshape(2,3,4), np_flip0=[NDArray[int, :, :, :]])
def test_flatten0(self):
self.run_test("def np_flatten0(x): return x.flatten()", numpy.array([[1,2], [3,4]]), np_flatten0=[NDArray[int,:,:]])
def test_flatnonzero0(self):
self.run_test("def np_flatnonzero0(x): from numpy import flatnonzero ; return flatnonzero(x)", numpy.arange(-2, 3), np_flatnonzero0=[NDArray[int,:]])
def test_flatnonzero1(self):
self.run_test("def np_flatnonzero1(x): from numpy import flatnonzero ; return flatnonzero(x[1:-1])", numpy.arange(-2, 3), np_flatnonzero1=[NDArray[int,:]])
def test_fix0(self):
self.run_test("def np_fix0(x): from numpy import fix ; return fix(x)", 3.14, np_fix0=[float])
def test_fix1(self):
self.run_test("def np_fix1(x): from numpy import fix ; return fix(x)", 3, np_fix1=[int])
def test_fix2(self):
self.run_test("def np_fix2(x): from numpy import fix ; return fix(x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix2=[NDArray[float,:]])
def test_fix3(self):
self.run_test("def np_fix3(x): from numpy import fix ; return fix(x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix3=[NDArray[float,:]])
def test_fix4(self):
self.run_test("def np_fix4(x): from numpy import fix ; return fix(x+x)", numpy.array([2.1, 2.9, -2.1, -2.9]), np_fix4=[NDArray[float,:]])
def test_finfo0(self):
self.run_test("def np_finfo0(): from numpy import finfo, float64 ; x = finfo(float64) ; return x.eps", np_finfo0=[])
def test_fill0(self):
self.run_test("def np_fill0(x): x.fill(5) ; return x", numpy.ones((2, 3)), np_fill0=[NDArray[float,:,:]])
def test_eye0(self):
self.run_test("def np_eye0(x): from numpy import eye ; return eye(x)", 2, np_eye0=[int])
def test_eye1(self):
self.run_test("def np_eye1(x): from numpy import eye ; return eye(x, x+1)", 2, np_eye1=[int])
def test_eye1b(self):
self.run_test("def np_eye1b(x): from numpy import eye ; return eye(x, x-1)", 3, np_eye1b=[int])
def test_eye2(self):
self.run_test("def np_eye2(x): from numpy import eye ; return eye(x, x, 1)", 2, np_eye2=[int])
def test_eye3(self):
self.run_test("def np_eye3(x): from numpy import eye, int32 ; return eye(x, x, 1, int32)", 2, np_eye3=[int])
def test_eye4(self):
self.run_test("def np_eye4(x): from numpy import eye, uint32 ; return eye(x, dtype=uint32)", 2, np_eye4=[int])
def test_ediff1d0(self):
self.run_test("def np_ediff1d0(x): from numpy import ediff1d ; return ediff1d(x)", [1,2,4,7,0], np_ediff1d0=[List[int]])
def test_ediff1d1(self):
self.run_test("def np_ediff1d1(x): from numpy import ediff1d ; return ediff1d(x)", [[1,2,4],[1,6,24]], np_ediff1d1=[List[List[int]]])
def test_print_slice(self):
self.run_test("def np_print_slice(a): print(a[:-1])", numpy.arange(12), np_print_slice=[NDArray[int,:]])
def test_print_expr(self):
self.run_test("def np_print_expr(a): print(a * 2)", numpy.arange(12), np_print_expr=[NDArray[int,:]])
def test_broadcast_to0(self):
self.run_test("def np_broadcast_to0(a, s): import numpy as np; return np.broadcast_to(a, s)", numpy.arange(12), (4, 12), np_broadcast_to0=[NDArray[int,:], Tuple[int, int]])
def test_broadcast_to1(self):
self.run_test("def np_broadcast_to1(a, s): import numpy as np; return np.broadcast_to(a, s)", numpy.arange(1), (4, 12), np_broadcast_to1=[NDArray[int,:], Tuple[int, int]])
def test_broadcast_to2(self):
self.run_test("def np_broadcast_to2(a, s): import numpy as np; return np.broadcast_to(a, s)", 5., (12, 2), np_broadcast_to2=[float, Tuple[int, int]])
| 54.341772 | 239 | 0.628801 |
38be5b690c82f63d34d7ad782142bf3bec45a076 | 35,655 | py | Python | care/care/doctype/order_receiving/order_receiving.py | mohsinalimat/care | 5b2f85839c5fa9882eb0d0097825e149402a6a8c | ["MIT"] | null | null | null | care/care/doctype/order_receiving/order_receiving.py | mohsinalimat/care | 5b2f85839c5fa9882eb0d0097825e149402a6a8c | ["MIT"] | null | null | null | care/care/doctype/order_receiving/order_receiving.py | mohsinalimat/care | 5b2f85839c5fa9882eb0d0097825e149402a6a8c | ["MIT"] | null | null | null |
# Copyright (c) 2021, RF and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.utils import nowdate, getdate, cstr
from frappe.model.document import Document
import json
from frappe.utils import flt
from erpnext.controllers.taxes_and_totals import get_itemised_tax_breakup_data
from erpnext.stock.get_item_details import _get_item_tax_template, get_conversion_factor, get_item_tax_map
from erpnext.controllers.accounts_controller import get_taxes_and_charges
from care.hook_events.purchase_invoice import get_price_list_rate_for
from erpnext.controllers.accounts_controller import get_default_taxes_and_charges
from erpnext.accounts.doctype.pricing_rule.pricing_rule import apply_pricing_rule
class OrderReceiving(Document):
def validate(self):
if self.get("__islocal"):
self.status = 'Draft'
self.update_total_margin()
# self.set_missing_value()
@frappe.whitelist()
def update_total_margin(self):
for res in self.items:
if self.is_return:
if res.qty != res.received_qty and not res.code:
frappe.throw("Return qty is not equal to received qty in row <b>{0}</b>. <span style='color:red'>Please split the qty.</span>".format(res.idx))
margin = -100
if res.selling_price_list_rate > 0:
margin = (res.selling_price_list_rate - res.rate) / res.selling_price_list_rate * 100
res.margin_percent = margin
res.discount_after_rate = round(res.amount, 2) / res.qty
self.calculate_item_level_tax_breakup()
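# Worked example (illustrative numbers, not real data) for the margin logic
# above: with rate = 80 and selling_price_list_rate = 100,
# margin_percent = (100 - 80) / 100 * 100 = 20.0; when no selling price is
# known (selling_price_list_rate = 0) the sentinel -100 is kept. For a row
# with amount = 400 and qty = 5, discount_after_rate = round(400, 2) / 5 = 80.0.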
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
def on_submit(self):
if len(self.items) <= 100 and self.warehouse:
self.updated_price_list_and_discount()
make_purchase_invoice(self)
if self.is_return:
frappe.db.set(self, 'status', 'Return')
else:
frappe.db.set(self, 'status', 'Submitted')
elif len(self.items) <= 50:
self.updated_price_list_and_discount()
make_purchase_invoice(self)
if self.is_return:
frappe.db.set(self, 'status', 'Return')
else:
frappe.db.set(self, 'status', 'Submitted')
else:
self.ignore_un_order_item = 1
self.updated_price_list_and_discount()
frappe.enqueue(make_purchase_invoice, doc=self, queue='long')
frappe.db.set(self, 'status', 'Queue')
self.update_p_r_c_tool_status()
@frappe.whitelist()
def create_purchase_receipt(self):
make_purchase_invoice(self)
@frappe.whitelist()
def check_purchase_receipt_created(self):
result = frappe.db.sql("""select name from `tabPurchase Receipt`
where order_receiving = '{0}'""".format(self.name))
if result:
return True
else:
return False
def calculate_item_level_tax_breakup(self):
if self:
itemised_tax, itemised_taxable_amount = get_itemised_tax_breakup_data(self)
if itemised_tax:
for res in self.items:
total = 0
if res.item_code in itemised_tax.keys():
for key in itemised_tax[res.item_code].keys():
if 'Sales Tax' in key:
res.sales_tax = flt(itemised_tax[res.item_code][key]['tax_amount']) if \
itemised_tax[res.item_code][key]['tax_amount'] else 0
total += flt(res.sales_tax)
if 'Further Tax' in key:
res.further_tax = flt(itemised_tax[res.item_code][key]['tax_amount']) if \
itemised_tax[res.item_code][key]['tax_amount'] else 0
total += flt(res.further_tax)
if 'Advance Tax' in key:
res.advance_tax = flt(itemised_tax[res.item_code][key]['tax_amount']) if \
itemised_tax[res.item_code][key]['tax_amount'] else 0
res.total_include_taxes = flt(res.sales_tax + res.further_tax + res.advance_tax) + res.amount
else:
for res in self.items:
res.sales_tax = res.further_tax = res.advance_tax = res.total_include_taxes = 0
@frappe.whitelist()
def get_item_code(self):
i_lst = []
select_item_list = []
if self.purchase_request:
for res in self.items:
select_item_list.append(res.item_code)
result = frappe.db.sql("""select distinct pi.item_code from `tabPurchase Request Item` as pi
inner join `tabPurchase Request` as p on p.name = pi.parent
where p.name = '{0}'""".format(self.purchase_request), as_dict=True)
for res in result:
if res.get('item_code') not in select_item_list:
i_lst.append(res.get('item_code'))
return i_lst
def updated_price_list_and_discount(self):
if self.update_buying_price or self.update_selling_price:
for res in self.items:
if self.update_buying_price and res.rate != res.base_buying_price_list_rate:
buying_price_list = frappe.get_value("Item Price", {'item_code': res.item_code,
'price_list': self.buying_price_list,
'buying': 1}, ['name'])
if buying_price_list:
item_price = frappe.get_doc("Item Price", buying_price_list)
item_price.price_list_rate = res.rate / res.conversion_factor
item_price.save(ignore_permissions=True)
if self.update_selling_price and res.selling_price_list_rate != res.base_selling_price_list_rate:
selling_price_list = frappe.get_value("Item Price", {'item_code': res.item_code,
'price_list': self.base_selling_price_list,
'selling': 1}, ['name'])
if selling_price_list:
item_price = frappe.get_doc("Item Price", selling_price_list)
item_price.price_list_rate = res.selling_price_list_rate / res.conversion_factor
item_price.save(ignore_permissions=True)
if self.update_discount and res.discount_percent:
query = """select p.name from `tabPricing Rule` as p
inner join `tabPricing Rule Item Code` as pi on pi.parent = p.name
where p.apply_on = 'Item Code'
and p.disable = 0
and p.price_or_product_discount = 'Price'
and p.applicable_for = 'Supplier'
and p.supplier = '{0}'
and pi.item_code = '{1}'
""".format(self.supplier, res.item_code)
query += """ and valid_from <= '{0}' order by valid_from desc limit 1""".format(nowdate())
result = frappe.db.sql(query)
if result:
p_rule = frappe.get_doc("Pricing Rule", result[0][0])
text = ""
# if res.discount:
# text = f"""Updated Discount Amount {p_rule.discount_amount} to {res.discount} From Order Receiving"""
# p_rule.rate_or_discount = 'Discount Amount'
# p_rule.discount_amount = res.discount
if float(res.discount_percent) != float(p_rule.discount_percentage):
text = f"""Updated Discount Percentage {p_rule.discount_percentage}
to {res.discount_percent} From Order Receiving"""
p_rule.rate_or_discount = 'Discount Percentage'
p_rule.discount_percentage = res.discount_percent
p_rule.save(ignore_permissions=True)
p_rule.add_comment(comment_type='Info', text=text, link_doctype=p_rule.doctype,
link_name=p_rule.name)
else:
priority = 1
p_rule = frappe.new_doc("Pricing Rule")
p_rule.title = 'Discount'
p_rule.apply_on = 'Item Code'
p_rule.price_or_product_discount = 'Price'
p_rule.currency = self.currency
p_rule.applicable_for = "Supplier"
p_rule.supplier = self.supplier
p_rule.buying = 1
p_rule.priority = priority
p_rule.valid_from = nowdate()
p_rule.append("items", {'item_code': res.item_code})
# if res.discount:
# p_rule.rate_or_discount = 'Discount Amount'
# p_rule.discount_amount = res.discount
# if res.discount_percent:
p_rule.rate_or_discount = 'Discount Percentage'
p_rule.discount_percentage = res.discount_percent
p_rule.save(ignore_permissions=True)
text = "Pricing Rule created from Order Receiving"
p_rule.add_comment(comment_type='Info', text=text, link_doctype=p_rule.doctype,
link_name=p_rule.name)
def set_missing_value(self):
if self:
self.company = frappe.defaults.get_defaults().company
self.buying_price_list = frappe.defaults.get_defaults().buying_price_list
self.currency = frappe.defaults.get_defaults().currency
self.base_selling_price_list = frappe.defaults.get_defaults().selling_price_list
taxes_and_charges = get_default_taxes_and_charges(master_doctype='Purchase Taxes and Charges Template',
company=self.company)
self.taxes_and_charges = taxes_and_charges.get('taxes_and_charges')
taxes = get_taxes_and_charges('Purchase Taxes and Charges Template', self.taxes_and_charges)
if not self.get('taxes'):
for tax in taxes:
self.append('taxes', tax)
if not self.posting_date:
self.posting_date = nowdate()
for itm in self.get('items'):
itm.conversion_factor = get_conversion_factor(itm.item_code, itm.uom).get('conversion_factor')
amt = itm.rate * itm.qty
if itm.discount > 0:
discount_percent = (itm.discount / amt) * 100
itm.discount_percent = discount_percent
discount_amount = (amt / 100) * itm.discount_percent
amount = amt - discount_amount
dis_aft_rate = amount / itm.qty
itm.amount = amount
itm.net_amount = amount
itm.base_net_amount = amount
itm.discount = discount_amount
itm.discount_after_rate = dis_aft_rate
args = {
'item_code': itm.item_code,
'supplier': self.supplier,
'currency': self.currency,
'price_list': self.buying_price_list,
'price_list_currency': self.currency,
'company': self.company,
'transaction_date': self.posting_date,
'doctype': self.doctype,
'name': self.name,
'qty': itm.qty or 1,
'net_rate': itm.rate,
'child_docname': itm.name,
'uom': itm.uom,
'stock_uom': itm.stock_uom,
'conversion_factor': itm.conversion_factor
}
buying_rate = get_price_list_rate_for(itm.item_code, json.dumps(args)) or 0
itm.base_buying_price_list_rate = buying_rate
args['price_list'] = self.base_selling_price_list
selling_rate = get_price_list_rate_for(itm.item_code, json.dumps(args)) or 0
itm.selling_price_list_rate = selling_rate
itm.base_selling_price_list_rate = selling_rate
itm.item_tax_template = get_item_tax_template(itm.item_code, json.dumps(args))
itm.item_tax_rate = get_item_tax_map(self.company, itm.item_tax_template)
def update_p_r_c_tool_status(self):
if self.purchase_invoice_creation_tool:
prc_doc = frappe.get_doc("Purchase Invoice Creation Tool", self.purchase_invoice_creation_tool)
prc_doc.status = "Order Created"
prc_doc.db_update()
@frappe.whitelist()
def get_item_filter(self):
itm_lst = []
items = frappe.db.sql("select item_code from `tabOrder Receiving Item` where parent = '{0}'".format(self.name), as_dict=True)
for res in items:
itm_lst.append(res.item_code)
return itm_lst
def make_purchase_invoice(doc):
material_demand = frappe.get_list("Material Demand",
{'supplier': doc.supplier, 'purchase_request': doc.purchase_request}, ['name'])
m_list = []
for res in material_demand:
m_list.append(res.name)
if doc.items:
if doc.warehouse:
is_franchise = frappe.get_value("Warehouse", {'name': doc.warehouse}, "is_franchise")
cost_center = frappe.get_value("Warehouse", {'name': doc.warehouse}, "cost_center")
pi = frappe.new_doc("Purchase Receipt")
pi.supplier = doc.supplier
pi.posting_date = nowdate()
pi.due_date = nowdate()
pi.company = doc.company
pi.taxes_and_charges = doc.taxes_and_charges
pi.order_receiving = doc.name
pi.update_stock = 1 if not is_franchise else 0
pi.set_warehouse = doc.warehouse
pi.cost_center = cost_center
pi.ignore_pricing_rule = 1
pi.is_return = doc.is_return
if doc.is_return:
pr_rec = frappe.get_value("Purchase Receipt", {"order_receiving": doc.return_ref,'set_warehouse': doc.warehouse}, "name")
pi.return_against = pr_rec
for d in doc.items:
md_item = frappe.get_value("Material Demand Item",
{'item_code': d.get('item_code'), 'parent': ['in', m_list],
"warehouse": doc.warehouse}, "name")
if md_item:
md_doc = frappe.get_doc("Material Demand Item", md_item)
pi.append("items", {
"item_code": d.get('item_code'),
"warehouse": md_doc.warehouse,
"qty": 0 - d.get('qty') if doc.is_return else d.get('qty'),
"received_qty": 0 - d.get('qty') if doc.is_return else d.get('qty'),
"rate": d.get('discount_after_rate'),
"expense_account": md_doc.expense_account,
"cost_center": md_doc.cost_center,
"uom": md_doc.uom,
"item_tax_template": d.get('item_tax_template'),
"item_tax_rate": d.get('item_tax_rate'),
"stock_Uom": md_doc.stock_uom,
"material_demand": md_doc.parent,
"material_demand_item": md_doc.name,
"order_receiving_item": d.name,
"margin_type": "Percentage" if d.get("discount_percent") else None,
"discount_percentage": d.get("discount_percent"),
})
else:
if not doc.ignore_un_order_item:
frappe.throw(_("Item <b>{0}</b> not found in Material Demand").format(d.get('item_code')))
if pi.get('items'):
taxes = get_taxes_and_charges('Purchase Taxes and Charges Template', doc.taxes_and_charges)
for tax in taxes:
pi.append('taxes', tax)
pi.set_missing_values()
for res in pi.items:
if res.order_receiving_item:
if not frappe.get_value("Order Receiving Item", res.order_receiving_item, 'item_tax_template'):
res.item_tax_template = None
res.item_tax_rate = '{}'
pi.insert(ignore_permissions=True)
else:
item_details = {}
for d in doc.items:
if d.code:
data = json.loads(d.code)
for res in data:
if res.get('qty') > 0:
md_item = frappe.get_list("Material Demand Item", {'item_code': d.get('item_code'),
'warehouse': res.get('warehouse'),
'parent': ['in', m_list]}, ['name'])
if md_item:
for p_tm in md_item:
md_doc = frappe.get_doc("Material Demand Item", p_tm.name)
if md_doc:
s = {
"item_code": d.get('item_code'),
"warehouse": md_doc.warehouse,
"qty": 0 - res.get('qty') if doc.is_return else res.get('qty'),
"received_qty": 0 - res.get('qty') if doc.is_return else res.get('qty'),
"rate": d.get('discount_after_rate'),
"expense_account": md_doc.expense_account,
"cost_center": md_doc.cost_center,
"uom": md_doc.uom,
"stock_Uom": md_doc.stock_uom,
"material_demand": md_doc.parent,
"material_demand_item": md_doc.name,
"order_receiving_item": d.name,
"item_tax_template": d.get('item_tax_template'),
"item_tax_rate": d.get('item_tax_rate'),
"margin_type": "Percentage" if d.get("discount_percent") else None,
"discount_percentage": d.get("discount_percent"),
}
key = (md_doc.warehouse)
item_details.setdefault(key, {"details": []})
fifo_queue = item_details[key]["details"]
fifo_queue.append(s)
else:
if not doc.ignore_un_order_item:
frappe.throw(
_("Item <b>{0}</b> not found in Material Demand").format(d.get('item_code')))
else:
md_item = frappe.get_list("Material Demand Item",
{'item_code': d.get('item_code'), 'parent': ['in', m_list]}, ['name'])
received_qty = d.get('qty')
if md_item:
for p_tm in md_item:
if received_qty > 0:
md_doc = frappe.get_doc("Material Demand Item", p_tm.name)
if md_doc:
qty = md_doc.qty if md_doc.qty <= received_qty else received_qty
s = {
"item_code": d.get('item_code'),
"warehouse": md_doc.warehouse,
"qty": 0 - qty if doc.is_return else qty,
"received_qty": 0 - qty if doc.is_return else qty,
"rate": d.get('discount_after_rate'),
"expense_account": md_doc.expense_account,
"cost_center": md_doc.cost_center,
"uom": md_doc.uom,
"stock_Uom": md_doc.stock_uom,
"material_demand": md_doc.parent,
"material_demand_item": md_doc.name,
"order_receiving_item": d.name,
"item_tax_template": d.get('item_tax_template'),
"item_tax_rate": d.get('item_tax_rate'),
"margin_type": "Percentage" if d.get("discount_percent") else None,
"discount_percentage": d.get("discount_percent"),
}
received_qty -= md_doc.qty
key = (md_doc.warehouse)
item_details.setdefault(key, {"details": []})
fifo_queue = item_details[key]["details"]
fifo_queue.append(s)
if received_qty > 0:
s = {
"item_code": d.get('item_code'),
"warehouse": doc.c_b_warehouse,
"qty": 0 - received_qty if doc.is_return else received_qty,
"received_qty": 0 - received_qty if doc.is_return else received_qty,
"rate": d.get('discount_after_rate'),
"uom": d.get('uom'),
"stock_Uom": d.get('stock_uom'),
"item_tax_template": d.get('item_tax_template'),
"item_tax_rate": d.get('item_tax_rate'),
"order_receiving_item": d.name,
"margin_type": "Percentage" if d.get("discount_percent") else None,
"discount_percentage": d.get("discount_percent"),
}
key = (doc.c_b_warehouse)
item_details.setdefault(key, {"details": []})
fifo_queue = item_details[key]["details"]
fifo_queue.append(s)
else:
if not doc.ignore_un_order_item:
frappe.throw(_("Item <b>{0}</b> not found in Material Demand").format(d.get('item_code')))
if item_details:
for key in item_details.keys():
try:
is_franchise = frappe.get_value("Warehouse", {'name': key}, "is_franchise")
cost_center = frappe.get_value("Warehouse", {'name': key}, "cost_center")
pi = frappe.new_doc("Purchase Receipt")
pi.supplier = doc.supplier
pi.posting_date = nowdate()
pi.due_date = nowdate()
pi.company = doc.company
pi.taxes_and_charges = doc.taxes_and_charges
pi.order_receiving = doc.name
pi.purchase_request = doc.purchase_request
pi.update_stock = 1 if not is_franchise else 0
pi.set_warehouse = key
pi.cost_center = cost_center
pi.ignore_pricing_rule = 1
pi.is_return = doc.is_return
if doc.is_return:
pr_rec = frappe.get_value("Purchase Receipt", {"order_receiving": doc.return_ref, "set_warehouse": key}, ["name"])
pi.return_against = pr_rec
for d in item_details[key]['details']:
pi.append("items", d)
if pi.get('items'):
taxes = get_taxes_and_charges('Purchase Taxes and Charges Template',
doc.taxes_and_charges)
for tax in taxes:
pi.append('taxes', tax)
pi.set_missing_values()
for res in pi.items:
if res.order_receiving_item:
if not frappe.get_value("Order Receiving Item", res.order_receiving_item,
'item_tax_template'):
res.item_tax_template = None
res.item_tax_rate = '{}'
pi.insert(ignore_permissions=True)
except Exception as e:
frappe.log_error(message=frappe.get_traceback(), title="Order Receiving: Purchase Receipt creation failed")
continue
frappe.msgprint(_("Purchase Receipt Created"), alert=1)
if doc.is_return:
frappe.db.set(doc, 'status', 'Return')
else:
frappe.db.set(doc, 'status', 'Submitted')
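# Shape of the intermediate ``item_details`` map built above when the Order
# Receiving has no single warehouse set (warehouse names and values below are
# illustrative only):
#
#     item_details = {
#         "Main Store - C": {"details": [{"item_code": ..., "qty": ...}, ...]},
#         "Outlet 1 - C": {"details": [...]},
#     }
#
# One Purchase Receipt is then created per warehouse key, so a failure for one
# warehouse (caught and logged above) does not block the remaining receipts.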
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_item_code(doctype, txt, searchfield, start, page_len, filters):
if filters.get('purchase_request'):
result = frappe.db.sql("""select distinct pi.item_code, pi.item_name from `tabPurchase Request Item` as pi
inner join `tabPurchase Request` as p on p.name = pi.parent
where p.name = '{0}'""".format(filters.get('purchase_request')))
return result
else:
return (" ",)
@frappe.whitelist()
def get_item_qty(purchase_request, item, supplier, warehouse=None):
if purchase_request and supplier and item:
if warehouse:
qty = float(frappe.db.sql("""select sum(pack_order_qty) from `tabPurchase Request Item`
where item_code = '{0}'
and parent = '{1}'
and supplier = '{2}'
and warehouse = '{3}'""".format(item, purchase_request, supplier, warehouse))[0][0] or 0)
return qty
else:
qty = float(frappe.db.sql("""select sum(pack_order_qty) from `tabPurchase Request Item`
where item_code = '{0}'
and parent = '{1}'
and supplier = '{2}'""".format(item, purchase_request, supplier))[0][0] or 0)
return qty
else:
return 0
@frappe.whitelist()
def get_warehouse(purchase_request, item):
if purchase_request and item:
result = frappe.db.sql("""select w.name as warehouse,
IFNULL(sum(p.pack_order_qty), 0) as order_qty,
IFNULL(sum(p.pack_order_qty), 0) as qty
from `tabWarehouse` as w
left join `tabPurchase Request Item` as p on w.name = p.warehouse and p.parent ='{0}' and p.item_code ='{1}'
where w.is_group = 0 and w.auto_select_in_purchase_request = 1
group by w.name""".format(purchase_request, item), as_dict=True)
return result
return []
@frappe.whitelist()
def get_item_tax_template(item, args, out=None):
"""
args = {
"tax_category": None,
"item_tax_template": None
}
"""
item = frappe.get_doc("Item", item)
item_tax_template = None
args = json.loads(args)
if item.taxes:
item_tax_template = _get_item_tax_template(args, item.taxes, out)
return item_tax_template
if not item_tax_template:
item_group = item.item_group
while item_group and not item_tax_template:
item_group_doc = frappe.get_cached_doc("Item Group", item_group)
item_tax_template = _get_item_tax_template(args, item_group_doc.taxes, out)
return item_tax_template
@frappe.whitelist()
def get_total_receive_qty(doc_name,item):
if doc_name and item:
qty = float(frappe.db.sql("""select ifnull(sum(qty),0) from `tabOrder Receiving Item`
where parent = '{0}' and item_code ='{1}'""".format(doc_name, item))[0][0] or 0)
rate = float(frappe.db.sql("""select ifnull(sum(rate),0) from `tabOrder Receiving Item`
where parent = '{0}' and item_code ='{1}'""".format(doc_name, item))[0][0] or 0)
return {'qty': qty,'rate': rate}
return {'qty': 0,'rate': 0}
@frappe.whitelist()
def make_return_entry(doc_name, items):
if doc_name and items:
self = frappe.get_doc("Order Receiving", doc_name)
doc = frappe.new_doc("Order Receiving")
doc.posting_date = nowdate()
doc.company = self.company
doc.c_b_warehouse = self.c_b_warehouse
doc.purchase_request = self.purchase_request
doc.supplier = self.supplier
doc.ignore_un_order_item = self.ignore_un_order_item
doc.warehouse = self.warehouse
doc.return_ref = self.name
doc.is_return = 1
# doc.status = 'Return'
data = json.loads(items)
if len(data) > 0:
for d in data:
item_doc = frappe.get_doc("Item", d.get('item_code'))
doc.append("items", {
"item_code": item_doc.name,
"item_name": item_doc.item_name,
"received_qty": d.get('rec_qty'),
"qty": d.get('return_qty'),
"rate": d.get('rate'),
"uom": 'Pack',
"stock_uom": item_doc.stock_uom,
"discount_percent": 0,
"discount": 0
})
doc.set_missing_value()
return doc.as_dict()
def calculate_line_level_tax(doc, method):
for res in doc.items:
if res.item_tax_template:
item_tax_template = frappe.get_doc('Item Tax Template', res.item_tax_template)
for tax in item_tax_template.taxes:
if 'Sales Tax' in tax.tax_type:
res.sales_tax = res.amount * (tax.tax_rate / 100)
if 'Further Tax' in tax.tax_type:
res.further_tax = res.amount * (tax.tax_rate / 100)
if 'Advance Tax' in tax.tax_type:
res.advance_tax = res.amount * (tax.tax_rate / 100)
res.total_include_taxes = flt(res.sales_tax + res.further_tax + res.advance_tax) + res.amount
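# Worked example (illustrative rates only) for calculate_line_level_tax: a row
# with amount = 1000 and an Item Tax Template holding "Sales Tax" at 17% and
# "Further Tax" at 3% ends up with sales_tax = 1000 * 0.17 = 170,
# further_tax = 1000 * 0.03 = 30 and, with advance_tax at 0,
# total_include_taxes = 170 + 30 + 1000 = 1200.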
@frappe.whitelist()
def get_items_details(item_code, doc, item):
if item_code:
doc = json.loads(doc)
item = json.loads(item)
company = doc.get('company')
buying_price_list = doc.get('buying_price_list')
currency = doc.get('currency')
base_selling_price_list = doc.get('base_selling_price_list')
conversion_factor = get_conversion_factor(item_code, item.get('uom')).get('conversion_factor') or 1
args = {
'item_code': item.get('item_code'),
'supplier': doc.get('supplier'),
'currency': currency,
'price_list':buying_price_list,
'price_list_currency': currency,
'company': company,
'transaction_date': doc.get('posting_date'),
'doctype': doc.get('doctype'),
'name': doc.get('name'),
'qty': item.get('qty') or 1,
'child_docname': item.get('name'),
'uom': item.get('uom'),
'stock_uom': item.get('stock_uom'),
'conversion_factor': conversion_factor
}
buying_rate = get_price_list_rate_for(item_code, json.dumps(args)) or 0
args['price_list'] = base_selling_price_list
selling_rate = get_price_list_rate_for(item_code, json.dumps(args)) or 0
item_tax_template = get_item_tax_template(item_code, json.dumps(args))
qty = get_item_qty(doc.get('purchase_request'), item_code, doc.get('supplier'), doc.get('warehouse'))
rule = apply_price_rule(doc, item, conversion_factor)
discount_percentage = discount_amount = 0
if rule:
if rule[0].margin_type == 'Percentage' and rule[0].discount_percentage > 0:
discount_percentage = rule[0].discount_percentage
if rule[0].margin_type == 'Amount' and rule[0].discount_amount > 0:
discount_amount = rule[0].discount_amount
return {'buying_price_rate': buying_rate,
'selling_price_rate': selling_rate,
'conversion_factor': conversion_factor,
'item_tax_template': item_tax_template,
'qty': qty,
'discount_percentage': discount_percentage or 0,
'discount_amount': discount_amount or 0
}
def apply_price_rule(doc, item, conversion_factor):
args = {
"items": [
{
"parenttype": item.get('parenttype'),
"parent": item.get('parent'),
'item_code': item.get('item_code'),
'doctype': item.get('doctype'),
'name': item.get('name'),
'qty': item.get('qty') or 1,
'child_docname': item.get('name'),
'uom': item.get('uom'),
'stock_uom': item.get('stock_uom'),
'conversion_factor': conversion_factor
}
],
"supplier": doc.get('supplier'),
"currency": doc.get('currency'),
"conversion_rate": doc.get('conversion_rate'),
"price_list": doc.get('buying_price_list'),
"price_list_currency": doc.get('currency'),
"plc_conversion_rate": 0,
"company": doc.get('company'),
"transaction_date": doc.get('posting_date'),
"doctype": doc.get('doctype'),
"name": doc.get('name'),
"is_return": 0,
"update_stock": 0,
"pos_profile": ""
}
price_rule = apply_pricing_rule(args = json.dumps(args), doc=doc)
return price_rule
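# A minimal sketch (all identifiers below are made-up examples, not real
# records) of calling the whitelisted get_items_details helper above from
# server-side code, e.g. in a test or a custom script:
#
#     doc = {"company": "My Company", "supplier": "SUP-0001",
#            "currency": "PKR", "buying_price_list": "Standard Buying",
#            "base_selling_price_list": "Standard Selling",
#            "posting_date": "2021-06-01", "doctype": "Order Receiving",
#            "name": "new-order-receiving-1", "purchase_request": "PREQ-0001",
#            "warehouse": "Main Store - C"}
#     item = {"item_code": "ITEM-0001", "uom": "Pack", "qty": 5,
#             "stock_uom": "Nos", "name": "row-1"}
#     details = get_items_details("ITEM-0001", json.dumps(doc), json.dumps(item))
#     # -> {'buying_price_rate': ..., 'selling_price_rate': ...,
#     #     'conversion_factor': ..., 'item_tax_template': ..., 'qty': ...,
#     #     'discount_percentage': ..., 'discount_amount': ...}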
| 52.203514 | 163 | 0.507923 |
5770241404ebe4d30d320d94aae96cf437e5eb0c | 141 | py | Python | output/models/sun_data/elem_decl/target_ns/target_ns00301m/target_ns00301m2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/sun_data/elem_decl/target_ns/target_ns00301m/target_ns00301m2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/sun_data/elem_decl/target_ns/target_ns00301m/target_ns00301m2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | ["MIT"] | null | null | null |
from output.models.sun_data.elem_decl.target_ns.target_ns00301m.target_ns00301m2_xsd.target_ns00301m2 import Root
__all__ = [
"Root",
]
| 23.5 | 113 | 0.808511 |
cfc342aabf9aca5bbafb51076862817bdc39ff98 | 4,161 | py | Python | prediction/src/preprocess/crop_patches.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | ["MIT"] | 346 | 2017-08-04T12:26:11.000Z | 2018-10-16T06:51:45.000Z | prediction/src/preprocess/crop_patches.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | ["MIT"] | 296 | 2017-08-02T10:17:05.000Z | 2018-07-31T05:29:43.000Z | prediction/src/preprocess/crop_patches.py | yasiriqbal1/concept-to-clinic-1 | 3b7d34a6b31e8d3924934f3e5c990c49813c670e | ["MIT"] | 159 | 2017-08-04T07:34:52.000Z | 2018-10-16T18:34:08.000Z |
import itertools
import numpy as np
import scipy.ndimage
from src.preprocess.preprocess_ct import mm_coordinates_to_voxel
def crop_patch(ct_array, patch_shape=None, centroids=None, stride=None, pad_value=0):
"""
Generator yield a patch of a desired shape for each centroid from a given a
CT scan.
Args:
ct_array (np.ndarray): a numpy ndarray representation of a CT scan
patch_shape (int, list[int]): a desired shape of a patch. If int will be provided,
then patch will be a cube-shaped.
centroids (list[dict]): A list of centroids of the form::
{'x': int,
'y': int,
'z': int}
stride (int): stride for patch coordinates meshgrid.
If None is set (default), then no meshgrid will be returned.
pad_value (int): value with which an array padding will be performed.
Yields:
np.ndarray: cropped patch from a CT scan.
np.ndarray | None: meshgrid of a patch.
"""
if centroids is None:
centroids = []
if patch_shape is None:
patch_shape = []
patch_shape = scipy.ndimage._ni_support._normalize_sequence(patch_shape, len(ct_array.shape))
patch_shape = np.array(patch_shape)
# array with padding size for each dimension
padding_size = np.ceil(patch_shape / 2.).astype(int)
# array with left and right padding for each dimension
padding_array = np.stack([padding_size, padding_size], axis=1)
# adding paddings at both ends of all dimensions
ct_array = np.pad(ct_array, padding_array, mode='constant', constant_values=pad_value)
for centroid in centroids:
# cropping a patch with selected centroid in the center of it
patch = ct_array[centroid[0]: centroid[0] + patch_shape[0],
centroid[1]: centroid[1] + patch_shape[1],
centroid[2]: centroid[2] + patch_shape[2]]
if stride:
normstart = np.array(centroid) / np.array(ct_array.shape) - 0.5
normsize = np.array(patch_shape) / np.array(ct_array.shape)
xx, yy, zz = np.meshgrid(np.linspace(normstart[0], normstart[0] + normsize[0], patch_shape[0] // stride),
np.linspace(normstart[1], normstart[1] + normsize[1], patch_shape[1] // stride),
np.linspace(normstart[2], normstart[2] + normsize[2], patch_shape[2] // stride),
indexing='ij')
coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')
yield patch, coord
else:
yield patch
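# A minimal, self-contained sketch of crop_patch on synthetic data
# (illustrative shapes only, no CT scan involved):
#
#     import numpy as np
#     dummy_ct = np.zeros((32, 32, 32))
#     voxel_centroids = [(16, 16, 16)]          # already in voxel coordinates
#     patch = next(crop_patch(dummy_ct, patch_shape=12,
#                             centroids=voxel_centroids))
#     patch.shape                                # -> (12, 12, 12)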
def patches_from_ct(ct_array, meta, patch_shape=None, centroids=None, stride=None, pad_value=0):
"""
Given a CT scan, and a list of centroids return the list of patches of the
desired patch shape.
This is just a wrapper over crop_patch generator.
Args:
ct_array (np.ndarray): a numpy ndarray representation of a CT scan
patch_shape (int | list[int]): the size of
If int will be provided, then patch will be a cube.
centroids (list[dict]): A list of centroids of the form::
{'x': int,
'y': int,
'z': int}
meta (src.preprocess.load_ct.MetaData): meta information of the CT scan.
stride (int): stride for patches' coordinates meshgrids.
If None is set (default), then no meshgrid will be returned.
pad_value (int): value with which an array padding will be performed.
Yields:
np.ndarray: a cropped patch from the CT scan.
"""
if patch_shape is None:
patch_shape = []
if centroids is None:
centroids = []
centroids = [[centroid[axis] for axis in 'zyx'] for centroid in centroids]
# scale the coordinates according to spacing
centroids = [mm_coordinates_to_voxel(centroid, meta) for centroid in centroids]
patch_generator = crop_patch(ct_array, patch_shape, centroids, stride, pad_value)
patches = itertools.islice(patch_generator, len(centroids))
return list(patches)
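# Usage sketch for patches_from_ct (hedged: the exact MetaData interface comes
# from src.preprocess.load_ct and is not shown here). Centroids are given in
# millimetres and converted to voxel indices via mm_coordinates_to_voxel:
#
#     centroids = [{'x': 12.5, 'y': -8.0, 'z': 95.0}]        # mm coordinates
#     patches = patches_from_ct(ct_array, meta, patch_shape=42,
#                               centroids=centroids)
#     len(patches)                                            # -> 1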
| 40.009615 | 118 | 0.631339 |
8876a6878dfcebd640c8834b68fe870ccb69c794 | 25,399 | py | Python | nipype/sphinxext/plot_workflow.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | 1 | 2015-01-19T13:12:27.000Z | 2015-01-19T13:12:27.000Z | nipype/sphinxext/plot_workflow.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | null | null | null | nipype/sphinxext/plot_workflow.py | sebastientourbier/nipype | 99c5904176481520c5bf42a501aae1a12184e672 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
:mod:`nipype.sphinxext.plot_workflow` -- Workflow plotting extension
====================================================================
A directive for including a nipype workflow graph in a Sphinx document.
This code is forked from the plot_figure sphinx extension of matplotlib.
By default, in HTML output, `workflow` will include a .png file with a
link to a high-res .png. In LaTeX output, it will include a
.pdf.
The source code for the workflow may be included as **inline content** to
the directive `workflow`::
.. workflow ::
:graph2use: flat
:simple_form: no
from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline
wf = create_connectivity_pipeline()
For example, the following graph has been generated inserting the previous
code block in this documentation:
.. workflow ::
:graph2use: flat
:simple_form: no
from nipype.workflows.dmri.camino.connectivity_mapping import create_connectivity_pipeline
wf = create_connectivity_pipeline()
Options
-------
The ``workflow`` directive supports the following options:
graph2use : {'hierarchical', 'colored', 'flat', 'orig', 'exec'}
Specify the type of graph to be generated.
simple_form: bool
Whether the graph will be in detailed or simple form.
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `workflow_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since workflow will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The workflow directive has the following configuration options:
graph2use
Select a graph type to use
simple_form
determines if the node name shown in the visualization is either of the form nodename
(package) when set to True or nodename.Class.package when set to False.
wf_include_source
Default value for the include-source option
wf_html_show_source_link
Whether to show a link to the source in HTML.
wf_pre_code
Code that should be executed before each workflow.
wf_basedir
Base directory, to which ``workflow::`` file names are relative
to. (If None or empty, file names are relative to the
directory where the file containing the directive is.)
wf_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen. When passing from
the command line through sphinx_build the list should be passed as
suffix:dpi,suffix:dpi, ....
wf_html_show_formats
Whether to show links to the files in HTML.
wf_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each workflow.
wf_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a workflow directive. This configuration option overrides this behavior
and applies rcParams before each workflow.
wf_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
wf_template
Provide a customized template for preparing restructured text.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
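# A minimal sketch of how the configuration options documented above might be
# set in a Sphinx conf.py (only the graph2use/simple_form/wf_* names come from
# this module; the concrete values are illustrative assumptions):
#
#     extensions = ['nipype.sphinxext.plot_workflow']
#     graph2use = 'hierarchical'
#     simple_form = True
#     wf_include_source = True
#     wf_html_show_source_link = False
#     wf_formats = [('png', 100), 'svg', ('pdf', 200)]   # suffix or (suffix, dpi)
#     wf_working_directory = 'examples/'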
import sys, os, shutil, io, re, textwrap
from os.path import relpath
from errno import EEXIST
import traceback
missing_imports = []
try:
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
except ImportError as e:
missing_imports = [str(e)]
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError as e:
missing_imports.append(str(e))
try:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
missing_imports.pop()
except ImportError as e:
missing_imports.append(str(e))
from builtins import str, bytes
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
def _mkdirp(folder):
"""
Equivalent to bash's mkdir -p
"""
if sys.version_info > (3, 4, 1):
os.makedirs(folder, exist_ok=True)
return folder
try:
os.makedirs(folder)
except OSError as exc:
if exc.errno != EEXIST or not os.path.isdir(folder):
raise
return folder
def wf_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if len(missing_imports) == 0:
return run(arguments, content, options, state_machine, state, lineno)
else:
raise ImportError('\n'.join(missing_imports))
wf_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_graph2use(arg):
return directives.choice(arg, ('hierarchical', 'colored', 'flat', 'orig', 'exec'))
def _option_context(arg):
if arg in [None, 'reset', 'close-figs']:
return arg
raise ValueError("argument should be None or 'reset' or 'close-figs'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_wf_labels(app, document):
"""
To make graphs referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in list(document.nametypes.items()):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding,
'graph2use': _option_graph2use,
'simple_form': _option_boolean
}
app.add_directive('workflow', wf_directive, True, (0, 2, False), **options)
app.add_config_value('graph2use', 'hierarchical', 'html')
app.add_config_value('simple_form', True, 'html')
app.add_config_value('wf_pre_code', None, True)
app.add_config_value('wf_include_source', False, True)
app.add_config_value('wf_html_show_source_link', True, True)
app.add_config_value('wf_formats', ['png', 'svg', 'pdf'], True)
app.add_config_value('wf_basedir', None, True)
app.add_config_value('wf_html_show_formats', True, True)
app.add_config_value('wf_rcparams', {}, True)
app.add_config_value('wf_apply_rcparams', False, True)
app.add_config_value('wf_working_directory', None, True)
app.add_config_value('wf_template', None, True)
app.connect('doctree-read'.encode() if PY2 else 'doctree-read', mark_wf_labels)
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
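# A small worked example of the unescaping above (the workflow names are
# illustrative): the doctest-style text
#     >>> wf = create_workflow()
#     Workflow(example)
# is rewritten as
#     wf = create_workflow()
#     # Workflow(example)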
def remove_coding(text):
"""
Remove the coding comment, which exec doesn't like.
"""
    sub_re = re.compile(r"^#\s*-\*-\s*coding:\s*.*-\*-$", flags=re.MULTILINE)
return sub_re.sub("", text)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.{{ default_fmt }}
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. figure:: {{ build_dir }}/{{ img.basename }}.pdf
{% for option in options -%}
{{ option }}
{% endfor %}
{{ caption }}
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
wf_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, fmt):
return os.path.join(self.dirname, "%s.%s" % (self.basename, fmt))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
    Returns True if `derived` is out-of-date with respect to `original`,
both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class GraphError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
    Run the example code in the given namespace and, if function_name is
    not None, call the function of that name afterwards.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
pwd = str(os.getcwd())
old_sys_path = list(sys.path)
if setup.config.wf_working_directory is not None:
try:
os.chdir(setup.config.wf_working_directory)
except OSError as err:
            raise OSError(str(err) + '\n`wf_working_directory` option in '
'Sphinx configuration file must be a valid '
'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`wf_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.wf_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if PY3:
sys.stdout = io.StringIO()
else:
from cStringIO import StringIO
sys.stdout = StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.wf_pre_code is not None:
exec(str(setup.config.wf_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
exec("__name__ = '__main__'", ns)
code = remove_coding(code)
exec(code, ns)
if function_name is not None:
exec(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise GraphError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
def get_wf_formats(config):
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
wf_formats = config.wf_formats
if isinstance(wf_formats, (str, bytes)):
        # Sphinx < 1.3 returned a string; split on ',' to mimic
        # Sphinx 1.3 and later, which always returns a list.
wf_formats = wf_formats.split(',')
for fmt in wf_formats:
if isinstance(fmt, (str, bytes)):
if ':' in fmt:
suffix, dpi = fmt.split(':')
formats.append((str(suffix), int(dpi)))
else:
formats.append((fmt, default_dpi.get(fmt, 80)))
elif isinstance(fmt, (tuple, list)) and len(fmt) == 2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise GraphError('invalid image format "%r" in wf_formats' % fmt)
return formats
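# The wf_formats option is accepted in any of these equivalent forms (values
# here are illustrative):
#     wf_formats = ['png', 'hires.png', 'pdf']     # default dpi: 80/200/200
#     wf_formats = 'png:100,pdf:200'               # legacy comma-separated string
#     wf_formats = [('png', 100), ('pdf', 200)]    # explicit (suffix, dpi) pairs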
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, graph2use, simple_form,
context_reset=False, close_figs=False):
"""
Run a nipype workflow creation script and save the graph in *output_dir*.
Save the images under *output_dir* with file names derived from
*output_base*
"""
formats = get_wf_formats(config)
ns = wf_context if context else {}
if context_reset:
wf_context.clear()
run_code(code, code_path, ns, function_name)
img = ImageFile(output_base, output_dir)
for fmt, dpi in formats:
try:
img_path = img.filename(fmt)
imgname, ext = os.path.splitext(os.path.basename(img_path))
ns['wf'].base_dir = output_dir
src = ns['wf'].write_graph(imgname, format=ext[1:],
graph2use=graph2use,
simple_form=simple_form)
shutil.move(src, img_path)
except Exception as err:
raise GraphError(traceback.format_exc())
img.formats.append(fmt)
return [(code, [img])]
def run(arguments, content, options, state_machine, state, lineno):
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
formats = get_wf_formats(config)
default_fmt = formats[0][0]
graph2use = options.get('graph2use', 'hierarchical')
simple_form = options.get('simple_form', True)
options.setdefault('include-source', config.wf_include_source)
keep_context = 'context' in options
context_opt = None if not keep_context else options['context']
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.wf_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.wf_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join([str(c) for c in content]))
counter = document.attributes.get('_wf_counter', 0) + 1
document.attributes['_wf_counter'] = counter
base, _ = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'wf_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir) # no problem here for me, but just use built-ins
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
try:
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
except ValueError:
# on Windows, relpath raises ValueError when path and start are on
# different mounts/drives
build_dir_link = build_dir
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code,
source_file_name,
build_dir,
output_base,
keep_context,
function_name,
config,
graph2use,
simple_form,
context_reset=context_opt == 'reset',
close_figs=context_opt == 'close-figs')
errors = []
except GraphError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in list(options.items())
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.wf_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.wf_template or TEMPLATE,
default_fmt=default_fmt,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.wf_html_show_formats and len(images),
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
_mkdirp(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
| 34.698087
| 96
| 0.588645
|
c4ace5f516b33e937098b071133685702db7752f
| 6,705
|
py
|
Python
|
pyrsa/inference/boot_testset.py
|
kshitijd20/pyrsa
|
3090d88362b26e6b2ee807e62d483a0158530e2a
|
[
"MIT"
] | 4
|
2015-08-10T18:34:21.000Z
|
2018-05-15T20:43:15.000Z
|
pyrsa/inference/boot_testset.py
|
kshitijd20/pyrsa
|
3090d88362b26e6b2ee807e62d483a0158530e2a
|
[
"MIT"
] | null | null | null |
pyrsa/inference/boot_testset.py
|
kshitijd20/pyrsa
|
3090d88362b26e6b2ee807e62d483a0158530e2a
|
[
"MIT"
] | 2
|
2018-03-26T03:02:07.000Z
|
2021-11-10T21:09:48.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
bootstrap-testset evaluation methods
variants of taking a bootstrap sample and using the unsampled rdms and
patterns as a test set
"""
import numpy as np
from pyrsa.util.inference_util import input_check_model
from .bootstrap import bootstrap_sample
from .bootstrap import bootstrap_sample_rdm
from .bootstrap import bootstrap_sample_pattern
from .evaluate import crossval
def bootstrap_testset(model, data, method='cosine', fitter=None, N=1000,
pattern_descriptor=None, rdm_descriptor=None):
"""takes a bootstrap sample and evaluates on the rdms and patterns not
sampled
also returns the size of each test_set to allow later weighting
or selection if this is desired.
Args:
model(pyrsa.model.Model): Model to be evaluated
data(pyrsa.rdm.RDMs): RDM data to use
method(string): comparison method to use
fitter(function): fitting function
pattern_descriptor(string): descriptor to group patterns
rdm_descriptor(string): descriptor to group rdms
Returns:
numpy.ndarray: vector of evaluations of length N
numpy.ndarray: n_rdm for each test_set
numpy.ndarray: n_pattern for each test_set
"""
evaluations, _, fitter = input_check_model(model, None, fitter, N)
    n_rdm = np.zeros(N, dtype=int)
    n_pattern = np.zeros(N, dtype=int)
if pattern_descriptor is None:
data.pattern_descriptors['index'] = np.arange(data.n_cond)
pattern_descriptor = 'index'
if rdm_descriptor is None:
data.rdm_descriptors['index'] = np.arange(data.n_rdm)
rdm_descriptor = 'index'
for i_sample in range(N):
sample, rdm_idx, pattern_idx = bootstrap_sample(
data,
rdm_descriptor=rdm_descriptor,
pattern_descriptor=pattern_descriptor)
train_set = [[sample, pattern_idx]]
rdm_idx_test = data.rdm_descriptors[rdm_descriptor]
rdm_idx_test = np.setdiff1d(rdm_idx_test, rdm_idx)
pattern_idx_test = data.pattern_descriptors[pattern_descriptor]
pattern_idx_test = np.setdiff1d(pattern_idx_test, pattern_idx)
if len(pattern_idx_test) >= 3 and len(rdm_idx_test) >= 1:
rdms_test = data.subsample_pattern(pattern_descriptor,
pattern_idx_test)
rdms_test = rdms_test.subsample(rdm_descriptor, rdm_idx_test)
test_set = [[rdms_test, pattern_idx_test]]
evaluations[i_sample] = crossval(
model, data, train_set, test_set,
method=method, fitter=fitter,
pattern_descriptor=pattern_descriptor).evaluations[:, 0]
else:
evaluations[i_sample] = np.nan
n_rdm[i_sample] = len(rdm_idx_test)
n_pattern[i_sample] = len(pattern_idx_test)
return evaluations, n_rdm, n_pattern
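# The returned test-set sizes allow weighting the bootstrap evaluations
# afterwards. A minimal sketch (hypothetical helper, not part of pyrsa),
# assuming `evaluations` is the 1-D vector described in the docstring:
def _example_weighted_mean(evaluations, n_rdm, n_pattern):
    # weight each bootstrap sample by its test-set size, skipping empty or NaN samples
    weights = (n_rdm * n_pattern).astype(float)
    valid = ~np.isnan(evaluations) & (weights > 0)
    return np.sum(evaluations[valid] * weights[valid]) / np.sum(weights[valid])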
def bootstrap_testset_pattern(model, data, method='cosine', fitter=None,
N=1000, pattern_descriptor=None):
"""takes a bootstrap sample and evaluates on the patterns not
sampled
also returns the size of each test_set to allow later weighting
or selection if this is desired.
Args:
model(pyrsa.model.Model): Model to be evaluated
        data(pyrsa.rdm.RDMs): RDM data to use
method(string): comparison method to use
fitter(function): fitting function for the model
pattern_descriptor(string): descriptor to group patterns
Returns:
        numpy.ndarray: vector of evaluations of length N
numpy.ndarray: n_pattern for each test_set
"""
evaluations, _, fitter = input_check_model(model, None, fitter, N)
    n_pattern = np.zeros(N, dtype=int)
if pattern_descriptor is None:
data.pattern_descriptors['index'] = np.arange(data.n_cond)
pattern_descriptor = 'index'
for i_sample in range(N):
sample, pattern_idx = bootstrap_sample_pattern(
data, pattern_descriptor=pattern_descriptor)
train_set = [[sample, pattern_idx]]
pattern_idx_test = data.pattern_descriptors[pattern_descriptor]
pattern_idx_test = np.setdiff1d(pattern_idx_test, pattern_idx)
if len(pattern_idx_test) >= 3:
rdms_test = data.subsample_pattern(pattern_descriptor,
pattern_idx_test)
test_set = [[rdms_test, pattern_idx_test]]
evaluations[i_sample] = crossval(
model, data, train_set, test_set,
method=method, fitter=fitter,
pattern_descriptor=pattern_descriptor).evaluations[:, 0]
else:
evaluations[i_sample] = np.nan
n_pattern[i_sample] = len(pattern_idx_test)
return evaluations, n_pattern
def bootstrap_testset_rdm(model, data, method='cosine', fitter=None, N=1000,
rdm_descriptor=None):
"""takes a bootstrap sample and evaluates on the patterns not
sampled
also returns the size of each test_set to allow later weighting
or selection if this is desired.
Args:
model(pyrsa.model.Model): Model to be evaluated
datat(pyrsa.rdm.RDMs): RDM data to use
method(string): comparison method to use
fitter(function): fitting function for the model
pattern_descriptor(string): descriptor to group patterns
Returns:
numpy.ndarray: vector of evaluations of length
numpy.ndarray: n_pattern for each test_set
"""
evaluations, _, fitter = input_check_model(model, None, fitter, N)
    n_rdm = np.zeros(N, dtype=int)
if rdm_descriptor is None:
data.rdm_descriptors['index'] = np.arange(data.n_rdm)
rdm_descriptor = 'index'
data.pattern_descriptors['index'] = np.arange(data.n_cond)
pattern_descriptor = 'index'
for i_sample in range(N):
sample, rdm_idx = bootstrap_sample_rdm(
data, rdm_descriptor=rdm_descriptor)
pattern_idx = np.arange(data.n_cond)
train_set = [[sample, pattern_idx]]
rdm_idx_test = data.rdm_descriptors[rdm_descriptor]
rdm_idx_test = np.setdiff1d(rdm_idx_test, rdm_idx)
if len(rdm_idx_test) >= 1:
rdms_test = data.subsample(rdm_descriptor, rdm_idx_test)
test_set = [[rdms_test, pattern_idx]]
evaluations[i_sample] = crossval(
model, data, train_set, test_set,
method=method, fitter=fitter,
pattern_descriptor=pattern_descriptor).evaluations[:, 0]
else:
evaluations[i_sample] = np.nan
n_rdm[i_sample] = len(rdm_idx_test)
return evaluations, n_rdm
| 41.645963
| 76
| 0.664579
|
fc035ae32c7c0c7522070444094992e73998f44b
| 12,538
|
py
|
Python
|
coremltools/converters/tensorflow/_tf_converter.py
|
Maxkvy33/coremltools
|
1afb9cb567a8a608b73ed53315319e97d50b85d0
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/tensorflow/_tf_converter.py
|
Maxkvy33/coremltools
|
1afb9cb567a8a608b73ed53315319e97d50b85d0
|
[
"BSD-3-Clause"
] | null | null | null |
coremltools/converters/tensorflow/_tf_converter.py
|
Maxkvy33/coremltools
|
1afb9cb567a8a608b73ed53315319e97d50b85d0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2019, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os.path
from ...models import MLModel
def convert(filename,
inputs=None,
outputs=None,
image_input_names=None,
tf_image_format=None,
is_bgr=False,
red_bias=0.0,
green_bias=0.0,
blue_bias=0.0,
gray_bias=0.0,
image_scale=1.0,
class_labels=None,
predicted_feature_name=None,
predicted_probabilities_output='',
add_custom_layers=False, # type: bool
custom_conversion_functions=None, # type: dict{text, any}
custom_shape_functions=None, # type: dict{text, any}
**kwargs):
"""
Convert TensorFlow model to Core ML format.
Parameters
----------
filename: str
Path to the TensorFlow model. Takes in one of the following formats:
- TensorFlow frozen graph (.pb) model file name
- TensorFlow tf.keras HDF5 (.h5) model file name
- TensorFlow SavedModel directory path
- TensorFlow concrete functions(s)
inputs: dict(str: list or tuple)
Model input name and shape pairs.
outputs: [str]
Model output names.
image_input_names: [str] | str
Input names (a subset of the keys of inputs)
that can be treated as images by Core ML. All other inputs
are treated as MultiArrays.
tf_image_format: str
Optional and valid if image_input_names is also set. Specify either 'NCHW' or 'NHWC' to set or
override the image format. If not set, tries to use hints from the graph which may be present in convolution or
other image-specific layers. Ultimately defaults to NHWC.
    is_bgr: bool | dict():
        Flag indicating the channel order the model expects for image input(s): set to True
        if BGR order is expected, otherwise RGB is assumed. Defaults to False.
        Applicable only if image_input_names is specified.
        To specify different values for each image input provide a dictionary with input names as keys and booleans as values.
red_bias: float | dict()
Bias value to be added to the red channel of the input image, after applying scale.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
blue_bias: float | dict()
Bias value to be added to the blue channel of the input image, after applying scale.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
green_bias: float | dict()
Bias value to be added to the green channel of the input image, after applying scale.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
gray_bias: float | dict()
Bias value to be added to the input image (in grayscale), after applying scale.
Defaults to 0.0
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
image_scale: float | dict()
Value by which input images will be scaled before bias is added and
Core ML model makes a prediction. Defaults to 1.0.
Applicable only if image_input_names is specified.
To specify different values for each image input provide a dictionary with input names as keys.
class_labels: list[int or str] | str
Class labels (applies to classifiers only) that map the index of the
output of a neural network to labels in a classifier.
If the provided class_labels is a string, it is assumed to be a
file path where classes are parsed as a list of newline separated
strings.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
predicted_probabilities_output: str
Name of the neural network output to be interpreted as the predicted
probabilities of the resulting classes. Typically the output of a
softmax function.
add_custom_layers: bool
Flag to turn on addition of custom CoreML layers for unsupported TF ops or attributes within
a supported op.
custom_conversion_functions: dict(): {Text: func(**kwargs)}
Argument to provide user-defined functions for converting Tensorflow operations (op, for short).
A dictionary with keys corresponding to the names or types of the TF ops and values as handle to user-defined functions.
        The keys can be either the type of the op or the name of the op. If the former, the function is called whenever an op
        of that type is encountered during conversion. By using op names, specific ops can be targeted, which is
        useful for handling an unsupported configuration in an op.
The function receives multiple arguments: TF operation, the CoreML Neural network builder object,
dictionary containing the op's inputs that are constants and their values (as numpy arrays).
The function can add custom layers or any other combination of CoreML layers to translate the TF op.
See "examples/custom_layer_examples.ipynb" jupyter-notebook for examples on using this argument.
custom_shape_functions: dict(): {Text: func()}
        Argument to provide user-defined functions to compute the output shape for a given op.
        A dictionary with keys corresponding to the type of the TF op and values as handles to user-defined functions.
        The function receives the `layer specification` and `input shape` as input and
        must return the output shape for the given op (generally a list).
        A custom shape function is required for adding a custom layer in Core ML 3.
Returns
-------
model: MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. code-block:: python
import coremltools
from tensorflow.keras.applications import ResNet50
model = coremltools.converters.tensorflow.convert(
'./model.h5',
inputs={'input_1': (1, 224, 224, 3)},
outputs=['Identity']
)
For more examples, see: https://github.com/apple/coremltools/blob/master/docs/NeuralNetworkGuide.md
"""
use_cpu_only = kwargs.get('use_cpu_only')
use_cpu_only = use_cpu_only if use_cpu_only is not None else False
optional_inputs = kwargs.get('optional_inputs')
optional_inputs = optional_inputs if optional_inputs is not None else []
# `tf_model_path` takes in one of the following formats:
# 1) TensorFlow frozen graph (.pb) model file name
# 2) TensorFlow tf.keras HDF5 (.h5) model file name
# 3) TensorFlow SavedModel directory path
# 4) TensorFlow concrete functions(s)
invalid_filename_message = ('invalid input tf_model_path: {}!\n'
'Supported tf_model_path input format includes:\n'
'- Path to TensorFlow frozen graph (.pb) file\n'
'- Path to TensorFlow tf.keras model (.h5) file\n'
'- Path to TensorFlow SavedModel directory\n'
'- List of TensorFlow concrete functions'.format(filename))
if isinstance(filename, str) and not os.path.exists(filename):
raise ValueError('invalid input tf_model_path \'{}\' does not exist.'.format(filename))
if isinstance(filename, str) and os.path.isfile(filename):
# path to the model file must end with either .pb or .h5 format
if not (filename.endswith('.pb') or filename.endswith('.h5')):
raise ValueError(invalid_filename_message)
if filename.endswith('.h5'):
filename = _graph_def_from_saved_model_or_keras_model(filename)
elif isinstance(filename, str) and os.path.isdir(filename):
filename = _graph_def_from_saved_model_or_keras_model(filename)
elif isinstance(filename, list):
filename = _graph_def_from_concrete_function(filename)
else:
raise ValueError(invalid_filename_message)
# convert from TensorFlow to SSA IR
from ..nnssa.frontend.tensorflow import load as frontend_load
ssa = frontend_load(filename, resume_on_errors=False, inputs=inputs, outputs=outputs, **kwargs)
# convert from SSA IR to Core ML
from ..nnssa.coreml.ssa_converter import ssa_convert
model_spec = ssa_convert(ssa,
top_func='main',
inputs=inputs,
outputs=outputs,
image_input_names=image_input_names,
image_format=tf_image_format,
is_bgr=is_bgr,
red_bias=red_bias,
green_bias=green_bias,
blue_bias=blue_bias,
gray_bias=gray_bias,
image_scale=image_scale,
class_labels=class_labels,
predicted_feature_name=predicted_feature_name,
predicted_probabilities_output=predicted_probabilities_output,
add_custom_layers=add_custom_layers,
custom_conversion_functions=custom_conversion_functions,
custom_shape_functions=custom_shape_functions,
optional_inputs=optional_inputs)
return MLModel(model_spec, useCPUOnly=use_cpu_only)
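# An additional usage sketch for image inputs (the file name, tensor names and
# preprocessing values below are illustrative assumptions):
#
#     mlmodel = convert('./mobilenet.pb',
#                       inputs={'input': (1, 224, 224, 3)},
#                       outputs=['MobilenetV2/Predictions/Reshape_1'],
#                       image_input_names='input',
#                       is_bgr=False,
#                       red_bias=-1.0, green_bias=-1.0, blue_bias=-1.0,
#                       image_scale=2.0 / 255.0,
#                       class_labels='labels.txt')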
def _graph_def_from_saved_model_or_keras_model(filename):
"""
Utility function that returns GraphDef object from the given SavedModel or HDF5 model.
:param filename: TensorFlow SavedModel directory or Keras HDF5 model (.h5) file.
:return: TensorFlow GraphDef object.
"""
try:
import tensorflow as tf
from tensorflow.python.keras.saving import saving_utils as _saving_utils
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
if filename.endswith('.h5'):
model = tf.keras.models.load_model(filename)
tf.keras.backend.set_learning_phase(False)
func = _saving_utils.trace_model_call(model)
concrete_func = func.get_concrete_function()
else:
model = tf.saved_model.load(filename)
signatures = model.signatures
if len(signatures) == 0:
raise ValueError('Unable to load a model with no signatures provided.')
if len(signatures) >= 2:
raise ValueError('Unable to load a model with multiple signatures')
concrete_func = list(signatures.values())[0]
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(concrete_func)
graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
except ImportError as e:
raise ImportError('Failed to import TensorFlow utilities. {}.'.format(e))
except ValueError as e:
raise ValueError('Failed to load SavedModel or .h5 model. {}.'.format(e))
except Exception as e:
raise RuntimeError('Failed to load SavedModel or .h5 model. {}.'.format(e))
return graph_def
def _graph_def_from_concrete_function(concrete_functions):
"""
Utility function that returns GraphDef object from the given concrete functions.
:param concrete_functions: list of TensorFlow concrete functions.
:return: TensorFlow GraphDef object.
"""
if len(concrete_functions) != 1:
raise ValueError('This converter can only convert a single ConcreteFunction.')
try:
import tensorflow as tf
from tensorflow.python.framework import convert_to_constants as _convert_to_constants
from tensorflow.python.eager import function as _function
frozen_func = _convert_to_constants.convert_variables_to_constants_v2(concrete_functions[0])
graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
except ImportError as e:
raise ImportError('Failed to import TensorFlow utilities. {}.'.format(e))
except Exception as e:
raise RuntimeError('Failed to load concrete functions(s). {}.'.format(e))
return graph_def
| 48.785992
| 126
| 0.67435
|
fe2b5d5819e58f67913b8f20e949f19670416bee
| 6,563
|
py
|
Python
|
tests/python/pants_test/backend/python/tasks2/test_resolve_requirements.py
|
mateor/pants
|
e01cee8959da269c0b526138760847901e4d4a48
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/python/tasks2/test_resolve_requirements.py
|
mateor/pants
|
e01cee8959da269c0b526138760847901e4d4a48
|
[
"Apache-2.0"
] | null | null | null |
tests/python/pants_test/backend/python/tasks2/test_resolve_requirements.py
|
mateor/pants
|
e01cee8959da269c0b526138760847901e4d4a48
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import subprocess
from pex.interpreter import PythonInterpreter
from pants.backend.python.interpreter_cache import PythonInterpreterCache
from pants.backend.python.python_requirement import PythonRequirement
from pants.backend.python.python_setup import PythonRepos, PythonSetup
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks2.resolve_requirements import ResolveRequirements
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_file
from pants_test.tasks.task_test_base import TaskTestBase
class ResolveRequirementsTest(TaskTestBase):
@classmethod
def task_type(cls):
return ResolveRequirements
def test_resolve_simple_requirements(self):
noreqs_tgt = self._fake_target('noreqs', [])
ansicolors_tgt = self._fake_target('ansicolors', ['ansicolors==1.0.2'])
# Check that the module is unavailable unless specified as a requirement (proves that
# the requirement isn't sneaking in some other way, which would render the remainder
# of this test moot.)
_, stderr_data = self._exercise_module(self._resolve_requirements([noreqs_tgt]), 'colors')
self.assertIn('ImportError: No module named colors', stderr_data)
# Check that the module is available if specified as a requirement.
stdout_data, stderr_data = self._exercise_module(self._resolve_requirements([ansicolors_tgt]),
'colors')
self.assertEquals('', stderr_data.strip())
path = stdout_data.strip()
# Check that the requirement resolved to what we expect.
self.assertTrue(path.endswith('/.deps/ansicolors-1.0.2-py2-none-any.whl/colors.py'))
# Check that the path is under the test's build root, so we know the pex was created there.
self.assertTrue(path.startswith(os.path.realpath(get_buildroot())))
def test_resolve_multiplatform_requirements(self):
cffi_tgt = self._fake_target('cffi', ['cffi==1.9.1'])
pex = self._resolve_requirements([cffi_tgt], {
'python-setup': {
# We have 'current' so we can import the module in order to get the path to it.
# The other platforms (one of which may happen to be the same as current) are what we
# actually test the presence of.
'platforms': ['current', 'macosx-10.10-x86_64', 'manylinux1_i686', 'win_amd64']
}
})
stdout_data, stderr_data = self._exercise_module(pex, 'cffi')
self.assertEquals('', stderr_data.strip())
path = stdout_data.strip()
wheel_dir = os.path.join(path[0:path.find('{sep}.deps{sep}'.format(sep=os.sep))], '.deps')
wheels = set(os.listdir(wheel_dir))
def name_and_platform(whl):
# The wheel filename is of the format
# {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl
# See https://www.python.org/dev/peps/pep-0425/.
# We don't care about the python or abi versions (they depend on what we're currently
# running on), we just want to make sure we have all the platforms we expect.
parts = os.path.splitext(whl)[0].split('-')
return '{}-{}'.format(parts[0], parts[1]), parts[-1]
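    # For example (illustrative filename):
    #   name_and_platform('cffi-1.9.1-cp27-cp27mu-manylinux1_i686.whl')
    #   == ('cffi-1.9.1', 'manylinux1_i686')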
names_and_platforms = set(name_and_platform(w) for w in wheels)
expected_name_and_platforms = {
# Note that we don't check for 'current' because if there's no published wheel for the
# current platform we may end up with a wheel for a compatible platform (e.g., if there's no
# wheel for macosx_10_11_x86_64, 'current' will be satisfied by macosx_10_10_x86_64).
# This is technically also true for the hard-coded platforms we list below, but we chose
# those and we happen to know that cffi wheels exist for them. Whereas we have no such
# advance knowledge for the current platform, whatever that might be in the future.
('cffi-1.9.1', 'macosx_10_10_x86_64'),
('cffi-1.9.1', 'manylinux1_i686'),
('cffi-1.9.1', 'win_amd64'),
}
# pycparser is a dependency of cffi only on CPython. We might as well check for it,
# as extra verification that we correctly fetch transitive dependencies.
if PythonInterpreter.get().identity.interpreter == 'CPython':
expected_name_and_platforms.add(('pycparser-2.17', 'any'))
self.assertTrue(expected_name_and_platforms.issubset(names_and_platforms),
'{} is not a subset of {}'.format(expected_name_and_platforms,
names_and_platforms))
# Check that the path is under the test's build root, so we know the pex was created there.
self.assertTrue(path.startswith(os.path.realpath(get_buildroot())))
def _fake_target(self, spec, requirement_strs):
requirements = [PythonRequirement(r) for r in requirement_strs]
return self.make_target(spec=spec, target_type=PythonRequirementLibrary,
requirements=requirements)
def _resolve_requirements(self, target_roots, options=None):
context = self.context(target_roots=target_roots, options=options)
# We must get an interpreter via the cache, instead of using PythonInterpreter.get() directly,
# to ensure that the interpreter has setuptools and wheel support.
interpreter = PythonInterpreter.get()
interpreter_cache = PythonInterpreterCache(PythonSetup.global_instance(),
PythonRepos.global_instance(),
logger=context.log.debug)
interpreters = interpreter_cache.setup(paths=[os.path.dirname(interpreter.binary)],
filters=[str(interpreter.identity.requirement)])
context.products.get_data(PythonInterpreter, lambda: interpreters[0])
task = self.create_task(context)
task.execute()
return context.products.get_data(ResolveRequirements.REQUIREMENTS_PEX)
def _exercise_module(self, pex, expected_module):
with temporary_file() as f:
f.write('import {m}; print({m}.__file__)'.format(m=expected_module))
f.close()
proc = pex.run(args=[f.name], blocking=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return proc.communicate()
| 50.099237
| 98
| 0.70227
|
f6f9b1ad43cf0f02b97b2530c202c642b6b067a5
| 5,718
|
py
|
Python
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/services/list_management/list_items_created_event_request.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | 1
|
2019-02-04T21:07:06.000Z
|
2019-02-04T21:07:06.000Z
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/services/list_management/list_items_created_event_request.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | 9
|
2020-03-24T16:32:57.000Z
|
2022-03-11T23:37:22.000Z
|
lambda/us-east-1_Numbers_Trivia/ask_sdk_model/services/list_management/list_items_created_event_request.py
|
Techievena/Numbers_Trivia
|
e86daaf7e7bc2c80c703c8496daea6317e986204
|
[
"MIT"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_sdk_model.request import Request
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional
from datetime import datetime
from ask_sdk_model.services.list_management.list_item_body import ListItemBody
class ListItemsCreatedEventRequest(Request):
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param body:
:type body: (optional) ask_sdk_model.services.list_management.list_item_body.ListItemBody
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
deserialized_types = {
'object_type': 'str',
'request_id': 'str',
'timestamp': 'datetime',
'locale': 'str',
'body': 'ask_sdk_model.services.list_management.list_item_body.ListItemBody',
'event_creation_time': 'datetime',
'event_publishing_time': 'datetime'
}
attribute_map = {
'object_type': 'type',
'request_id': 'requestId',
'timestamp': 'timestamp',
'locale': 'locale',
'body': 'body',
'event_creation_time': 'eventCreationTime',
'event_publishing_time': 'eventPublishingTime'
}
def __init__(self, request_id=None, timestamp=None, locale=None, body=None, event_creation_time=None, event_publishing_time=None):
# type: (Optional[str], Optional[datetime], Optional[str], Optional[ListItemBody], Optional[datetime], Optional[datetime]) -> None
"""
:param request_id: Represents the unique identifier for the specific request.
:type request_id: (optional) str
:param timestamp: Provides the date and time when Alexa sent the request as an ISO 8601 formatted string. Used to verify the request when hosting your skill as a web service.
:type timestamp: (optional) datetime
:param locale: A string indicating the user’s locale. For example: en-US. This value is only provided with certain request types.
:type locale: (optional) str
:param body:
:type body: (optional) ask_sdk_model.services.list_management.list_item_body.ListItemBody
:param event_creation_time:
:type event_creation_time: (optional) datetime
:param event_publishing_time:
:type event_publishing_time: (optional) datetime
"""
self.__discriminator_value = "AlexaHouseholdListEvent.ItemsCreated"
self.object_type = self.__discriminator_value
super(ListItemsCreatedEventRequest, self).__init__(object_type=self.__discriminator_value, request_id=request_id, timestamp=timestamp, locale=locale)
self.body = body
self.event_creation_time = event_creation_time
self.event_publishing_time = event_publishing_time
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, ListItemsCreatedEventRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
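# Illustrative usage of this generated model class (values are hypothetical):
#
#     request = ListItemsCreatedEventRequest(request_id='amzn1.echo-api.request.1',
#                                            locale='en-US')
#     request.to_dict()['object_type']   # -> 'AlexaHouseholdListEvent.ItemsCreated'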
| 39.708333
| 182
| 0.648478
|
d5afeec68f8b92334745f5ce1a8fbd56a1e40a3a
| 204
|
py
|
Python
|
states.py
|
Kvm99/Pressure-Bot-
|
4dc5c850f12265dd89150891c2e383c9c32d421f
|
[
"BSD-3-Clause"
] | null | null | null |
states.py
|
Kvm99/Pressure-Bot-
|
4dc5c850f12265dd89150891c2e383c9c32d421f
|
[
"BSD-3-Clause"
] | 208
|
2019-12-06T12:48:58.000Z
|
2022-03-28T21:10:35.000Z
|
states.py
|
Kvm99/Telegram-Pressurebot
|
4dc5c850f12265dd89150891c2e383c9c32d421f
|
[
"BSD-3-Clause"
] | null | null | null |
class States:
START = 1
AGE = 2
SEX = 3
WEIGHT = 4
ADD_PRESSURE = 5
ARM = 6
PRESSURE = 7
GRAPH_FOR_PERIOD = 8
SET_TIMER = 9
REMOVE_TIMER = 10
START_BUTTON = 11
| 15.692308
| 24
| 0.553922
|
9fbb317c844d1c738200ab42b2c350ee1c6f9d61
| 4,529
|
py
|
Python
|
jicbioimage/core/util/array.py
|
JIC-CSB/jicbioimage.core
|
8a836b3af7c51926b2d18948babec593cf35ce55
|
[
"MIT"
] | 1
|
2017-07-11T10:51:22.000Z
|
2017-07-11T10:51:22.000Z
|
jicbioimage/core/util/array.py
|
JIC-CSB/jicbioimage.core
|
8a836b3af7c51926b2d18948babec593cf35ce55
|
[
"MIT"
] | null | null | null |
jicbioimage/core/util/array.py
|
JIC-CSB/jicbioimage.core
|
8a836b3af7c51926b2d18948babec593cf35ce55
|
[
"MIT"
] | null | null | null |
"""Module containing utility functions for manipulating numpy arrays."""
import sys
from functools import wraps
import random
import numpy as np
from jicbioimage.core.util.color import pretty_color_palette, unique_color_palette
def normalise(array):
"""Return array normalised such that all values are between 0 and 1.
If all the values in the array are the same the function will return:
- np.zeros(array.shape, dtype=np.float) if the value is 0 or less
- np.ones(array.shape, dtype=np.float) if the value is greater than 0
:param array: numpy.array
:returns: numpy.array.astype(numpy.float)
"""
min_val = array.min()
max_val = array.max()
array_range = max_val - min_val
if array_range == 0:
# min_val == max_val
if min_val > 0:
return np.ones(array.shape, dtype=np.float)
return np.zeros(array.shape, dtype=np.float)
return (array.astype(np.float) - min_val) / array_range
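# A small worked example (illustrative input): normalise(np.array([0, 5, 10]))
# returns array([0. , 0.5, 1. ]), i.e. values shifted by the minimum and
# scaled by the range max - min = 10.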
def reduce_stack(array3D, z_function):
"""Return 2D array projection of the input 3D array.
    The z_function is applied along the z-axis for each (x, y) position.
:param array3D: 3D numpy.array
:param z_function: function to use for the projection (e.g. :func:`max`)
"""
xmax, ymax, _ = array3D.shape
projection = np.zeros((xmax, ymax), dtype=array3D.dtype)
for x in range(xmax):
for y in range(ymax):
projection[x, y] = z_function(array3D[x, y, :])
return projection
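# For example, a maximum-intensity projection can be produced by passing the
# built-in max as the z_function (illustrative usage):
#
#     projection = reduce_stack(stack, max)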
def map_stack(array3D, z_function):
"""Return 3D array where each z-slice has had the function applied to it.
:param array3D: 3D numpy.array
:param z_function: function to be mapped to each z-slice
"""
_, _, zdim = array3D.shape
return np.dstack([z_function(array3D[:, :, z]) for z in range(zdim)])
def check_dtype(array, allowed):
"""Raises TypeError if the array is not of an allowed dtype.
:param array: array whose dtype is to be checked
:param allowed: instance or list of allowed dtypes
:raises: TypeError
"""
if not hasattr(allowed, "__iter__"):
allowed = [allowed, ]
if array.dtype not in allowed:
msg = "Invalid dtype {}. Allowed dtype(s): {}"
raise(TypeError(msg.format(array.dtype, allowed)))
def dtype_contract(input_dtype=None, output_dtype=None):
"""Function decorator for specifying input and/or output array dtypes.
:param input_dtype: dtype of input array
:param output_dtype: dtype of output array
:returns: function decorator
"""
def wrap(function):
@wraps(function)
def wrapped_function(*args, **kwargs):
if input_dtype is not None:
check_dtype(args[0], input_dtype)
array = function(*args, **kwargs)
if output_dtype is not None:
check_dtype(array, output_dtype)
return array
return wrapped_function
return wrap
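# A minimal usage sketch of the decorator (hypothetical function, assuming a
# uint8 mask in and a float64 image out):
#
#     @dtype_contract(input_dtype=np.uint8, output_dtype=np.float64)
#     def to_float(mask):
#         return mask.astype(np.float64)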
def color_array(array, color_dict):
"""Return RGB color array.
    Assign a unique RGB color value to each unique element of the input
    array and return an array of shape (array.shape, 3).
:param array: input numpy.array
:param color_dict: dictionary with keys/values corresponding to identifiers
and RGB tuples respectively
"""
output_array = np.zeros(array.shape + (3,), np.uint8)
unique_identifiers = set(np.unique(array))
for identifier in unique_identifiers:
output_array[np.where(array == identifier)] = color_dict[identifier]
return output_array
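# For example (illustrative label image and palette), a binary segmentation can
# be colored with a black background and a red foreground:
#
#     rgb = color_array(segmentation, {0: (0, 0, 0), 1: (255, 0, 0)})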
def pretty_color_array(array, keep_zero_black=True):
"""Return a RGB pretty color array.
    Assign a pretty RGB color value to each unique element of the input
    array and return an array of shape (array.shape, 3).
:param array: input numpy.array
:param keep_zero_black: whether or not the background should be black
:returns: numpy.array
"""
unique_identifiers = set(np.unique(array))
color_dict = pretty_color_palette(unique_identifiers, keep_zero_black)
return color_array(array, color_dict)
def unique_color_array(array):
"""Return a RGB unique color array.
    Assign a unique RGB color value to each unique element of the input
    array and return an array of shape (array.shape, 3).
:param array: input numpy.array
:returns: numpy.array
"""
unique_identifiers = set(np.unique(array))
color_dict = unique_color_palette(unique_identifiers)
return color_array(array, color_dict)
| 32.582734
| 82
| 0.682049
|
286285655c90b73509d61ec047ab55d795b22ece
| 3,375
|
py
|
Python
|
playbooks/robusta_playbooks/cpu_throttling.py
|
pavangudiwada/robusta
|
cc1cb8a2e198f404e275a3947cf64e9f700f56f4
|
[
"MIT"
] | 273
|
2021-12-28T20:48:48.000Z
|
2022-03-31T16:03:13.000Z
|
playbooks/robusta_playbooks/cpu_throttling.py
|
pavangudiwada/robusta
|
cc1cb8a2e198f404e275a3947cf64e9f700f56f4
|
[
"MIT"
] | 103
|
2022-01-10T11:45:47.000Z
|
2022-03-31T16:31:11.000Z
|
playbooks/robusta_playbooks/cpu_throttling.py
|
pavangudiwada/robusta
|
cc1cb8a2e198f404e275a3947cf64e9f700f56f4
|
[
"MIT"
] | 35
|
2021-12-30T15:30:14.000Z
|
2022-03-28T11:43:57.000Z
|
from robusta.api import *
@action
def cpu_throttling_analysis_enricher(event: PodEvent):
"""
Enrich the finding with a deep analysis for the cause of the CPU throttling.
Includes recommendations for the identified cause.
"""
pod = event.get_pod()
if not pod:
logging.error(f"cannot run CPUThrottlingAnalysis on event with no pod: {event}")
return
if pod.metadata.name.startswith("metrics-server-") and pod.has_toleration(
"components.gke.io/gke-managed-components"
):
logging.info(
"ignoring cpu throttling for GKE because there is nothing you can do about it"
)
event.stop_processing = True
elif pod.metadata.name.startswith("metrics-server-") and pod.has_cpu_limit():
event.add_enrichment(
[
MarkdownBlock(
"*Alert Explanation:* This alert is likely due to a known issue with metrics-server. "
"<https://github.com/kubernetes/autoscaler/issues/4141|The default metrics-server deployment has cpu "
"limits which are too low.>"
),
MarkdownBlock(
"*Robusta's Recommendation:* Increase the CPU limit for the metrics-server deployment. Note that "
"metrics-server does *not* respect normal cpu limits. For instructions on fixing this issue, see the "
"<https://github.com/robusta-dev/alert-explanations/wiki/CPUThrottlingHigh-on-metrics-server-(Prometheus-alert)|Robusta wiki>."
),
],
annotations={SlackAnnotations.UNFURL: False},
)
elif pod.has_cpu_limit():
# TODO: ideally we would check if there is a limit on the specific container which is triggering the alert
event.add_enrichment(
[
MarkdownBlock(
"*Alert Explanation:* This pod is throttled. It wanted to use the CPU and was blocked due to "
"its CPU limit. This can occur even when CPU usage is far below the limit."
"(<https://github.com/robusta-dev/alert-explanations/wiki/CPUThrottlingHigh-"
"(Prometheus-Alert)|Learn more.>)"
),
MarkdownBlock(
"*Robusta's Recommendation:* Remove this pod's CPU limit entirely. <https://github.com/robusta-dev/"
"alert-explanations/wiki/CPUThrottlingHigh-(Prometheus-Alert)#:~:text=relatively%20accurate%20one-,"
"Explanation,-As%20long%20as|Despite common misconceptions, using CPU limits is *not* a best "
"practice.>"
),
],
annotations={SlackAnnotations.UNFURL: False},
)
else:
event.add_enrichment(
[
MarkdownBlock(
"*Alert Explanation:* This pod is throttled because it is using more CPU than its request and the "
"node doesn't have spare CPU to give. Increase the pod's CPU request. This will impact Kubernetes' "
"scheduling decisions and guarantee the pod is placed on a node with sufficient CPU to match the "
"new request."
)
],
annotations={SlackAnnotations.UNFURL: False},
)
| 46.875
| 147
| 0.592296
|
779997555a158f9bfd174d9efd0f3f530524b0c6
| 1,550
|
py
|
Python
|
zerver/migrations/0032_verify_all_medium_avatar_images.py
|
sophie200/zulip
|
c685d368215bbc3d85d7c40f2c1e6fa95e53186a
|
[
"Apache-2.0"
] | 2
|
2021-02-02T01:29:32.000Z
|
2021-02-02T01:30:51.000Z
|
zerver/migrations/0032_verify_all_medium_avatar_images.py
|
sophie200/zulip
|
c685d368215bbc3d85d7c40f2c1e6fa95e53186a
|
[
"Apache-2.0"
] | 1
|
2021-01-07T15:28:54.000Z
|
2021-01-08T15:38:45.000Z
|
zerver/migrations/0032_verify_all_medium_avatar_images.py
|
sophie200/zulip
|
c685d368215bbc3d85d7c40f2c1e6fa95e53186a
|
[
"Apache-2.0"
] | 1
|
2020-12-03T17:08:44.000Z
|
2020-12-03T17:08:44.000Z
|
import hashlib
from unittest.mock import patch
from django.conf import settings
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from zerver.lib.upload import upload_backend
from zerver.lib.utils import make_safe_digest
from zerver.models import UserProfile
# We hackishly patch this function in order to revert it to the state
# it had when this migration was first written. This is a balance
# between copying in a historical version of hundreds of lines of code
# from zerver.lib.upload (which would be pretty annoying) and just using the
# current version, which doesn't work
# since we rearranged the avatars in Zulip 1.6.
def patched_user_avatar_path(user_profile: UserProfile) -> str:
email = user_profile.email
user_key = email.lower() + settings.AVATAR_SALT
return make_safe_digest(user_key, hashlib.sha1)
@patch('zerver.lib.upload.user_avatar_path', patched_user_avatar_path)
def verify_medium_avatar_image(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
user_profile_model = apps.get_model('zerver', 'UserProfile')
for user_profile in user_profile_model.objects.filter(avatar_source="U"):
upload_backend.ensure_medium_avatar_image(user_profile)
class Migration(migrations.Migration):
dependencies = [
('zerver', '0031_remove_system_avatar_source'),
]
operations = [
migrations.RunPython(verify_medium_avatar_image, elidable=True),
]
| 37.804878
| 93
| 0.785161
|
3ba62c7973ec30c2a884a854b55c37220151aba9
| 609
|
py
|
Python
|
src/playground/migrations/0001_initial.py
|
malithbc/Mole-AR-Stage1
|
0776483fa79a2452d1b2b93cfc06291ce713c7c0
|
[
"MIT"
] | null | null | null |
src/playground/migrations/0001_initial.py
|
malithbc/Mole-AR-Stage1
|
0776483fa79a2452d1b2b93cfc06291ce713c7c0
|
[
"MIT"
] | null | null | null |
src/playground/migrations/0001_initial.py
|
malithbc/Mole-AR-Stage1
|
0776483fa79a2452d1b2b93cfc06291ce713c7c0
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2021-07-11 03:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100)),
('last_name', models.CharField(max_length=100)),
('address', models.TextField()),
],
),
]
| 25.375
| 117
| 0.569787
|
ff68fbaece0899c7e06fcd6e4edc48a43efa7bf7
| 47,595
|
py
|
Python
|
rumi/io/demand.py
|
prayas-energy/Rumi
|
fd7f0cf4eba08b4efa1a643217caff7f876fb394
|
[
"Apache-2.0"
] | 1
|
2021-11-24T07:22:15.000Z
|
2021-11-24T07:22:15.000Z
|
rumi/io/demand.py
|
prayas-energy/Rumi
|
fd7f0cf4eba08b4efa1a643217caff7f876fb394
|
[
"Apache-2.0"
] | null | null | null |
rumi/io/demand.py
|
prayas-energy/Rumi
|
fd7f0cf4eba08b4efa1a643217caff7f876fb394
|
[
"Apache-2.0"
] | 1
|
2022-02-09T11:11:35.000Z
|
2022-02-09T11:11:35.000Z
|
# Copyright 2021 Prayas Energy Group(https://www.prayaspune.org/peg/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""demand io layer. mainly data loading and validation functions.
Functions and variables from this module are available in Demand.yml
for validation.
A few functions are also used by the processing layer.
Important note: some functions are imported here but not used directly;
they are made available for yaml validation. All definitions (including
those imported from other modules) from this module are available in
yaml validation.
"""
import csv
import functools
import itertools
import os
import logging
from rumi.io import functionstore as fs
from rumi.io import loaders
from rumi.io import filemanager
from rumi.io import config
from rumi.io import constant
from rumi.io import common
from rumi.io import utilities
import pandas as pd
from rumi.io.common import balancing_area, balancing_time
from rumi.io.utilities import check_consumer_validity
from rumi.io.utilities import check_geographic_validity
from rumi.io.utilities import check_time_validity
from rumi.io.multiprocessutils import execute_in_process_pool
logger = logging.getLogger(__name__)
def get_consumer_levels(ds):
"""get number of consumer levels defined
for given demand sector
Parameters
----------
ds: str
Demand sector name
Returns
-------
1 or 2
"""
DS_Cons1_Map = loaders.get_parameter("DS_Cons1_Map")
type1 = DS_Cons1_Map[ds][-1]
Cons1_Cons2_Map = loaders.get_parameter("Cons1_Cons2_Map")
if Cons1_Cons2_Map and Cons1_Cons2_Map.get(type1, None):
return 2
return 1
def get_cons_columns(ds):
"""get maximum consumer columns for given demand sector
Parameters
-----------
ds: str
Demand sector name
Returns
-------
a list of consumer columns for given demand sector
"""
return list(constant.CONSUMER_TYPES[:get_consumer_levels(ds)])
def get_consumer_granularity(ds, specified_gran):
"""Converts CONSUMERALL to actual granularity
Parameters
-----------
    ds: str
        Demand sector name
    specified_gran: str
        Granularity as specified in the input (possibly CONSUMERALL)
Returns
-------
one of CONSUMERTYPE1,CONSUMERTYPE2
"""
if specified_gran != "CONSUMERALL":
return specified_gran
if get_consumer_levels(ds) == 1:
return "CONSUMERTYPE1"
else:
return "CONSUMERTYPE1"
def get_geographic_granularity(demand_sector,
energy_service,
energy_carrier):
DS_ES_EC_DemandGranularity_Map = loaders.get_parameter(
"DS_ES_EC_DemandGranularity_Map")
granularity_map = DS_ES_EC_DemandGranularity_Map.set_index(['DemandSector',
'EnergyService',
'EnergyCarrier'])
return granularity_map.loc[(demand_sector,
energy_service,
energy_carrier)]['GeographicGranularity']
def get_type(demand_sector, energy_service):
"""find type of service BOTTOMUP,EXTRANEOUS,GDPELASTICITY or RESIDUAL
"""
DS_ES_Map = loaders.get_parameter('DS_ES_Map')
DS_ES_Map = DS_ES_Map.set_index(['DemandSector', 'EnergyService'])
return DS_ES_Map.loc[(demand_sector, energy_service)]['InputType']
def get_BaseYearDemand(demand_sector):
"""loader function for parameter BaseYearDemand
"""
return get_demand_sector_parameter('BaseYearDemand',
demand_sector)
def get_DemandElasticity(demand_sector):
"""loader function for parameter DemandElasticity
"""
return get_demand_sector_parameter('DemandElasticity',
demand_sector)
def get_ExtraneousDemand(demand_sector):
"""loader function for parameter ExtraneousDemand
"""
extraneous = get_demand_sector_parameter('ExtraneousDemand',
demand_sector)
return extraneous
def get_ST_Efficiency(demand_sector):
"""ST_Efficiency loader function
"""
return get_demand_sector_parameter("ST_Efficiency",
demand_sector)
def get_ST_EmissionDetails(demand_sector):
"""ST_EmissionDetails loader function
"""
return get_demand_sector_parameter("ST_EmissionDetails",
demand_sector)
def get_ResidualDemand(demand_sector):
"""loader function for parameter ResidualDemand
"""
return get_demand_sector_parameter("ResidualDemand",
demand_sector)
def get_NumConsumers(demand_sector):
"""loader function for parameter NumConsumers
"""
return get_demand_sector_parameter('NumConsumers',
demand_sector)
def get_NumInstances(demand_sector, energy_service):
"""loader function for parameter NumInstances
"""
return get_DS_ES_parameter('NumInstances',
demand_sector,
energy_service)
def get_EfficiencyLevelSplit(demand_sector, energy_service):
"""loader function for parameter EfficiencyLevelSplit
"""
return get_DS_ES_parameter('EfficiencyLevelSplit',
demand_sector,
energy_service)
def get_ES_Demand(demand_sector,
energy_service,
service_tech):
"""loader function for parameter ES_Demand
should not be used directly. use loaders.get_parameter instead.
"""
prefix = f"{service_tech}_"
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'ES_Demand',
prefix)
logger.debug(f"Reading {prefix}ES_Demand from file {filepath}")
return pd.read_csv(filepath)
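# Illustrative usage (hypothetical names, not part of the original module):
# this loader is normally reached through loaders.get_parameter rather than
# being called directly, e.g.
#   es_demand = loaders.get_parameter("ES_Demand",
#                                     demand_sector="RES",
#                                     energy_service="LIGHTING",
#                                     service_tech="LED")
# which reads the ES_Demand file carrying the "LED_" prefix for that
# demand sector and energy service.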
def get_Penetration(demand_sector,
energy_service,
ST_combination):
"""loader function for parameter Penetration
"""
for item in itertools.permutations(ST_combination):
prefix = constant.ST_SEPARATOR_CHAR.join(
item) + constant.ST_SEPARATOR_CHAR
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
'Penetration',
prefix)
logger.debug(f"Searching for file {filepath}")
if os.path.exists(filepath):
logger.debug(f"Reading {prefix} from file {filepath}")
return pd.read_csv(filepath)
def get_demand_sector_parameter(param_name,
demand_sector):
"""loads demand sector parameter which lies inside demand_sector folder
"""
filepath = find_custom_demand_path(demand_sector, param_name)
logger.debug(f"Reading {param_name} from file {filepath}")
cols = list(filemanager.demand_specs()[param_name]['columns'].keys())
d = pd.read_csv(filepath)
return d[[c for c in cols if c in d.columns]]
def get_DS_ES_parameter(param_name,
demand_sector,
energy_service):
"""loads parameter which is inside demand_sector/energy_service folder
"""
filepath = find_custom_DS_ES_filepath(demand_sector,
energy_service,
param_name,
"")
logger.debug(f"Reading {param_name} from file {filepath}")
cols = list(filemanager.demand_specs()[param_name]['columns'].keys())
d = pd.read_csv(filepath)
return d[[c for c in cols if c in d.columns]]
def find_custom_DS_ES_filepath(demand_sector,
energy_service,
name,
prefix):
"""find actual location of data in case some data lies in scenario
"""
return find_custom_demand_path(demand_sector,
name,
energy_service,
prefix)
def find_custom_demand_path(demand_sector,
name,
energy_service="",
prefix=""):
"""find actual location of data in case some data lies in scenario
"""
return filemanager.find_filepath(name,
demand_sector,
energy_service,
fileprefix=prefix)
def get_mapped_items(DS_ES_EC_Map):
"""returns list of ECS from DS_ES_EC_Map
"""
return fs.flatten(fs.drop_columns(DS_ES_EC_Map, 2))
def get_RESIDUAL_ECs(DS_ES_Map, DS_ES_EC_Map):
df = DS_ES_Map.query("InputType == 'RESIDUAL'")[
['DemandSector', 'EnergyService']]
DS_ES = zip(df['DemandSector'], df['EnergyService'])
ECs = {(DS, ES): row[2:]
for DS, ES in DS_ES
for row in DS_ES_EC_Map if DS == row[0] and ES == row[1]}
return ECs
def derive_ES_EC(demand_sector, input_type):
"""return set of ES,EC combinations for given demand_sector and input_type but not_BOTTOMUP
"""
DS_ES_Map = loaders.get_parameter('DS_ES_Map')
DS_ES_EC_Map = loaders.get_parameter('DS_ES_EC_Map')
es_ec = fs.concat(*[[(row[1], ec) for ec in row[2:]]
for row in DS_ES_EC_Map if row[0] == demand_sector])
return [(es, ec) for es, ec in es_ec if len(DS_ES_Map.query(f"DemandSector=='{demand_sector}' & EnergyService=='{es}' & InputType=='{input_type}'")) > 0]
def check_RESIDUAL_EC(DS_ES_Map, DS_ES_EC_Map):
"""Each EC specified for a <DS, ES> combination,
whose InputType in DS_ES_Map is RESIDUAL,
must occur at least once in another
<DS, ES> combination for the same DS
"""
def x_in_y(x, y):
return any([ix in y for ix in x])
ECS = get_RESIDUAL_ECs(DS_ES_Map, DS_ES_EC_Map)
items1 = [row
for row in DS_ES_EC_Map
for DS, ES in ECS if row[0] == DS and row[1] != ES and x_in_y(ECS[(DS, ES)], row[2:])]
if len(items1) == 0 and ECS:
DS_ES_ST = expand_DS_ES_ST()
ST_Info = loaders.get_parameter('ST_Info')
items2 = []
for ECs in ECS.values():
for EC in ECs:
STS = ST_Info.query(f"EnergyCarrier == '{EC}'")[
'ServiceTech']
items2.extend([row for row in DS_ES_ST for DS, ES in ECS if row[0]
== DS and row[1] != ES and x_in_y(STS, row[2:])])
return not ECS or len(items1) > 0 or len(items2) > 0
def are_BOTTOMUP(DS_ES_X_Map, DS_ES_Map):
DS_ES = fs.transpose(fs.take_columns(DS_ES_X_Map, 2))
df = fs.combined_key_subset(DS_ES, DS_ES_Map).query(
"InputType != 'BOTTOMUP'")
return len(df) == 0
def not_BOTTOMUP(DS_ES_X_Map, DS_ES_Map):
DS_ES = fs.transpose(fs.take_columns(DS_ES_X_Map, 2))
df = fs.combined_key_subset(DS_ES, DS_ES_Map).query(
"InputType == 'BOTTOMUP'")
return len(df) == 0
def check_ALL_DS(DS_ES_X_Map):
"""
ES used with ALL as DS can not be used with any other DS.
This function checks if this is true.
"""
ES_with_ALL = [row[1] for row in DS_ES_X_Map if row[0] == "ALL"]
ES_without_ALL = [ES for ES in ES_with_ALL
for row in DS_ES_X_Map if row[0] != "ALL"]
return len(ES_without_ALL) == 0
def listcols(df):
return [df[c] for c in df.columns]
def check_ALL_ES(DS_ES_EC_DemandGranularity_Map):
"""function for validation
"""
DS_EC_ALL = DS_ES_EC_DemandGranularity_Map.query(
"EnergyService == 'ALL'")[['DemandSector', 'EnergyCarrier']]
DS_EC_NOALL = DS_ES_EC_DemandGranularity_Map.query(
"EnergyService != 'ALL'")[['DemandSector', 'EnergyCarrier']]
ALL = set(zip(*listcols(DS_EC_ALL)))
NOALL = set(zip(*listcols(DS_EC_NOALL)))
return not ALL & NOALL
def expand_DS_ALL(BOTTOMUP):
"""
Expands Map when DS is ALL
"""
if BOTTOMUP:
cond = "=="
data = loaders.load_param("DS_ES_ST_Map")
else:
data = loaders.load_param("DS_ES_EC_Map")
cond = "!="
DS_ES_Map = loaders.load_param("DS_ES_Map")
ESs = [row for row in data if row[0] == 'ALL']
for row in ESs:
ES = row[1]
data.remove(row)
nonbottomup = DS_ES_Map.query(
f"EnergyService == '{ES}' & InputType {cond} 'BOTTOMUP'")
if len(nonbottomup) > 0:
ds = nonbottomup['DemandSector']
for eachds in ds:
newrow = row.copy()
newrow[0] = eachds
data.append(newrow)
return data
def expand_DS_ES_EC():
return expand_DS_ALL(BOTTOMUP=False)
def expand_DS_ES_ST():
return expand_DS_ALL(BOTTOMUP=True)
def is_valid(DS, EC):
DS_ES_EC_Map = loaders.load_param("DS_ES_EC_Map")
DS_ES_ST_Map = loaders.load_param("DS_ES_ST_Map")
ST_Info = loaders.get_parameter("ST_Info")
ECS = [row for row in DS_ES_EC_Map if row[0] == DS and row[1] == EC]
STS = ST_Info.query(f"EnergyCarrier == '{EC}'")['ServiceTech']
DSS = [row[0] for row in DS_ES_ST_Map for ST in STS if row[2] == ST]
return ECS or DS in DSS
@functools.lru_cache()
def expand_DS_ES_EC_DemandGranularity_Map():
DS_ES_EC_DemandGranularity_Map = loaders.load_param(
"DS_ES_EC_DemandGranularity_Map")
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
data = DS_ES_EC_DemandGranularity_Map.to_dict(orient="records")
DSs = [d for d in data if d['EnergyService'] == 'ALL']
for DS in DSs:
data.remove(DS)
DemandSector = DS['DemandSector']
ALL_DS_ES = DS_ES_Map.query(f"DemandSector == '{DemandSector}'")[
['DemandSector', 'EnergyService']].to_dict(orient="records")
for item in ALL_DS_ES:
d = DS.copy()
d.update(item)
if is_valid(d['DemandSector'], d['EnergyCarrier']):
data.append(d)
return pd.DataFrame(data)
def ST_to_EC(ST):
ST_Info = loaders.get_parameter("ST_Info")
return ST_Info.query(f"ServiceTech == '{ST}'")['EnergyCarrier'].iloc[0]
def get_service_techs(demand_sector,
energy_service,
energy_carrier):
"""ServiceTechs for given <demand_sector,energy_service, energy_carrier>
combination
"""
DS_ES_ST_Map = loaders.get_parameter("DS_ES_ST_Map")
ST1 = fs.flatten([row[2:] for row in DS_ES_ST_Map if row[0]
== demand_sector and row[1] == energy_service])
ST2 = EC_to_ST(energy_carrier)
return tuple(set(ST1) & set(ST2))
def EC_to_ST(energy_carrier):
ST_Info = loaders.get_parameter("ST_Info")
return ST_Info.query(f"EnergyCarrier == '{energy_carrier}'")[
'ServiceTech'].values
def derive_DS_ES_EC():
DS_ES_EC_Map = expand_DS_ES_EC()
DS_ES_ST_Map = expand_DS_ES_ST()
explicit = []
for row in DS_ES_EC_Map:
explicit.extend([(row[0], row[1], EC) for EC in row[2:]])
implicit = []
for row in DS_ES_ST_Map:
implicit.extend([(row[0], row[1], ST_to_EC(ST)) for ST in row[2:]])
return explicit + implicit
def check_DS_ES_EC_validity():
"""
checks if DS_ES_EC_DemandGranularity_Map has valid combinations of
<DS,ES,EC>
"""
gmap = expand_DS_ES_EC_DemandGranularity_Map()
a = list(
zip(*listcols(gmap[['DemandSector', 'EnergyService', 'EnergyCarrier']])))
b = derive_DS_ES_EC()
return fs.one_to_one(a, b)
def coarser(x, y, values):
return values.index(x) <= values.index(y)
def finer(x, y, values):
return values.index(x) >= values.index(y)
def check_granularity(GRANULARITY):
DS_ES_EC_DemandGranularity_Map = expand_DS_ES_EC_DemandGranularity_Map()
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
def get_Granularity(DS, ES, EC):
df = DS_ES_EC_DemandGranularity_Map.query(
f"(DemandSector == '{DS}') & (EnergyService =='{ES}') & (EnergyCarrier == '{EC}')")
return df[GRANULARITY].iloc[0] if len(df) != 0 else None
def get_input_type(DS, ES):
return DS_ES_Map.query(f"DemandSector == '{DS}' & EnergyService == '{ES}'")['InputType'].iloc[0]
DS_ES_EC = list(
zip(*listcols(DS_ES_EC_DemandGranularity_Map[['DemandSector', 'EnergyService', 'EnergyCarrier']])))
type_ = {(DS, ES, EC): get_input_type(DS, ES)
for DS, ES, EC in DS_ES_EC}
type_RESIDUAL = {item: type_[item]
for item in type_ if type_[item] == 'RESIDUAL'}
t_gran = {item: get_Granularity(*item)
for item in DS_ES_EC}
t_gran_RES = {item: get_Granularity(*item)
for item in DS_ES_EC if item in type_RESIDUAL}
if GRANULARITY == "TimeGranularity":
t_values = [t.upper() for t in constant.TIME_COLUMNS]
else:
t_values = [g.upper() for g in constant.GEO_COLUMNS]
    # the condition in the list comprehension keeps only the
    # rows with DS == DS_under_consideration and EC == EC_under_consideration
return all([(t_values.index(t_gran_RES[ritem]) <=
t_values.index(t_gran[item]))
for ritem in t_gran_RES
for item in t_gran if item[0] == ritem[0] and item[2] == ritem[2]])
def check_ST_ES(DS_ES_ST_Map):
STS = get_mapped_items(DS_ES_ST_Map)
repeating_STS = [ST for ST in STS if STS.count(ST) > 1]
cond = True
for ST in repeating_STS:
cond = cond and len(set([row[1]
for row in DS_ES_ST_Map if ST in row[2:]])) == 1
return cond
class DemandValidationError(Exception):
pass
@functools.lru_cache(maxsize=None)
def derive_ECs(DS):
DS_ES_EC_Map = expand_DS_ES_EC()
DS_ES_ST_Map = expand_DS_ES_ST()
ST_Info = loaders.get_parameter("ST_Info")
explicit = fs.flatten([row[2:] for row in DS_ES_EC_Map if row[0] == DS])
STs = fs.flatten([row[2:] for row in DS_ES_ST_Map if row[0] == DS])
implicit = [ST_Info.query(f"ServiceTech == '{ST}'")['EnergyCarrier'].iloc[0]
for ST in STs if len(ST_Info.query(f"ServiceTech == '{ST}'")) != 0]
return explicit + implicit
def check_time_granularity_DS_Cons1():
"""
checks if DS_Cons1_Map has time granularity coarser than balancing time
"""
DS_Cons1_Map = loaders.get_parameter("DS_Cons1_Map")
cond = True
t_values = ('YEAR', 'SEASON', 'DAYTYPE', 'DAYSLICE')
for row in DS_Cons1_Map:
DS, GGRAN, TGRAN = row[:3]
ECs = derive_ECs(DS)
cond = cond and all(
[coarser(TGRAN, balancing_time(EC), t_values) for EC in ECs])
return cond
def check_geo_granularity_DS_Cons1():
"""
checks if DS_Cons1_Map has geographic granularity finer than balancing area
"""
DS_Cons1_Map = loaders.get_parameter("DS_Cons1_Map")
cond = True
g_values = tuple(constant.GEO_COLUMNS.keys())
for row in DS_Cons1_Map:
DS, GGRAN, TGRAN = row[:3]
ECs = derive_ECs(DS)
cond = cond and all(
[finer(GGRAN, balancing_area(EC), g_values) for EC in ECs])
return cond
def validate_consumertype2(Cons1_Cons2_Map, CONSUMERTYPES1):
if Cons1_Cons2_Map:
return fs.x_in_y(x=[row[0] for row in Cons1_Cons2_Map], y=CONSUMERTYPES1)
else:
return True
def get_ds_list(name):
"""List of possible DemandSectors for given demand sector parameter.
Parameters
----------
name: str
        Name of the parameter, any one of the nested parameters of demand,
e.g. BaseYearDemand, DemandElasticity
Returns
-------
list demand sectors which have that parameter
"""
if name in ['BaseYearDemand', 'DemandElasticity']:
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
return DS_ES_Map.query("InputType == 'GDPELASTICITY'")['DemandSector'].values
elif name == "ExtraneousDemand":
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
return DS_ES_Map.query("InputType == 'EXTRANEOUS'")['DemandSector'].values
elif name in ["ResidualDemand"]:
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
return DS_ES_Map.query("InputType == 'RESIDUAL'")['DemandSector'].values
else:
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
return DS_ES_Map.query("InputType == 'BOTTOMUP'")['DemandSector'].values
def existence_demand_parameter(name):
ds = get_ds_list(name)
ds = list(set(ds))
args = [(name, d) for d in ds]
valid = execute_in_process_pool(existence_demand_parameter_, args)
return all(valid)
def existence_demand_parameter_(name, demand_sector):
try:
logger.info(f"Validating {name} from {demand_sector}")
data = loaders.get_parameter(name,
demand_sector=demand_sector)
valid = validate_each_demand_param(name, data,
demand_sector=demand_sector)
if not valid:
print(f"Validation failed for {name} from {demand_sector}")
logger.error(
f"Validation failed for {name} from {demand_sector}")
except FileNotFoundError as fne:
logger.error(f"{name} for {demand_sector} is not given")
valid = False
logger.exception(fne)
except Exception as e:
logger.error(f"{name} for {demand_sector} has invalid data")
valid = False
logger.exception(e)
return valid
def check_basedemand_elasticity_gran():
"""
    checks if BaseYearDemand and DemandElasticity have the same granularity and that it
    equals the granularity specified in the DS_ES_EC_DemandGranularity_Map
"""
ds = get_ds_list('BaseYearDemand')
for d in ds:
logger.debug(
f"Checking if granularity is same for BaseYearDemand and DemandElasticity for {d}")
BaseYearDemand = get_BaseYearDemand(d)
DemandElasticity = get_DemandElasticity(d)
indexcols = ["EnergyService", "EnergyCarrier"]
BaseYearDemand = BaseYearDemand.set_index(indexcols).sort_index()
DemandElasticity = DemandElasticity.set_index(indexcols).sort_index()
for item in BaseYearDemand.index.unique():
q = "EnergyService=='{}' & EnergyCarrier=='{}'".format(
item[0], item[1])
byd = utilities.filter_empty(BaseYearDemand.query(q))
de = utilities.filter_empty(DemandElasticity.query(q))
bg = utilities.get_geographic_columns_from_dataframe(byd)
dg = utilities.get_geographic_columns_from_dataframe(de)
geogran = get_geographic_granularity(d, *item)
grancols = constant.GEO_COLUMNS[geogran]
if bg == dg:
logger.debug(
f"Geographic granularity of BaseYearDemand and DemandElasticity is same for {d},{item}")
if bg != grancols:
logger.error(
f"Geographic granularity of BaseYearDemand for {d},{item} is diffecrent than specified in DS_ES_EC_DemandGranularity_Map.")
return False
if dg != grancols:
logger.error(
f"Geographic granularity of DemandElasticity for {d},{item} is diffecrent than specified in DS_ES_EC_DemandGranularity_Map.")
return False
return True
def get_all_ES_Demand(ds, es):
"""returns dictionary of ES_Demand data for each ST.
it returns dict with key as ST and ES_Demand as value
"""
DS_ES_ST_Map = loaders.get_parameter("DS_ES_ST_Map")
STs = [row[2:]
for row in DS_ES_ST_Map if row[0] == ds and row[1] == es][0]
return {s: loaders.get_parameter('ES_Demand',
demand_sector=ds,
energy_service=es,
service_tech=s) for s in STs}
def read_header(filepath):
with open(filepath) as f:
csvf = csv.reader(f)
return next(csvf)
def check_ES_Demand_columns():
ds_es = get_bottomup_ds_es()
valid = True
for ds, es in ds_es:
valid = valid and _check_ES_Demand_columns(ds, es)
return valid
def get_structural_columns(ds):
return constant.TIME_COLUMNS[utilities.get_valid_time_levels()[-1]] + \
constant.GEO_COLUMNS[utilities.get_valid_geographic_levels()[-1]] + \
get_cons_columns(ds)
def _check_ES_Demand_columns(ds, es):
"""checks if ES_Demand file has correct column names specified
"""
DS_ES_ST_Map = loaders.get_parameter("DS_ES_ST_Map")
STs = [row[2:]
for row in DS_ES_ST_Map if row[0] == ds and row[1] == es][0]
filepaths = {s: find_custom_DS_ES_filepath(ds,
es,
'ES_Demand',
f"{s}_") for s in STs}
valid = True
for ST, path in filepaths.items():
columns = read_header(path)
structural = get_structural_columns(ds)
other_cols = [c for c in columns if c not in structural]
unexpected_cols = [c for c in other_cols if ST not in c]
if unexpected_cols:
logger.warning(
f"Found unexpected columns {unexpected_cols} in {ST}_ES_Demand file")
st_cols = [c for c in other_cols if ST in c]
combinations = [set(c.split(constant.ST_SEPARATOR_CHAR))
for c in st_cols]
if any([combinations.count(c) > 1 for c in combinations]):
logger.error(
"It is not allowed for two columns to have the exact same combinations of STs in {ST}_ES_Demand")
valid = False
sts = get_corresponding_sts(ds, es, ST)
expected = fs.flatten([[set(x) for x in itertools.combinations(
sts, n)] for n in range(1, len(sts)+1)])
unexpected = [comb for comb in combinations if comb not in expected]
if unexpected:
logger.error(
f"Found unexpected combination of STs in {ST}_ES_Demand")
logger.error("Unexpected combination of STs in column {}".format(
[constant.ST_SEPARATOR_CHAR.join(c) for c in unexpected]))
valid = False
return valid
def get_all_Penetration(ds, es):
"""returns all penetration data as dictionary with key as st, value as
dictionary of ST combinations and actual penetration data.
{"ST":{(ST1,ST2): penetration data for ST1 and ST2}
"""
DS_ES_ST_Map = loaders.get_parameter("DS_ES_ST_Map")
STs = [row[2:]
for row in DS_ES_ST_Map if row[0] == ds and row[1] == es][0]
d = {}
for s in STs:
es_demand = loaders.get_parameter('ES_Demand',
demand_sector=ds,
energy_service=es,
service_tech=s)
combs = [tuple(name.split(constant.ST_SEPARATOR_CHAR))
for name in es_demand.columns if s in name]
d[s] = {tuple(c): loaders.get_parameter('Penetration',
demand_sector=ds,
energy_service=es,
ST_combination=c) for c in combs}
return d
def get_data(name, ds, es):
if name in ['EfficiencyLevelSplit', 'NumInstances']:
return {(ds, es): loaders.get_parameter(name,
demand_sector=ds,
energy_service=es)}
elif name == "ES_Demand":
return get_all_ES_Demand(ds, es)
elif name == "Penetration":
return get_all_Penetration(ds, es)
else:
logger.error(f"Unknown parameter {name}")
def validate_each_demand_param_(name, item, data, ds, es, st):
"""encapsulation over validate_each_demand_param to catch exception
"""
logger.info(f"Validating {name} from {ds},{es} for {st}")
try:
v = validate_each_demand_param(name,
data,
demand_sector=ds,
energy_service=es,
service_tech=st)
if not v:
logger.error(
f"Validaton failed for {name} from {ds},{es} for {st}")
print(
f"Validaton failed for {name} from {ds},{es} for {st}")
except Exception as e:
logger.exception(e)
logger.error(
f"{name} for {ds},{es},{item} has invalid data")
print(e)
v = False
return v
def existence_demand_energy_service_parameter(name):
"""checks existence and basic data validation of
EfficiencyLevelSplit,NumInstances,ES_Demand,Penetration
"""
ds_es = get_bottomup_ds_es()
args = []
for ds, es in ds_es:
try:
data_ = get_data(name, ds, es)
except FileNotFoundError as fne:
logger.error(f"{name} for {ds},{es} is not given")
logger.exception(fne)
return False
for st, data in data_.items():
if not isinstance(data, dict):
data = {st: data}
for item in data:
args.append((name,
item,
data[item],
ds,
es,
st))
valid = execute_in_process_pool(validate_each_demand_param_, args)
#valid = [validate_each_demand_param_(*item) for item in args]
return all(valid)
def validate_each_demand_param(name, data, **kwargs):
"""Validates individual parameter according to specs given in yml file.
"""
specs = filemanager.demand_specs()[name]
return loaders.validate_param(name,
specs,
data,
"rumi.io.demand",
**kwargs)
def subset(data, indexnames, items):
if isinstance(items, str):
items = (items,)
q = " & ".join([f"{name} == '{item}'" for name,
item in zip(indexnames, items)])
return data.query(q)
def check_efficiency_levels(data, param_name, *args):
ST_Info = loaders.get_parameter("ST_Info")
st_info = ST_Info.set_index('ServiceTech')
valid = True
els = data.set_index('ServiceTech')
for service_tech in els.index.unique():
df = els.loc[service_tech]
levels = len(df['EfficiencyLevelName'].unique())
n = st_info.loc[service_tech]['NumEfficiencyLevels']
v = n == levels
valid = valid and v
if not v:
logger.error(
f"For {param_name} in {args}, efficiency levels do not match for {service_tech}")
return valid
def check_EfficiencyLevelSplit_granularity():
return _check_DS_ES_granularity("EfficiencyLevelSplit")
def check_NumInstances_granularity():
return _check_DS_ES_granularity("NumInstances")
def get_bottomup_ds_es():
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
ds_es = DS_ES_Map.query("InputType == 'BOTTOMUP'")[
['DemandSector', 'EnergyService']].values
return ds_es
def get_nonbottomup_ds_es():
DS_ES_Map = loaders.get_parameter("DS_ES_Map")
ds_es = DS_ES_Map.query("InputType != 'BOTTOMUP'")[
['DemandSector', 'EnergyService']].values
return ds_es
def check_granularity_per_entity(d,
entity,
GeographicGranularity,
TimeGranularity,
ConsumerGranularity=None):
"""checks granuarity only. i.e. only columns are checked.
contents of columns are not validated here.
"""
geo_columns, time_columns, cons_columns = [], [], []
if GeographicGranularity:
geo_columns = common.get_geographic_columns(GeographicGranularity)
dataset_columns = [c for c in d.columns if c in constant.GEOGRAPHIES]
if TimeGranularity:
time_columns = common.get_time_columns(TimeGranularity)
dataset_columns.extend(
[c for c in d.columns if c in constant.TIME_SLICES])
if ConsumerGranularity:
cons_columns = constant.CONSUMER_COLUMNS[ConsumerGranularity]
dataset_columns.extend(
[c for c in d.columns if c in constant.CONSUMER_TYPES])
diff1 = set(geo_columns + time_columns +
cons_columns) - set(dataset_columns)
diff2 = set(dataset_columns) - \
set(geo_columns + time_columns + cons_columns)
valid = True
if diff2:
c, r = d[list(diff2)].shape
empty = d[list(diff2)].isnull().sum().sum() == c*r
if not empty:
logger.debug(f"Granularity is finer than expected for {entity}!")
return valid
if diff1:
logger.error(f"{diff1} not found in data for {entity}")
valid = False
else:
allcols = geo_columns+time_columns + cons_columns
nonempty = d[allcols].isnull().sum().sum()
valid = nonempty == 0
if not valid:
logger.error(f"one of columns {allcols} is empty for {entity}.")
return valid
def check_demand_granularity(param_name,
CSTAR=None,
GSTAR=None,
TSTAR=None,
check_function=check_granularity_per_entity):
"""
Checks whether given data follows granularity as specified in granularity
map. data file directly inside demand sector folder is tested using this
function.
"""
    if CSTAR is None and GSTAR is None and TSTAR is None:
raise Exception(
"check_granularity function must have valid GSTAR/TSTAR argument")
granularity_map = loaders.get_parameter("DS_ES_EC_DemandGranularity_Map")
granularity = granularity_map.set_index(['DemandSector',
'EnergyService',
'EnergyCarrier'])
dslist = get_ds_list(param_name)
valid = True
for ds in dslist:
data = loaders.get_parameter(param_name, demand_sector=ds)
data = data.set_index(['EnergyService', 'EnergyCarrier'])
data.sort_index(inplace=True)
logger.debug(f"Checking granularity of {param_name} for {ds}")
for item in data.index.unique():
d = subset(data, data.index.names, item)
d = utilities.filter_empty(d)
entity = (ds,) + item
g = granularity.loc[entity]
ConsumerGranularity = None
GeographicGranularity, TimeGranularity = None, None
if CSTAR:
ConsumerGranularity = get_consumer_granularity(ds,
g['ConsumerGranularity'])
if GSTAR:
GeographicGranularity = g['GeographicGranularity']
if TSTAR:
TimeGranularity = g['TimeGranularity']
v = check_function(d,
entity,
GeographicGranularity,
TimeGranularity,
ConsumerGranularity)
valid = valid and v
if not v:
logger.error(
f"Granularity check failed for {param_name} for {entity}")
return valid
def get_corresponding_sts(demand_sector,
energy_service,
service_tech):
DS_ES_ST_Map = loaders.get_parameter('DS_ES_ST_Map')
ST_Info = loaders.get_parameter('ST_Info')
STs = fs.flatten([row[2:] for row in DS_ES_ST_Map if row[0] ==
demand_sector and row[1] == energy_service and service_tech in row])
ST_Info = ST_Info.set_index('ServiceTech')
EC = ST_Info.loc[service_tech]['EnergyCarrier']
return [s for s in STs if ST_Info.loc[s]['EnergyCarrier'] == EC]
def coarsest(gran_map, ds):
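    # picks, per dimension, the granularity with the fewest columns, i.e. the
    # coarsest consumer, geographic and time granularity present in gran_map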
c = min(gran_map.to_dict(orient='records'),
key=lambda x: len(constant.CONSUMER_COLUMNS[get_consumer_granularity(ds, x['ConsumerGranularity'])]))['ConsumerGranularity']
g = min(gran_map['GeographicGranularity'].values,
key=lambda x: len(constant.GEO_COLUMNS[x]))
t = min(gran_map['TimeGranularity'].values,
key=lambda x: len(constant.TIME_COLUMNS[x]))
return c, g, t
def _check_DS_ES_granularity(param_name):
"""
Checks whether EfficiencyLevelSplit/NumInstances follows granularity as specified in granularity map.
"""
granularity_map = loaders.get_parameter("DS_ST_Granularity_Map")
ds_es = get_bottomup_ds_es()
valid = True
for ds, es in ds_es:
data_ = loaders.get_parameter(param_name,
demand_sector=ds,
energy_service=es)
data = data_.set_index('ServiceTech')
logger.debug(f"Checking granularity of {param_name} for {ds},{es}")
for ST in data.index.unique():
d = data.loc[ST]
d = utilities.filter_empty(d)
sts = get_corresponding_sts(ds, es, ST)
g = granularity_map.query(
f"DemandSector == '{ds}' & ServiceTech in {sts}")
ConsumerGranularity, GeographicGranularity, TimeGranularity = coarsest(
g, ds)
v = utilities.check_granularity_per_entity(d,
ST,
GeographicGranularity,
TimeGranularity,
ConsumerGranularity)
valid = valid and v
if not v:
logger.error(
f"Granularity check failed for {param_name} for {ST}")
return valid
def _check_ES_Demand_granularity(param_name):
"""
Checks whether ES_Demand follows granularity as
specified in granularity map.
"""
granularity_map = loaders.get_parameter("DS_ES_EC_DemandGranularity_Map")
granularity = granularity_map.set_index(['DemandSector',
'EnergyService',
'EnergyCarrier'])
ds_es = get_bottomup_ds_es()
valid = True
for ds, es in ds_es:
data_ = get_data(param_name, ds, es)
for ST, data in data_.items():
d_ = data
if not isinstance(data, dict):
d_ = {ST: data}
for item, df in d_.items():
if isinstance(df, pd.Series):
df = df.to_frame()
d = utilities.filter_empty(df)
g = granularity.loc[(ds, es, ST_to_EC(ST))]
ConsumerGranularity = get_consumer_granularity(ds,
g['ConsumerGranularity'])
GeographicGranularity = g['GeographicGranularity']
TimeGranularity = g['TimeGranularity']
v = utilities.check_granularity_per_entity(d,
item,
GeographicGranularity,
TimeGranularity,
ConsumerGranularity)
valid = valid and v
if not v:
logger.error(
f"Granularity check failed for {param_name} for {ST}, {item}")
return valid
def _check_Penetration_granularity(param_name):
"""
Checks whether Penetration follows granularity as
specified in granularity map.
"""
granularity_map = loaders.get_parameter("DS_ST_Granularity_Map")
ds_es = get_bottomup_ds_es()
valid = True
for ds, es in ds_es:
data_ = get_data(param_name, ds, es)
for ST, data in data_.items():
d_ = data
if not isinstance(data, dict):
d_ = {ST: data}
for comb, df in d_.items():
if isinstance(df, pd.Series):
df = df.to_frame()
d = utilities.filter_empty(df)
g = granularity_map.query(
f"DemandSector == '{ds}' & ServiceTech in {comb}")
ConsumerGranularity, GeographicGranularity, TimeGranularity = coarsest(
g, ds)
v = utilities.check_granularity_per_entity(d,
ST,
GeographicGranularity,
TimeGranularity,
ConsumerGranularity)
valid = valid and v
if not v:
logger.error(
f"Granularity check failed for {param_name} for {ST}, {comb}")
return valid
def check_ES_Demand_granularity():
return _check_ES_Demand_granularity("ES_Demand")
def check_Penetration_granularity():
return _check_Penetration_granularity("Penetration")
def check_numconsumers_granularity():
"""
Checks whether NumConsumers data follows granularity as specified in
granularity map.
"""
granularity = loaders.get_parameter("DS_Cons1_Map")
param_name = 'NumConsumers'
dslist = get_ds_list(param_name)
valid = True
for ds in dslist:
data = loaders.get_parameter(param_name, demand_sector=ds)
d = utilities.filter_empty(data)
g = granularity[ds]
ConsumerGranularity = get_cons_columns(ds)[-1].upper()
GeographicGranularity = g[0]
TimeGranularity = g[1]
v = check_granularity_per_entity(data,
(ds, "NumConsumers"),
GeographicGranularity,
TimeGranularity,
ConsumerGranularity)
valid = valid and v
if not v:
logger.error(
f"Granularity check failed for {param_name} for {ds}")
return valid
def save_output(compute_demand):
"""decorator function to be applied to demand computation function.
it will save the results to required destination and return same.
also if results are alredy present then it will read the results and
return, instaed of computing it again.
"""
def get_columns(d):
"""This is just to reorder columns"""
tgc = [c for c in constant.TIME_SLICES +
constant.GEOGRAPHIES + constant.CONSUMER_TYPES if c in d.columns]
other = [c for c in d.columns if c not in tgc]
return tgc + other
@functools.wraps(compute_demand)
def wrapper(*args):
output_path = filemanager.get_output_path("Demand")
filename = "_".join(args+('Demand',))
path = os.path.join(output_path, ".".join([filename, "csv"]))
if os.path.exists(path):
result = pd.read_csv(path)
else:
result = compute_demand(*args)
result = result[get_columns(result)]
result.to_csv(path, index=False)
return result
return wrapper
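# Illustrative sketch (hypothetical function signature, not part of the original
# module): a demand computation function would typically be wrapped as
#   @save_output
#   def compute_demand(demand_sector, energy_service, energy_carrier):
#       ...
# so that, for example, a call with ("RES", "LIGHTING", "ELEC") writes the result
# once to <output path>/RES_LIGHTING_ELEC_Demand.csv and simply re-reads that
# file on subsequent calls with the same arguments.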
def check_ST_granularity():
""" checks if granuarity is coarser than corresponding granularty in
DS_ES_EC_DemandGranularity_Map for derived DS,ES,EC from DS,ST
Returns
-------
True or False
"""
DS_ES_EC_DemandGranularity_Map = loaders.get_parameter(
"DS_ES_EC_DemandGranularity_Map")
DS_ST_Granularity_Map = loaders.get_parameter("DS_ST_Granularity_Map")
DS_ST_Granularity_Map = DS_ST_Granularity_Map.set_index(
["DemandSector", "ServiceTech"])
for item in DS_ST_Granularity_Map.index:
consgran = DS_ST_Granularity_Map.loc[item]['ConsumerGranularity']
geogran = DS_ST_Granularity_Map.loc[item]['GeographicGranularity']
timegran = DS_ST_Granularity_Map.loc[item]['TimeGranularity']
ds, st = item
ec = ST_to_EC(st)
ds_es_ec = DS_ES_EC_DemandGranularity_Map.query(
f"DemandSector == '{ds}' & EnergyCarrier == '{ec}'")
def _check_gran(gran, grantype='TimeGranularity',
grandata=list(constant.TIME_COLUMNS.keys())):
gran_ = min(
ds_es_ec[grantype].values, key=grandata.index)
if not coarser(gran, gran_, grandata):
logger.error(
f"In DS_ST_Granularity_Map, for <{ds}, {st}> {grantype} should be coarser than or equal to {gran_}")
return False
return True
c = _check_gran(consgran, 'ConsumerGranularity',
list(constant.CONSUMER_COLUMNS.keys()))
g = _check_gran(geogran, 'GeographicGranularity',
list(constant.GEO_COLUMNS.keys()))
t = _check_gran(timegran, 'TimeGranularity',
list(constant.TIME_COLUMNS.keys()))
if not all([c, g, t]):
return False
return True
def check_total_penetration():
"""checks if penetrations for each ST together with which it can
appear totals less than or equal to 1
"""
ds_es = get_bottomup_ds_es()
valid = True
for ds, es in ds_es:
DS_ES_ST_Map = loaders.get_parameter("DS_ES_ST_Map")
STs = [row[2:]
for row in DS_ES_ST_Map if row[0] == ds and row[1] == es][0]
for s in STs:
es_demand = loaders.get_parameter('ES_Demand',
demand_sector=ds,
energy_service=es,
service_tech=s)
combs = [tuple(name.split(constant.ST_SEPARATOR_CHAR))
for name in es_demand.columns if s in name]
p = [loaders.get_parameter('Penetration',
demand_sector=ds,
energy_service=es,
ST_combination=c) for c in combs]
indexcols = utilities.get_all_structure_columns(p[0])
p = [item.set_index(indexcols) for item in p]
valid = valid and (functools.reduce(
lambda x, y: x+y, [item['Penetration'] for item in p], 0) > 1).sum() == 0
if not valid:
print(functools.reduce(
lambda x, y: x+y, [item['Penetration'] for item in p], 0) > 1)
logger.error(f"Penetration for {combs} sums more than 1!")
return valid
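# Worked example (illustrative numbers only): if, for some consumer/geography/
# time entry, the Penetration values for the combinations (ST1,) and (ST1, ST2)
# are 0.6 and 0.5, their sum 1.1 exceeds 1 and the check above fails for ST1.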
if __name__ == "__main__":
pass
| 36.002269
| 157
| 0.590734
|
652b650d8ea0957351322d930d35b5d58df4a268
| 284
|
py
|
Python
|
tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_LinearTrend_30_12_100.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_LinearTrend_30_12_100.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_30/ar_12/test_artificial_1024_RelativeDifference_LinearTrend_30_12_100.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 12);
| 40.571429
| 179
| 0.746479
|
22a42c7fe8e75d64abe49976f0ecea349f002295
| 339
|
py
|
Python
|
RENAME_FILES.py
|
CaptainVietnam6/GoPro-output-cleaner
|
fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4
|
[
"MIT"
] | null | null | null |
RENAME_FILES.py
|
CaptainVietnam6/GoPro-output-cleaner
|
fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4
|
[
"MIT"
] | null | null | null |
RENAME_FILES.py
|
CaptainVietnam6/GoPro-output-cleaner
|
fbffc8ff91d2cc9845c2aab1c41a0e0f9c6ac6d4
|
[
"MIT"
] | null | null | null |
import os
suffix_count = 0
while True:
if os.path.exists(f"./GoPro{suffix_count}.mp4") == True:
suffix_count += 1
if os.path.exists(f"./GoPro{suffix_count}.mp4") == False:
break
for file in os.listdir("./"):
if file.endswith(".MP4"):
os.rename(file, f"GoPro{suffix_count}.mp4")
suffix_count += 1
| 30.818182
| 61
| 0.613569
|
17c93798a9c2da435fb40d40e168fbd33a887e61
| 2,624
|
py
|
Python
|
tests/beos_plugin_tests/scenarios/scenarios_various_operations/007_[5.8]_Test_of_issue,_transfer_and_withdraw_.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | 9
|
2019-04-04T18:46:14.000Z
|
2022-03-03T16:22:56.000Z
|
tests/beos_plugin_tests/scenarios/scenarios_various_operations/007_[5.8]_Test_of_issue,_transfer_and_withdraw_.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | null | null | null |
tests/beos_plugin_tests/scenarios/scenarios_various_operations/007_[5.8]_Test_of_issue,_transfer_and_withdraw_.py
|
terradacs/beos-core
|
31e19170bcad573b1d498811284e62babd478f92
|
[
"MIT"
] | 3
|
2019-03-19T17:45:08.000Z
|
2021-03-22T21:45:35.000Z
|
#!/usr/bin/python3
# Scenario based on test : [5.8]-Test-of-issue,-transfer-and-withdraw
import os
import sys
import time
import datetime
currentdir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(os.path.dirname(currentdir)))
from beos_test_utils.beos_utils_pack import init, ActionResult, ResourceResult, VotersResult
if __name__ == "__main__":
try:
node, summary, args, log = init(__file__)
accounts = node.create_accounts(2, "20.0000 BTS")
node.run_node()
#Changeparams
#node.changeparams(["0.0000 BTS"], 190, [55,0,60,5,8000000], [55,0,60,5,5000000], 3000000)
newparams = {
"beos" : {
"starting_block" : 55,
"next_block" : 0,
"ending_block" : 60,
"block_interval" : 5,
"trustee_reward" : 8000000
},
"ram" : {
"starting_block" : 55,
"next_block" : 0,
"ending_block" : 60,
"block_interval" : 5,
"trustee_reward" : 5000000
},
"proxy_assets" : [ "0.0000 BTS"],
"ram_leftover" : 3000000,
"starting_block_for_initial_witness_election":190
}
node.changeparams(newparams)
#Actions
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="20.0000 BTS",_memo=" internal transfer 0"), ActionResult(False, "transaction net usage is too high") )
node.wait_till_block(55)
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="20.0000 BTS",_memo=" internal transfer 1") )
node.wait_till_block(62)
summary.action_status(node.transfer(_from=accounts[1].name,_to=accounts[0].name,_quantity="20.0000 BTS",_memo=" internal transfer 2") )
summary.action_status(node.withdraw(_from=accounts[0].name,_bts_to="any_account",_quantity="20.0000 BTS",_memo="") )
summary.action_status(node.withdraw(_from=accounts[1].name,_bts_to="any_account",_quantity="20.0000 BTS",_memo="") )
summary.action_status(node.transfer(_from=accounts[0].name,_to=accounts[1].name,_quantity="20.0000 BTS",_memo=" internal transfer 3"), ActionResult(False, "no balance object found") )
#At end
summary.user_block_status(node, accounts[0].name, ResourceResult(_balance="",_net_weight="458902003.9224 BEOS",_cpu_weight="458902003.9225 BEOS",_ram_bytes=7998080448))
summary.user_block_status(node, accounts[1].name, ResourceResult(_balance="",_net_weight="1376706011.7673 BEOS",_cpu_weight="1376706011.7674 BEOS",_ram_bytes=23994230448))
except Exception as _ex:
log.exception("Exception `{0}` occures while executing `{1}` tests.".format(str(_ex), __file__))
finally:
summary_status = summary.summarize()
node.stop_node()
exit(summary_status)
| 43.733333
| 195
| 0.728659
|
acbc1608bf68f67a0b511fe2b0f082fe4ea025ba
| 2,298
|
py
|
Python
|
chrome/common/extensions/docs/server2/redirector.py
|
halton/chromium-crosswalk
|
bfcca582b723b9535907f0b410b920ef99911b70
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2017-04-05T01:51:34.000Z
|
2018-02-15T03:11:54.000Z
|
chrome/common/extensions/docs/server2/redirector.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2021-12-13T19:44:12.000Z
|
2021-12-13T19:44:12.000Z
|
chrome/common/extensions/docs/server2/redirector.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2017-04-05T01:52:03.000Z
|
2022-02-13T17:58:45.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
from urlparse import urlsplit
from file_system import FileNotFoundError
from future import Gettable, Future
class Redirector(object):
def __init__(self, compiled_fs_factory, file_system):
self._file_system = file_system
self._cache = compiled_fs_factory.ForJson(file_system)
def Redirect(self, host, path):
''' Check if a path should be redirected, first according to host
redirection rules, then from rules in redirects.json files.
Returns the path that should be redirected to, or None if no redirection
should occur.
'''
return self._RedirectOldHosts(host, path) or self._RedirectFromConfig(path)
def _RedirectFromConfig(self, url):
''' Lookup the redirects configuration file in the directory that contains
the requested resource. If no redirection rule is matched, or no
configuration file exists, returns None.
'''
dirname, filename = posixpath.split(url)
try:
rules = self._cache.GetFromFile(
posixpath.join(dirname, 'redirects.json')).Get()
except FileNotFoundError:
return None
redirect = rules.get(filename)
if redirect is None:
return None
if (redirect.startswith('/') or
urlsplit(redirect).scheme in ('http', 'https')):
return redirect
return posixpath.normpath(posixpath.join('/', dirname, redirect))
def _RedirectOldHosts(self, host, path):
''' Redirect paths from the old code.google.com to the new
developer.chrome.com, retaining elements like the channel and https, if
used.
'''
if urlsplit(host).hostname != 'code.google.com':
return None
path = path.split('/')
if path and path[0] == 'chrome':
path.pop(0)
return 'https://developer.chrome.com/' + posixpath.join(*path)
def Cron(self):
''' Load files during a cron run.
'''
futures = []
for root, dirs, files in self._file_system.Walk(''):
if 'redirects.json' in files:
futures.append(
self._cache.GetFromFile(posixpath.join(root, 'redirects.json')))
return Future(delegate=Gettable(lambda: [f.Get() for f in futures]))
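# Illustrative behaviour (hypothetical values, not part of the original module):
#   redirector = Redirector(compiled_fs_factory, file_system)
#   redirector.Redirect('http://code.google.com', 'chrome/extensions/tabs.html')
#   # -> 'https://developer.chrome.com/extensions/tabs.html'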
| 32.828571
| 79
| 0.693211
|
5c31c78b9318c9e9f9f11a0f842c74e6e48ea2c1
| 1,075
|
py
|
Python
|
RecoLocalTracker/Configuration/python/RecoLocalTrackerHeavyIons_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
RecoLocalTracker/Configuration/python/RecoLocalTrackerHeavyIons_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
RecoLocalTracker/Configuration/python/RecoLocalTrackerHeavyIons_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
#
# Tracker Local Reco
# Initialize magnetic field
#
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitConverter_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiStripZeroSuppression.SiStripZeroSuppression_cfi import *
from RecoLocalTracker.SiStripClusterizer.SiStripClusterizer_cfi import *
from RecoLocalTracker.SiPixelClusterizer.SiPixelClusterizerPreSplitting_cfi import *
from RecoLocalTracker.SiPixelRecHits.SiPixelRecHits_cfi import *
from RecoLocalTracker.SubCollectionProducers.clustersummaryproducer_cfi import *
pixeltrackerlocalrecoTask = cms.Task(siPixelClustersPreSplitting,siPixelRecHitsPreSplitting)
striptrackerlocalrecoTask = cms.Task(siStripZeroSuppression,siStripClusters,siStripMatchedRecHits)
trackerlocalrecoTask = cms.Task(pixeltrackerlocalrecoTask,striptrackerlocalrecoTask,clusterSummaryProducer)
trackerlocalreco = cms.Sequence(trackerlocalrecoTask)
| 56.578947
| 107
| 0.897674
|
ac08d1fdb4218882feed894675dcf5a20389511e
| 2,435
|
py
|
Python
|
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/models/__init__.py
|
Candida18/Job-Portal-with-Automated-Resume-Screening
|
19d19464ad3d1714da856656753a4afdfe257b31
|
[
"MIT"
] | null | null | null |
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/models/__init__.py
|
Candida18/Job-Portal-with-Automated-Resume-Screening
|
19d19464ad3d1714da856656753a4afdfe257b31
|
[
"MIT"
] | null | null | null |
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/models/__init__.py
|
Candida18/Job-Portal-with-Automated-Resume-Screening
|
19d19464ad3d1714da856656753a4afdfe257b31
|
[
"MIT"
] | 2
|
2022-01-15T05:36:58.000Z
|
2022-02-08T15:25:50.000Z
|
"""
This package contains algorithms for extracting document representations from their raw
bag-of-word counts.
"""
# bring model classes directly into package namespace, to save some typing
from .coherencemodel import CoherenceModel # noqa:F401
from .hdpmodel import HdpModel # noqa:F401
from .ldamodel import LdaModel # noqa:F401
from .lsimodel import LsiModel # noqa:F401
from .tfidfmodel import TfidfModel # noqa:F401
from .rpmodel import RpModel # noqa:F401
from .logentropy_model import LogEntropyModel # noqa:F401
from .word2vec import Word2Vec, FAST_VERSION # noqa:F401
from .doc2vec import Doc2Vec # noqa:F401
from .keyedvectors import KeyedVectors # noqa:F401
from .ldamulticore import LdaMulticore # noqa:F401
from .phrases import Phrases # noqa:F401
from .normmodel import NormModel # noqa:F401
from .atmodel import AuthorTopicModel # noqa:F401
from .ldaseqmodel import LdaSeqModel # noqa:F401
from .fasttext import FastText # noqa:F401
from .translation_matrix import TranslationMatrix, BackMappingTranslationMatrix # noqa:F401
from .ensemblelda import EnsembleLda # noqa:F401
from .nmf import Nmf # noqa:F401
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
"""
Remap feature ids to new values.
Given a mapping between old ids and new ids (some old ids may be missing = these
features are to be discarded), this will wrap a corpus so that iterating over
`VocabTransform[corpus]` returns the same vectors but with the new ids.
Old features that have no counterpart in the new ids are discarded. This
can be used to filter vocabulary of a corpus "online":
.. sourcecode:: pycon
>>> old2new = {oldid: newid for newid, oldid in enumerate(ids_you_want_to_keep)}
>>> vt = VocabTransform(old2new)
>>> for vec_with_new_ids in vt[corpus_with_old_ids]:
>>> pass
"""
def __init__(self, old2new, id2token=None):
self.old2new = old2new
self.id2token = id2token
def __getitem__(self, bow):
"""
Return representation with the ids transformed.
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
return sorted((self.old2new[oldid], weight) for oldid, weight in bow if oldid in self.old2new)
| 38.046875
| 102
| 0.726899
|
d69ab7e018423ff264d2053d8f2bc31058fb51d5
| 8,158
|
py
|
Python
|
darshan-util/pydarshan/darshan/experimental/plots/matplotlib.py
|
srini009/darshan
|
06faec6ae09081078da1d85e3737928361ade8f1
|
[
"mpich2"
] | 31
|
2021-05-13T08:55:57.000Z
|
2022-03-17T17:41:01.000Z
|
darshan-util/pydarshan/darshan/experimental/plots/matplotlib.py
|
srini009/darshan
|
06faec6ae09081078da1d85e3737928361ade8f1
|
[
"mpich2"
] | 655
|
2021-05-12T23:56:25.000Z
|
2022-03-31T20:35:49.000Z
|
darshan-util/pydarshan/darshan/experimental/plots/matplotlib.py
|
srini009/darshan
|
06faec6ae09081078da1d85e3737928361ade8f1
|
[
"mpich2"
] | 18
|
2021-05-13T14:47:56.000Z
|
2022-03-28T19:49:18.000Z
|
# -*- coding: utf-8 -*-
from darshan.report import *
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def plot_access_histogram(self, mod, filter=None, data=None):
"""
Plots a histogram of access sizes for specified module.
:param log: Handle for an opened darshan log.
:param str filter: Name of the module to generate plot for.
:param data: Array/Dictionary for use with custom data.
"""
# TODO: change to self.summary
if 'mod_agg_iohist' in dir(self):
print("Summarizing... iohist", mod)
self.mod_agg_iohist(mod)
else:
print("Can not create summary, mod_agg_iohist aggregator is not registered with the report class.")
# defaults
labels = ['0-100', '101-1K', '1K-10K', '10K-100K', '100K-1M', '1M-4M', '4M-10M', '10M-100M', '100M-1G', '1G+']
read_vals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
write_vals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
posix = self.summary['agg_iohist'][mod]
read_vals = [
posix['READ_0_100'],
posix['READ_100_1K'],
posix['READ_1K_10K'],
posix['READ_10K_100K'],
posix['READ_100K_1M'],
posix['READ_1M_4M'],
posix['READ_4M_10M'],
posix['READ_10M_100M'],
posix['READ_100M_1G'],
posix['READ_1G_PLUS']
]
write_vals = [
posix['WRITE_0_100'],
posix['WRITE_100_1K'],
posix['WRITE_1K_10K'],
posix['WRITE_10K_100K'],
posix['WRITE_100K_1M'],
posix['WRITE_1M_4M'],
posix['WRITE_4M_10M'],
posix['WRITE_10M_100M'],
posix['WRITE_100M_1G'],
posix['WRITE_1G_PLUS']
]
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, read_vals, width, label='Read')
rects2 = ax.bar(x + width/2, write_vals, width, label='Write')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Count')
    ax.set_title('Histogram of Access Sizes: ' + str(mod))
ax.set_xticks(x)
ax.set_xticklabels(labels, rotation=45, ha='right')
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=0)
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
#plt.show()
return plt
def plot_time_summary(self, filter=None, data=None):
"""
TODO: Not implemented.
:param log: Handle for an opened darshan log.
:param str filter: Name of the module to generate plot for.
:param data: Array/Dictionary for use with custom data.
"""
pass
def plot_opcounts(self, filter=None, data=None, return_csv=False):
"""
    Generates a bar chart summary for operation counts.
:param log: Handle for an opened darshan log.
:param str filter: Name of the module to generate plot for.
:param data: Array/Dictionary for use with custom data.
"""
# defaults
labels = ['Read', 'Write', 'Open', 'Stat', 'Seek', 'Mmap', 'Fsync']
posix_vals = [0, 0, 0, 0, 0, 0, 0]
mpiind_vals = [0, 0, 0, 0, 0, 0, 0]
mpicol_vals = [0, 0, 0, 0, 0, 0, 0]
stdio_vals = [0, 0, 0, 0, 0, 0, 0]
# TODO: change to self.summary
if 'agg_ioops' in dir(self):
print("Summarizing... agg_ioops")
self.agg_ioops()
else:
print("Can not create summary, agg_ioops aggregator is not registered with the report clase.")
mods = self.summary['agg_ioops']
# Gather POSIX
if 'POSIX' in mods:
#posix_record = backend.log_get_posix_record(log)
#posix = dict(zip(backend.counter_names("POSIX"), posix_record['counters']))
posix = mods['POSIX']
posix_vals = [
posix['POSIX_READS'],
posix['POSIX_WRITES'],
posix['POSIX_OPENS'],
posix['POSIX_STATS'],
posix['POSIX_SEEKS'],
0, # faulty? posix['POSIX_MMAPS'],
posix['POSIX_FSYNCS'] + posix['POSIX_FDSYNCS']
]
# Gather MPIIO
if 'MPI-IO' in mods:
#mpiio_record = backend.log_get_mpiio_record(log)
#mpiio = dict(zip(backend.counter_names("mpiio"), mpiio_record['counters']))
mpiio = mods['MPI-IO']
mpiind_vals = [
mpiio['MPIIO_INDEP_READS'],
mpiio['MPIIO_INDEP_WRITES'],
mpiio['MPIIO_INDEP_OPENS'],
0, # stat
0, # seek
0, # mmap
0, # sync
]
mpicol_vals = [
mpiio['MPIIO_COLL_READS'],
mpiio['MPIIO_COLL_WRITES'],
mpiio['MPIIO_COLL_OPENS'],
0, # stat
0, # seek
0, # mmap
mpiio['MPIIO_SYNCS']
]
# Gather Stdio
if 'STDIO' in mods:
#stdio_record = backend.log_get_stdio_record(log)
#stdio = dict(zip(backend.counter_names("STDIO"), stdio_record['counters']))
stdio = mods['STDIO']
stdio_vals = [
stdio['STDIO_READS'],
stdio['STDIO_WRITES'],
stdio['STDIO_OPENS'],
0, # stat
stdio['STDIO_SEEKS'],
0, # mmap
stdio['STDIO_FLUSHES']
]
def as_csv():
text = ""
text += ','.join(labels) + ',Layer' + "\n"
text += ','.join(str(x) for x in posix_vals) + ',POSIX' + "\n"
text += ','.join(str(x) for x in mpiind_vals) + ',MPIIND' + "\n"
text += ','.join(str(x) for x in mpicol_vals) + ',MPICOL' + "\n"
text += ','.join(str(x) for x in stdio_vals) + ',STDIO' + "\n"
return text
print(as_csv())
x = np.arange(len(labels)) # the label locations
width = 0.15 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2 - width, posix_vals, width, label='POSIX')
rects2 = ax.bar(x - width/2, mpiind_vals, width, label='MPI-IO Indep.')
rects3 = ax.bar(x + width/2, mpicol_vals, width, label='MPI-IO Coll.')
rects4 = ax.bar(x + width/2 + width, stdio_vals, width, label='STDIO')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Count')
ax.set_title('I/O Operation Counts')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate(
'{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 4 + rect.get_width(), height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom', rotation=45
)
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
autolabel(rects4)
fig.tight_layout()
#plt.show()
if return_csv:
return plt, as_csv()
else:
return plt
def plot_timeline(self, filter=None, data=None):
"""
Plots a timeline of opened files.
:param log: Handle for an opened darshan log.
:param str filter: Name of the module to generate plot for.
:param data: Array/Dictionary for use with custom data.
"""
fig, ax = plt.subplots()
ax.broken_barh([(110, 30), (150, 10)], (10, 9), facecolors='tab:blue')
ax.broken_barh([(10, 50), (100, 20), (130, 10)], (20, 9),
facecolors=('tab:orange', 'tab:green', 'tab:red'))
ax.set_ylim(5, 35)
ax.set_xlim(0, 200)
ax.set_xlabel('seconds since start')
ax.set_yticks([15, 25])
ax.set_yticklabels(['Rank 0', 'Rank 1'])
ax.set_title('TODO: This is only a placeholder.')
ax.grid(True)
plt.show()
| 27.560811
| 114
| 0.566683
|
3bfdcf36684e73f462c89be06bd31b9439608de8
| 3,946
|
py
|
Python
|
dateparser/data/date_translation_data/br.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 1,804
|
2015-01-01T23:01:54.000Z
|
2022-03-30T18:36:16.000Z
|
dateparser/data/date_translation_data/br.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 948
|
2015-01-04T22:18:39.000Z
|
2022-03-31T16:29:41.000Z
|
dateparser/data/date_translation_data/br.py
|
bsekiewicz/dateparser
|
babc677372376d933de5542010af3097c26e49e9
|
[
"BSD-3-Clause"
] | 463
|
2015-01-10T08:53:39.000Z
|
2022-03-18T12:45:49.000Z
|
info = {
"name": "br",
"date_order": "YMD",
"january": [
"gen",
"genver"
],
"february": [
"c'hwe",
"c'hwevrer"
],
"march": [
"meur",
"meurzh"
],
"april": [
"ebr",
"ebrel"
],
"may": [
"mae"
],
"june": [
"mezh",
"mezheven"
],
"july": [
"goue",
"gouere"
],
"august": [
"eost"
],
"september": [
"gwen",
"gwengolo"
],
"october": [
"here"
],
"november": [
"du"
],
"december": [
"ker",
"kerzu",
"kzu"
],
"monday": [
"lun"
],
"tuesday": [
"meu",
"meurzh"
],
"wednesday": [
"mer",
"merc'her"
],
"thursday": [
"yaou"
],
"friday": [
"gwe",
"gwener"
],
"saturday": [
"sad",
"sadorn"
],
"sunday": [
"sul"
],
"am": [
"am"
],
"pm": [
"gm"
],
"year": [
"bl",
"bloaz"
],
"month": [
"miz"
],
"week": [
"sizhun"
],
"day": [
"d",
"deiz"
],
"hour": [
"e",
"eur"
],
"minute": [
"min",
"munut"
],
"second": [
"eilenn",
"s"
],
"relative-type": {
"0 day ago": [
"hiziv"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"ar miz-mañ"
],
"0 second ago": [
"brem",
"bremañ"
],
"0 week ago": [
"ar sizhun-mañ"
],
"0 year ago": [
"hevlene"
],
"1 day ago": [
"dec'h"
],
"1 month ago": [
"ar miz diaraok"
],
"1 week ago": [
"ar sizhun diaraok"
],
"1 year ago": [
"warlene"
],
"in 1 day": [
"warc'hoazh"
],
"in 1 month": [
"ar miz a zeu"
],
"in 1 week": [
"ar sizhun a zeu"
],
"in 1 year": [
"ar bl a zeu",
"ar bloaz a zeu"
]
},
"relative-type-regex": {
"\\1 day ago": [
"(\\d+) d zo",
"(\\d+) deiz zo"
],
"\\1 hour ago": [
"(\\d+) e zo",
"(\\d+) eur zo"
],
"\\1 minute ago": [
"(\\d+) min zo",
"(\\d+) munut zo"
],
"\\1 month ago": [
"(\\d+) miz zo"
],
"\\1 second ago": [
"(\\d+) eilenn zo",
"(\\d+) s zo"
],
"\\1 week ago": [
"(\\d+) sizhun zo"
],
"\\1 year ago": [
"(\\d+) bl zo",
"(\\d+) bloaz zo",
"(\\d+) vloaz zo"
],
"in \\1 day": [
"a-benn (\\d+) d",
"a-benn (\\d+) deiz"
],
"in \\1 hour": [
"a-benn (\\d+) e",
"a-benn (\\d+) eur"
],
"in \\1 minute": [
"a-benn (\\d+) min",
"a-benn (\\d+) munut"
],
"in \\1 month": [
"a-benn (\\d+) miz"
],
"in \\1 second": [
"a-benn (\\d+) eilenn",
"a-benn (\\d+) s"
],
"in \\1 week": [
"a-benn (\\d+) sizhun"
],
"in \\1 year": [
"a-benn (\\d+) bl",
"a-benn (\\d+) bloaz",
"a-benn (\\d+) vloaz"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
| 17.38326
| 35
| 0.254688
|
abe1258b64a50deb6d6b262e4e9ef7b59caa6d84
| 448
|
py
|
Python
|
.base_lessons/Loops/Break keyword/break_keyword.py
|
dannymeijer/level-up-with-python
|
1bd1169aafd0fdc124984c30edc7f0153626cf06
|
[
"MIT"
] | null | null | null |
.base_lessons/Loops/Break keyword/break_keyword.py
|
dannymeijer/level-up-with-python
|
1bd1169aafd0fdc124984c30edc7f0153626cf06
|
[
"MIT"
] | null | null | null |
.base_lessons/Loops/Break keyword/break_keyword.py
|
dannymeijer/level-up-with-python
|
1bd1169aafd0fdc124984c30edc7f0153626cf06
|
[
"MIT"
] | null | null | null |
count = 0
while True: # this condition cannot possibly be false
print(count)
count += 1
if count >= 5:
break # exit loop if count >= 5
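# a further example (minimal sketch): break also works inside a "for" loop,
# stopping at the first element that matches a condition
for number in [1, 3, 5, 8, 9]:
    if number % 2 == 0:
        print("first even number is", number)
        break  # exit the for loop as soon as an even number is found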
zoo = ["lion", "tiger", "elephant"]
while True: # this condition cannot possibly be false
animal = zoo.pop() # extract one element from the list end
print(animal)
    if animal == "elephant":  # exit loop if animal is "elephant"
break # exit loop
| 28
| 77
| 0.5625
|
332642d9e2c3df865d3b7adc4353eb4716cd068c
| 26,913
|
py
|
Python
|
tensorflow/python/training/saver_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | 1
|
2016-07-03T20:16:31.000Z
|
2016-07-03T20:16:31.000Z
|
tensorflow/python/training/saver_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/training/saver_test.py
|
matsuyama/tensorflow
|
a27d844e05447e65aa279ae5269a2d75590f46f6
|
[
"Apache-2.0"
] | 1
|
2021-03-16T21:45:10.000Z
|
2021-03-16T21:45:10.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
import contextlib
import shutil
import tempfile
import tensorflow.python.platform
import tensorflow as tf
import numpy as np
import six
from tensorflow.python.platform import gfile
class SaverTest(tf.test.TestCase):
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "basics")
with self.test_session() as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1})
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session() as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = tf.Variable(np.int64(15), name="v")
save = tf.train.Saver({"v": v}, restore_sequentially=True)
tf.initialize_all_variables().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = tf.Variable(np.int64(-1), name="v")
save = tf.train.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with tf.Graph().as_default():
v0 = tf.Variable([10.0], name="v0")
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# By default the name used for "v2" will be "v1" and raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
tf.train.Saver([v0, v1, v2])
# The names are different and will work.
tf.train.Saver({"vee1": v1, "other": [v2]})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=tf.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver([v0, v1])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=tf.Graph()) as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
save = tf.train.Saver([v0, v1])
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=tf.Graph()) as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver([v0_2, v1_2])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.test_session() as sess:
var = tf.Variable(var_value, name=var_name)
save = tf.train.Saver({var_name: var})
var.initializer.run()
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session() as sess:
var = tf.Variable(other_value, name=var_name)
save = tf.train.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, var.eval())
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testGPU(self):
if not tf.test.IsBuiltWithCuda():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with tf.Session("", graph=tf.Graph()) as sess:
with sess.graph.device("/gpu:0"):
v0_1 = tf.Variable(123.45)
save = tf.train.Saver({"v0": v0_1})
tf.initialize_all_variables().run()
save.save(sess, save_path)
with tf.Session("", graph=tf.Graph()) as sess:
with sess.graph.device("/gpu:0"):
v0_2 = tf.Variable(543.21)
save = tf.train.Saver({"v0": v0_2})
tf.initialize_all_variables().run()
self.assertAllClose(543.21, v0_2.eval())
save.restore(sess, save_path)
self.assertAllClose(123.45, v0_2.eval())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(1.0)
twos = tf.Variable([2.0, 2.0, 2.0])
init = tf.initialize_all_variables()
save = tf.train.Saver(tf.all_variables())
init.run()
save.save(sess, save_path)
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(0.0)
twos = tf.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = tf.train.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testSaveWithGlobalStep(self):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session() as sess:
var = tf.Variable(1.0, name="var0")
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
if use_tensor:
global_step = tf.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
class SaveRestoreShardedTest(tf.test.TestCase):
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded")
# Build a graph with 2 parameter nodes on different devices.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(10, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(20, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
tf.initialize_all_variables().run()
val = save.save(sess, save_path)
self.assertEqual(save_path + "-?????-of-00002", val)
# Restore a different "v0" from shard 0 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
# Restore a different "v1" from shard 1 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = tf.Variable(222)
save = tf.train.Saver({"v1": v1}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(222, v1.eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
# Now try a restore with the sharded filename.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
save_path = os.path.join(self.get_temp_dir(), "sharded")
save.restore(sess, save_path + "-?????-of-?????")
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(
tf.train.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded-?????-of-00002"))
def testSaverDef(self):
with self.test_session():
v0 = tf.Variable(123, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
class MaxToKeepTest(tf.test.TestCase):
def testNonSharded(self):
save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
with self.test_session() as sess:
v = tf.Variable(10.0, name="v")
save = tf.train.Saver({"v": v}, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
# Create a second helper, identical to the first.
save2 = tf.train.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = tf.train.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s1))
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s1))
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s1))
def testSharded(self):
save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertEquals(2, len(gfile.Glob(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertEquals(2, len(gfile.Glob(s1)))
self.assertEquals(2, len(gfile.Glob(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEquals(0, len(gfile.Glob(s1)))
self.assertEquals(2, len(gfile.Glob(s2)))
self.assertEquals(2, len(gfile.Glob(s3)))
class KeepCheckpointEveryNHoursTest(tf.test.TestCase):
def testNonSharded(self):
save_dir = os.path.join(self.get_temp_dir(),
"keep_checkpoint_every_n_hours")
try:
gfile.DeleteRecursively(save_dir)
except OSError:
pass # Ignore
gfile.MakeDirs(save_dir)
with self.test_session() as sess:
v = tf.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
tf.initialize_all_variables().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
save = tf.train.Saver({"v": v}, max_to_keep=2,
keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
      # Wait until 0.7 seconds have elapsed so s1 will be old enough to keep.
time.sleep((time.time() + 0.7) - start_time)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
# We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(),
# would normally delete s1, because max_to_keep is 2. However, s1 is
# older than 0.7s so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
# s1 should still be here, we are Not checking now to reduce time
# variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
# call to Save(), will delete s2, because max_to_keep is 2, and because
# we already kept the old s1. s2 is very close in time to s1 so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(gfile.Exists(s1))
self.assertFalse(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s4))
class SaveRestoreWithVariableNameMap(tf.test.TestCase):
def testNonReshape(self):
save_path = os.path.join(self.get_temp_dir(), "basics")
with self.test_session() as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = tf.train.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
with self.assertRaisesOpError("uninitialized value v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value v1"):
sess.run(v1)
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="restore_prefix/v0")
v1 = tf.Variable(-1.0, name="restore_prefix/v1")
with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
class LatestCheckpointWithRelativePaths(tf.test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = tf.Variable(0.0)
inc = v0.assign_add(1.0)
save = tf.train.Saver({"v0": v0})
# Record a short training history.
tf.initialize_all_variables().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = tf.Variable(-1.0)
# Create a new saver.
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables().run()
# Get the most recent checkpoint name from the training history file.
name = tf.train.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEquals(v0.eval(), 2.0)
class CheckpointStateTest(tf.test.TestCase):
def _TestDir(self, test_name):
test_dir = os.path.join(self.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
gfile.MakeDirs(test_dir)
return test_dir
def testAbsPath(self):
save_dir = self._TestDir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = self._TestDir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = tf.train.generate_checkpoint_state_proto(
save_dir,
abs_path,
all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = self._TestDir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = "train/model-2"
tf.train.update_checkpoint_state(
train_dir,
rel_path,
all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = tf.train.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
if __name__ == "__main__":
tf.test.main()
| 37.431154
| 80
| 0.651953
|
f37665c1f1472e4e0ad8debc4a113347b1a021d0
| 580
|
py
|
Python
|
postgresql/python/main.py
|
srgnk/aiven-examples
|
d4c959abb45cf5d3e80f4a3db7ee4e34266a68a1
|
[
"Apache-2.0"
] | 14
|
2020-08-01T06:15:12.000Z
|
2022-03-07T15:45:10.000Z
|
postgresql/python/main.py
|
srgnk/aiven-examples
|
d4c959abb45cf5d3e80f4a3db7ee4e34266a68a1
|
[
"Apache-2.0"
] | 5
|
2022-01-11T20:02:47.000Z
|
2022-01-19T13:19:39.000Z
|
postgresql/python/main.py
|
srgnk/aiven-examples
|
d4c959abb45cf5d3e80f4a3db7ee4e34266a68a1
|
[
"Apache-2.0"
] | 10
|
2020-06-16T11:31:28.000Z
|
2022-02-20T20:31:35.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018 Aiven, Helsinki, Finland. https://aiven.io/
import argparse
import psycopg2
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--service-uri', help="Postgres Service URI (obtained from Aiven console)", required=True)
args = parser.parse_args()
db_conn = psycopg2.connect(args.service_uri)
cursor = db_conn.cursor()
cursor.execute("SELECT current_database()")
result = cursor.fetchone()
print("Successfully connected to: {}".format(result[0]))
if __name__ == "__main__":
main()
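# Invocation sketch (the URI below is a placeholder -- use the Service URI from the Aiven console):
#   python3 main.py --service-uri "postgres://user:password@host:port/defaultdb?sslmode=require"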
| 26.363636
| 114
| 0.698276
|
8e908cf508021b496a1e25a6aaa7f9abe8617093
| 924
|
py
|
Python
|
tests/data/expected/main/main_nested_directory/person.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 891
|
2019-07-23T04:23:32.000Z
|
2022-03-31T13:36:33.000Z
|
tests/data/expected/main/main_nested_directory/person.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 663
|
2019-07-23T09:50:26.000Z
|
2022-03-29T01:56:55.000Z
|
tests/data/expected/main/main_nested_directory/person.py
|
adaamz/datamodel-code-generator
|
3b34573f35f8d420e4668a85047c757fd1da7754
|
[
"MIT"
] | 108
|
2019-07-23T08:50:37.000Z
|
2022-03-09T10:50:22.000Z
|
# generated by datamodel-codegen:
# filename: person.json
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from typing import Any, List, Optional, Union
from pydantic import BaseModel, Field, conint
from .definitions import food, friends, pet
from .definitions.drink import coffee, tea
from .definitions.machine import robot
class Person(BaseModel):
first_name: str = Field(..., description="The person's first name.")
last_name: str = Field(..., description="The person's last name.")
age: Optional[conint(ge=0)] = Field(None, description='Age in years.')
pets: Optional[List[pet.Pet]] = None
friends: Optional[friends.Friends] = None
robot: Optional[robot.Robot] = None
comment: Optional[Any] = None
drink: Optional[List[Union[coffee.Coffee, tea.Tea]]] = None
food: Optional[List[Union[food.Noodle, food.Soup]]] = None
Person.update_forward_refs()
| 31.862069
| 74
| 0.719697
|
63ee21c01052b1b3d5dbbeead1669b5d77aa4ffd
| 11,779
|
py
|
Python
|
dspn/data.py
|
zzirnheld/dspn
|
e0c248d9e55821847841cf0c67e97225277a6e75
|
[
"MIT"
] | null | null | null |
dspn/data.py
|
zzirnheld/dspn
|
e0c248d9e55821847841cf0c67e97225277a6e75
|
[
"MIT"
] | null | null | null |
dspn/data.py
|
zzirnheld/dspn
|
e0c248d9e55821847841cf0c67e97225277a6e75
|
[
"MIT"
] | null | null | null |
import os
import math
import random
import json
import torch
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as T
import h5py
import numpy as np
import pandas
def get_loader(dataset, batch_size, num_workers=8, shuffle=True):
dl = torch.utils.data.DataLoader(
dataset,
shuffle=shuffle,
batch_size=batch_size,
pin_memory=True,
num_workers=num_workers,
drop_last=True,
)
    print(f'dataset len: {len(dataset)}, dataloader len: {len(dl)}')
return dl
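# Usage sketch (batch size is an arbitrary example; any of the Dataset classes below can be passed in):
#   train_loader = get_loader(MNISTSet(train=True), batch_size=32)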
class LHCSet(torch.utils.data.Dataset):
def __init__(self, train=True):
data_path = '/home/zzirnhel/Desktop/events_LHCO2020_BlackBox1.h5'
label_path = '/home/zzirnhel/Desktop/events_LHCO2020_BlackBox1.masterkey'
self.train = train
self.data = self.cache(data_path, label_path)
def cache(self, data_path, label_path):
df_interval = 10000
num_to_load = 10000 if self.train else 200
label_file = open(label_path, 'r')
labels = label_file.readlines()
labels = [1 if l == '1.0\n' else 0 for l in labels]
label_file.close()
desired_label = 0 if self.train else 1
desired_labels = []
for i, l in enumerate(labels):
if l == desired_label:
desired_labels.append(i)
if len(desired_labels) > num_to_load:
break
if len(desired_labels) > num_to_load:
desired_labels = desired_labels[:num_to_load]
data = []
rowmax = 900
currmax = 0
#iterate across dataframe
for i in desired_labels:
while currmax - 1 < i:
print(f'loading pandas df from {currmax} to {currmax + df_interval}')
df = pandas.read_hdf(data_path, start=currmax, stop=currmax + df_interval)
print('loaded pandas df')
currmax += df_interval
index = i + df_interval - currmax
row = df.iloc[index]
#check if the number of nonzero points is greater than a threshold. if not, throw it out.
for index in range(len(row) - 1, rowmax - 1, -1):
if row[index] != 0:
#print('nonzero at', index)
break
if index > rowmax:
continue
point_set = torch.FloatTensor(row[:rowmax]).view((3, rowmax // 3))
#point_set = torch.FloatTensor(row[:rowmax]).unsqueeze(0)
#print('point set shape', point_set.shape)
label = desired_label
_, cardinality = point_set.shape
data.append((point_set, label, cardinality))
print('finished with', len(data), 'sets')
return data
#needs to override __getitem__
def __getitem__(self, item):
s, l, c = self.data[item]
mask = torch.ones(c)
return l, s, mask
def __len__(self):
return len(self.data)
class MNISTSet(torch.utils.data.Dataset):
def __init__(self, threshold=0.0, train=True, root="mnist", full=False):
self.train = train
self.root = root
self.threshold = threshold
self.full = full
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
torchvision.datasets.MNIST.resources = [
('https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz', 'f68b3c2dcbeaaa9fbdd348bbdeb94873'),
('https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz', 'd53e105ee54ea40749a09fcbcd1e9432'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz', '9fb629c4189551a2d022fa330f9573f3'),
('https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz', 'ec29112dd5afa0611ce80d1b7f02629c')
]
mnist = torchvision.datasets.MNIST(
train=train, transform=transform, download=True, root=root
)
self.data = self.cache(mnist)
self.max = 342
def cache(self, dataset):
cache_path = os.path.join(self.root, f"mnist_{self.train}_{self.threshold}.pth")
if os.path.exists(cache_path):
return torch.load(cache_path)
print("Processing dataset...")
data = []
for datapoint in dataset:
img, label = datapoint
point_set, cardinality = self.image_to_set(img)
data.append((point_set, label, cardinality))
'''
print('set', point_set)
print(label)
print(cardinality)
raise Exception()
'''
torch.save(data, cache_path)
print("Done!", len(data), "datapoints.")
return data
def image_to_set(self, img):
idx = (img.squeeze(0) > self.threshold).nonzero().transpose(0, 1)
cardinality = idx.size(1)
return idx, cardinality
def __getitem__(self, item):
s, l, c = self.data[item]
# make sure set is shuffled
s = s[:, torch.randperm(c)]
# pad to fixed size
padding_size = self.max - s.size(1)
s = torch.cat([s.float(), torch.zeros(2, padding_size)], dim=1)
# put in range [0, 1]
s = s / 27
        # mask of which elements are valid, not padding
mask = torch.zeros(self.max)
mask[:c].fill_(1)
return l, s, mask
def __len__(self):
if self.train or self.full:
return len(self.data)
else:
return len(self.data) // 10
CLASSES = {
"material": ["rubber", "metal"],
"color": ["cyan", "blue", "yellow", "purple", "red", "green", "gray", "brown"],
"shape": ["sphere", "cube", "cylinder"],
"size": ["large", "small"],
}
class CLEVR(torch.utils.data.Dataset):
def __init__(self, base_path, split, box=False, full=False):
assert split in {
"train",
"val",
"test",
} # note: test isn't very useful since it doesn't have ground-truth scene information
self.base_path = base_path
self.split = split
self.max_objects = 10
self.box = box # True if clevr-box version, False if clevr-state version
self.full = full # Use full validation set?
with self.img_db() as db:
ids = db["image_ids"]
self.image_id_to_index = {id: i for i, id in enumerate(ids)}
self.image_db = None
with open(self.scenes_path) as fd:
scenes = json.load(fd)["scenes"]
self.img_ids, self.scenes = self.prepare_scenes(scenes)
def object_to_fv(self, obj):
coords = [p / 3 for p in obj["3d_coords"]]
one_hot = lambda key: [obj[key] == x for x in CLASSES[key]]
material = one_hot("material")
color = one_hot("color")
shape = one_hot("shape")
size = one_hot("size")
assert sum(material) == 1
assert sum(color) == 1
assert sum(shape) == 1
assert sum(size) == 1
# concatenate all the classes
return coords + material + color + shape + size
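    # Worked example (sketch, hypothetical object): "3d_coords" [3, 0, 0.6] -> [1.0, 0.0, 0.2];
    # "metal" -> [False, True] over (rubber, metal); "red" -> one-hot at index 4 of the 8 colors;
    # "cube" -> [False, True, False]; "small" -> [False, True]. The booleans become 0./1. when
    # prepare_scenes wraps the list in torch.FloatTensor.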
def prepare_scenes(self, scenes_json):
img_ids = []
scenes = []
for scene in scenes_json:
img_idx = scene["image_index"]
# different objects depending on bbox version or attribute version of CLEVR sets
if self.box:
objects = self.extract_bounding_boxes(scene)
objects = torch.FloatTensor(objects)
else:
objects = [self.object_to_fv(obj) for obj in scene["objects"]]
objects = torch.FloatTensor(objects).transpose(0, 1)
num_objects = objects.size(1)
# pad with 0s
if num_objects < self.max_objects:
objects = torch.cat(
[
objects,
torch.zeros(objects.size(0), self.max_objects - num_objects),
],
dim=1,
)
# fill in masks
mask = torch.zeros(self.max_objects)
mask[:num_objects] = 1
img_ids.append(img_idx)
scenes.append((objects, mask))
return img_ids, scenes
def extract_bounding_boxes(self, scene):
"""
Code used for 'Object-based Reasoning in VQA' to generate bboxes
https://arxiv.org/abs/1801.09718
https://github.com/larchen/clevr-vqa/blob/master/bounding_box.py#L51-L107
"""
objs = scene["objects"]
rotation = scene["directions"]["right"]
num_boxes = len(objs)
boxes = np.zeros((1, num_boxes, 4))
xmin = []
ymin = []
xmax = []
ymax = []
classes = []
classes_text = []
for i, obj in enumerate(objs):
[x, y, z] = obj["pixel_coords"]
[x1, y1, z1] = obj["3d_coords"]
cos_theta, sin_theta, _ = rotation
x1 = x1 * cos_theta + y1 * sin_theta
y1 = x1 * -sin_theta + y1 * cos_theta
height_d = 6.9 * z1 * (15 - y1) / 2.0
height_u = height_d
width_l = height_d
width_r = height_d
if obj["shape"] == "cylinder":
d = 9.4 + y1
h = 6.4
s = z1
height_u *= (s * (h / d + 1)) / ((s * (h / d + 1)) - (s * (h - s) / d))
height_d = height_u * (h - s + d) / (h + s + d)
width_l *= 11 / (10 + y1)
width_r = width_l
if obj["shape"] == "cube":
height_u *= 1.3 * 10 / (10 + y1)
height_d = height_u
width_l = height_u
width_r = height_u
obj_name = (
obj["size"]
+ " "
+ obj["color"]
+ " "
+ obj["material"]
+ " "
+ obj["shape"]
)
ymin.append((y - height_d) / 320.0)
ymax.append((y + height_u) / 320.0)
xmin.append((x - width_l) / 480.0)
xmax.append((x + width_r) / 480.0)
return xmin, ymin, xmax, ymax
@property
def images_folder(self):
return os.path.join(self.base_path, "images", self.split)
@property
def scenes_path(self):
if self.split == "test":
raise ValueError("Scenes are not available for test")
return os.path.join(
self.base_path, "scenes", "CLEVR_{}_scenes.json".format(self.split)
)
def img_db(self):
path = os.path.join(self.base_path, "{}-images.h5".format(self.split))
return h5py.File(path, "r")
def load_image(self, image_id):
if self.image_db is None:
self.image_db = self.img_db()
index = self.image_id_to_index[image_id]
image = self.image_db["images"][index]
return image
def __getitem__(self, item):
image_id = self.img_ids[item]
image = self.load_image(image_id)
objects, size = self.scenes[item]
return image, objects, size
def __len__(self):
if self.split == "train" or self.full:
return len(self.scenes)
else:
return len(self.scenes) // 10
if __name__ == "__main__":
import matplotlib.pyplot as plt
dataset = Circles()
for i in range(2):
points, centre, n_points = dataset[i]
x, y = points[0], points[1]
plt.scatter(x.numpy(), y.numpy())
plt.scatter(centre[0], centre[1])
plt.axes().set_aspect("equal", "datalim")
plt.show()
| 32.810585
| 125
| 0.548688
|
2199aa0fcf31dacd952e97094d7bff9de93a56d7
| 773
|
py
|
Python
|
Mundo 03/moeda2.py
|
AlanyLourenco/Python
|
3c61f246621abf60734ea4c40ea01a2ab079b984
|
[
"MIT"
] | null | null | null |
Mundo 03/moeda2.py
|
AlanyLourenco/Python
|
3c61f246621abf60734ea4c40ea01a2ab079b984
|
[
"MIT"
] | null | null | null |
Mundo 03/moeda2.py
|
AlanyLourenco/Python
|
3c61f246621abf60734ea4c40ea01a2ab079b984
|
[
"MIT"
] | null | null | null |
def metade(n=0,formato=False):
m=n/2
return m if formato is False else moeda(m)
def dobro(n=0,formato=False):
m=n*2
return m if formato is False else moeda(m)
def mais(n=0,d=0,formato=False):
m=((d/100)*n)+n
return m if formato is False else moeda(m)
def desc(n=0,d=0,formato=False):
m=n-((d/100)*n)
return m if formato is False else moeda(m)
def moeda(preço=0,moed='R$',formato=False):
return f'{moed}{preço:>.2f}'.replace('.',',')
def resumo(n=0,up=10,do=10):
print('='*30)
print(f'O dobro de {moeda(n)} é: \t{dobro(n,True)} ')
print(f'A metade do {moeda(n)} é: \t{metade(n,True)} ')
print(f'Aumento de {up}% é: \t{mais(n,up,True)} ')
print(f'Desconto de {do}% é: \t{desc(n,do,True)} ')
print('='*30)
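# minimal usage sketch (values are only an example; guarded so importing the module has no side effects)
if __name__ == '__main__':
    resumo(150.0, up=20, do=10)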
| 24.15625
| 59
| 0.59379
|
8d1321520065543ffe528c2a8dbd4be0eada74a8
| 5,291
|
py
|
Python
|
pytorch_translate/test/test_preprocess.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | 1
|
2020-07-24T10:59:17.000Z
|
2020-07-24T10:59:17.000Z
|
pytorch_translate/test/test_preprocess.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | null | null | null |
pytorch_translate/test/test_preprocess.py
|
dzhulgakov/translate
|
018d3eed8d93ff32e86c912e68045c7a3f4ed0b7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import os
import unittest
from pytorch_translate import constants, preprocess
from pytorch_translate.data.dictionary import Dictionary
from pytorch_translate.test import utils as test_utils
class TestPreprocess(unittest.TestCase):
def setUp(self):
self.source_text_file, self.target_text_file = (
test_utils.create_test_text_files()
)
def get_common_data_args_namespace(self):
args = argparse.Namespace()
args.train_source_text_file = self.source_text_file
args.train_target_text_file = self.target_text_file
args.eval_source_text_file = self.source_text_file
args.eval_target_text_file = self.target_text_file
# The idea is to have these filled in during preprocessing
args.train_source_binary_path = ""
args.train_target_binary_path = ""
args.eval_source_binary_path = ""
args.eval_target_binary_path = ""
# Required data preprocessing args
args.append_eos_to_source = False
args.reverse_source = True
args.fairseq_data_format = False
args.multiling_source_lang = None # Indicates no multilingual data
args.penalized_target_tokens_file = ""
args.source_vocab_file = test_utils.make_temp_file()
args.source_max_vocab_size = None
args.target_vocab_file = test_utils.make_temp_file()
args.target_max_vocab_size = None
args.char_source_vocab_file = ""
args.char_target_vocab_file = ""
args.task = "pytorch_translate"
return args
def test_build_vocabs_char(self):
args = self.get_common_data_args_namespace()
args.arch = "char_aware_hybrid"
args.char_source_max_vocab_size = 30
args.char_target_max_vocab_size = 30
args.char_source_vocab_file = test_utils.make_temp_file()
args.char_target_vocab_file = test_utils.make_temp_file()
dictionaries = preprocess.build_vocabs(args, Dictionary)
assert len(dictionaries["char_source_dict"]) > 0
assert len(dictionaries["char_target_dict"]) > 0
def test_build_vocabs_no_char(self):
args = self.get_common_data_args_namespace()
args.arch = "rnn"
dictionaries = preprocess.build_vocabs(args, Dictionary)
        assert dictionaries["char_source_dict"] is None
        assert dictionaries["char_target_dict"] is None
def test_preprocess(self):
"""
This is just a correctness test to make sure no errors are thrown when
all the required args are passed. Actual parsing code is tested by
test_data.py
"""
args = self.get_common_data_args_namespace()
preprocess.preprocess_corpora(args)
for file_type in (
"train_source_binary_path",
"train_target_binary_path",
"eval_source_binary_path",
"eval_target_binary_path",
):
file = getattr(args, file_type)
assert file and os.path.isfile(file)
assert file.endswith(".npz")
def test_preprocess_with_monolingual(self):
"""
This is just a correctness test to make sure no errors are thrown when
all the required args are passed. Actual parsing code is tested by
test_data.py
"""
args = self.get_common_data_args_namespace()
args.task = constants.SEMI_SUPERVISED_TASK
args.train_mono_source_text_file = self.source_text_file
args.train_mono_target_text_file = self.target_text_file
preprocess.preprocess_corpora(args)
for file_type in (
"train_source_binary_path",
"train_target_binary_path",
"eval_source_binary_path",
"eval_target_binary_path",
"train_mono_source_binary_path",
"train_mono_target_binary_path",
):
file_path = getattr(args, file_type)
assert file_path and os.path.isfile(file_path)
assert file_path.endswith(".npz")
def test_preprocess_with_monolingual_with_tgt_chars(self):
"""
This is just a correctness test to make sure no errors are thrown when
all the required args are passed. Actual parsing code is tested by
test_data.py
"""
args = self.get_common_data_args_namespace()
args.task = constants.SEMI_SUPERVISED_TASK
args.train_mono_source_text_file = self.source_text_file
args.train_mono_target_text_file = self.target_text_file
args.arch = "char_aware_hybrid"
args.char_source_max_vocab_size = 30
args.char_target_max_vocab_size = 30
args.char_source_vocab_file = test_utils.make_temp_file()
args.char_target_vocab_file = test_utils.make_temp_file()
preprocess.preprocess_corpora(args)
for file_type in (
"train_source_binary_path",
"train_target_binary_path",
"eval_source_binary_path",
"eval_target_binary_path",
"train_mono_source_binary_path",
"train_mono_target_binary_path",
):
file_path = getattr(args, file_type)
assert file_path and os.path.isfile(file_path)
assert file_path.endswith(".npz")
| 38.620438
| 78
| 0.67681
|
4f4adefb804a6f805214667daff0f9052d53bb1c
| 1,481
|
py
|
Python
|
src/client_libraries/python/dynamics/customerinsights/api/models/role_assignment_py3.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | null | null | null |
src/client_libraries/python/dynamics/customerinsights/api/models/role_assignment_py3.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | null | null | null |
src/client_libraries/python/dynamics/customerinsights/api/models/role_assignment_py3.py
|
microsoft/Dynamics365-CustomerInsights-Client-Libraries
|
e00632f7972717b03e0fb1a9e2667e8f9444a0fe
|
[
"MIT"
] | 7
|
2021-02-11T19:48:57.000Z
|
2021-12-17T08:00:15.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RoleAssignment(Model):
"""Represents a role assignment Metadata.
:param principal_id: Gets the Id of the principal.
:type principal_id: str
:param principal_type: Possible values include: 'user', 'group', 'app'
:type principal_type: str or ~dynamics.customerinsights.api.models.enum
:param roles: Gets the roles the principal belongs to.
:type roles: list[str]
:param instance_id: Customer Insights instance id associated with this
object.
:type instance_id: str
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'principal_type': {'key': 'principalType', 'type': 'str'},
'roles': {'key': 'roles', 'type': '[str]'},
'instance_id': {'key': 'instanceId', 'type': 'str'},
}
def __init__(self, *, principal_id: str=None, principal_type=None, roles=None, instance_id: str=None, **kwargs) -> None:
super(RoleAssignment, self).__init__(**kwargs)
self.principal_id = principal_id
self.principal_type = principal_type
self.roles = roles
self.instance_id = instance_id
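# Minimal usage sketch (all values are hypothetical; field meanings follow the class docstring):
#   assignment = RoleAssignment(principal_id='00000000-0000-0000-0000-000000000000',
#                               principal_type='user', roles=['Viewer'],
#                               instance_id='11111111-1111-1111-1111-111111111111')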
| 38.973684
| 124
| 0.60027
|
8b1e83fabd723eda993e35ff51f18fe72ffe4cbf
| 747
|
py
|
Python
|
visual_novel/core/middlewares.py
|
dolamroth/visual_novel
|
c67379df395561b3bca7e91e2db6547d2e943330
|
[
"MIT"
] | 9
|
2018-03-11T12:53:12.000Z
|
2020-12-19T14:21:53.000Z
|
visual_novel/core/middlewares.py
|
dolamroth/visual_novel
|
c67379df395561b3bca7e91e2db6547d2e943330
|
[
"MIT"
] | 6
|
2020-02-11T22:19:22.000Z
|
2022-03-11T23:20:10.000Z
|
visual_novel/core/middlewares.py
|
dolamroth/visual_novel
|
c67379df395561b3bca7e91e2db6547d2e943330
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponseRedirect
from django.core.exceptions import PermissionDenied
class IsAuthenticatedMiddleware(object):
def process_request(self, request):
user = request.user
if not bool(user.is_authenticated) \
or not hasattr(user, 'profile') \
or not bool(user.profile.email_confirmed):
return HttpResponseRedirect("/login?next=" + request.path)
else:
return None
class HasPermissionToEditProfile(object):
def process_view(self, request, view_func, view_args, view_kwargs):
username = view_kwargs.get('username', None)
if not (username == request.user.username):
raise PermissionDenied
return None
| 33.954545
| 71
| 0.672021
|
2957dd1a14ff5bdbe83a5f095bf0f1a79e1694c3
| 1,878
|
py
|
Python
|
qlcp21a/Q2_phot.py
|
RapidLzj/QLCP21A
|
52fb295ac72f79f0e5aa3310ed5849960659bf08
|
[
"MIT"
] | null | null | null |
qlcp21a/Q2_phot.py
|
RapidLzj/QLCP21A
|
52fb295ac72f79f0e5aa3310ed5849960659bf08
|
[
"MIT"
] | null | null | null |
qlcp21a/Q2_phot.py
|
RapidLzj/QLCP21A
|
52fb295ac72f79f0e5aa3310ed5849960659bf08
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
201901, Dr. Jie Zheng, Beijing & Xinglong, NAOC
202101-? Dr. Jie Zheng & Dr./Prof. Linqiao Jiang
Light_Curve_Pipeline
v3 (2021A) Upgrade from former version, remove unused code
"""
import numpy as np
import astropy.io.fits as fits
import os
from .JZ_utils import loadlist, datestr, logfile, conf, meanclip
#from .QZ_plotting import plot_im_star, plot_magerr
from .J2_se import _se_
def phot(ini_file, file_lst,
red_path="",
overwrite=False,
log=None,
extra_config=None):
"""
photometry
:param ini_file:
:param file_lst: list file of scientific fits files
:param red_path: path of out files
:param overwrite: over write result file or not if exists
:param log:
    :param extra_config: 190619 extra configuration options that temporarily override values from the ini file
:return:
"""
ini = conf(ini_file, extra_config)
lf = logfile(log, level=ini["log_level"])
if not os.path.isfile(file_lst):
lf.show("SKIP -- FILE NOT EXISTS: " + file_lst, logfile.ERROR)
return
# load list
scif = loadlist(file_lst, suffix=ini["bf_mid"]+".fits", base_path=red_path,
separate_folder = ini["separate_folder"])
catf = loadlist(file_lst, suffix=ini["cat_mid"]+".fits", base_path=red_path,
separate_folder=ini["separate_folder"])
txtf = loadlist(file_lst, suffix=ini["cat_mid"]+".txt", base_path=red_path,
separate_folder=ini["separate_folder"])
sef = loadlist(file_lst, suffix=ini["se_mid"]+".fits", base_path=red_path,
separate_folder=ini["separate_folder"])
basename = [os.path.basename(f) for f in catf]
if not overwrite:
skiptag = [os.path.isfile(f) for f in catf]
else:
skiptag = [False for f in catf]
_se_(ini, scif, sef, catf, txtf, skiptag, lf)
lf.close()
| 31.3
| 80
| 0.642173
|
c274896071bfcdee50dc83181763e2dde00f6aec
| 2,981
|
py
|
Python
|
timm/models/layers/classifier.py
|
datduong/pytorch-image-models
|
05c9b52ca65b01e57f8cea2b6447882488aba4f6
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/classifier.py
|
datduong/pytorch-image-models
|
05c9b52ca65b01e57f8cea2b6447882488aba4f6
|
[
"Apache-2.0"
] | null | null | null |
timm/models/layers/classifier.py
|
datduong/pytorch-image-models
|
05c9b52ca65b01e57f8cea2b6447882488aba4f6
|
[
"Apache-2.0"
] | null | null | null |
""" Classifier head and layer factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from torch import nn as nn
from torch.nn import functional as F
from .adaptive_avgmax_pool import SelectAdaptivePool2d
def create_classifier(num_features, num_classes, pool_type='avg', use_conv=False):
flatten = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
assert num_classes == 0 or use_conv,\
'Pooling can only be disabled if classifier is also removed or conv classifier is used'
flatten = False # disable flattening if pooling is pass-through (no pooling)
global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten)
num_pooled_features = num_features * global_pool.feat_mult()
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_pooled_features, num_classes, 1, bias=True)
else:
fc = nn.Linear(num_pooled_features, num_classes, bias=True)
return global_pool, fc
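# Usage sketch (feature width and class count are illustrative; typically called from a model's __init__):
#   global_pool, classifier = create_classifier(num_features=2048, num_classes=1000, pool_type='avg')
#   logits = classifier(global_pool(features))  # features (N, 2048, H, W) -> logits (N, 1000)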
def create_classifier_layerfc(num_features, num_classes, pool_type='avg', use_conv=False):
flatten = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
assert num_classes == 0 or use_conv,\
'Pooling can only be disabled if classifier is also removed or conv classifier is used'
flatten = False # disable flattening if pooling is pass-through (no pooling)
global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=flatten)
num_pooled_features = num_features * global_pool.feat_mult()
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_pooled_features, num_classes, 1, bias=True)
else:
fc = nn.Sequential ( nn.BatchNorm1d(num_pooled_features), # ! follow baseline paper
nn.Dropout(0.3),
nn.Linear(num_pooled_features, 64),
nn.BatchNorm1d(64),
nn.Dropout(0.3),
nn.Linear(64,num_classes) )
        for i in range(len(fc)):
            if isinstance(fc[i], nn.Linear):
                nn.init.xavier_uniform_(fc[i].weight)
                nn.init.constant_(fc[i].bias, 0)
return global_pool, fc
class ClassifierHead(nn.Module):
"""Classifier head w/ configurable global pooling and dropout."""
def __init__(self, in_chs, num_classes, pool_type='avg', drop_rate=0.):
super(ClassifierHead, self).__init__()
self.drop_rate = drop_rate
self.global_pool, self.fc = create_classifier(in_chs, num_classes, pool_type=pool_type)
def forward(self, x):
x = self.global_pool(x)
if self.drop_rate:
x = F.dropout(x, p=float(self.drop_rate), training=self.training)
x = self.fc(x)
return x
| 43.202899
| 99
| 0.647434
|
9727d277028cd5b90a244df9728c4e338594d0b9
| 671
|
py
|
Python
|
venv3/bin/rst2s5.py
|
paul-romeo/pytest-in-60-minutes
|
a4817312081347737f87801c0623054eba599418
|
[
"MIT"
] | null | null | null |
venv3/bin/rst2s5.py
|
paul-romeo/pytest-in-60-minutes
|
a4817312081347737f87801c0623054eba599418
|
[
"MIT"
] | null | null | null |
venv3/bin/rst2s5.py
|
paul-romeo/pytest-in-60-minutes
|
a4817312081347737f87801c0623054eba599418
|
[
"MIT"
] | null | null | null |
#!/home/paul-romeo/pytest_workshop/venv3/bin/python3
# $Id: rst2s5.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Chris Liechti <cliechti@gmx.net>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
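# Invocation sketch (file names are placeholders): rst2s5.py slides.rst slides.html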
| 26.84
| 74
| 0.745156
|
06b1326900c7b01de972591a2f61ee6e33795554
| 3,080
|
py
|
Python
|
benchmark/startCirq2430.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq2430.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startCirq2430.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=40
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=23
c.append(cirq.CZ.on(input_qubit[1],input_qubit[2])) # number=24
c.append(cirq.H.on(input_qubit[2])) # number=25
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=34
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[0])) # number=36
c.append(cirq.Z.on(input_qubit[2])) # number=30
c.append(cirq.H.on(input_qubit[0])) # number=37
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=38
c.append(cirq.H.on(input_qubit[0])) # number=39
c.append(cirq.H.on(input_qubit[3])) # number=16
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=17
c.append(cirq.rx(-0.5686282702997527).on(input_qubit[3])) # number=32
c.append(cirq.H.on(input_qubit[3])) # number=18
c.append(cirq.H.on(input_qubit[3])) # number=26
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=27
c.append(cirq.H.on(input_qubit[3])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=21
c.append(cirq.rx(0.4241150082346221).on(input_qubit[2])) # number=33
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=22
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[3])) # number=12
c.append(cirq.Y.on(input_qubit[0])) # number=14
c.append(cirq.Y.on(input_qubit[0])) # number=15
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
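# Worked example (sketch): bitstring([1, 0, 1, 1]) -> '1011'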
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq2430.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
| 36.235294
| 77
| 0.680195
|
5d9f14cee18ff462862f91fc0a7bc17935d59a72
| 16,878
|
py
|
Python
|
evaluation.py
|
TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation
|
c028e3c8b64812a05e39efa80699a327172c095d
|
[
"MIT"
] | 4
|
2020-07-28T06:03:43.000Z
|
2021-09-10T09:12:10.000Z
|
evaluation.py
|
TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation
|
c028e3c8b64812a05e39efa80699a327172c095d
|
[
"MIT"
] | null | null | null |
evaluation.py
|
TUDelftHao/TUDelftHao-4DLongitudinal-MRI-segmentation
|
c028e3c8b64812a05e39efa80699a327172c095d
|
[
"MIT"
] | 1
|
2021-09-10T09:12:21.000Z
|
2021-09-10T09:12:21.000Z
|
''' Evaluate the segmentation consistency '''
import SimpleITK as sitk
import numpy as np
import torch
import os
import sys
import pandas as pd
from data_prepara import data_split, data_construction, time_parser
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from datetime import datetime
import matplotlib.dates as mdates
from matplotlib.pyplot import MultipleLocator
LONGITUDINAL = 'longitudinal_plot'
PATIENT = 'EGD-0265'
MODEL_1 = 'CenterNormalBiLSTM1layer-unet-p64-4x3-halfpretrain'
MODEL_2 = 'UNet-p64-b4-newdata-oriinput'
DATA = 'longitudinal.csv'
ANALYSIS_DIR = 'analysis'
if not os.path.exists(ANALYSIS_DIR):
os.mkdir(ANALYSIS_DIR)
def volumn_ratio(mask):
    ''' count the voxels belonging to each non-background label '''
labels = map(int, np.unique(mask))
label_volume = {}
for label in labels:
if label != 0:
label_volume[str(label)] = np.sum(mask==label)
return label_volume
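# Example (illustrative values): volumn_ratio(mask) returns voxel counts keyed by
# label string, e.g. {'1': 1200, '2': 3400, '3': 2800, '4': 150} for CSF/GM/WM/TM.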
def plot_volumn_dev(patient_dict, patient_id, model_name, save_dir='longitudinal_plot', predicted_labels=None):
    ''' plot volume development curves along the time dimension per patient '''
save_path = os.path.join(save_dir, patient_id, model_name)
if not os.path.exists(save_path):
os.makedirs(save_path)
time_dict = patient_dict[patient_id]
time_dict = sorted(time_dict.items(), key=lambda item:item[0]) # sort according to time
plt.clf()
plt.figure(figsize=(10, 10))
period = []
label_1 = []
label_2 = []
label_3 = []
label_4 = []
predicted_label_1 = []
predicted_label_2 = []
predicted_label_3 = []
predicted_label_4 = []
for i, time_point in enumerate(time_dict):
period.append(time_point[0])
mask = sitk.GetArrayFromImage(sitk.ReadImage(time_point[1]['combined_fast']))
labelwise_ratio = volumn_ratio(mask)
if predicted_labels:
predicted_mask = predicted_labels[time_point[0]]
predicted_labelwise_ratio = volumn_ratio(predicted_mask)
label_1.append(labelwise_ratio['1'])
label_2.append(labelwise_ratio['2'])
label_3.append(labelwise_ratio['3'])
label_4.append(labelwise_ratio['4'])
if predicted_labels:
if '1' in predicted_labelwise_ratio:
predicted_label_1.append(predicted_labelwise_ratio['1'])
else:
predicted_label_1.append(0)
if '2' in predicted_labelwise_ratio:
predicted_label_2.append(predicted_labelwise_ratio['2'])
else:
predicted_label_2.append(0)
if '3' in predicted_labelwise_ratio:
predicted_label_3.append(predicted_labelwise_ratio['3'])
else:
predicted_label_3.append(0)
if '4' in predicted_labelwise_ratio:
predicted_label_4.append(predicted_labelwise_ratio['4'])
else:
predicted_label_4.append(0)
df = pd.DataFrame(list(zip(period, label_1, label_2, label_3, label_4, predicted_label_1, predicted_label_2, predicted_label_3, predicted_label_4)),
columns=['date', 'GT CSF', 'GT GM', 'GT WM', 'GT TM', 'predicted CSF', 'predicted GM', 'predicted WM', 'predicted TM'])
df.to_csv(os.path.join(save_path, 'longitudinal.csv'))
plt.plot(period, label_1, 'r--', label='GT CSF')
plt.plot(period, label_2, 'g--', label='GT GM')
plt.plot(period, label_3, 'b--', label='GT WM')
plt.plot(period, label_4, 'y--', label='GT TM')
if predicted_labels:
plt.plot(period, predicted_label_1, c='r', label='predicted CSF')
plt.plot(period, predicted_label_2, c='g', label='predicted GM')
plt.plot(period, predicted_label_3, c='b', label='predicted WM')
plt.plot(period, predicted_label_4, c='y', label='predicted TM')
plt.xlabel('time', fontsize=15)
    plt.ylabel('volume (cm3)', fontsize=15)
plt.xticks(period, rotation=45)
plt.legend(loc='best')
plt.title('Comparison of brain tissues and tumor development in ground truth and predicted')
plt.savefig(os.path.join(save_path, 'longitudinal.png'))
# normalization
def norm(model):
csv_dir = os.path.join(LONGITUDINAL, PATIENT, model, DATA)
df = pd.read_csv(csv_dir, index_col=0)
date = df['date']
pure_data = df.drop('date', axis=1)
scaler = MinMaxScaler()
X = scaler.fit_transform(pure_data)
scaled_data = pd.DataFrame(X, columns=pure_data.columns, index=pure_data.index)
    data = scaled_data.copy()  # copy so the appended 'mean' row is not included in the 'std' computation
scaled_data.loc['mean'] = data.apply(np.mean)
scaled_data.loc['std'] = data.apply(np.std)
scaled_data['date'] = date.astype('object')
return scaled_data
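# norm() min-max scales each volume column of a model's longitudinal.csv and
# appends 'mean'/'std' rows that feed the error-bar panels plotted below.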
def longitudinal_plot(model_1, model_2):
data_1 = norm(model_1)
data_2 = norm(model_2)
modalities = ['CSF', 'GM', 'WM', 'TM']
date = data_1['date'].dropna().to_list()
x = list(str(d) for d in date)
plt.figure(figsize=(15, 10))
grid = plt.GridSpec(4, 3, wspace=0.5, hspace=0.5)
for i, modality in enumerate(modalities):
GT = data_1['GT {}'.format(modality)].to_list()[:-2]
pred_1 = data_1['predicted {}'.format(modality)].to_list()[:-2]
pred_2 = data_2['predicted {}'.format(modality)].to_list()[:-2]
upper_1 = [max(a, b) for (a, b) in zip(GT, pred_1)]
lower_1 = [min(a, b) for (a, b) in zip(GT, pred_1)]
ax1 = plt.subplot(grid[i, 0:2])
ax1.plot(x, GT, 'k', lw=2)
ax1.plot(x, pred_1, 'y', lw=1)
ax1.plot(x, pred_2, 'r', lw=1)
ax1.plot(x, upper_1, 'y', lw=1, alpha=0.1)
ax1.plot(x, lower_1, 'y', lw=1, alpha=0.1)
ax1.fill_between(x, upper_1, lower_1, facecolor='yellow', edgecolor='yellow', alpha=0.2, label='{}'.format(model_1.split('-')[0]))
upper_2 = [max(a, b) for (a, b) in zip(GT, pred_2)]
lower_2 = [min(a, b) for (a, b) in zip(GT, pred_2)]
ax1.plot(x, upper_2, 'r', lw=1, alpha=0.1)
ax1.plot(x, lower_2, 'r', lw=1, alpha=0.1)
ax1.fill_between(x, upper_2, lower_2, facecolor='red', edgecolor='red', alpha=0.1, label='{}'.format(model_2.split('-')[0]))
ax1.set_ylim(-0.5, 1.5)
ax1.set_title('{}'.format(modality))
ax1.set_xticklabels([])
ax1.legend(loc='upper left', fontsize=10)
ax2 = plt.subplot(grid[i, 2])
ax2.errorbar(0, data_1.loc['mean', 'GT CSF'], yerr=data_1.loc['std', 'GT CSF'], fmt="o",color="black", elinewidth=2, capsize=4)
ax2.errorbar(1, data_1.loc['mean', 'predicted CSF'], yerr=data_1.loc['std', 'predicted CSF'], fmt="o",color="yellow", elinewidth=2, capsize=4)
ax2.errorbar(2, data_2.loc['mean', 'predicted CSF'], yerr=data_2.loc['std', 'predicted CSF'], fmt="o",color="red", elinewidth=2, capsize=4)
ax2.set_ylim(0, 1)
ax2.set_xlim(-0.5, 2.5)
ax2.set_xticklabels([])
ax2.xaxis.set_major_locator(MultipleLocator(1))
ax2.set_title('{}'.format(modality))
ax1.set_xticklabels(x, rotation=30)
ax1.set_xlabel('date', fontsize=15)
ax2.set_xticklabels(['', 'GT', 'longitudinal', 'UNet'])
ax2.set_xlabel('model', fontsize=15)
plt.suptitle(PATIENT, fontsize=20)
    plt.savefig(os.path.join(ANALYSIS_DIR, 'longitudinal_{}.png'.format(PATIENT)), dpi=150)
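# For each pair of consecutive time points, transition_matrix() builds a 5x5
# label-to-label voxel count matrix (BG/CSF/GM/WM/TM), row-normalises it, and
# plots it side by side for the ground truth and both models' predictions.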
def transition_matrix(patient_dict, patient_id, model_name_1, model_name_2, save_dir='inference_result'):
pred_1_path = os.path.join(save_dir, patient_id, model_name_1)
pred_1_list = [pred_mask for pred_mask in os.listdir(pred_1_path) if pred_mask.endswith('.gz')]
pred_1_dir_list = [os.path.join(pred_1_path, pred) for pred in pred_1_list]
pred_2_path = os.path.join(save_dir, patient_id, model_name_2)
pred_2_list = [pred_mask for pred_mask in os.listdir(pred_2_path) if pred_mask.endswith('.gz')]
pred_2_dir_list = [os.path.join(pred_2_path, pred) for pred in pred_2_list]
time_dict = patient_dict[patient_id]
time_dict = sorted(time_dict.items(), key=lambda item:item[0]) # sort according to time
time_points = len(time_dict)
labels = [0, 1, 2, 3, 4]
label_str = ['BG', 'CSF', 'GM', 'WM', 'TM']
fig = plt.figure(figsize=(15, 5 * time_points))
# plt.subplots_adjust(wspace=0.5, hspace=0.5)
grid = plt.GridSpec(time_points, 3, wspace=0.3, hspace=0.3)
for k in range(time_points-1):
mask_pre = sitk.GetArrayFromImage(sitk.ReadImage(time_dict[k][1]['combined_fast']))
predicted_1_mask_pre = sitk.GetArrayFromImage(sitk.ReadImage(pred_1_dir_list[k]))
predicted_2_mask_pre = sitk.GetArrayFromImage(sitk.ReadImage(pred_2_dir_list[k]))
mask_later = sitk.GetArrayFromImage(sitk.ReadImage(time_dict[k+1][1]['combined_fast']))
predicted_1_mask_later = sitk.GetArrayFromImage(sitk.ReadImage(pred_1_dir_list[k+1]))
predicted_2_mask_later = sitk.GetArrayFromImage(sitk.ReadImage(pred_2_dir_list[k+1]))
corr_mask = np.zeros((len(labels), len(labels)))
corr_pred_1 = np.zeros((len(labels), len(labels)))
corr_pred_2 = np.zeros((len(labels), len(labels)))
for i, pre in enumerate(labels):
for j, later in enumerate(labels):
corr_mask[i, j] = np.sum(mask_later[mask_pre == pre] == later)
corr_pred_1[i, j] = np.sum(predicted_1_mask_later[predicted_1_mask_pre == pre] == later)
corr_pred_2[i, j] = np.sum(predicted_2_mask_later[predicted_2_mask_pre == pre] == later)
df_mask = pd.DataFrame(corr_mask, columns=label_str, index=label_str)
df_pred_1 = pd.DataFrame(corr_pred_1, columns=label_str, index=label_str)
df_pred_2 = pd.DataFrame(corr_pred_2, columns=label_str, index=label_str)
norm_mask = df_mask.div(df_mask.sum(axis=1), axis=0)
norm_pred_1 = df_pred_1.div(df_pred_1.sum(axis=1), axis=0)
norm_pred_2 = df_pred_2.div(df_pred_2.sum(axis=1), axis=0)
ax1 = plt.subplot(grid[k, 0])
ax1.imshow(norm_mask, aspect='equal', cmap="YlGn")
ax1.set_xticks(np.arange(len(label_str)))
ax1.set_yticks(np.arange(len(label_str)))
ax1.set_xticklabels(label_str)
ax1.set_yticklabels(label_str)
ax1.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
for edge, spine in ax1.spines.items():
spine.set_visible(False)
ax1.set_xticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax1.set_yticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax1.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax1.tick_params(which="minor", bottom=False, left=False)
ax1.set_title('Mask labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
ax2 = plt.subplot(grid[k, 1])
ax2.imshow(norm_pred_1, aspect='equal', cmap="YlGn")
ax2.set_xticks(np.arange(len(label_str)))
ax2.set_yticks(np.arange(len(label_str)))
ax2.set_xticklabels(label_str)
ax2.set_yticklabels(label_str)
ax2.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
for edge, spine in ax2.spines.items():
spine.set_visible(False)
ax2.set_xticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax2.set_yticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax2.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax2.tick_params(which="minor", bottom=False, left=False)
ax2.set_title('Longitudinal predicted labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
ax3 = plt.subplot(grid[k, 2])
ax3.imshow(norm_pred_2, aspect='equal', cmap="YlGn")
ax3.set_xticks(np.arange(len(label_str)))
ax3.set_yticks(np.arange(len(label_str)))
ax3.set_xticklabels(label_str)
ax3.set_yticklabels(label_str)
ax3.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
for edge, spine in ax3.spines.items():
spine.set_visible(False)
ax3.set_xticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax3.set_yticks(np.arange(len(label_str)+1) - 0.5, minor=True)
ax3.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax3.tick_params(which="minor", bottom=False, left=False)
ax3.set_title('U-Net predicted labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
plt.suptitle('Transition matrix of patient {}'.format(patient_id), fontsize=20)
fig.savefig(os.path.join(ANALYSIS_DIR, 'transition_matrix_{}.png'.format(patient_id)), dpi=150)
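# transition_heatmap() works on a single 2D slice (one associated with tumour
# label 4 when available, otherwise the central slice) and marks every voxel
# whose label changes between consecutive time points, for GT and both models.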
def transition_heatmap(patient_dict, patient_id, model_name_1, model_name_2, save_dir='inference_result'):
pred_1_path = os.path.join(save_dir, patient_id, model_name_1)
pred_1_list = [pred_mask for pred_mask in os.listdir(pred_1_path) if pred_mask.endswith('.gz')]
pred_1_dir_list = [os.path.join(pred_1_path, pred) for pred in pred_1_list]
pred_2_path = os.path.join(save_dir, patient_id, model_name_2)
pred_2_list = [pred_mask for pred_mask in os.listdir(pred_2_path) if pred_mask.endswith('.gz')]
pred_2_dir_list = [os.path.join(pred_2_path, pred) for pred in pred_2_list]
time_dict = patient_dict[patient_id]
time_dict = sorted(time_dict.items(), key=lambda item:item[0]) # sort according to time
time_points = len(time_dict)
fig = plt.figure(figsize=(15, 5 * time_points))
grid = plt.GridSpec(time_points, 3, wspace=0.4, hspace=0.3)
for k in range(time_points-1):
mask_pre = sitk.GetArrayFromImage(sitk.ReadImage(time_dict[k][1]['combined_fast']))
predicted_mask_1_pre = sitk.GetArrayFromImage(sitk.ReadImage(pred_1_dir_list[k]))
predicted_mask_2_pre = sitk.GetArrayFromImage(sitk.ReadImage(pred_2_dir_list[k]))
mask_later = sitk.GetArrayFromImage(sitk.ReadImage(time_dict[k+1][1]['combined_fast']))
predicted_mask_1_later = sitk.GetArrayFromImage(sitk.ReadImage(pred_1_dir_list[k+1]))
predicted_mask_2_later = sitk.GetArrayFromImage(sitk.ReadImage(pred_2_dir_list[k+1]))
if k == 0:
loc = np.where(mask_pre[0]==4)
num_tumor = len(loc[0])
if num_tumor > 0:
idx = np.random.choice(range(num_tumor), 1, replace=False)
c_z = int(loc[0][idx])
else:
c_z = mask_pre.shape[-3]//2
mask_pre = mask_pre[c_z]
predicted_mask_1_pre = predicted_mask_1_pre[c_z]
predicted_mask_2_pre = predicted_mask_2_pre[c_z]
mask_later = mask_later[c_z]
predicted_mask_1_later = predicted_mask_1_later[c_z]
predicted_mask_2_later = predicted_mask_2_later[c_z]
mask_transition_heatmap = np.zeros_like(mask_pre)
pred_mask_1_transition_heatmap = np.zeros_like(predicted_mask_1_pre)
pred_mask_2_transition_heatmap = np.zeros_like(predicted_mask_2_pre)
mask_transition_heatmap[mask_pre != mask_later] = 1
pred_mask_1_transition_heatmap[predicted_mask_1_pre != predicted_mask_1_later] = 1
pred_mask_2_transition_heatmap[predicted_mask_2_pre != predicted_mask_2_later] = 1
ax1 = plt.subplot(grid[k, 0])
ax1.imshow(mask_transition_heatmap)
ax1.set_title('Mask labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
ax1.axis('off')
ax2 = plt.subplot(grid[k, 1])
ax2.imshow(pred_mask_1_transition_heatmap)
        ax2.set_title('Longitudinal predicted labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
ax2.axis('off')
ax3 = plt.subplot(grid[k, 2])
ax3.imshow(pred_mask_2_transition_heatmap)
ax3.set_title('UNet Predicted labels transition from t{} to t{}'.format(k, k+1), fontsize=12)
ax3.axis('off')
plt.suptitle('Transition heatmap of patient {}'.format(patient_id), fontsize=20)
fig.savefig(os.path.join(ANALYSIS_DIR, 'transition_heatmap_{}.png'.format(patient_id)), dpi=150)
if __name__ == '__main__':
labels = [0, 1, 2, 3, 4]
data_class = data_split()
train, val, test = data_construction(data_class)
test_dict = time_parser(test)
model_name_1 = 'CenterNormalBiLSTM1layer-unet-p64-4x3-halfpretrain'
model_name_2 = 'UNet-p64-b4-newdata-oriinput'
longitudinal_plot(MODEL_1, MODEL_2)
transition_matrix(test_dict, PATIENT, MODEL_1, MODEL_2)
transition_heatmap(test_dict, PATIENT, MODEL_1, MODEL_2)
| 44.183246
| 154
| 0.647707
|
462b60efd7f3d2d6f29223bae3f05d27e357126d
| 36,035
|
py
|
Python
|
ibis/backends/pandas/execution/generic.py
|
rohankumardubey/ibis
|
e416dcfdb32792ffeb6f5214b361872582aa8795
|
[
"Apache-2.0"
] | 986
|
2017-06-07T07:33:01.000Z
|
2022-03-31T13:00:46.000Z
|
ibis/backends/pandas/execution/generic.py
|
rohankumardubey/ibis
|
e416dcfdb32792ffeb6f5214b361872582aa8795
|
[
"Apache-2.0"
] | 2,623
|
2017-06-07T18:29:11.000Z
|
2022-03-31T20:27:31.000Z
|
ibis/backends/pandas/execution/generic.py
|
gerrymanoim/ibis
|
37616bd3df0599f33b28101ca1c19e0c0003cf4d
|
[
"Apache-2.0"
] | 238
|
2017-06-26T19:02:58.000Z
|
2022-03-31T15:18:29.000Z
|
"""Execution rules for generic ibis operations."""
import collections
import datetime
import decimal
import functools
import math
import numbers
import operator
from collections.abc import Sized
from typing import Dict, Optional
import numpy as np
import pandas as pd
import toolz
from pandas.api.types import DatetimeTZDtype
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
from ibis.expr.scope import Scope
from ibis.expr.timecontext import get_time_col
from ibis.expr.typing import TimeContext
from .. import Backend as PandasBackend
from .. import aggcontext as agg_ctx
from ..client import PandasTable
from ..core import (
boolean_types,
execute,
fixed_width_types,
floating_types,
integer_types,
numeric_types,
scalar_types,
simple_types,
timedelta_types,
timestamp_types,
)
from ..dispatch import execute_literal, execute_node
from ..execution import constants
from ..execution.util import coerce_to_output
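# execute_node / execute_literal are multiple-dispatch registries: a handler is
# chosen by the operation type plus the runtime types of its already-executed
# arguments. Hypothetical registration sketch for a new op (ops.MyOp is made up):
#
#   @execute_node.register(ops.MyOp, pd.Series)
#   def execute_my_op(op, data, **kwargs):
#       return data  # replace with the op's actual semantics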
# By default return the literal value
@execute_literal.register(ops.Literal, object, dt.DataType)
def execute_node_literal_value_datatype(op, value, datatype, **kwargs):
return value
# Because True and 1 hash to the same value, if we have True or False in scope
# keys while executing anything that should evaluate to 1 or 0 evaluates to
# True or False respectively. This is a hack to work around that by casting the
# bool to an integer.
@execute_literal.register(ops.Literal, object, dt.Integer)
def execute_node_literal_any_integer_datatype(op, value, datatype, **kwargs):
return int(value)
@execute_literal.register(ops.Literal, object, dt.Boolean)
def execute_node_literal_any_boolean_datatype(op, value, datatype, **kwargs):
return bool(value)
@execute_literal.register(ops.Literal, object, dt.Floating)
def execute_node_literal_any_floating_datatype(op, value, datatype, **kwargs):
return float(value)
@execute_literal.register(ops.Literal, object, dt.Array)
def execute_node_literal_any_array_datatype(op, value, datatype, **kwargs):
return np.array(value)
@execute_literal.register(ops.Literal, dt.DataType)
def execute_node_literal_datatype(op, datatype, **kwargs):
return op.value
@execute_literal.register(
ops.Literal, timedelta_types + (str,) + integer_types, dt.Interval
)
def execute_interval_literal(op, value, dtype, **kwargs):
return pd.Timedelta(value, dtype.unit)
@execute_node.register(ops.Limit, pd.DataFrame, integer_types, integer_types)
def execute_limit_frame(op, data, nrows, offset, **kwargs):
return data.iloc[offset : offset + nrows]
@execute_node.register(ops.Cast, SeriesGroupBy, dt.DataType)
def execute_cast_series_group_by(op, data, type, **kwargs):
result = execute_cast_series_generic(op, data.obj, type, **kwargs)
return result.groupby(data.grouper.groupings)
@execute_node.register(ops.Cast, pd.Series, dt.DataType)
def execute_cast_series_generic(op, data, type, **kwargs):
return data.astype(constants.IBIS_TYPE_TO_PANDAS_TYPE[type])
@execute_node.register(ops.Cast, pd.Series, dt.Array)
def execute_cast_series_array(op, data, type, **kwargs):
value_type = type.value_type
numpy_type = constants.IBIS_TYPE_TO_PANDAS_TYPE.get(value_type, None)
if numpy_type is None:
raise ValueError(
'Array value type must be a primitive type '
'(e.g., number, string, or timestamp)'
)
return data.map(
lambda array, numpy_type=numpy_type: array.astype(numpy_type)
)
@execute_node.register(ops.Cast, pd.Series, dt.Timestamp)
def execute_cast_series_timestamp(op, data, type, **kwargs):
arg = op.arg
from_type = arg.type()
if from_type.equals(type): # noop cast
return data
tz = type.timezone
if isinstance(from_type, (dt.Timestamp, dt.Date)):
return data.astype(
'M8[ns]' if tz is None else DatetimeTZDtype('ns', tz)
)
if isinstance(from_type, (dt.String, dt.Integer)):
timestamps = pd.to_datetime(data.values, infer_datetime_format=True)
if getattr(timestamps.dtype, "tz", None) is not None:
method_name = "tz_convert"
else:
method_name = "tz_localize"
method = getattr(timestamps, method_name)
timestamps = method(tz)
return pd.Series(timestamps, index=data.index, name=data.name)
raise TypeError(f"Don't know how to cast {from_type} to {type}")
def _normalize(values, original_index, name, timezone=None):
index = pd.DatetimeIndex(values, tz=timezone)
return pd.Series(index.normalize(), index=original_index, name=name)
@execute_node.register(ops.Cast, pd.Series, dt.Date)
def execute_cast_series_date(op, data, type, **kwargs):
arg = op.args[0]
from_type = arg.type()
if from_type.equals(type):
return data
if isinstance(from_type, dt.Timestamp):
return _normalize(
data.values, data.index, data.name, timezone=from_type.timezone
)
if from_type.equals(dt.string):
values = data.values
datetimes = pd.to_datetime(values, infer_datetime_format=True)
try:
datetimes = datetimes.tz_convert(None)
except TypeError:
pass
dates = _normalize(datetimes, data.index, data.name)
return pd.Series(dates, index=data.index, name=data.name)
if isinstance(from_type, dt.Integer):
return pd.Series(
pd.to_datetime(data.values, unit='D').values,
index=data.index,
name=data.name,
)
raise TypeError(f"Don't know how to cast {from_type} to {type}")
@execute_node.register(ops.SortKey, pd.Series, bool)
def execute_sort_key_series_bool(op, data, ascending, **kwargs):
return data
def call_numpy_ufunc(func, op, data, **kwargs):
if data.dtype == np.dtype(np.object_):
return data.apply(functools.partial(execute_node, op, **kwargs))
return func(data)
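# call_numpy_ufunc: object-dtype Series fall back to element-wise execute_node
# dispatch; everything else goes straight through the vectorised NumPy ufunc.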
@execute_node.register(ops.Negate, fixed_width_types + timedelta_types)
def execute_obj_negate(op, data, **kwargs):
return -data
@execute_node.register(ops.Negate, pd.Series)
def execute_series_negate(op, data, **kwargs):
return call_numpy_ufunc(np.negative, op, data, **kwargs)
@execute_node.register(ops.Negate, SeriesGroupBy)
def execute_series_group_by_negate(op, data, **kwargs):
return execute_series_negate(op, data.obj, **kwargs).groupby(
data.grouper.groupings
)
@execute_node.register(ops.UnaryOp, pd.Series)
def execute_series_unary_op(op, data, **kwargs):
function = getattr(np, type(op).__name__.lower())
return call_numpy_ufunc(function, op, data, **kwargs)
@execute_node.register((ops.Ceil, ops.Floor), pd.Series)
def execute_series_ceil(op, data, **kwargs):
return_type = np.object_ if data.dtype == np.object_ else np.int64
func = getattr(np, type(op).__name__.lower())
return call_numpy_ufunc(func, op, data, **kwargs).astype(return_type)
def vectorize_object(op, arg, *args, **kwargs):
func = np.vectorize(functools.partial(execute_node, op, **kwargs))
return pd.Series(func(arg, *args), index=arg.index, name=arg.name)
@execute_node.register(
ops.Log, pd.Series, (pd.Series, numbers.Real, decimal.Decimal, type(None))
)
def execute_series_log_with_base(op, data, base, **kwargs):
if data.dtype == np.dtype(np.object_):
return vectorize_object(op, data, base, **kwargs)
if base is None:
return np.log(data)
return np.log(data) / np.log(base)
@execute_node.register(ops.Ln, pd.Series)
def execute_series_natural_log(op, data, **kwargs):
if data.dtype == np.dtype(np.object_):
return data.apply(functools.partial(execute_node, op, **kwargs))
return np.log(data)
@execute_node.register(
ops.Clip,
pd.Series,
(pd.Series, type(None)) + numeric_types,
(pd.Series, type(None)) + numeric_types,
)
def execute_series_clip(op, data, lower, upper, **kwargs):
return data.clip(lower=lower, upper=upper)
@execute_node.register(ops.Quantile, (pd.Series, SeriesGroupBy), numeric_types)
def execute_series_quantile(op, data, quantile, aggcontext=None, **kwargs):
return aggcontext.agg(
data, 'quantile', q=quantile, interpolation=op.interpolation
)
@execute_node.register(ops.MultiQuantile, pd.Series, np.ndarray)
def execute_series_quantile_multi(
op, data, quantile, aggcontext=None, **kwargs
):
result = aggcontext.agg(
data, 'quantile', q=quantile, interpolation=op.interpolation
)
return np.array(result)
@execute_node.register(ops.MultiQuantile, SeriesGroupBy, np.ndarray)
def execute_series_quantile_multi_groupby(
op, data, quantile, aggcontext=None, **kwargs
):
def q(x, quantile, interpolation):
result = x.quantile(quantile, interpolation=interpolation).tolist()
res = [result for _ in range(len(x))]
return res
result = aggcontext.agg(data, q, quantile, op.interpolation)
return result
@execute_node.register(ops.Cast, type(None), dt.DataType)
def execute_cast_null_to_anything(op, data, type, **kwargs):
return None
@execute_node.register(ops.Cast, datetime.datetime, dt.String)
def execute_cast_datetime_or_timestamp_to_string(op, data, type, **kwargs):
"""Cast timestamps to strings"""
return str(data)
@execute_node.register(ops.Cast, datetime.datetime, dt.Int64)
def execute_cast_datetime_to_integer(op, data, type, **kwargs):
"""Cast datetimes to integers"""
return pd.Timestamp(data).value
@execute_node.register(ops.Cast, pd.Timestamp, dt.Int64)
def execute_cast_timestamp_to_integer(op, data, type, **kwargs):
"""Cast timestamps to integers"""
return data.value
@execute_node.register(ops.Cast, (np.bool_, bool), dt.Timestamp)
def execute_cast_bool_to_timestamp(op, data, type, **kwargs):
raise TypeError(
'Casting boolean values to timestamps does not make sense. If you '
'really want to cast boolean values to timestamps please cast to '
'int64 first then to timestamp: '
"value.cast('int64').cast('timestamp')"
)
@execute_node.register(ops.Cast, (np.bool_, bool), dt.Interval)
def execute_cast_bool_to_interval(op, data, type, **kwargs):
raise TypeError(
'Casting boolean values to intervals does not make sense. If you '
'really want to cast boolean values to intervals please cast to '
'int64 first then to interval: '
"value.cast('int64').cast(ibis.expr.datatypes.Interval(...))"
)
@execute_node.register(ops.Cast, integer_types + (str,), dt.Timestamp)
def execute_cast_simple_literal_to_timestamp(op, data, type, **kwargs):
"""Cast integer and strings to timestamps"""
return pd.Timestamp(data, tz=type.timezone)
@execute_node.register(ops.Cast, pd.Timestamp, dt.Timestamp)
def execute_cast_timestamp_to_timestamp(op, data, type, **kwargs):
"""Cast timestamps to other timestamps including timezone if necessary"""
input_timezone = data.tz
target_timezone = type.timezone
if input_timezone == target_timezone:
return data
if input_timezone is None or target_timezone is None:
return data.tz_localize(target_timezone)
return data.tz_convert(target_timezone)
@execute_node.register(ops.Cast, datetime.datetime, dt.Timestamp)
def execute_cast_datetime_to_datetime(op, data, type, **kwargs):
return execute_cast_timestamp_to_timestamp(
op, data, type, **kwargs
).to_pydatetime()
@execute_node.register(ops.Cast, fixed_width_types + (str,), dt.DataType)
def execute_cast_string_literal(op, data, type, **kwargs):
try:
cast_function = constants.IBIS_TO_PYTHON_LITERAL_TYPES[type]
except KeyError:
raise TypeError(f"Don't know how to cast {data!r} to type {type}")
else:
return cast_function(data)
@execute_node.register(ops.Round, scalar_types, (int, type(None)))
def execute_round_scalars(op, data, places, **kwargs):
return round(data, places) if places else round(data)
@execute_node.register(
ops.Round, pd.Series, (pd.Series, np.integer, type(None), int)
)
def execute_round_series(op, data, places, **kwargs):
if data.dtype == np.dtype(np.object_):
return vectorize_object(op, data, places, **kwargs)
result = data.round(places or 0)
return result if places else result.astype('int64')
@execute_node.register(ops.TableColumn, (pd.DataFrame, DataFrameGroupBy))
def execute_table_column_df_or_df_groupby(op, data, **kwargs):
return data[op.name]
@execute_node.register(ops.Aggregation, pd.DataFrame)
def execute_aggregation_dataframe(
op, data, scope=None, timecontext: Optional[TimeContext] = None, **kwargs
):
assert op.metrics, 'no metrics found during aggregation execution'
if op.sort_keys:
raise NotImplementedError(
'sorting on aggregations not yet implemented'
)
predicates = op.predicates
if predicates:
predicate = functools.reduce(
operator.and_,
(
execute(p, scope=scope, timecontext=timecontext, **kwargs)
for p in predicates
),
)
data = data.loc[predicate]
columns: Dict[str, str] = {}
if op.by:
grouping_key_pairs = list(
zip(op.by, map(operator.methodcaller('op'), op.by))
)
grouping_keys = [
by_op.name
if isinstance(by_op, ops.TableColumn)
else execute(
by, scope=scope, timecontext=timecontext, **kwargs
).rename(by.get_name())
for by, by_op in grouping_key_pairs
]
columns.update(
(by_op.name, by.get_name())
for by, by_op in grouping_key_pairs
if hasattr(by_op, 'name')
)
source = data.groupby(grouping_keys)
else:
source = data
scope = scope.merge_scope(Scope({op.table.op(): source}, timecontext))
pieces = [
coerce_to_output(
execute(metric, scope=scope, timecontext=timecontext, **kwargs),
metric,
)
for metric in op.metrics
]
result = pd.concat(pieces, axis=1)
# If grouping, need a reset to get the grouping key back as a column
if op.by:
result = result.reset_index()
result.columns = [columns.get(c, c) for c in result.columns]
if op.having:
# .having(...) is only accessible on groupby, so this should never
# raise
if not op.by:
raise ValueError(
'Filtering out aggregation values is not allowed without at '
'least one grouping key'
)
# TODO(phillipc): Don't recompute identical subexpressions
predicate = functools.reduce(
operator.and_,
(
execute(having, scope=scope, timecontext=timecontext, **kwargs)
for having in op.having
),
)
assert len(predicate) == len(
result
), 'length of predicate does not match length of DataFrame'
result = result.loc[predicate.values]
return result
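# Aggregation execution order: filter rows by the predicates, group by the
# grouping keys (if any), evaluate each metric against the (grouped) table in
# scope, concatenate the metric columns, restore the grouping keys as columns,
# and finally apply any HAVING predicates to the aggregated frame.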
@execute_node.register(ops.Reduction, SeriesGroupBy, type(None))
def execute_reduction_series_groupby(
op, data, mask, aggcontext=None, **kwargs
):
return aggcontext.agg(data, type(op).__name__.lower())
variance_ddof = {'pop': 0, 'sample': 1}
@execute_node.register(ops.Variance, SeriesGroupBy, type(None))
def execute_reduction_series_groupby_var(
op, data, _, aggcontext=None, **kwargs
):
return aggcontext.agg(data, 'var', ddof=variance_ddof[op.how])
@execute_node.register(ops.StandardDev, SeriesGroupBy, type(None))
def execute_reduction_series_groupby_std(
op, data, _, aggcontext=None, **kwargs
):
return aggcontext.agg(data, 'std', ddof=variance_ddof[op.how])
@execute_node.register(
(ops.CountDistinct, ops.HLLCardinality), SeriesGroupBy, type(None)
)
def execute_count_distinct_series_groupby(
op, data, _, aggcontext=None, **kwargs
):
return aggcontext.agg(data, 'nunique')
@execute_node.register(ops.Arbitrary, SeriesGroupBy, type(None))
def execute_arbitrary_series_groupby(op, data, _, aggcontext=None, **kwargs):
how = op.how
if how is None:
how = 'first'
if how not in {'first', 'last'}:
raise com.OperationNotDefinedError(
f'Arbitrary {how!r} is not supported'
)
return aggcontext.agg(data, how)
def _filtered_reduction(mask, method, data):
return method(data[mask[data.index]])
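# _filtered_reduction aligns the ungrouped boolean mask to each group's index
# before applying the reduction, so masked aggregations work per group.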
@execute_node.register(ops.Reduction, SeriesGroupBy, SeriesGroupBy)
def execute_reduction_series_gb_mask(
op, data, mask, aggcontext=None, **kwargs
):
method = operator.methodcaller(type(op).__name__.lower())
return aggcontext.agg(
data, functools.partial(_filtered_reduction, mask.obj, method)
)
@execute_node.register(
(ops.CountDistinct, ops.HLLCardinality), SeriesGroupBy, SeriesGroupBy
)
def execute_count_distinct_series_groupby_mask(
op, data, mask, aggcontext=None, **kwargs
):
return aggcontext.agg(
data,
functools.partial(_filtered_reduction, mask.obj, pd.Series.nunique),
)
@execute_node.register(ops.Variance, SeriesGroupBy, SeriesGroupBy)
def execute_var_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs):
return aggcontext.agg(
data,
lambda x, mask=mask.obj, ddof=variance_ddof[op.how]: (
x[mask[x.index]].var(ddof=ddof)
),
)
@execute_node.register(ops.StandardDev, SeriesGroupBy, SeriesGroupBy)
def execute_std_series_groupby_mask(op, data, mask, aggcontext=None, **kwargs):
return aggcontext.agg(
data,
lambda x, mask=mask.obj, ddof=variance_ddof[op.how]: (
x[mask[x.index]].std(ddof=ddof)
),
)
@execute_node.register(ops.Count, DataFrameGroupBy, type(None))
def execute_count_frame_groupby(op, data, _, **kwargs):
result = data.size()
# FIXME(phillipc): We should not hard code this column name
result.name = 'count'
return result
@execute_node.register(ops.Reduction, pd.Series, (pd.Series, type(None)))
def execute_reduction_series_mask(op, data, mask, aggcontext=None, **kwargs):
operand = data[mask] if mask is not None else data
return aggcontext.agg(operand, type(op).__name__.lower())
@execute_node.register(
(ops.CountDistinct, ops.HLLCardinality), pd.Series, (pd.Series, type(None))
)
def execute_count_distinct_series_mask(
op, data, mask, aggcontext=None, **kwargs
):
return aggcontext.agg(data[mask] if mask is not None else data, 'nunique')
@execute_node.register(ops.Arbitrary, pd.Series, (pd.Series, type(None)))
def execute_arbitrary_series_mask(op, data, mask, aggcontext=None, **kwargs):
if op.how == 'first':
index = 0
elif op.how == 'last':
index = -1
else:
raise com.OperationNotDefinedError(
f'Arbitrary {op.how!r} is not supported'
)
data = data[mask] if mask is not None else data
return data.iloc[index]
@execute_node.register(ops.StandardDev, pd.Series, (pd.Series, type(None)))
def execute_standard_dev_series(op, data, mask, aggcontext=None, **kwargs):
return aggcontext.agg(
data[mask] if mask is not None else data,
'std',
ddof=variance_ddof[op.how],
)
@execute_node.register(ops.Variance, pd.Series, (pd.Series, type(None)))
def execute_variance_series(op, data, mask, aggcontext=None, **kwargs):
return aggcontext.agg(
data[mask] if mask is not None else data,
'var',
ddof=variance_ddof[op.how],
)
@execute_node.register((ops.Any, ops.All), (pd.Series, SeriesGroupBy))
def execute_any_all_series(op, data, aggcontext=None, **kwargs):
if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)):
result = aggcontext.agg(data, type(op).__name__.lower())
else:
result = aggcontext.agg(
data, lambda data: getattr(data, type(op).__name__.lower())()
)
try:
return result.astype(bool)
except TypeError:
return result
@execute_node.register(ops.NotAny, (pd.Series, SeriesGroupBy))
def execute_notany_series(op, data, aggcontext=None, **kwargs):
if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)):
result = ~(aggcontext.agg(data, 'any'))
else:
result = aggcontext.agg(data, lambda data: ~(data.any()))
try:
return result.astype(bool)
except TypeError:
return result
@execute_node.register(ops.NotAll, (pd.Series, SeriesGroupBy))
def execute_notall_series(op, data, aggcontext=None, **kwargs):
if isinstance(aggcontext, (agg_ctx.Summarize, agg_ctx.Transform)):
result = ~(aggcontext.agg(data, 'all'))
else:
result = aggcontext.agg(data, lambda data: ~(data.all()))
try:
return result.astype(bool)
except TypeError:
return result
@execute_node.register(ops.Count, pd.DataFrame, type(None))
def execute_count_frame(op, data, _, **kwargs):
return len(data)
@execute_node.register(ops.Not, (bool, np.bool_))
def execute_not_bool(op, data, **kwargs):
return not data
@execute_node.register(ops.BinaryOp, pd.Series, pd.Series)
@execute_node.register(
(ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison),
numeric_types,
pd.Series,
)
@execute_node.register(
(ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison),
pd.Series,
numeric_types,
)
@execute_node.register(
(ops.NumericBinaryOp, ops.LogicalBinaryOp, ops.Comparison),
numeric_types,
numeric_types,
)
@execute_node.register((ops.Comparison, ops.Add, ops.Multiply), pd.Series, str)
@execute_node.register((ops.Comparison, ops.Add, ops.Multiply), str, pd.Series)
@execute_node.register((ops.Comparison, ops.Add), str, str)
@execute_node.register(ops.Multiply, integer_types, str)
@execute_node.register(ops.Multiply, str, integer_types)
@execute_node.register(ops.Comparison, pd.Series, timestamp_types)
@execute_node.register(ops.Comparison, timestamp_types, pd.Series)
def execute_binary_op(op, left, right, **kwargs):
op_type = type(op)
try:
operation = constants.BINARY_OPERATIONS[op_type]
except KeyError:
raise NotImplementedError(
f'Binary operation {op_type.__name__} not implemented'
)
else:
return operation(left, right)
@execute_node.register(ops.BinaryOp, SeriesGroupBy, SeriesGroupBy)
def execute_binary_op_series_group_by(op, left, right, **kwargs):
left_groupings = left.grouper.groupings
right_groupings = right.grouper.groupings
if left_groupings != right_groupings:
raise ValueError(
'Cannot perform {} operation on two series with '
'different groupings'.format(type(op).__name__)
)
result = execute_binary_op(op, left.obj, right.obj, **kwargs)
return result.groupby(left_groupings)
@execute_node.register(ops.BinaryOp, SeriesGroupBy, simple_types)
def execute_binary_op_series_gb_simple(op, left, right, **kwargs):
result = execute_binary_op(op, left.obj, right, **kwargs)
return result.groupby(left.grouper.groupings)
@execute_node.register(ops.BinaryOp, simple_types, SeriesGroupBy)
def execute_binary_op_simple_series_gb(op, left, right, **kwargs):
result = execute_binary_op(op, left, right.obj, **kwargs)
return result.groupby(right.grouper.groupings)
@execute_node.register(ops.UnaryOp, SeriesGroupBy)
def execute_unary_op_series_gb(op, operand, **kwargs):
result = execute_node(op, operand.obj, **kwargs)
return result.groupby(operand.grouper.groupings)
@execute_node.register(
(ops.Log, ops.Round),
SeriesGroupBy,
(numbers.Real, decimal.Decimal, type(None)),
)
def execute_log_series_gb_others(op, left, right, **kwargs):
result = execute_node(op, left.obj, right, **kwargs)
return result.groupby(left.grouper.groupings)
@execute_node.register((ops.Log, ops.Round), SeriesGroupBy, SeriesGroupBy)
def execute_log_series_gb_series_gb(op, left, right, **kwargs):
result = execute_node(op, left.obj, right.obj, **kwargs)
return result.groupby(left.grouper.groupings)
@execute_node.register(ops.Not, pd.Series)
def execute_not_series(op, data, **kwargs):
return ~data
@execute_node.register(ops.NullIfZero, pd.Series)
def execute_null_if_zero_series(op, data, **kwargs):
return data.where(data != 0, np.nan)
@execute_node.register(ops.StringSplit, pd.Series, (pd.Series, str))
def execute_string_split(op, data, delimiter, **kwargs):
# Doing the iteration using `map` is much faster than doing the iteration
# using `Series.apply` due to Pandas-related overhead.
return pd.Series(map(lambda s: np.array(s.split(delimiter)), data))
@execute_node.register(
ops.Between,
pd.Series,
(pd.Series, numbers.Real, str, datetime.datetime),
(pd.Series, numbers.Real, str, datetime.datetime),
)
def execute_between(op, data, lower, upper, **kwargs):
return data.between(lower, upper)
@execute_node.register(ops.DistinctColumn, pd.Series)
def execute_series_distinct(op, data, **kwargs):
return pd.Series(data.unique(), name=data.name)
@execute_node.register(ops.Union, pd.DataFrame, pd.DataFrame, bool)
def execute_union_dataframe_dataframe(
op, left: pd.DataFrame, right: pd.DataFrame, distinct, **kwargs
):
result = pd.concat([left, right], axis=0)
return result.drop_duplicates() if distinct else result
@execute_node.register(ops.Intersection, pd.DataFrame, pd.DataFrame)
def execute_intersection_dataframe_dataframe(
op, left: pd.DataFrame, right: pd.DataFrame, **kwargs
):
result = left.merge(right, on=list(left.columns), how="inner")
return result
@execute_node.register(ops.Difference, pd.DataFrame, pd.DataFrame)
def execute_difference_dataframe_dataframe(
op, left: pd.DataFrame, right: pd.DataFrame, **kwargs
):
merged = left.merge(
right, on=list(left.columns), how='outer', indicator=True
)
    result = merged[merged["_merge"] != "both"].drop(columns=["_merge"])
return result
@execute_node.register(ops.IsNull, pd.Series)
def execute_series_isnull(op, data, **kwargs):
return data.isnull()
@execute_node.register(ops.NotNull, pd.Series)
def execute_series_notnnull(op, data, **kwargs):
return data.notnull()
@execute_node.register(ops.IsNan, (pd.Series, floating_types))
def execute_isnan(op, data, **kwargs):
return np.isnan(data)
@execute_node.register(ops.IsInf, (pd.Series, floating_types))
def execute_isinf(op, data, **kwargs):
return np.isinf(data)
@execute_node.register(ops.SelfReference, pd.DataFrame)
def execute_node_self_reference_dataframe(op, data, **kwargs):
return data
@execute_node.register(ops.ValueList, collections.abc.Sequence)
def execute_node_value_list(op, _, **kwargs):
return [execute(arg, **kwargs) for arg in op.values]
@execute_node.register(ops.StringConcat, collections.abc.Sequence)
def execute_node_string_concat(op, args, **kwargs):
return functools.reduce(operator.add, args)
@execute_node.register(ops.StringJoin, collections.abc.Sequence)
def execute_node_string_join(op, args, **kwargs):
return op.sep.join(args)
@execute_node.register(
ops.Contains, pd.Series, (collections.abc.Sequence, collections.abc.Set)
)
def execute_node_contains_series_sequence(op, data, elements, **kwargs):
return data.isin(elements)
@execute_node.register(
ops.NotContains, pd.Series, (collections.abc.Sequence, collections.abc.Set)
)
def execute_node_not_contains_series_sequence(op, data, elements, **kwargs):
return ~(data.isin(elements))
# Series, Series, Series
# Series, Series, scalar
@execute_node.register(ops.Where, pd.Series, pd.Series, pd.Series)
@execute_node.register(ops.Where, pd.Series, pd.Series, scalar_types)
def execute_node_where_series_series_series(op, cond, true, false, **kwargs):
# No need to turn false into a series, pandas will broadcast it
return true.where(cond, other=false)
# Series, scalar, Series
def execute_node_where_series_scalar_scalar(op, cond, true, false, **kwargs):
return pd.Series(np.repeat(true, len(cond))).where(cond, other=false)
# Series, scalar, scalar
for scalar_type in scalar_types:
execute_node_where_series_scalar_scalar = execute_node.register(
ops.Where, pd.Series, scalar_type, scalar_type
)(execute_node_where_series_scalar_scalar)
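# The loop above registers one handler per concrete scalar type (rather than a
# single tuple registration), presumably so that the true/false branches are
# required to share the same scalar type.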
# scalar, Series, Series
@execute_node.register(ops.Where, boolean_types, pd.Series, pd.Series)
def execute_node_where_scalar_scalar_scalar(op, cond, true, false, **kwargs):
# Note that it is not necessary to check that true and false are also
# scalars. This allows users to do things like:
# ibis.where(even_or_odd_bool, [2, 4, 6], [1, 3, 5])
return true if cond else false
# scalar, scalar, scalar
for scalar_type in scalar_types:
execute_node_where_scalar_scalar_scalar = execute_node.register(
ops.Where, boolean_types, scalar_type, scalar_type
)(execute_node_where_scalar_scalar_scalar)
# scalar, Series, scalar
@execute_node.register(ops.Where, boolean_types, pd.Series, scalar_types)
def execute_node_where_scalar_series_scalar(op, cond, true, false, **kwargs):
return (
true
if cond
else pd.Series(np.repeat(false, len(true)), index=true.index)
)
# scalar, scalar, Series
@execute_node.register(ops.Where, boolean_types, scalar_types, pd.Series)
def execute_node_where_scalar_scalar_series(op, cond, true, false, **kwargs):
return pd.Series(np.repeat(true, len(false))) if cond else false
@execute_node.register(PandasTable, PandasBackend)
def execute_database_table_client(
op, client, timecontext: Optional[TimeContext], **kwargs
):
df = client.dictionary[op.name]
if timecontext:
begin, end = timecontext
time_col = get_time_col()
if time_col not in df:
raise com.IbisError(
f'Table {op.name} must have a time column named {time_col}'
' to execute with time context.'
)
# filter with time context
mask = df[time_col].between(begin, end)
return df.loc[mask].reset_index(drop=True)
return df
MATH_FUNCTIONS = {
ops.Floor: math.floor,
ops.Ln: math.log,
ops.Log2: lambda x: math.log(x, 2),
ops.Log10: math.log10,
ops.Exp: math.exp,
ops.Sqrt: math.sqrt,
ops.Abs: abs,
ops.Ceil: math.ceil,
ops.Sign: lambda x: 0 if not x else -1 if x < 0 else 1,
}
MATH_FUNCTION_TYPES = tuple(MATH_FUNCTIONS.keys())
@execute_node.register(MATH_FUNCTION_TYPES, numeric_types)
def execute_node_math_function_number(op, value, **kwargs):
return MATH_FUNCTIONS[type(op)](value)
@execute_node.register(ops.Log, numeric_types, numeric_types)
def execute_node_log_number_number(op, value, base, **kwargs):
return math.log(value, base)
@execute_node.register(ops.DropNa, pd.DataFrame)
def execute_node_dropna_dataframe(op, df, **kwargs):
subset = [col.get_name() for col in op.subset] if op.subset else None
return df.dropna(how=op.how, subset=subset)
@execute_node.register(ops.FillNa, pd.DataFrame, simple_types)
def execute_node_fillna_dataframe_scalar(op, df, replacements, **kwargs):
return df.fillna(replacements)
@execute_node.register(ops.FillNa, pd.DataFrame)
def execute_node_fillna_dataframe_dict(op, df, **kwargs):
return df.fillna(op.replacements)
@execute_node.register(ops.IfNull, pd.Series, simple_types)
@execute_node.register(ops.IfNull, pd.Series, pd.Series)
def execute_node_ifnull_series(op, value, replacement, **kwargs):
return value.fillna(replacement)
@execute_node.register(ops.IfNull, simple_types, pd.Series)
def execute_node_ifnull_scalar_series(op, value, replacement, **kwargs):
return (
replacement
if pd.isnull(value)
else pd.Series(value, index=replacement.index)
)
@execute_node.register(ops.IfNull, simple_types, simple_types)
def execute_node_if_scalars(op, value, replacement, **kwargs):
return replacement if pd.isnull(value) else value
@execute_node.register(ops.NullIf, simple_types, simple_types)
def execute_node_nullif_scalars(op, value1, value2, **kwargs):
return np.nan if value1 == value2 else value1
@execute_node.register(ops.NullIf, pd.Series, pd.Series)
def execute_node_nullif_series(op, series1, series2, **kwargs):
return series1.where(series1 != series2)
@execute_node.register(ops.NullIf, pd.Series, simple_types)
def execute_node_nullif_series_scalar(op, series, value, **kwargs):
return series.where(series != value)
@execute_node.register(ops.NullIf, simple_types, pd.Series)
def execute_node_nullif_scalar_series(op, value, series, **kwargs):
return pd.Series(
np.where(series.values == value, np.nan, value), index=series.index
)
def coalesce(values):
return functools.reduce(lambda x, y: x if not pd.isnull(x) else y, values)
@toolz.curry
def promote_to_sequence(length, obj):
return obj.values if isinstance(obj, pd.Series) else np.repeat(obj, length)
def compute_row_reduction(func, value, **kwargs):
final_sizes = {len(x) for x in value if isinstance(x, Sized)}
if not final_sizes:
return func(value)
(final_size,) = final_sizes
raw = func(list(map(promote_to_sequence(final_size), value)), **kwargs)
return pd.Series(raw).squeeze()
@execute_node.register(ops.Greatest, collections.abc.Sequence)
def execute_node_greatest_list(op, value, **kwargs):
return compute_row_reduction(np.maximum.reduce, value, axis=0)
@execute_node.register(ops.Least, collections.abc.Sequence)
def execute_node_least_list(op, value, **kwargs):
return compute_row_reduction(np.minimum.reduce, value, axis=0)
@execute_node.register(ops.Coalesce, collections.abc.Sequence)
def execute_node_coalesce(op, values, **kwargs):
# TODO: this is slow
return compute_row_reduction(coalesce, values)
def wrap_case_result(raw, expr):
"""Wrap a CASE statement result in a Series and handle returning scalars.
Parameters
----------
raw : ndarray[T]
The raw results of executing the ``CASE`` expression
expr : ValueExpr
The expression from the which `raw` was computed
Returns
-------
Union[scalar, Series]
"""
raw_1d = np.atleast_1d(raw)
if np.any(pd.isnull(raw_1d)):
result = pd.Series(raw_1d)
else:
result = pd.Series(
raw_1d, dtype=constants.IBIS_TYPE_TO_PANDAS_TYPE[expr.type()]
)
if result.size == 1 and isinstance(expr, ir.ScalarExpr):
return result.iloc[0].item()
return result
@execute_node.register(ops.SearchedCase, list, list, object)
def execute_searched_case(op, whens, thens, otherwise, **kwargs):
if otherwise is None:
otherwise = np.nan
raw = np.select(whens, thens, otherwise)
return wrap_case_result(raw, op.to_expr())
@execute_node.register(ops.SimpleCase, object, list, list, object)
def execute_simple_case_scalar(op, value, whens, thens, otherwise, **kwargs):
if otherwise is None:
otherwise = np.nan
raw = np.select(np.asarray(whens) == value, thens, otherwise)
return wrap_case_result(raw, op.to_expr())
@execute_node.register(ops.SimpleCase, pd.Series, list, list, object)
def execute_simple_case_series(op, value, whens, thens, otherwise, **kwargs):
if otherwise is None:
otherwise = np.nan
raw = np.select([value == when for when in whens], thens, otherwise)
return wrap_case_result(raw, op.to_expr())
@execute_node.register(ops.Distinct, pd.DataFrame)
def execute_distinct_dataframe(op, df, **kwargs):
return df.drop_duplicates()
@execute_node.register(ops.RowID)
def execute_rowid(op, *args, **kwargs):
raise com.UnsupportedOperationError(
'rowid is not supported in pandas backends'
)
| 32.231664
| 79
| 0.70784
|
e04a159af7d2e2392fcdf22023b73c84b1840206
| 764
|
py
|
Python
|
plugin/core/popups.py
|
gobijan/LSP
|
fe6267f7741b24c760f3ca9ec36697a43dab2684
|
[
"MIT"
] | null | null | null |
plugin/core/popups.py
|
gobijan/LSP
|
fe6267f7741b24c760f3ca9ec36697a43dab2684
|
[
"MIT"
] | null | null | null |
plugin/core/popups.py
|
gobijan/LSP
|
fe6267f7741b24c760f3ca9ec36697a43dab2684
|
[
"MIT"
] | null | null | null |
popup_class = "lsp_popup"
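# The CSS below is presumably injected together with the popup HTML (Sublime
# Text minihtml); the .lsp_popup class scopes these rules to LSP popups only.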
popup_css = '''
.lsp_popup {
margin: 0.5rem 0.5rem 0 0.5rem;
}
.lsp_popup .highlight {
border-width: 0;
border-radius: 0;
}
.lsp_popup p {
margin-bottom: 0.5rem;
padding: 0 0.5rem;
font-family: system;
}
.lsp_popup li {
font-family: system;
}
.lsp_popup .errors {
border-width: 0;
background-color: color(var(--redish) alpha(0.25));
color: --whitish;
margin-bottom: 0.5rem;
padding: 0.5rem;
}
.lsp_popup .warnings {
border-width: 0;
background-color: color(var(--yellowish) alpha(0.25));
color: --whitish;
margin-bottom: 0.5rem;
padding: 0.5rem;
}
'''
| 21.828571
| 62
| 0.524869
|
aec0b391e55de664082608f9435db14908f78107
| 4,180
|
py
|
Python
|
map_of_the_world.py
|
janiszewskibartlomiej/Hack4Change-join_a_meeting
|
d5d471e7cd8a405f92635e1c54d693019b7c21a3
|
[
"MIT"
] | null | null | null |
map_of_the_world.py
|
janiszewskibartlomiej/Hack4Change-join_a_meeting
|
d5d471e7cd8a405f92635e1c54d693019b7c21a3
|
[
"MIT"
] | null | null | null |
map_of_the_world.py
|
janiszewskibartlomiej/Hack4Change-join_a_meeting
|
d5d471e7cd8a405f92635e1c54d693019b7c21a3
|
[
"MIT"
] | null | null | null |
import folium
from folium import DivIcon
from data_processing import DataProcessing
from connect_to_db import ConnectToDb
class CreatingMap:
def __init__(self):
pass
def map_of_the_world(self):
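        # Builds a folium map centred on Poland (52.0, 20.0), overlays a title/logo
        # box, and drops one marker per town whose popup links to that town's meetups.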
title = "Hack4Change: <br> Activity Booster"
meetup_count = ConnectToDb().select_one_record(query="select count(*) from meetups;", parameter="")
cases_map = folium.Map(
location=[52.0, 20.0],
width="99%",
height="99%",
left="0%",
top="0%",
zoom_start=6.5,
max_zoom=9,
min_zoom=5,
titles=title,
attr="attribution",
)
folium.map.Marker(
[54.0, 26.0],
icon=DivIcon(
icon_size=(160, 200),
icon_anchor=(200, 40),
html=f'<div style="background-color:rgba(255, 255, 255, 0.4);">'
f'<center><div style="color: black; padding-top:2px;"><h4>{title}</h4></div>'
f'<img src="static/logo.png" alt="Logo" style="width:80px;">'
'<br>'
f'<h4 style="color: black;">Total meetups: <b style="color: red; padding-bottom:2px;"> {meetup_count[0]}</b></h4>'
f"</div>",
),
).add_to(cases_map)
data = DataProcessing().get_all_towns()
meetups = DataProcessing().get_all_meetups()
# print(data)
for row in data:
coordinates = DataProcessing().slice_location(row[2])
for item in meetups:
meetup_id = item[1]
town_id = item[0]
name_meetup = item[3]
create_time = item[2]
if town_id == row[0]:
folium.Marker(
location=[coordinates[1], coordinates[0]],
popup=folium.Popup(
html=f"""<div style="opacity:1.3;">
<b><center><p style="color:red; font-size:14px;">{row[1]}</p></center></b>
<center><p style="color:black; font-size:14px; margin-top:-.9em; margin-bottom:0.2em;">Meetups:</p></center>
<center><button type="button" class="btn btn-primary btn-sm" style="padding: 5px; margin-top:3px; margin-bottom: 3px;" onclick=window.open("/meetup={meetup_id}")>{name_meetup}</button></center>
<center><button type="button" class="btn btn-primary btn-sm" style="padding: 5px; margin-top:3px; margin-bottom: 3px;" onclick=window.open("/meetup={meetup_id}")>{name_meetup}</button></center>
<center><button type="button" class="btn btn-primary btn-sm" style="padding: 5px; margin-top:3px; margin-bottom: 3px;" onclick=window.open("/meetup={meetup_id}")>{name_meetup}</button></center>
<center><button type="button" class="btn btn-primary btn-sm" style=" padding: 5px; line-height: 1; border-color: red; margin-block-start: 0.9em; border-width: 2px;" onclick=window.open("/add-meetup")>{chr(128200)} New meetup</button></center>
</div>""",
max_width=140,
),
icon=folium.Icon(
color="green",
icon="certificate",
html="position: absolute; z-index: 1",
),
tooltip=f"""
<center>Click me</center>
""",
).add_to(cases_map)
color = "yelow"
folium.CircleMarker(
location=[coordinates[1], coordinates[0]],
radius=10,
color=f"{color}",
fill=True,
fill_color=f"{color}",
).add_to(cases_map)
cases_map.save("index.html") # only for github
cases_map.save("templates/index.html") # only for github
if __name__ == '__main__':
CreatingMap().map_of_the_world()
| 45.434783
| 270
| 0.494737
|
556b3746d636b351db8eb6ec8a74e89b4096e053
| 1,053
|
py
|
Python
|
text_files/filegen_BASE_21122.py
|
gthd/pawk
|
ef32cd0d5f6ed4abc57b414110d5bc2be5ae4c81
|
[
"Apache-2.0"
] | 3
|
2020-01-11T10:20:26.000Z
|
2020-01-13T12:54:21.000Z
|
text_files/filegen_BASE_21122.py
|
gthd/pawk
|
ef32cd0d5f6ed4abc57b414110d5bc2be5ae4c81
|
[
"Apache-2.0"
] | 17
|
2020-03-07T21:44:17.000Z
|
2020-12-29T20:14:59.000Z
|
text_files/filegen_BASE_20995.py
|
gthd/pawk
|
ef32cd0d5f6ed4abc57b414110d5bc2be5ae4c81
|
[
"Apache-2.0"
] | 1
|
2021-04-07T23:41:31.000Z
|
2021-04-07T23:41:31.000Z
|
import random
import string
class FileGenerator:
def genRandomDigits(self):
digits = "".join( [random.choice(string.digits) for i in range(random.randint(1,8))])
return digits
def genRandomChars(self):
chars = "".join( [random.choice(string.ascii_letters[:26]) for i in range(random.randint(5, 15))] )
return chars
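    # createFile() pre-generates 10 random words and 200 random digit strings,
    # then appends 250 lines of the form "word digits digits" to dummydata.txt.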
def createFile(self):
randomChars = []
for i in range(10):
randomChars.append(self.genRandomChars())
randomDigits = []
for i in range(200):
randomDigits.append(self.genRandomDigits())
for i in range(250):
with open('dummydata.txt', 'a') as the_file:
charIndex = random.randint(0,9)
digfirstIndex = random.randint(0,199)
digsecondIndex = random.randint(0,199)
line = randomChars[charIndex] + " " + randomDigits[digfirstIndex] + " " + randomDigits[digsecondIndex] + "\n"
the_file.write(line)
file = FileGenerator()
file.createFile()
| 31.909091
| 125
| 0.598291
|
0d0d43afb3267d4780641fe432419f1c8bf4d0f4
| 17,206
|
py
|
Python
|
.eggs/pytest-3.6.1-py3.6.egg/_pytest/runner.py
|
dyspop/responsysrest
|
7b8e9edab1808f753be6383c7925529775a4fa89
|
[
"MIT"
] | 1
|
2018-09-19T15:08:08.000Z
|
2018-09-19T15:08:08.000Z
|
.eggs/pytest-3.6.1-py3.6.egg/_pytest/runner.py
|
dyspop/responsysrest
|
7b8e9edab1808f753be6383c7925529775a4fa89
|
[
"MIT"
] | 22
|
2018-04-23T13:52:20.000Z
|
2019-09-20T15:11:32.000Z
|
.eggs/pytest-3.6.1-py3.6.egg/_pytest/runner.py
|
dyspop/responsysrest
|
7b8e9edab1808f753be6383c7925529775a4fa89
|
[
"MIT"
] | 1
|
2020-04-28T17:03:40.000Z
|
2020-04-28T17:03:40.000Z
|
""" basic collect and runtest protocol implementations """
from __future__ import absolute_import, division, print_function
import bdb
import os
import sys
from time import time
import py
from _pytest._code.code import TerminalRepr, ExceptionInfo
from _pytest.outcomes import skip, Skipped, TEST_OUTCOME
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption(
"--durations",
action="store",
type=int,
default=None,
metavar="N",
help="show N slowest setup/test durations (N=0 for all).",
    )
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
runtestprotocol(item, nextitem=nextitem)
item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.option.setupshow:
show_test_item(item)
if not item.config.option.setuponly:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def show_test_item(item):
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(" " * 8)
tw.write(item._nodeid)
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
if used_fixtures:
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
def pytest_runtest_setup(item):
_update_current_test_var(item, "setup")
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
_update_current_test_var(item, "call")
sys.last_type, sys.last_value, sys.last_traceback = (None, None, None)
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del type, value, tb # Get rid of these in this frame
raise
def pytest_runtest_teardown(item, nextitem):
_update_current_test_var(item, "teardown")
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(item, when):
"""
Update PYTEST_CURRENT_TEST to reflect the current item and stage.
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = "{} ({})".format(item.nodeid, when)
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
else:
os.environ.pop(var_name)
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail")
or call.excinfo.errisinstance(skip.Exception)
or call.excinfo.errisinstance(bdb.BdbQuit)
)
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(
lambda: ihook(item=item, **kwds),
when=when,
treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"),
)
class CallInfo(object):
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
if treat_keyboard_interrupt_as_exception:
self.excinfo = ExceptionInfo()
else:
self.stop = time()
raise
except: # noqa
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d["version_info"][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d["id"],
d["sysplatform"],
ver,
d["executable"],
)
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, "node"):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def caplog(self):
"""Return captured log lines, if log capturing is enabled
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop - call.start
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(
excinfo, style=item.config.option.tbstyle
)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" % (key, rwhen), content))
return TestReport(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
user_properties=item.user_properties,
)
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(
self,
nodeid,
location,
keywords,
outcome,
longrepr,
when,
sections=(),
duration=0,
user_properties=(),
**extra
):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: user properties is a list of tuples (name, value) that holds user
#: defined properties of the test
self.user_properties = user_properties
#: list of pairs ``(str, str)`` of extra information which needs to
#: marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid,
self.when,
self.outcome,
)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
def pytest_make_collect_report(collector):
call = CallInfo(lambda: list(collector.collect()), "collect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(
collector.nodeid, outcome, longrepr, getattr(call, "result", None)
)
rep.call = call # see collect_one_node
return rep
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid,
len(self.result),
self.outcome,
)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
py.builtin._reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert (
colitem is None or colitem in self.stack or isinstance(colitem, tuple)
)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
while self.stack:
if self.stack == needed_collectors[: len(self.stack)]:
break
self._pop_and_teardown()
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, "_prepare_exc"):
py.builtin._reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack) :]:
self.stack.append(col)
try:
col.setup()
except TEST_OUTCOME:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep
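A minimal usage sketch of the reporting machinery above (a hypothetical conftest.py, not part of runner.py; pytest_runtest_logreport is the real hook name, the body is purely illustrative):
def pytest_runtest_logreport(report):
    # one TestReport per phase: "setup", "call", "teardown" (see TestReport above)
    if report.when == "call":
        print("%s -> %s in %.3fs" % (report.nodeid, report.outcome, report.duration))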
| 30.345679 | 86 | 0.611298 |
f04913481f1a71107c3cfd0e9a28a5329f68da1f | 253 | py | Python |
text/stick/_line.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | ["Apache-2.0"] | null | null | null |
text/stick/_line.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | ["Apache-2.0"] | null | null | null |
text/stick/_line.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | ["Apache-2.0"] | null | null | null |
"""
*Stick*
A string imbued with plane geometry.
"""
from dataclasses import dataclass
from ..string import String
from .geometry import StickGeometry
__all__ = ["Stick"]
@dataclass
class Stick(
String,
):
geometry: StickGeometry
| 11 | 38 | 0.695652 |
40e3a5bd242e53abad701b6362f711f7fc79c7ae | 1,369 | py | Python |
solar/dblayer/conflict_resolution.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | ["Apache-2.0"] | 7 | 2015-09-07T22:52:32.000Z | 2016-01-14T09:27:09.000Z |
solar/dblayer/conflict_resolution.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | ["Apache-2.0"] | 117 | 2015-09-08T05:46:16.000Z | 2016-04-14T16:46:33.000Z |
solar/dblayer/conflict_resolution.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | ["Apache-2.0"] | 21 | 2015-09-08T06:34:50.000Z | 2015-12-09T09:14:24.000Z |
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import Counter
def naive_resolver(riak_object):
# for now we support deleted vs existing object
siblings = riak_object.siblings
    # build (length, sibling) pairs and sort by length; sorted(..., key=...) also keeps
    # this working on Python 3, where map() returns an iterator with no sort() method
    siblings_len = sorted(
        ((len(sibling._get_encoded_data()), sibling) for sibling in siblings),
        key=lambda pair: pair[0])
c = Counter((x[0] for x in siblings_len))
if len(c) > 2:
raise RuntimeError(
"Too many different siblings, not sure what to do with siblings")
if 0 not in c:
raise RuntimeError("No empty object for resolution"
" not sure what to do with siblings")
selected = max(siblings_len)
# TODO: pass info to obj save_lazy too
riak_object.siblings = [selected[1]]
dblayer_conflict_resolver = naive_resolver
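A minimal sketch of the resolver in use, with hypothetical stub objects that expose only the two attributes used above (real Riak objects carry much more state):
class _StubSibling(object):
    def __init__(self, data):
        self._data = data

    def _get_encoded_data(self):
        return self._data


class _StubRiakObject(object):
    def __init__(self, siblings):
        self.siblings = siblings


obj = _StubRiakObject([_StubSibling(b""), _StubSibling(b'{"key": "value"}')])
naive_resolver(obj)
assert len(obj.siblings) == 1  # only the non-empty sibling is kept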
| 37 | 78 | 0.692476 |