# Created by MechAviv
# Map ID :: 620100043
# Ballroom : Lobby
sm.curNodeEventEnd(True)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(True, True, False, False)
sm.sendDelay(3000)
sm.showEffect("Effect/DirectionNewPirate.img/newPirate/balloonMsg2/6", 2000, 130, 0, 10, -2, True, 0)
sm.sendDelay(1000)
sm.setSpeakerID(9270088)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(.......)")
sm.sendDelay(1000)
sm.showEffect("Effect/DirectionNewPirate.img/newPirate/balloonMsg2/7", 2000, 130, 0, 10, -2, True, 0)
sm.sendDelay(2000)
sm.showEffect("Effect/DirectionNewPirate.img/newPirate/balloonMsg2/8", 2000, 130, 0, 10, -2, True, 0)
sm.sendDelay(1000)
sm.setSpeakerID(9270088)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(Ugh... where... am I?)")
sm.sendDelay(500)
sm.setSpeakerID(9270088)
sm.removeEscapeButton()
sm.setSpeakerType(3)
sm.sendNext("I don't know a spaceship from a barnacle, but anybody that can survive that kinda fall and still have a thirst for treasure is good in my book.")
sm.sendDelay(500)
sm.setSpeakerID(9270088)
sm.removeEscapeButton()
sm.setPlayerAsSpeaker()
sm.setSpeakerType(3)
sm.sendNext("(Who... are these voices? #b#p9270084##k... my core... )")
sm.sendDelay(1500)
sm.showFieldEffect("newPirate/wakeup2", 0)
sm.sendDelay(7600)
sm.setTemporarySkillSet(0)
sm.setInGameDirectionMode(False, True, False, False)
sm.warp(620100044, 0)
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyPulses Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Test the templates sequences functionalities.
"""
import pytest
from exopy_pulses.pulses.sequences.base_sequences import RootSequence
from exopy_pulses.pulses.sequences.template_sequence import TemplateSequence
from exopy_pulses.testing.context import TestContext
pytestmark = pytest.mark.skipif('True')
@pytest.fixture
def template_preferences(pulses_plugin, template_sequence):
"""Preferences corresponding to the template sequence.
"""
infos = pulses_plugin.get_item_infos(template_sequence)
return infos.metadata['template-config']
@pytest.fixture
def template_dependencies(pulses_plugin, template_preferences):
"""Collect the build dependency of the template_sequence.
"""
workbench = pulses_plugin.workbench
core = workbench.get_plugin('enaml.workbench.core')
cmd = 'exopy.app.dependencies.analyse'
dep_analysis = core.invoke_command(cmd, {'obj': template_preferences})
cmd = 'exopy.app.dependencies.collect'
dep = core.invoke_command(cmd, {'kind': 'build',
'dependencies': dep_analysis.dependencies})
dep = dep.dependencies
dep[''] = {'': template_preferences}
return dep
@pytest.fixture
def root_with_template(template_sequence, template_dependencies):
"""Build a root using the template sequence.
"""
root = RootSequence()
root.context = TestContext(sampling=0.5)
conf = {'template_id': template_sequence, 'name': 'Template',
'template_vars': "{'b': '19'}"}
seq = TemplateSequence.build_from_config(conf, template_dependencies)
seq.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'Ch1_A'}
seq.def_1 = '1.0'
seq.def_2 = '20.0'
root.add_child_item(0, seq)
return root
def test_build_from_config1(template_sequence, template_dependencies):
""" Test building a template sequence from only the template file.
    No information is known about channel mapping or template_vars values.
"""
conf = {'template_id': template_sequence, 'name': 'Template',
'template_vars': "{'b': '19', 'c': ''}"}
seq = TemplateSequence.build_from_config(conf, template_dependencies)
assert seq.name == 'Template'
assert seq.template_id == template_sequence
assert seq.template_vars == dict(b='19')
assert seq.local_vars == dict(a='1.5')
assert len(seq.items) == 4
assert seq.items[3].index == 5
assert seq.docs == 'Basic user comment\nff'
context = seq.context
assert context.template is seq
assert context.logical_channels == ['A', 'B']
assert context.analogical_channels == ['Ch1', 'Ch2']
assert context.channel_mapping == {'A': '', 'B': '', 'Ch1': '',
'Ch2': ''}
def test_build_from_config2(template_sequence, template_dependencies):
""" Test rebuilding a sequence including a template sequence.
Channel mapping of template_vars values are known.
"""
conf = {'template_id': 'test', 'name': 'Template',
'template_vars': "{'b': '25'}"}
seq = TemplateSequence.build_from_config(conf, template_dependencies)
seq.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'Ch1_A'}
root = RootSequence()
context = TestContext(sampling=0.5)
root.context = context
root.add_child_item(0, seq)
pref = root.preferences_from_members()
new = RootSequence.build_from_config(pref, template_dependencies)
assert new.items[0].index == 1
seq = new.items[0]
assert seq.name == 'Template'
assert seq.template_id == template_sequence
assert seq.template_vars == dict(b='25')
assert seq.local_vars == dict(a='1.5')
assert len(seq.items) == 4
assert seq.items[3].index == 5
assert seq.docs == 'Basic user comment\nff'
context = seq.context
assert context.template is seq
assert context.logical_channels == ['A', 'B']
assert context.analogical_channels == ['Ch1', 'Ch2']
assert context.channel_mapping == {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'Ch1_A'}
def test_build_from_config3(template_sequence, template_dependencies):
"""Test rebuilding a sequence including twice the same template sequence
"""
conf = {'template_id': template_sequence, 'name': 'Template',
'template_vars': "{'b': '19'}"}
seq = TemplateSequence.build_from_config(conf, template_dependencies)
seq.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'Ch1_A'}
conf = {'template_id': 'test', 'name': 'Template',
'template_vars': "{'b': '12'}"}
seq2 = TemplateSequence.build_from_config(conf, template_dependencies)
seq2.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch1_A', 'Ch2': 'Ch2_A'}
root = RootSequence()
context = TestContext(sampling=0.5)
root.context = context
root.add_child_item(0, seq)
root.add_child_item(0, seq2)
pref = root.preferences_from_members()
new = RootSequence.build_from_config(pref, template_dependencies)
assert new.items[0].index == 1
seq = new.items[0]
assert seq.name == 'Template'
assert seq.template_id == template_sequence
assert seq.template_vars == dict(b='19')
assert seq.local_vars == dict(a='1.5')
assert len(seq.items) == 4
assert seq.items[3].index == 5
assert seq.docs == 'Basic user comment\nff'
context = seq.context
assert context.template == seq
assert context.logical_channels == ['A', 'B']
    assert context.analogical_channels == ['Ch1', 'Ch2']
assert context.channel_mapping == {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'Ch1_A'}
assert new.items[1].index == 2
seq = new.items[1]
assert seq.name == 'Template'
assert seq.template_id == 'test'
assert seq.template_vars == dict(b='12')
assert seq.local_vars == dict(a='1.5')
assert len(seq.items) == 4
assert seq.items[3].index == 5
assert seq.docs == 'Basic user comment\nff'
context = seq.context
assert context.template == seq
assert context.logical_channels == ['A', 'B']
assert context.analogical_channels == ['Ch1', 'Ch2']
assert context.channel_mapping == {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch1_A', 'Ch2': 'Ch2_A'}
def test_sequence_compilation1(root_with_template):
"""Test evaluating and simplifying a template when everything is ok.
"""
res, missings, errors = root_with_template.evaluate_sequence()
pulses = root_with_template.simplify_sequence()
assert res, errors
assert len(pulses) == 4
pulse = pulses[0]
assert pulse.index == 1
assert pulse.start == 2.0
assert pulse.stop == 2.5
assert pulse.duration == 0.5
assert pulse.channel == 'Ch1_L'
pulse = pulses[1]
assert pulse.index == 2
assert pulse.start == 3.5
assert pulse.stop == 4
assert pulse.duration == 0.5
assert pulse.channel == 'Ch2_L'
pulse = pulses[2]
assert pulse.index == 4
assert pulse.start == 4.5
assert pulse.stop == 20
assert pulse.duration == 15.5
assert pulse.channel == 'Ch1_A'
pulse = pulses[3]
assert pulse.index == 5
assert pulse.start == 4.5
assert pulse.stop == 20
assert pulse.duration == 15.5
assert pulse.channel == 'Ch2_A'
def test_sequence_compilation2(root_with_template):
"""Test compiling a template : issue in context, incomplete mapping.
"""
template = root_with_template.items[0]
template.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A'}
res, miss, errors = root_with_template.evaluate_sequence()
assert not res
assert not miss
assert 'Template-context' in errors
assert 'Ch2' in errors['Template-context']
def test_sequence_compilation3(root_with_template):
"""Test compiling a template : issue in context, erroneous mapping.
"""
template = root_with_template.items[0]
template.context.channel_mapping = {'A': 'Ch1_L', 'B': 'Ch2_L',
'Ch1': 'Ch2_A', 'Ch2': 'A'}
res, miss, errors = root_with_template.evaluate_sequence()
assert not res
assert not miss
assert 'Template-context' in errors
assert 'Ch2' in errors['Template-context']
def test_sequence_compilation3bis(root_with_template):
"""Test compiling a template : pulse as umapped channel.
"""
template = root_with_template.items[0]
template.items[0].channel = '__'
res, miss, errors = root_with_template.evaluate_sequence(False)
assert not res
assert not miss
assert 'Template-channels' in errors
assert '__' in errors['Template-channels']
def test_sequence_compilation4(root_with_template):
"""Test compiling a template : issue in defs.
"""
template = root_with_template.items[0]
template.def_1 = 'r*'
res, miss, errors = root_with_template.evaluate_sequence()
assert not res
assert not miss
assert '1_start' in errors
def test_sequence_compilation5(root_with_template):
"""Test compiling a template : issue in template_vars.
"""
template = root_with_template.items[0]
template.template_vars = {'b': '*1'}
res, miss, errors = root_with_template.compile_sequence(False)
assert not res
assert '1_b' in errors
def test_sequence_compilation6(root_with_template):
"""Test compiling a template : issue in local_vars.
"""
template = root_with_template.items[0]
template.local_vars = {'a': '*1'}
res, miss, errors = root_with_template.evaluate_sequence()
assert not res
assert '1_a' in errors
def test_sequence_compilation7(root_with_template):
"""Test compiling a template : issue in stop time.
"""
template = root_with_template.items[0]
template.items[0].def_2 = '200'
res, miss, errors = root_with_template.compile_sequence(False)
assert not res
assert 'Template-stop' in errors
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import functools
import inspect
import math
import typing
from collections import OrderedDict
from importlib import import_module
import makefun
import funsor.delta
import funsor.ops as ops
from funsor.affine import is_affine
from funsor.cnf import GaussianMixture
from funsor.domains import Domain, reals
from funsor.gaussian import Gaussian
from funsor.interpreter import gensym
from funsor.tensor import (Tensor, align_tensors, dummy_numeric_array, get_default_prototype,
ignore_jit_warnings, numeric_array, stack)
from funsor.terms import Funsor, FunsorMeta, Independent, Number, Variable, eager, to_data, to_funsor
from funsor.util import broadcast_shape, get_backend
BACKEND_TO_DISTRIBUTIONS_BACKEND = {
"torch": "funsor.torch.distributions",
"jax": "funsor.jax.distributions",
}
def numbers_to_tensors(*args):
"""
Convert :class:`~funsor.terms.Number` s to :class:`funsor.tensor.Tensor` s,
using any provided tensor as a prototype, if available.
"""
if any(isinstance(x, Number) for x in args):
prototype = get_default_prototype()
options = dict(dtype=prototype.dtype)
for x in args:
if isinstance(x, Tensor):
options = dict(dtype=x.data.dtype, device=getattr(x.data, "device", None))
break
with ignore_jit_warnings():
args = tuple(Tensor(numeric_array(x.data, **options), dtype=x.dtype)
if isinstance(x, Number) else x
for x in args)
return args
class DistributionMeta(FunsorMeta):
"""
Wrapper to fill in default values and convert Numbers to Tensors.
"""
def __call__(cls, *args, **kwargs):
kwargs.update(zip(cls._ast_fields, args))
value = kwargs.pop('value', 'value')
kwargs = OrderedDict(
(k, to_funsor(kwargs[k], output=cls._infer_param_domain(k, getattr(kwargs[k], "shape", ()))))
for k in cls._ast_fields if k != 'value')
value = to_funsor(value, output=cls._infer_value_domain(**{k: v.output for k, v in kwargs.items()}))
args = numbers_to_tensors(*(tuple(kwargs.values()) + (value,)))
return super(DistributionMeta, cls).__call__(*args)
class Distribution(Funsor, metaclass=DistributionMeta):
r"""
Funsor backed by a PyTorch/JAX distribution object.
:param \*args: Distribution-dependent parameters. These can be either
funsors or objects that can be coerced to funsors via
:func:`~funsor.terms.to_funsor` . See derived classes for details.
"""
dist_class = "defined by derived classes"
def __init__(self, *args):
params = tuple(zip(self._ast_fields, args))
assert any(k == 'value' for k, v in params)
inputs = OrderedDict()
for name, value in params:
assert isinstance(name, str)
assert isinstance(value, Funsor)
inputs.update(value.inputs)
inputs = OrderedDict(inputs)
output = reals()
super(Distribution, self).__init__(inputs, output)
self.params = OrderedDict(params)
def __repr__(self):
return '{}({})'.format(type(self).__name__,
', '.join('{}={}'.format(*kv) for kv in self.params.items()))
def eager_reduce(self, op, reduced_vars):
if op is ops.logaddexp and isinstance(self.value, Variable) and self.value.name in reduced_vars:
return Number(0.) # distributions are normalized
return super(Distribution, self).eager_reduce(op, reduced_vars)
@classmethod
def eager_log_prob(cls, *params):
inputs, tensors = align_tensors(*params)
params = dict(zip(cls._ast_fields, tensors))
value = params.pop('value')
data = cls.dist_class(**params).log_prob(value)
return Tensor(data, inputs)
def unscaled_sample(self, sampled_vars, sample_inputs, rng_key=None):
params = OrderedDict(self.params)
value = params.pop("value")
assert all(isinstance(v, (Number, Tensor)) for v in params.values())
assert isinstance(value, Variable) and value.name in sampled_vars
inputs_, tensors = align_tensors(*params.values())
inputs = OrderedDict(sample_inputs.items())
inputs.update(inputs_)
sample_shape = tuple(v.size for v in sample_inputs.values())
raw_dist = self.dist_class(**dict(zip(self._ast_fields[:-1], tensors)))
sample_args = (sample_shape,) if rng_key is None else (rng_key, sample_shape)
if getattr(raw_dist, "has_rsample", False):
raw_sample = raw_dist.rsample(*sample_args)
else:
raw_sample = ops.detach(raw_dist.sample(*sample_args))
result = funsor.delta.Delta(value.name, Tensor(raw_sample, inputs, value.output.dtype))
if not getattr(raw_dist, "has_rsample", False):
# scaling of dice_factor by num samples should already be handled by Funsor.sample
raw_log_prob = raw_dist.log_prob(raw_sample)
dice_factor = Tensor(raw_log_prob - ops.detach(raw_log_prob), inputs)
result = result + dice_factor
return result
def __getattribute__(self, attr):
if attr in type(self)._ast_fields and attr != 'name':
return self.params[attr]
return super().__getattribute__(attr)
@classmethod
@functools.lru_cache(maxsize=5000)
def _infer_value_domain(cls, **kwargs):
# rely on the underlying distribution's logic to infer the event_shape given param domains
instance = cls.dist_class(**{k: dummy_numeric_array(domain) for k, domain in kwargs.items()},
validate_args=False)
out_shape = instance.event_shape
if type(instance.support).__name__ == "_IntegerInterval":
out_dtype = int(instance.support.upper_bound + 1)
else:
out_dtype = 'real'
return Domain(dtype=out_dtype, shape=out_shape)
@classmethod
@functools.lru_cache(maxsize=5000)
def _infer_param_domain(cls, name, raw_shape):
support = cls.dist_class.arg_constraints.get(name, None)
# XXX: if the backend does not have the same definition of constraints, we should
        # define backend-specific distributions and override these `infer_value_domain`,
# `infer_param_domain` methods.
# Because NumPyro and Pyro have the same pattern, we use name check for simplicity.
support_name = type(support).__name__
if support_name == "_Simplex":
output = reals(raw_shape[-1])
elif support_name == "_RealVector":
output = reals(raw_shape[-1])
elif support_name in ["_LowerCholesky", "_PositiveDefinite"]:
output = reals(*raw_shape[-2:])
# resolve the issue: logits's constraints are real (instead of real_vector)
# for discrete multivariate distributions in Pyro
elif support_name == "_Real" and name == "logits" and (
"probs" in cls.dist_class.arg_constraints
and type(cls.dist_class.arg_constraints["probs"]).__name__ == "_Simplex"):
output = reals(raw_shape[-1])
else:
output = None
return output
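    # e.g. a simplex-constrained "probs" parameter with raw shape (3,) maps to
    # reals(3), while an unconstrained scalar parameter maps to None so that
    # to_funsor falls back to its default output inference.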
################################################################################
# Distribution Wrappers
################################################################################
def make_dist(backend_dist_class, param_names=()):
if not param_names:
param_names = tuple(name for name in inspect.getfullargspec(backend_dist_class.__init__)[0][1:]
if name in backend_dist_class.arg_constraints)
@makefun.with_signature("__init__(self, {}, value='value')".format(', '.join(param_names)))
def dist_init(self, **kwargs):
return Distribution.__init__(self, *tuple(kwargs[k] for k in self._ast_fields))
dist_class = DistributionMeta(backend_dist_class.__name__.split("Wrapper_")[-1], (Distribution,), {
'dist_class': backend_dist_class,
'__init__': dist_init,
})
eager.register(dist_class, *((Tensor,) * (len(param_names) + 1)))(dist_class.eager_log_prob)
return dist_class
FUNSOR_DIST_NAMES = [
('Beta', ('concentration1', 'concentration0')),
('BernoulliProbs', ('probs',)),
('BernoulliLogits', ('logits',)),
('Binomial', ('total_count', 'probs')),
('Categorical', ('probs',)),
('CategoricalLogits', ('logits',)),
('Delta', ('v', 'log_density')),
('Dirichlet', ('concentration',)),
('Gamma', ('concentration', 'rate')),
('Multinomial', ('total_count', 'probs')),
('MultivariateNormal', ('loc', 'scale_tril')),
('NonreparameterizedBeta', ('concentration1', 'concentration0')),
('NonreparameterizedDirichlet', ('concentration',)),
('NonreparameterizedGamma', ('concentration', 'rate')),
('NonreparameterizedNormal', ('loc', 'scale')),
('Normal', ('loc', 'scale')),
('Poisson', ('rate',))
]
###############################################
# Converting backend Distributions to funsors
###############################################
def backenddist_to_funsor(backend_dist, output=None, dim_to_name=None):
funsor_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
funsor_dist_class = getattr(funsor_dist, type(backend_dist).__name__.split("Wrapper_")[-1])
params = [to_funsor(
getattr(backend_dist, param_name),
output=funsor_dist_class._infer_param_domain(
param_name, getattr(getattr(backend_dist, param_name), "shape", ())),
dim_to_name=dim_to_name)
for param_name in funsor_dist_class._ast_fields if param_name != 'value']
return funsor_dist_class(*params)
def indepdist_to_funsor(backend_dist, output=None, dim_to_name=None):
dim_to_name = OrderedDict((dim - backend_dist.reinterpreted_batch_ndims, name)
for dim, name in dim_to_name.items())
dim_to_name.update(OrderedDict((i, "_pyro_event_dim_{}".format(i))
for i in range(-backend_dist.reinterpreted_batch_ndims, 0)))
result = to_funsor(backend_dist.base_dist, dim_to_name=dim_to_name)
for i in reversed(range(-backend_dist.reinterpreted_batch_ndims, 0)):
name = "_pyro_event_dim_{}".format(i)
result = funsor.terms.Independent(result, "value", name, "value")
return result
def maskeddist_to_funsor(backend_dist, output=None, dim_to_name=None):
mask = to_funsor(ops.astype(backend_dist._mask, 'float32'), output=output, dim_to_name=dim_to_name)
funsor_base_dist = to_funsor(backend_dist.base_dist, output=output, dim_to_name=dim_to_name)
return mask * funsor_base_dist
def transformeddist_to_funsor(backend_dist, output=None, dim_to_name=None):
raise NotImplementedError("TODO implement conversion of TransformedDistribution")
def mvndist_to_funsor(backend_dist, output=None, dim_to_name=None, real_inputs=OrderedDict()):
funsor_dist = backenddist_to_funsor(backend_dist, output=output, dim_to_name=dim_to_name)
if len(real_inputs) == 0:
return funsor_dist
discrete, gaussian = funsor_dist(value="value").terms
inputs = OrderedDict((k, v) for k, v in gaussian.inputs.items() if v.dtype != 'real')
inputs.update(real_inputs)
return discrete + Gaussian(gaussian.info_vec, gaussian.precision, inputs)
###############################################################
# Converting distribution funsors to backend distributions
###############################################################
@to_data.register(Distribution)
def distribution_to_data(funsor_dist, name_to_dim=None):
pyro_dist_class = funsor_dist.dist_class
params = [to_data(getattr(funsor_dist, param_name), name_to_dim=name_to_dim)
for param_name in funsor_dist._ast_fields if param_name != 'value']
pyro_dist = pyro_dist_class(**dict(zip(funsor_dist._ast_fields[:-1], params)))
funsor_event_shape = funsor_dist.value.output.shape
pyro_dist = pyro_dist.to_event(max(len(funsor_event_shape) - len(pyro_dist.event_shape), 0))
if pyro_dist.event_shape != funsor_event_shape:
raise ValueError("Event shapes don't match, something went wrong")
return pyro_dist
@to_data.register(Independent[typing.Union[Independent, Distribution], str, str, str])
def indep_to_data(funsor_dist, name_to_dim=None):
raise NotImplementedError("TODO implement conversion of Independent")
@to_data.register(Gaussian)
def gaussian_to_data(funsor_dist, name_to_dim=None, normalized=False):
if normalized:
return to_data(funsor_dist.log_normalizer + funsor_dist, name_to_dim=name_to_dim)
loc = ops.cholesky_solve(ops.unsqueeze(funsor_dist.info_vec, -1),
ops.cholesky(funsor_dist.precision)).squeeze(-1)
int_inputs = OrderedDict((k, d) for k, d in funsor_dist.inputs.items() if d.dtype != "real")
loc = to_data(Tensor(loc, int_inputs), name_to_dim)
precision = to_data(Tensor(funsor_dist.precision, int_inputs), name_to_dim)
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.MultivariateNormal.dist_class(loc, precision_matrix=precision)
@to_data.register(GaussianMixture)
def gaussianmixture_to_data(funsor_dist, name_to_dim=None):
discrete, gaussian = funsor_dist.terms
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
cat = backend_dist.CategoricalLogits.dist_class(logits=to_data(
discrete + gaussian.log_normalizer, name_to_dim=name_to_dim))
mvn = to_data(gaussian, name_to_dim=name_to_dim)
return cat, mvn
################################################
# Backend-agnostic distribution patterns
################################################
def Bernoulli(probs=None, logits=None, value='value'):
"""
Wraps backend `Bernoulli` distributions.
This dispatches to either `BernoulliProbs` or `BernoulliLogits`
to accept either ``probs`` or ``logits`` args.
:param Funsor probs: Probability of 1.
:param Funsor value: Optional observation in ``{0,1}``.
"""
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
if probs is not None:
return backend_dist.BernoulliProbs(probs, value) # noqa: F821
if logits is not None:
return backend_dist.BernoulliLogits(logits, value) # noqa: F821
raise ValueError('Either probs or logits must be specified')
def LogNormal(loc, scale, value='value'):
"""
Wraps backend `LogNormal` distributions.
:param Funsor loc: Mean of the untransformed Normal distribution.
:param Funsor scale: Standard deviation of the untransformed Normal
distribution.
:param Funsor value: Optional real observation.
"""
loc, scale = to_funsor(loc), to_funsor(scale)
y = to_funsor(value, output=loc.output)
t = ops.exp
x = t.inv(y)
log_abs_det_jacobian = t.log_abs_det_jacobian(x, y)
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.Normal(loc, scale, x) - log_abs_det_jacobian # noqa: F821
def eager_beta(concentration1, concentration0, value):
concentration = stack((concentration0, concentration1))
value = stack((1 - value, value))
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.Dirichlet(concentration, value=value) # noqa: F821
def eager_binomial(total_count, probs, value):
probs = stack((1 - probs, probs))
value = stack((total_count - value, value))
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.Multinomial(total_count, probs, value=value) # noqa: F821
def eager_multinomial(total_count, probs, value):
# Multinomial.log_prob() supports inhomogeneous total_count only by
# avoiding passing total_count to the constructor.
inputs, (total_count, probs, value) = align_tensors(total_count, probs, value)
shape = broadcast_shape(total_count.shape + (1,), probs.shape, value.shape)
probs = Tensor(ops.expand(probs, shape), inputs)
value = Tensor(ops.expand(value, shape), inputs)
if get_backend() == "torch":
total_count = Number(ops.amax(total_count, None).item()) # Used by distributions validation code.
else:
total_count = Tensor(ops.expand(total_count, shape[:-1]), inputs)
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.Multinomial.eager_log_prob(total_count, probs, value) # noqa: F821
def eager_categorical_funsor(probs, value):
return probs[value].log()
def eager_categorical_tensor(probs, value):
value = probs.materialize(value)
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
return backend_dist.Categorical(probs=probs, value=value) # noqa: F821
def eager_delta_tensor(v, log_density, value):
# This handles event_dim specially, and hence cannot use the
# generic Delta.eager_log_prob() method.
assert v.output == value.output
event_dim = len(v.output.shape)
inputs, (v, log_density, value) = align_tensors(v, log_density, value)
backend_dist = import_module(BACKEND_TO_DISTRIBUTIONS_BACKEND[get_backend()])
data = backend_dist.Delta.dist_class(v, log_density, event_dim).log_prob(value) # noqa: F821
return Tensor(data, inputs)
def eager_delta_funsor_variable(v, log_density, value):
assert v.output == value.output
return funsor.delta.Delta(value.name, v, log_density)
def eager_delta_funsor_funsor(v, log_density, value):
assert v.output == value.output
return funsor.delta.Delta(v.name, value, log_density)
def eager_delta_variable_variable(v, log_density, value):
return None
def eager_normal(loc, scale, value):
assert loc.output == reals()
assert scale.output == reals()
assert value.output == reals()
if not is_affine(loc) or not is_affine(value):
return None # lazy
info_vec = ops.new_zeros(scale.data, scale.data.shape + (1,))
precision = ops.pow(scale.data, -2).reshape(scale.data.shape + (1, 1))
log_prob = -0.5 * math.log(2 * math.pi) - ops.log(scale).sum()
inputs = scale.inputs.copy()
var = gensym('value')
inputs[var] = reals()
gaussian = log_prob + Gaussian(info_vec, precision, inputs)
return gaussian(**{var: value - loc})
def eager_mvn(loc, scale_tril, value):
assert len(loc.shape) == 1
assert len(scale_tril.shape) == 2
assert value.output == loc.output
if not is_affine(loc) or not is_affine(value):
return None # lazy
info_vec = ops.new_zeros(scale_tril.data, scale_tril.data.shape[:-1])
precision = ops.cholesky_inverse(scale_tril.data)
scale_diag = Tensor(ops.diagonal(scale_tril.data, -1, -2), scale_tril.inputs)
log_prob = -0.5 * scale_diag.shape[0] * math.log(2 * math.pi) - ops.log(scale_diag).sum()
inputs = scale_tril.inputs.copy()
var = gensym('value')
inputs[var] = reals(scale_diag.shape[0])
gaussian = log_prob + Gaussian(info_vec, precision, inputs)
return gaussian(**{var: value - loc})
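# Illustrative usage sketch: assumes one of the backends in
# BACKEND_TO_DISTRIBUTIONS_BACKEND is active so the generated wrapper classes
# (e.g. BernoulliProbs) exist; `p` and `q` are hypothetical probs/logits funsors.
#
#   d1 = Bernoulli(probs=p)   # dispatches to the backend BernoulliProbs(p, 'value')
#   d2 = Bernoulli(logits=q)  # dispatches to the backend BernoulliLogits(q, 'value')
#   Bernoulli()               # raises ValueError: neither probs nor logits given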
|
from .decode import get_decoded_tx
from .fees import get_fee, get_fee_cached
from .rates import (
currency_to_satoshi, currency_to_satoshi_cached,
satoshi_to_currency, satoshi_to_currency_cached
)
from .services import NetworkAPI
|
import sys
import numpy as np
from PIL import Image
import torch
import torch.utils.data as data
import matplotlib.pyplot as plt
from ..utils import *
import matplotlib.patches as patches
import os
from ..sample_generator import *
# from pretrain_options import *
from ..img_cropper import *
class RGB_T_Dataset(data.Dataset):
def __init__(self, rgb_img_dir, rgb_img_list, t_img_dir, t_img_list, gt, receptive_field, opts):
self.rgb_img_list = np.array([os.path.join(rgb_img_dir, img) for img in rgb_img_list])
self.t_img_list = np.array([os.path.join(t_img_dir, img) for img in t_img_list])
self.gt = gt
self.batch_frames = opts['batch_frames']
self.batch_pos = opts['batch_pos']
self.batch_neg = opts['batch_neg']
self.overlap_pos = opts['overlap_pos']
self.overlap_neg = opts['overlap_neg']
self.crop_size = opts['img_size']
self.padding = opts['padding']
self.index = np.random.permutation(len(self.rgb_img_list))
self.pointer = 0
image = Image.open(self.rgb_img_list[0]).convert('RGB')
self.scene_generator = SampleGenerator('gaussian', image.size, trans_f=1.5, scale_f=1.2, valid=True)
self.pos_generator = SampleGenerator('gaussian', image.size, 0.1, 1.2, 1.1, True)
self.neg_generator = SampleGenerator('uniform', image.size, 1, 1.2, 1.1, True)
self.receptive_field = receptive_field
self.interval = opts['frame_interval']
self.img_crop_model = imgCropper(opts['padded_img_size'])
self.img_crop_model.eval()
self.use_gpu = opts['use_gpu']
if opts['use_gpu']:
self.img_crop_model.gpuEnable()
def __iter__(self):
return self
def __next__(self):
next_pointer = min(self.pointer + self.batch_frames, len(self.rgb_img_list))
idx = self.index[self.pointer:next_pointer]
if len(idx) < self.batch_frames:
self.index = np.random.permutation(len(self.rgb_img_list))
next_pointer = self.batch_frames - len(idx)
idx = np.concatenate((idx, self.index[:next_pointer]))
self.pointer = next_pointer
n_pos = self.batch_pos
n_neg = self.batch_neg
scenes_rgb = []
scenes_t = []
total_pos_rois = []
total_neg_rois = []
for i, (rgb_img_path, t_img_path, bbox) in enumerate(
zip(self.rgb_img_list[idx], self.t_img_list[idx], self.gt[idx])):
image_rgb = Image.open(rgb_img_path).convert('RGB')
image_rgb = np.asarray(image_rgb)
image_t = Image.open(t_img_path).convert('RGB')
image_t = np.asarray(image_t)
ishape = image_rgb.shape
pos_examples = gen_samples(SampleGenerator('gaussian', (ishape[1], ishape[0]), 0.1, 1.2, 1.1, False), bbox,
n_pos, overlap_range=self.overlap_pos)
neg_examples = gen_samples(SampleGenerator('uniform', (ishape[1], ishape[0]), 1, 1.2, 1.1, False), bbox,
n_neg, overlap_range=self.overlap_neg)
# compute padded sample
padded_x1 = (neg_examples[:, 0] - neg_examples[:, 2] * (self.padding - 1.) / 2.).min()
padded_y1 = (neg_examples[:, 1] - neg_examples[:, 3] * (self.padding - 1.) / 2.).min()
padded_x2 = (neg_examples[:, 0] + neg_examples[:, 2] * (self.padding + 1.) / 2.).max()
padded_y2 = (neg_examples[:, 1] + neg_examples[:, 3] * (self.padding + 1.) / 2.).max()
padded_scene_box = np.asarray((padded_x1, padded_y1, padded_x2 - padded_x1, padded_y2 - padded_y1))
jitter_scale = 1.1 ** np.clip(3. * np.random.randn(1, 1), -2, 2)
crop_img_size = (padded_scene_box[2:4] * ((self.crop_size, self.crop_size) / bbox[2:4])).astype('int64') * \
jitter_scale[0][0]
cropped_image, cur_image_var = self.img_crop_model.crop_image(image_rgb,
np.reshape(padded_scene_box, (1, 4)),
crop_img_size)
cropped_image_t, cur_image_var_t = self.img_crop_model.crop_image(image_t,
np.reshape(padded_scene_box, (1, 4)),
crop_img_size)
cropped_image = cropped_image - 128.
cropped_image_t = cropped_image_t - 128.
if self.use_gpu:
cropped_image = cropped_image.data.cpu()
cur_image_var = cur_image_var.cpu()
cropped_image_t = cropped_image_t.data.cpu()
cur_image_var_t = cur_image_var_t.cpu()
# cropped_image_4channel = torch.cat((cropped_image, cropped_image_t), 1)
scenes_rgb.append(cropped_image)
scenes_t.append(cropped_image_t)
## get current frame and heatmap
rel_bbox = np.copy(bbox)
rel_bbox[0:2] -= padded_scene_box[0:2]
jittered_obj_size = jitter_scale[0][0] * float(self.crop_size)
batch_num = np.zeros((pos_examples.shape[0], 1))
pos_rois = np.copy(pos_examples)
pos_rois[:, 0:2] -= np.repeat(np.reshape(padded_scene_box[0:2], (1, 2)), pos_rois.shape[0], axis=0)
pos_rois = samples2maskroi(pos_rois, self.receptive_field, (jittered_obj_size, jittered_obj_size),
bbox[2:4], self.padding)
pos_rois = np.concatenate((batch_num, pos_rois), axis=1)
batch_num = np.zeros((neg_examples.shape[0], 1))
neg_rois = np.copy(neg_examples)
neg_rois[:, 0:2] -= np.repeat(np.reshape(padded_scene_box[0:2], (1, 2)), neg_rois.shape[0], axis=0)
neg_rois = samples2maskroi(neg_rois, self.receptive_field, (jittered_obj_size, jittered_obj_size),
bbox[2:4], self.padding)
neg_rois = np.concatenate((batch_num, neg_rois), axis=1)
total_pos_rois.append(torch.from_numpy(np.copy(pos_rois).astype('float32')))
total_neg_rois.append(torch.from_numpy(np.copy(neg_rois).astype('float32')))
return scenes_rgb, scenes_t, total_pos_rois, total_neg_rois
next = __next__
def extract_regions(self, image, samples):
regions = np.zeros((len(samples), self.crop_size, self.crop_size, 3), dtype='uint8')
for i, sample in enumerate(samples):
regions[i] = crop_image(image, sample, self.crop_size, self.padding, True)
regions = regions.transpose(0, 3, 1, 2)
regions = regions.astype('float32') - 128.
return regions
class RegionExtractor:
def __init__(self, image, samples, crop_size, padding, batch_size, shuffle=False):
self.image = np.asarray(image)
self.samples = samples
self.crop_size = crop_size
self.padding = padding
self.batch_size = batch_size
self.shuffle = shuffle
self.index = np.arange(len(samples))
self.pointer = 0
self.mean = self.image.mean(0).mean(0).astype('float32')
def __iter__(self):
return self
def __next__(self):
if self.pointer == len(self.samples):
self.pointer = 0
raise StopIteration
else:
next_pointer = min(self.pointer + self.batch_size, len(self.samples))
index = self.index[self.pointer:next_pointer]
self.pointer = next_pointer
regions = self.extract_regions(index)
regions = torch.from_numpy(regions)
return regions
next = __next__
def extract_regions(self, index):
regions = np.zeros((len(index), self.crop_size, self.crop_size, 3), dtype='uint8')
for i, sample in enumerate(self.samples[index]):
regions[i] = crop_image(self.image, sample, self.crop_size, self.padding)
regions = regions.transpose(0, 3, 1, 2).astype('float32')
regions = regions - 128.
return regions
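# Illustrative usage sketch: the image path, sample boxes, and sizes below are
# hypothetical; crop_image is expected to come from ..utils as imported above.
#
#   image = Image.open('frame.jpg').convert('RGB')
#   samples = np.array([[10, 20, 50, 80]])  # x, y, w, h boxes
#   extractor = RegionExtractor(image, samples, crop_size=107,
#                               padding=16, batch_size=256)
#   for regions in extractor:
#       # each batch is a float32 torch tensor of shape
#       # (batch, 3, crop_size, crop_size), with 128 subtracted
#       pass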
|
"""Tests of the transform submodule"""
import math
import pytest
from fiona import transform
from .conftest import requires_gdal_lt_3
TEST_GEOMS = [
{"type": "Point", "coordinates": [0.0, 0.0, 1000.0]},
{
"type": "LineString",
"coordinates": [[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]],
},
{
"type": "MultiPoint",
"coordinates": [[0.0, 0.0, 1000.0], [0.1, 0.1, -1000.0]],
},
{
"type": "Polygon",
"coordinates": [
[
[0.0, 0.0, 1000.0],
[0.1, 0.1, -1000.0],
[0.1, -0.1, math.pi],
[0.0, 0.0, 1000.0],
]
],
},
{
"type": "MultiPolygon",
"coordinates": [
[
[
[0.0, 0.0, 1000.0],
[0.1, 0.1, -1000.0],
[0.1, -0.1, math.pi],
[0.0, 0.0, 1000.0],
]
]
],
},
]
@pytest.mark.parametrize("geom", TEST_GEOMS)
def test_transform_geom_with_z(geom):
"""Transforming a geom with Z succeeds"""
transform.transform_geom("epsg:4326", "epsg:3857", geom, precision=3)
@pytest.mark.parametrize("geom", TEST_GEOMS)
def test_transform_geom_array_z(geom):
"""Transforming a geom array with Z succeeds"""
g2 = transform.transform_geom(
"epsg:4326",
"epsg:3857",
[geom for _ in range(5)],
precision=3,
)
assert isinstance(g2, list)
assert len(g2) == 5
@requires_gdal_lt_3
def test_transform_geom_null_dest():
failed_geom = {
'type': 'Polygon',
'coordinates': ((
(81.2180196471443, 6.197141424988303),
(80.34835696810447, 5.968369859232141),
(79.87246870312859, 6.763463446474915),
(79.69516686393516, 8.200843410673372),
(80.14780073437967, 9.824077663609557),
(80.83881798698664, 9.268426825391174),
(81.3043192890718, 8.564206244333675),
(81.78795901889143, 7.523055324733178),
(81.63732221876066, 6.481775214051936),
(81.2180196471443, 6.197141424988303)
),)
}
with pytest.warns(UserWarning):
transformed_geom = transform.transform_geom(
src_crs="epsg:4326",
dst_crs="epsg:32628",
geom=failed_geom,
antimeridian_cutting=True,
precision=2,
)
assert transformed_geom is None
@pytest.mark.parametrize("crs", ["epsg:4326",
"EPSG:4326",
"WGS84",
{'init': 'epsg:4326'},
{'proj': 'longlat', 'datum': 'WGS84', 'no_defs': True},
"OGC:CRS84"])
def test_axis_ordering(crs):
""" Test if transform uses traditional_axis_mapping """
expected = (-8427998.647958742, 4587905.27136252)
t1 = transform.transform(crs, "epsg:3857", [-75.71], [38.06])
assert (t1[0][0], t1[1][0]) == pytest.approx(expected)
geom = {"type": "Point", "coordinates": [-75.71, 38.06]}
g1 = transform.transform_geom(crs, "epsg:3857", geom, precision=3)
assert g1["coordinates"] == pytest.approx(expected)
rev_expected = (-75.71, 38.06)
t2 = transform.transform("epsg:3857", crs, [-8427998.647958742], [4587905.27136252])
assert (t2[0][0], t2[1][0]) == pytest.approx(rev_expected)
geom = {"type": "Point", "coordinates": [-8427998.647958742, 4587905.27136252]}
g2 = transform.transform_geom("epsg:3857", crs, geom, precision=3)
assert g2["coordinates"] == pytest.approx(rev_expected)
def test_transform_issue971():
""" See https://github.com/Toblerity/Fiona/issues/971 """
source_crs = "epsg:25832"
dest_src = "epsg:4326"
geom = {'type': 'GeometryCollection', 'geometries': [{'type': 'LineString',
'coordinates': [(512381.8870945257, 5866313.311218272),
(512371.23869999964, 5866322.282500001),
(512364.6014999999, 5866328.260199999)]}]}
geom_transformed = transform.transform_geom(source_crs, dest_src, geom, precision=3)
assert geom_transformed['geometries'][0]['coordinates'][0] == pytest.approx((9.184, 52.946))
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: parameter_utils
:platform: Unix
:synopsis: Parameter utilities
.. moduleauthor:: Jessica Verschoyle, Nicola Wadeson <scientificsoftware@diamond.ac.uk>
"""
import os
import re
import copy
import h5py
import posixpath
import numpy as np
import configparser
from colorama import Fore
import savu.plugins.loaders.utils.yaml_utils as yu
import savu.plugins.utils as pu
def _int(value):
# do not use isinstance as this also picks up boolean values
if type(value) in (int, np.int_):
return True
return False
def _str(value):
if isinstance(value, str):
return True
return False
def _float(value):
    valid = isinstance(value, (float, np.floating))
if not valid:
valid = _int(value)
return valid
def _bool(value): # should eventually be a drop-down list
valid = isinstance(value, bool)
if not valid and isinstance(value, str):
return value.lower() == "true" or value.lower() == "false"
return valid
def _dir(value):
""" A directory """
valid = False
if _str(value):
valid = os.path.isdir(value)
return valid
def _filepath(value):
""" file path """
valid = False
if _str(value):
valid = os.path.isfile(value)
if not valid:
valid = _savufilepath(value)
return valid
def _h5path(value): # Extend this later as we need to know which file to apply the check to
""" internal path to a hdf5 dataset """
return _str(value)
def _savufilepath(value, returnpath=False):
""" A file path inside the Savu directory"""
savu_base_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../../')
split_path = value.split('Savu')
if len(split_path) > 1:
value = os.path.join(savu_base_path, split_path[1][1:])
if returnpath:
return os.path.isfile(value), value
return os.path.isfile(value)
def _yamlfilepath(value):
""" yaml_file """
# does the filepath exist
if _str(value):
if not os.path.isfile(value):
# is it a file path in Savu folder
valid, value = _savufilepath(value, returnpath=True)
if not valid:
return False
return _yaml_is_valid(value)
return False
def _yaml_is_valid(filepath):
"""Read the yaml file at the provided file path """
with open(filepath, 'r') as f:
errors = yu.check_yaml_errors(f)
try:
yu.read_yaml(filepath)
return True
except:
if errors:
print("There were some errors with your yaml file structure.")
for e in errors:
print(e)
return False
def _nptype(value):
"""Check if the value is a numpy data type. Return true if it is."""
if _int(value) or _str(value):
return (value in np.typecodes) or (value in np.sctypeDict.keys())
return False
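# e.g. _nptype("float32") and _nptype("int8") return True, while
# _nptype("not_a_dtype") returns False.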
def _preview(value):
""" preview value """
valid = _typelist(_preview_dimension, value)
if not valid and _list(value) and not value:
return True # empty list is allowed
return valid
def _typelist(func, value):
if isinstance(value, list):
if value:
return all(func(item) for item in value)
return False
def _preview_dimension(value):
""" Check the full preview parameter value """
if _str(value):
slice_str = [":"*n for n in range(1,5)]
if value in slice_str:
# If : notation is used, accept this
valid = True
elif ":" in value:
valid = _split_notation_is_valid(value)
else:
valid = _preview_dimension_singular(value)
else:
valid = _float(value)
return valid
def _split_notation_is_valid(value):
"""Check if the start step stock chunk entries are valid
:param value: The value to check
:return: parameter_valid True if the split notation is valid
"""
if value.count(":") < 4:
        # Allow at most three colons, i.e. start:stop:step:block
start_stop_split = value.split(":")
try:
type_list = [pu._dumps(v) for v in start_stop_split if v]
return _typelist(_preview_dimension_singular,
type_list)
except Exception as e:
print(f"There was an error with your slice notation, '{value}'")
return False
def _preview_dimension_singular(value):
""" Check the singular value within the preview dimension"""
valid = False
if _str(value):
        string_valid = re.fullmatch("(mid|end|[^a-zA-Z])+", value)
# Check that the string does not contain any letters [^a-zA-Z]
# If it does contain letters, mid and end are the only keywords allowed
if string_valid:
try:
# Attempt to evaluate the provided equation
temp_value = _preview_eval(value)
valid = _float(temp_value)
except Exception:
print("There was an error with your dimension value input.")
else:
print('If you are trying to use an expression, '
'please only use mid and end command words.')
else:
valid = _float(value)
return valid
def _preview_eval(value):
""" Evaluate with mid and end"""
mid = 0
end = 0
return eval(value,{"__builtins__":None},{"mid":mid,"end":end})
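# e.g. _preview_eval("mid + 1") == 1 and _preview_eval("end/2") == 0 here,
# since both keywords are bound to 0 for validation purposes only.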
# TODO: replace this with a check that runs when a list combination contains
# both filepath and h5path, e.g. list[filepath, h5path, int]
def _check_h5path(filepath, h5path):
""" Check if the internal path is valid"""
with h5py.File(filepath, "r") as hf:
try:
# Hdf5 dataset object
h5path = hf.get(h5path)
if h5path is None:
print("There is no data stored at that internal path.")
else:
# Internal path is valid, check data is present
int_data = np.array(h5path)
if int_data.size >= 1:
return True, ""
except AttributeError:
print("Attribute error.")
except:
print(
Fore.BLUE + "Please choose another interior path."
+ Fore.RESET
)
print("Example interior paths: ")
for group in hf:
for subgroup in hf[group]:
subgroup_str = "/" + group + "/" + subgroup
print(u"\t" + subgroup_str)
raise
return False, "Invalid path %s for file %s" % (h5path, filepath)
def _list(value):
""" A non-empty list """
if isinstance(value, list):
return True
return False
def _dict(value):
""" A dictionary """
return isinstance(value, dict)
def _None(value):
""" None """
    return value is None or value == "None"
def _dict_combination(param_name, value, param_def):
dtype = copy.copy(param_def['dtype'])
param_def['dtype'] = 'dict'
# check this is a dictionary
pvalid, error_str = _check_type(param_name, value, param_def)
if not pvalid:
return pvalid, error_str
param_def['dtype'] = dtype
return _check_dict_combination(param_name, value, param_def)
def _check_dict_combination(param_name, value, param_def):
dtype = copy.copy(param_def['dtype'])
dtype = dtype[len('dict'):]
dtype = _find_options(dtype, 'dict', '{', '}', ':')
#special case of empty dict
if not value:
if dtype[0] != "":
error = "The empty dict is not a valid option for %s" % param_name
return False, error
else:
return True, ""
# check there are only two options - for key and for value:
if len(dtype) != 2:
return False, "Incorrect number of dtypes supplied for dictionary"
return _check_dict_entry_dtype(param_name, value, param_def, dtype)
def _check_dict_entry_dtype(param_name, value, param_def, dtype):
""" Check that the dict keys and values are of the correct dtype """
# check the keys
n_vals = len(value.keys())
multi_vals = zip(list([dtype[0]] * n_vals), list(value.keys()))
pvalid, error_str = _is_valid_multi(param_name, param_def, multi_vals)
if not pvalid:
# If the keys are not the correct type, break and return False
return pvalid, error_str
# check the values:
multi_vals = zip(list([dtype[1]] * n_vals), list(value.values()))
return _is_valid_multi(param_name, param_def, multi_vals)
def _options_list(param_name, value, param_def):
"""
There are multiple options of dtype defined in a list.
E.g. dtype: [string, int] # dtype can be a string or an integer
"""
dtype = _find_options(param_def['dtype'])
for atype in dtype:
param_def['dtype'] = atype
pvalid, error_str = is_valid(param_name, value, param_def)
if pvalid:
return pvalid, error_str
return pvalid, _error_message(param_name, dtype)
def _list_combination(param_name, value, param_def):
"""
e.g.
(1) list
(1) list[btype] => any length
(2) list[btype, btype] => fixed length (and btype can be same or different)
- list[int], list[string, string], list[list[string, float], int]
(3) list[filepath, h5path, int]
(4) list[[option1, option2]] = list[option1 AND/OR option2]
"""
dtype = copy.copy(param_def['dtype'])
# is it a list?
param_def['dtype'] = 'list'
pvalid, error_str = _check_type(param_name, value, param_def)
if not pvalid:
return pvalid, error_str
param_def['dtype'] = dtype
return _check_list_combination(param_name, value, param_def)
def _check_list_combination(param_name, value, param_def):
dtype = copy.copy(param_def['dtype'])
# remove outer list from dtype and find separate list entries
dtype = _find_options(dtype[len('list'):])
#special case of empty list
if not value:
if dtype[0] != "":
error = "The empty list is not a valid option for %s" % param_name
return False, error
else:
return True, ""
# list can have any length if btype_list has length 1
if len(dtype) == 1:
dtype = dtype*len(value)
if len(dtype) != len(value):
return False, f"Incorrect number of list entries for {value}. " \
f"The required format is {dtype}"
return _is_valid_multi(param_name, param_def, zip(dtype, value))
def _matched_brackets(string, dtype, bstart, bend):
start_brackets = [m.start() for m in re.finditer(r'\%s' % bstart, string)]
end_brackets = [m.start() for m in re.finditer(r'\%s' % bend, string)]
matched = []
# Match start and end brackets
while(end_brackets):
try:
end = end_brackets.pop(0)
idx = start_brackets.index([s for s in start_brackets if s < end][-1])
start = start_brackets.pop(idx)
extra = len(dtype) if string[start-4:start] == dtype else 0
except IndexError as ie:
raise IndexError(f"Incorrect number of brackets in {string}")
matched.append((start - extra, end))
return matched
def _remove_nested_brackets(matched):
if len(matched) > 1:
for outer in matched[::-1]:
for i in range(len(matched[:-1]))[::-1]:
# Remove if is this bracket inside the outer bracket
if matched[i][0] > outer[0] and matched[i][1] < outer[1]:
matched.pop(i)
return matched
def _find_options(string, dtype='list', bstart="[", bend="]", split=","):
string = string[1:-1]
matched = _matched_brackets(string, dtype, bstart, bend)
# find and remove nested brackets
matched = _remove_nested_brackets(matched)
replace_strings = {}
# replace statements with place holders containing no commas
shift = 0
for i in range(len(matched)):
replace = string[matched[i][0]-shift:matched[i][1]-shift+1]
replacement = '$' + str(i)
replace_strings[replacement] = replace
string = string.replace(replace, replacement)
shift = matched[i][1] - matched[i][0] - 1
options = string.split(split)
# substitute original statements back in
for i in range(len(options)):
if options[i] in replace_strings.keys():
options[i] = replace_strings[options[i]]
return options
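# e.g. _find_options("[list[int],str]") returns ['list[int]', 'str'], and
# _find_options("{filepath:h5path}", 'dict', '{', '}', ':') returns
# ['filepath', 'h5path'].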
def _convert_to_list(value):
return value if isinstance(value, list) else [value]
def _is_valid_multi(param_name, param_def, multi_vals):
dtype = copy.copy(param_def['dtype'])
for atype, val in multi_vals:
param_def['dtype'] = atype
_check_val = pu._dumps(val)
pvalid, error_str = is_valid(param_name, _check_val, param_def)
if not pvalid:
error_str = "The value %s should be of type %s" % (val, atype)
return pvalid, error_str
param_def['dtype'] = dtype
return True, ""
def is_valid(param_name, value, param_def, check=False):
"""Check if the parameter value is a valid data type for the parameter
:param param_name: The name of the parameter
:param value: The new value of the parameter
:param param_def: Parameter definition dictionary, containing e.g.,
description, dtype, default
:return: boolean True if the value is a valid parameter value
"""
original_dtype = copy.copy(param_def['dtype'])
# remove all whitespaces from dtype
param_def['dtype'] = param_def['dtype'].replace(" ", "")
# If a default value is used, this is a valid option
# Don't perform this check when checking the default value itself
if not check:
if _check_default(value, param_def['default']):
return True, ""
dtype = param_def["dtype"]
# If this is parameter tuning, check each individually
if is_multi_param(param_name, value):
return _check_multi_param(param_name, value, param_def)
if not dtype.split('list[')[0]:
pvalid, error_str = _list_combination(param_name, value, param_def)
elif not dtype.split('dict{')[0]:
pvalid, error_str = _dict_combination(param_name, value, param_def)
elif not dtype.split('[')[0] and not dtype.split(']')[-1]:
pvalid, error_str = _options_list(param_name, value, param_def)
else:
        pvalid, error_str = _check_type(param_name, value, param_def)
# set dtype back to the original
param_def['dtype'] = original_dtype
return pvalid, error_str
def _check_type(param_name, value, param_def):
"""Check if the provided value matches the required date type
:param param_name: The parameter name
:param value: The new value
:param param_def: Parameter definition dictionary
:return: pvalid, True if the value type matches the required dtype
type_error_str, Error message
"""
dtype = param_def['dtype']
# If this is parameter tuning, check each individually
try:
pvalid = globals()["_" + dtype](value)
except KeyError:
return False, "Unknown dtype '%s'" % dtype
pvalid, opt_err = _check_options(param_def, value, pvalid)
if not pvalid:
return pvalid, opt_err if opt_err \
else _error_message(param_name, dtype)
return True, ""
def _check_multi_param(param_name, value, param_def):
""" Check each multi parameter value individually
:param param_name: The parameter name
:param value: The multi parameter value to check
:param param_def: The dictionary of parameter definitions
:return: pvalid True if the value type matches the required dtype
type_error_str, Error message
"""
val_list, error_str = pu.convert_multi_params(param_name, value)
# incorrect parameter tuning syntax
if error_str:
return False, error_str
for val in val_list:
pvalid, error_str = is_valid(param_name, val, param_def)
if not pvalid:
break
return pvalid, error_str
def is_multi_param(param_name, value):
"""Return True if the value is made up of multiple parameters"""
return (
_str(value) and (";" in value) and param_name != "preview"
)
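# e.g. the string "2;4;8" for a hypothetical parameter named "iterations"
# counts as a multi (parameter-tuning) value and each entry is validated
# separately, whereas the same string for the "preview" parameter does not.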
def _check_default(value, default_value):
"""Return true if the new value is a match for the default
parameter value
"""
default_present = False
if str(default_value) == str(value):
default_present = True
return default_present
def _check_options(param_def, value, pvalid):
"""Check if the input value matches one of the valid parameter options"""
option_error_str = ""
options = param_def.get("options") or {}
if len(options) >= 1:
if value in options or str(value) in options:
pvalid = True
else:
pvalid = False
option_error_str = (
"That does not match one of the required options."
)
option_error_str += Fore.CYAN + "\nThe options are:\n"
option_error_str += "\n".join(str(o) for o in options) + Fore.RESET
return pvalid, option_error_str
def _error_message(param_name, dtype):
"""Create an error message"""
if isinstance(dtype, list):
type_options = "' or '".join(
[str(type_error_dict[t] if t in type_error_dict else t)
for t in dtype]
)
error_str = f"The parameter '{param_name}' does not match" \
f" the options: '{type_options}'."
else:
error_str = f"The parameter '{param_name}' does not match " \
f"the type: '{type_error_dict[dtype]}'."
return error_str
def _gui_error_message(param_name, dtype):
"""Create an error string for the GUI
    Remove the parameter name, as the GUI message will be displayed below
each parameter input box
"""
if isinstance(dtype, list):
type_options = "' or '".join([str(t) for t in dtype])
error_str = f"Type must match '{type_options}'."
else:
error_str = f"Type must match '{type_error_dict[dtype]}'."
return error_str
type_error_dict = {
"preview": "preview slices",
"yamlfilepath": "yaml filepath",
"filepath": "filepath",
"h5path" : "hdf5 path",
"filename": "file name",
"nptype": "numpy data type",
"int": "integer",
"bool": "true/false",
"str": "string",
"float": "float/integer",
"list": "list",
"dict": "dict",
"None": "None"
}
def is_valid_dtype(dtype):
"""
Checks if the dtype is defined correctly
"""
if not dtype.split('list[')[0]:
pvalid, error_str = _is_valid_list_combination_type(dtype)
elif not dtype.split('dict{')[0]:
pvalid, error_str = _is_valid_dict_combination_type(dtype)
elif not dtype.split('[')[0] and not dtype.split(']')[-1]:
pvalid, error_str = _is_valid_options_list_type(dtype)
else:
if '_' + dtype in globals().keys():
return True, ""
else:
return "False", "The basic dtype %s does not exist" % dtype
return pvalid, error_str
def _is_valid_list_combination_type(dtype):
if not dtype:
return True, "" # the empty list
if not dtype[-1] == ']':
return False, "List combination is missing a closing bracket."
return is_valid_dtype(dtype[len('list['):-1])
def _is_valid_dict_combination_type(dtype):
if not dtype[-1] == '}':
return False, "Dict combination is missing a closing bracket"
dtype = dtype[len('dict{'):-1]
dtype = _find_options(dtype, 'dict', '{', '}', ':')
for atype in dtype:
pvalid, error_str = is_valid_dtype(atype)
if not pvalid:
break
return pvalid, error_str
def _is_valid_options_list_type(dtype):
if not dtype[-1] == ']':
return False, "Options list is missing a closing bracket."
dtype = _find_options(dtype)
for atype in dtype:
pvalid, error_str = is_valid_dtype(atype)
if not pvalid:
break
return pvalid, error_str
|
# Update the area of the bathroom area to be 10.50 square meters instead of 9.50.
# Make the areas list more trendy! Change "living room" to "chill zone".
# Create the areas list
areas = ["hallway", 11.25, "kitchen", 18.0, "living room", 20.0, "bedroom", 10.75, "bathroom", 9.50]
# Correct the bathroom area
areas[-1] = 10.50
# Change "living room" to "chill zone"
areas[4] = "chill zone"
|
import gc
import uasyncio as asyncio
METHODS = ["GET", "POST", "PUT", "DELETE"]
class Request:
def __init__(self, request_string):
lines = request_string.split("\r\n")
t = lines[0].split(" ")
try:
self.method = t[0]
self.url = t[1]
self.protocol = t[2]
except IndexError:
print("request empty")
self.headers = []
for line in lines[1:]:
if line == "":
break
t = line.split(":")
self.headers.append({t[0]: t[1]})
# parse params in url
t = self.url.split("?")
try:
self.route = t[0]
params = t[1]
self.params = []
param_lines = params.split("&")
for p in param_lines:
k, v = p.split("=")
self.params.append({k: v})
except ValueError:
# no params in request, url includes ?
self.route = self.url[:-1]
self.params = []
except IndexError:
# no params in request
self.route = self.url
self.params = []
self.body = ""
flag = False
        for line in lines:
            if flag:
                self.body += line + "\r\n"
            elif line == "":
                # the first blank line separates the headers from the body
                flag = True
def __str__(self):
ret = "method:{}\r\nURL:{}\r\n".format(self.method, self.url)
for h in self.headers:
ret += "{}\r\n".format(h)
return ret
class Response:
def __init__(self, **kwargs):
self.protocol = "HTTP/1.1"
self.status = kwargs.get("status", 500)
self.reason = kwargs.get("reason", "")
self.headers = kwargs.get("headers", {})
self.body = kwargs.get("body", "")
def to_bytes(self):
yield "{} {} {}\n".format(self.protocol, self.status, self.reason).encode()
for k, v in self.headers.items():
yield "{}:{}\n".format(k, v).encode()
yield "\n".encode()
if isinstance(self.body, str):
yield (str(self.body)).encode()
else:
buf = bytearray(1024)
while True:
gc.collect()
size = self.body.readinto(buf)
if size == 0:
break
yield bytes(buf[:size])
class Server:
def __init__(self, lock):
self.routes = []
self.lock = lock
async def handler(self, reader, writer):
await self.lock.acquire()
request_bytes = await reader.read(-1)
req = Request(request_bytes.decode())
addr = reader.get_extra_info("peername")
print("{}: '{}' from {}".format(req.method, req.route, addr[0]))
resp = Response(status=404)
for r in self.routes:
if r["route"] == req.route:
view = r.get("view")
func = r.get("func")
asfc = r.get("asfc")
if view is not None:
resp = view.runMethod(req)
print("View: {}".format(r["view"]))
elif func is not None:
if req.method in r["methods"]:
print("Func {}".format(r["func"]))
resp = func(req)
else:
resp = Response(status=405)
elif asfc is not None:
if req.method in r["methods"]:
print("Async func {}".format(r["asfc"]))
resp = await asfc(req)
else:
resp = Response(status=405)
for b in resp.to_bytes():
await writer.awrite(b)
await writer.aclose()
await reader.aclose()
self.lock.release()
def register_view(self, view, route):
"""set function for foute
"""
print("registering {}".format({"view": str(view), "route": route}))
self.routes.append({"view": view, "route": route})
def register_func(self, func, route, methods):
print(
"registering {}".format(
{"func": str(func), "route": route, "methods": methods}
)
)
self.routes.append({"func": func, "route": route, "methods": methods})
def register_afunc(self, async_func, route, methods):
print(
"registering {}".format(
{"asfc": str(async_func), "route": route, "methods": methods}
)
)
self.routes.append({"asfc": async_func, "route": route, "methods": methods})
def run(self):
task = asyncio.start_server(self.handler, "0.0.0.0", 80, backlog=2)
loop = asyncio.get_event_loop()
loop.create_task(task)
loop.run_forever()
class View:
description = "default view"
def runMethod(self, request: Request):
method = request.method
if method == "HEAD":
return Response(status=200)
if method not in METHODS:
return Response(status=501)
if method == "GET":
return self.get(request)
if method == "POST":
return self.post(request)
if method == "PUT":
return self.put(request)
if method == "DELETE":
return self.delete(request)
def get(self, request):
return Response(status=405)
def post(self, request):
return Response(status=405)
def put(self, request):
return Response(status=405)
def delete(self, request):
return Response(status=405)
def __str__(self):
return self.description
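# A minimal usage sketch, assuming this module runs on a MicroPython board with
# networking already configured; the "/hello" route and handler below are
# illustrative and not part of the original module.
if __name__ == "__main__":
    def hello(request):
        # plain-text response to demonstrate the Response/Server API above
        return Response(
            status=200,
            reason="OK",
            headers={"Content-Type": "text/plain"},
            body="hello\r\n",
        )
    server = Server(asyncio.Lock())
    server.register_func(hello, "/hello", ["GET"])
    server.run()  # blocks forever, serving on port 80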
|
# if stmt
s = int(input("Input grade => "))
if s >= 60:
print("Pass!")
else:
print("Not pass!")
|
from decimal import Decimal
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from couchdbkit.exceptions import ResourceNotFound
from memoized import memoized
from casexml.apps.case.cleanup import close_case
from casexml.apps.case.models import CommCareCase
from casexml.apps.stock.consumption import (
ConsumptionConfiguration,
ConsumptionHelper,
)
from casexml.apps.stock.models import DocDomainMapping
from couchforms.signals import xform_archived, xform_unarchived
from dimagi.ext.couchdbkit import *
from corehq.apps.cachehq.mixins import QuickCachedDocumentMixin
from corehq.apps.consumption.shortcuts import get_default_monthly_consumption
from corehq.apps.domain.dbaccessors import get_docs_in_domain_by_class
from corehq.apps.domain.models import Domain
from corehq.apps.domain.signals import commcare_domain_pre_delete
from corehq.apps.locations.models import SQLLocation
from corehq.apps.products.models import SQLProduct
from corehq.form_processor.change_publishers import publish_ledger_v1_saved
from corehq.form_processor.interfaces.supply import SupplyInterface
from corehq.util.quickcache import quickcache
from . import const
from .const import StockActions
STOCK_ACTION_ORDER = [
StockActions.RECEIPTS,
StockActions.CONSUMPTION,
StockActions.STOCKONHAND,
StockActions.STOCKOUT,
]
class CommtrackActionConfig(DocumentSchema):
# one of the base stock action types (see StockActions enum)
action = StringProperty()
# (optional) to further distinguish different kinds of the base action
# (i.e., separately tracking consumption as 'dispensed' or 'lost'). note that when the system
# infers consumption/receipts from reported stock, it will be marked here as a subaction
subaction = StringProperty()
# sms code
_keyword = StringProperty()
# display title
caption = StringProperty()
@classmethod
def wrap(cls, data):
if 'action_type' in data:
data['action'] = data['action_type']
del data['action_type']
if 'name' in data:
if data['name'] == 'lost':
data['subaction'] = 'loss'
del data['name']
return super(CommtrackActionConfig, cls).wrap(data)
def __repr__(self):
return '{action} ({subaction}): {caption} ({_keyword})'.format(**self._doc)
@property
def keyword(self):
return self._keyword
@keyword.setter
def keyword(self, val):
self._keyword = val.lower() if val else None
@property
def name(self):
return ':'.join(filter(None, [self.action, self.subaction]))
@property
def is_stock(self):
return self.action in STOCK_ACTION_ORDER
class ConsumptionConfig(DocumentSchema):
min_transactions = IntegerProperty(default=2)
min_window = IntegerProperty(default=10)
optimal_window = IntegerProperty()
use_supply_point_type_default_consumption = BooleanProperty(default=False)
exclude_invalid_periods = BooleanProperty(default=False)
class StockLevelsConfig(DocumentSchema):
emergency_level = DecimalProperty(default=0.5) # in months
understock_threshold = DecimalProperty(default=1.5) # in months
overstock_threshold = DecimalProperty(default=3) # in months
class AlertConfig(DocumentSchema):
stock_out_facilities = BooleanProperty(default=False)
stock_out_commodities = BooleanProperty(default=False)
stock_out_rates = BooleanProperty(default=False)
non_report = BooleanProperty(default=False)
class StockRestoreConfig(DocumentSchema):
section_to_consumption_types = DictProperty()
force_consumption_case_types = ListProperty()
use_dynamic_product_list = BooleanProperty(default=False)
@classmethod
def wrap(cls, obj):
# todo: remove this cruft at some point
if 'force_to_consumption_case_types' in obj:
realval = obj['force_to_consumption_case_types']
oldval = obj.get('force_consumption_case_types')
if realval and not oldval:
obj['force_consumption_case_types'] = realval
del obj['force_to_consumption_case_types']
return super(StockRestoreConfig, cls).wrap(obj)
class CommtrackConfig(QuickCachedDocumentMixin, Document):
domain = StringProperty()
# supported stock actions for this commtrack domain
# listed in the order they are processed -- TODO support for this custom ordering might go away
actions = SchemaListProperty(CommtrackActionConfig)
# TODO must catch ambiguous action lists (two action configs with the same 'name')
# configured on Advanced Settings page
use_auto_emergency_levels = BooleanProperty(default=False)
sync_consumption_fixtures = BooleanProperty(default=False)
use_auto_consumption = BooleanProperty(default=False)
consumption_config = SchemaProperty(ConsumptionConfig)
stock_levels_config = SchemaProperty(StockLevelsConfig)
ota_restore_config = SchemaProperty(StockRestoreConfig)
individual_consumption_defaults = BooleanProperty(default=False)
# configured on Subscribe Sms page
alert_config = SchemaProperty(AlertConfig)
def clear_caches(self):
super(CommtrackConfig, self).clear_caches()
self.for_domain.clear(self.__class__, self.domain)
@classmethod
@quickcache(vary_on=['domain'])
def for_domain(cls, domain):
result = get_docs_in_domain_by_class(domain, cls)
try:
return result[0]
except IndexError:
return None
@property
def all_actions(self):
return self.actions
def action_by_keyword(self, keyword):
return dict((a.keyword.lower(), a) for a in self.actions).get(keyword.lower())
def get_consumption_config(self):
def _default_monthly_consumption(case_id, product_id):
# note: for now as an optimization hack, per-supply point type is not supported
# unless explicitly configured, because it will require looking up the case
facility_type = None
if self.consumption_config.use_supply_point_type_default_consumption:
try:
supply_point = SupplyInterface(self.domain).get_supply_point(case_id)
facility_type = supply_point.sql_location.location_type_name
except ResourceNotFound:
pass
return get_default_monthly_consumption(self.domain, product_id, facility_type, case_id)
return ConsumptionConfiguration(
min_periods=self.consumption_config.min_transactions,
min_window=self.consumption_config.min_window,
max_window=self.consumption_config.optimal_window,
default_monthly_consumption_function=_default_monthly_consumption,
exclude_invalid_periods=self.consumption_config.exclude_invalid_periods
)
def get_ota_restore_settings(self):
# for some reason it doesn't like this import
from casexml.apps.phone.restore import StockSettings
default_product_ids = []
if self.ota_restore_config.use_dynamic_product_list:
default_product_ids = SQLProduct.active_objects.filter(domain=self.domain).product_ids()
case_filter = lambda stub: stub.type in set(self.ota_restore_config.force_consumption_case_types)
return StockSettings(
section_to_consumption_types=self.ota_restore_config.section_to_consumption_types,
consumption_config=self.get_consumption_config(),
default_product_list=default_product_ids,
force_consumption_case_filter=case_filter,
sync_consumption_ledger=self.sync_consumption_fixtures
)
@receiver(commcare_domain_pre_delete)
def clear_commtrack_config_cache(domain, **kwargs):
config = CommtrackConfig.for_domain(domain.name)
if config:
config.delete()
def force_int(value):
if value is None:
return None
else:
return int(value)
def force_bool(value):
if value is None:
return None
    elif value == 'false':
return False
else:
return bool(value)
def force_empty_string_to_null(value):
if value == '':
return None
else:
return value
class SupplyPointCase(CommCareCase):
"""
A wrapper around CommCareCases to get more built in functionality
specific to supply points.
"""
location_id = StringProperty()
class Meta(object):
# This is necessary otherwise couchdbkit will confuse this app with casexml
app_label = "commtrack"
@property
@memoized
def sql_location(self):
return SQLLocation.objects.get(location_id=self.location_id)
location = sql_location
UNDERSTOCK_THRESHOLD = 0.5 # months
OVERSTOCK_THRESHOLD = 2. # months
DEFAULT_CONSUMPTION = 10. # per month
class ActiveManager(models.Manager):
"""
Filter any object that is associated to an archived product
"""
def get_queryset(self):
return super(ActiveManager, self).get_queryset() \
.exclude(sql_product__is_archived=True) \
.exclude(sql_location__is_archived=True)
class StockState(models.Model):
"""
Read only reporting model for keeping computed stock states per case/product
"""
section_id = models.CharField(max_length=100, db_index=True)
case_id = models.CharField(max_length=100, db_index=True)
product_id = models.CharField(max_length=100, db_index=True)
stock_on_hand = models.DecimalField(max_digits=20, decimal_places=5, default=Decimal(0))
daily_consumption = models.DecimalField(max_digits=20, decimal_places=5, null=True)
last_modified_date = models.DateTimeField()
last_modified_form_id = models.CharField(max_length=100, null=True)
sql_product = models.ForeignKey(SQLProduct, on_delete=models.CASCADE)
sql_location = models.ForeignKey(SQLLocation, null=True, on_delete=models.CASCADE)
# override default model manager to only include unarchived data
objects = ActiveManager()
# leave a way to get unfiltered data
include_archived = models.Manager()
@property
def entry_id(self):
return self.product_id
@property
def location_id(self):
return self.sql_location.location_id if self.sql_location else None
@property
def balance(self):
return self.stock_on_hand
@property
def ledger_reference(self):
from corehq.form_processor.parsers.ledgers.helpers import UniqueLedgerReference
return UniqueLedgerReference(
case_id=self.case_id, section_id=self.section_id, entry_id=self.product_id
)
@property
@memoized
def consumption_helper(self):
return ConsumptionHelper(
domain=self.get_domain(),
case_id=self.case_id,
section_id=self.section_id,
entry_id=self.product_id,
daily_consumption=self.daily_consumption,
balance=self.balance,
sql_location=self.sql_location,
)
@property
def months_remaining(self):
return self.consumption_helper.get_months_remaining()
@property
def resupply_quantity_needed(self):
return self.consumption_helper.get_resupply_quantity_needed()
@property
def stock_category(self):
return self.consumption_helper.get_stock_category()
@property
@memoized
def domain(self):
try:
domain_name = self.__domain
if domain_name:
return domain_name
except AttributeError:
pass
try:
return DocDomainMapping.objects.get(doc_id=self.case_id).domain_name
except DocDomainMapping.DoesNotExist:
return CommCareCase.get(self.case_id).domain
@memoized
def get_domain(self):
return Domain.get_by_name(self.domain)
def get_daily_consumption(self):
return self.consumption_helper.get_daily_consumption()
def get_monthly_consumption(self):
return self.consumption_helper.get_monthly_consumption()
def to_json(self):
from corehq.form_processor.serializers import StockStateSerializer
serializer = StockStateSerializer(self)
return dict(serializer.data)
class Meta(object):
app_label = 'commtrack'
unique_together = ('section_id', 'case_id', 'product_id')
def close_supply_point_case(domain, supply_point_id):
if supply_point_id:
close_case(
supply_point_id,
domain,
const.COMMTRACK_USERNAME,
__name__ + ".close_supply_point_case",
)
def _reopen_or_create_supply_point(location):
from .helpers import update_supply_point_from_location
supply_point = SupplyInterface(location.domain).get_closed_and_open_by_location_id_and_domain(
location.domain,
location.location_id
)
if supply_point:
        if supply_point.closed:
transactions = supply_point.get_closing_transactions()
for transaction in transactions:
transaction.form.archive(user_id=const.COMMTRACK_USERNAME)
update_supply_point_from_location(supply_point, location)
return supply_point
else:
return SupplyInterface.create_from_location(location.domain, location)
def sync_supply_point(location, is_deletion=False):
"""Called on location save() or delete(). Updates the supply_point_id if appropriate"""
domain_obj = Domain.get_by_name(location.domain)
if not domain_obj.commtrack_enabled:
return None
if location.location_type.administrative or is_deletion:
close_supply_point_case(location.domain, location.supply_point_id)
location.supply_point_id = None
elif location.is_archived:
close_supply_point_case(location.domain, location.supply_point_id)
else:
updated_supply_point = _reopen_or_create_supply_point(location)
location.supply_point_id = updated_supply_point.case_id
@receiver(post_save, sender=StockState)
def update_domain_mapping(sender, instance, *args, **kwargs):
case_id = str(instance.case_id)
try:
domain_name = instance.__domain
if not domain_name:
raise ValueError()
except (AttributeError, ValueError):
domain_name = CommCareCase.get(case_id).domain
if not DocDomainMapping.objects.filter(doc_id=case_id).exists():
mapping = DocDomainMapping(
doc_id=case_id,
doc_type='CommCareCase',
domain_name=domain_name,
)
mapping.save()
@receiver(post_save, sender=StockState)
def publish_stock_state_to_kafka_on_save(sender, instance, *args, **kwargs):
publish_ledger_v1_saved(instance)
@receiver(post_delete, sender=StockState)
def publish_stock_state_to_kafka_on_delete(sender, instance, *args, **kwargs):
publish_ledger_v1_saved(instance, deleted=True)
@receiver(xform_archived)
def remove_data(sender, xform, *args, **kwargs):
from corehq.form_processor.interfaces.processor import FormProcessorInterface
FormProcessorInterface(xform.domain).ledger_processor.process_form_archived(xform)
@receiver(xform_unarchived)
def reprocess_form(sender, xform, *args, **kwargs):
from corehq.form_processor.interfaces.processor import FormProcessorInterface
FormProcessorInterface(xform.domain).ledger_processor.process_form_unarchived(xform)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-10-18 08:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cms', '0016_auto_20160608_1535'),
]
operations = [
migrations.CreateModel(
name='HubspotCTA',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of the CTA. This is just to identify the CTA when you want to place it into a page.', max_length=300)),
('embed_code', models.TextField()),
],
options={
'verbose_name': 'Hubspot CTA',
'verbose_name_plural': 'Hubspot CTAs',
},
),
migrations.CreateModel(
name='HubspotCTAPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_hubspot_hubspotctapluginmodel', serialize=False, to='cms.CMSPlugin')),
('cta', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djangocms_hubspot.HubspotCTA')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.CreateModel(
name='HubspotForm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Name of the form. This is just to identify the form when you want to place it into a page.', max_length=300)),
('embed_code', models.TextField()),
],
options={
'verbose_name': 'Hubspot form',
'verbose_name_plural': 'Hubspot forms',
},
),
migrations.CreateModel(
name='HubspotFormPluginModel',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='djangocms_hubspot_hubspotformpluginmodel', serialize=False, to='cms.CMSPlugin')),
('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djangocms_hubspot.HubspotForm')),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
from ptrlib.util.encoding import *
from ptrlib.util.packing import *
from ptrlib.util.logic import *
from ptrlib.util.construct import *
from ptrlib.util.opebinary import *
from ptrlib.util.misc import *
|
from enum import Enum
class BaseEnum(Enum):
@classmethod
def list(cls):
return list(map(lambda c: c.value, cls))
class SentimentType(BaseEnum):
POS = 'pos'
NEG = 'neg'
NEU = 'neu'
UNK = 'unk'
class SourceType(BaseEnum):
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
WEB = 'web'
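# A quick illustration (not part of the original module) of the list() helper
# inherited from BaseEnum: it returns the raw values of every member in
# definition order.
if __name__ == "__main__":
    assert SentimentType.list() == ['pos', 'neg', 'neu', 'unk']
    assert SourceType.list() == ['twitter', 'instagram', 'web']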
|
import unittest
from unittest.mock import patch
from andonapp import AndonAppClient
from andonapp.exceptions import *
class TestAndonClient(unittest.TestCase):
def setUp(self):
self.endpoint = 'https://portal.andonapp.com/public/api/v1'
self.report_data_url = self.endpoint + '/data/report'
self.update_status_url = self.endpoint + '/station/update'
self.org_name = 'Demo'
self.api_token = 'api-token'
self.client = AndonAppClient(self.org_name, self.api_token)
self.headers = {
'Content-Type': 'application/json; charset=utf-8',
'Authorization': 'Bearer ' + self.api_token
}
@patch('requests.post')
def test_report_data_when_valid_pass_request(self, mock_post):
self._expect_post(mock_post, 200, {})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_report_data_when_valid_fail_request(self, mock_post):
self._expect_post(mock_post, 200, {})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 200,
'failReason': 'Test Failure',
'failNotes': 'notes'
}
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_fail_report_data_when_missing_line_name(self, mock_post):
self._expect_post(mock_post, 400, {
'errorType': 'INVALID_REQUEST',
'errorMessage': 'lineName may not be empty'
})
request = {
'orgName': self.org_name,
'lineName': None,
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
with self.assertRaises(AndonInvalidRequestException) as context:
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_fail_report_data_when_station_not_found(self, mock_post):
self._expect_post(mock_post, 400, {
'errorType': 'RESOURCE_NOT_FOUND',
'errorMessage': 'Station not found.'
})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
with self.assertRaises(AndonResourceNotFoundException) as context:
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_fail_report_data_when_invalid_pass_result(self, mock_post):
self._expect_post(mock_post, 400, {
'errorType': 'INVALID_REQUEST',
'errorMessage': "'PAS' is not a valid pass result."
})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PAS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
with self.assertRaises(AndonInvalidRequestException) as context:
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_fail_report_data_when_unauthorized(self, mock_post):
self._expect_post(mock_post, 401, {
'timestamp': '2018-03-07T16:15:19.033+0000',
'status': 401,
'error': 'Unauthorized',
'message': 'Unauthorized',
'path': '/public/api/v1/data/report'
})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
with self.assertRaises(AndonUnauthorizedRequestException) as context:
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_fail_report_data_when_unknown_failure(self, mock_post):
self._expect_post(mock_post, 404, {})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'passResult': 'PASS',
'processTimeSeconds': 100,
'failReason': None,
'failNotes': None
}
with self.assertRaises(AndonAppException) as context:
self.client.report_data(line_name=request['lineName'],
station_name=request['stationName'],
pass_result=request['passResult'],
process_time_seconds=request['processTimeSeconds'],
fail_reason=request['failReason'],
fail_notes=request['failNotes'])
self._assert_post_called(mock_post, self.report_data_url, request)
@patch('requests.post')
def test_update_station_status_success(self, mock_post):
self._expect_post(mock_post, 200, {})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'statusColor': 'YELLOW',
'statusReason': 'Missing Parts',
'statusNotes': 'notes'
}
self.client.update_station_status(line_name=request['lineName'],
station_name=request['stationName'],
status_color=request['statusColor'],
status_reason=request['statusReason'],
status_notes=request['statusNotes'])
self._assert_post_called(mock_post, self.update_status_url, request)
@patch('requests.post')
def test_update_station_status_to_green_when_valid(self, mock_post):
self._expect_post(mock_post, 200, {})
request = {
'orgName': self.org_name,
'lineName': 'line 1',
'stationName': 'station 1',
'statusColor': 'GREEN',
'statusReason': None,
'statusNotes': None
}
self.client.update_station_status(line_name=request['lineName'],
station_name=request['stationName'],
status_color=request['statusColor'],
status_reason=request['statusReason'],
status_notes=request['statusNotes'])
self._assert_post_called(mock_post, self.update_status_url, request)
def _expect_post(self, mock, status_code, response):
mock.return_value.status_code = status_code
mock.return_value.json = lambda: response
def _assert_post_called(self, mock, url, request):
mock.assert_called_with(url, json=request, headers=self.headers)
|
# -*- coding: utf-8 -*-
import os
import glob
import socket
import socks
import paramiko
import zipfile
import logging
import threading
import shutil
import sshtunnel
import unicodedata
import pickle
from datetime import datetime
# Global variables
# ---------------------------------------------------------------------
CHUNK_SIZE = 100 # number of files to process
LOG_PATH = "C:\\Users\\WMurphy\\PycharmProjects\\DataProcessingPipeline\\logs\\SFTP_LOG.log"
LOCK = threading.Lock() # lock to ensure thread safety
def sftp_log(path, err_file):
fmt_str = '%(asctime)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt_str)
logging.basicConfig(filename=path, level=logging.INFO, format=fmt_str)
logging.info("Test of logging info.")
logging.critical("Test of logging critical.")
logging.exception("Test of logging exceptions.")
logging.error("File: {} could not be read".format(err_file))
class SftpCredentials(object):
"""
SftpCredentials
==============
Stores the sftp credentials that will be used
to create an sftp connection.
"""
def __init__(self):
# private attributes
# ------------------
self.__username = None
self.__password = None
self.__host = None
self.__port = None
self.__local_path = None
self.__remote_path = None
@property
def username__(self):
pass
@username__.setter
def username__(self, un):
self.__username = un
@username__.getter
def username__(self):
return self.__username
@username__.deleter
def username__(self):
del self.__username
@property
def password__(self):
pass
@password__.setter
def password__(self, pw):
self.__password = pw
@password__.getter
def password__(self):
return self.__password
@password__.deleter
def password__(self):
del self.__password
@property
def host__(self):
pass
@host__.setter
def host__(self, h):
self.__host = h
@host__.getter
def host__(self):
return self.__host
@host__.deleter
def host__(self):
del self.__host
@property
def port__(self):
pass
@port__.setter
def port__(self, p):
self.__port = p
@port__.getter
def port__(self):
return self.__port
@port__.deleter
def port__(self):
del self.__port
@property
def local_path__(self):
pass
@local_path__.setter
def local_path__(self, lp):
self.__local_path = lp
@local_path__.getter
def local_path__(self):
return self.__local_path
@local_path__.deleter
def local_path__(self):
del self.__local_path
@property
def remote_path__(self):
pass
@remote_path__.setter
def remote_path__(self, rp):
self.__remote_path = rp
@remote_path__.getter
def remote_path__(self):
return self.__remote_path
@remote_path__.deleter
def remote_path__(self):
del self.__remote_path
class DataTransfer:
"""
DataTransfer
============
Data that will be passed to the remote server.
"""
def __init__(self):
self.__name = None
self.__archive = None
self.__directory = None
self.__ext_type = None
self.files_ = []
self.processed_files = 0
self.zipped_files = 0
@property
def name__(self):
pass
@name__.setter
def name__(self, n):
self.__name = n
@name__.getter
def name__(self):
return self.__name
@name__.deleter
def name__(self):
del self.__name
@property
def archive__(self):
pass
@archive__.setter
def archive__(self, ar):
if self.name__ is not None:
            self.__archive = ar
@archive__.getter
def archive__(self):
return self.__archive
@archive__.deleter
def archive__(self):
del self.__archive
@property
def directory__(self):
pass
@directory__.setter
def directory__(self, d):
self.__directory = d
@directory__.getter
def directory__(self):
return self.__directory
@directory__.deleter
def directory__(self):
del self.__directory
@property
def ext_type__(self):
pass
@ext_type__.setter
def ext_type__(self, ext_type):
self.__ext_type = ext_type
@ext_type__.getter
def ext_type__(self):
return self.__ext_type
@ext_type__.deleter
def ext_type__(self):
del self.__ext_type
def files__(self):
try:
if self.directory__ is not None:
os.chdir(self.directory__)
self.files_ = glob.glob('*.{}'.format(self.ext_type__))
print("Number of docx files: {}".format(len(self.files_)))
else:
raise OSError('OSError: Directory, {} not found'.format(self.directory__))
except OSError as e:
print(e)
finally:
pass
def copy_files(self, local_dir):
"""
Copy files to a local directory.
:return:
"""
try:
if len(self.files_) > 0:
if os.path.exists(local_dir):
for f in self.files_:
try:
shutil.copy(f, local_dir)
self.processed_files += 1
except:
sftp_log(LOG_PATH, f)
finally:
pass
except ValueError:
print("No files found.")
finally:
pass
def build_archive(self, file_path, archive_name, extension):
"""
Create an archived directory to transfer data.
:param archive_location:
:return:
"""
archive = zipfile.ZipFile(archive_name, 'w')
if os.path.exists(file_path):
os.chdir(file_path)
print("Current Directory: {}".format(file_path))
file_counter = 0
files = glob.glob('*.{}'.format(extension))
            if len(files) > 0:
                for f in files:
                    try:
                        archive.write(os.path.join(os.getcwd(), f))
                        file_counter += 1
                    except Exception:
                        print("WriteError: error writing {} to archive".format(f))
                print("Number of files added to archive: {}".format(file_counter))
                archive.close()
            else:
                archive.close()
                return "No files of extension {} found".format(extension)
class SftpConnection(SftpCredentials, DataTransfer):
"""
SftpConnection
==============
Create a secure file transfer protocol connection.
    SftpConnection inherits from both the SftpCredentials
    and DataTransfer superclasses.
"""
def __init__(self):
SftpCredentials.__init__(self) # initialize super-class
DataTransfer.__init__(self) # initialize super-class
self.sock = socks.socksocket() # current socket connection
self.sftp = None
self.client = None
self.is_connected = False
def connect(self):
"""
Connect to remote server.
:return:
"""
try:
assert self.host__ is not None
assert self.port__ is not None
assert self.username__ is not None
assert self.password__ is not None
#print("host = {}".format(self.host__()))
# set proxy connection values
self.sock.set_proxy(
proxy_type=None,
addr=self.host__,
port=self.port__,
username=self.username__,
password=self.password__
)
# connect the socket
self.sock.connect((self.host__, self.port__))
if socket.gethostname() is not None:
print("Connection Successful:\nHost: {}".format(socket.gethostname()))
# create transport
self.sftp = paramiko.Transport(self.sock)
try:
self.sftp.connect(
username=self.username__,
password=self.password__
)
if self.sftp.is_alive():
print("Transport is live.")
self.is_connected = True
# create client
self.client = paramiko.SFTPClient.from_transport(self.sftp)
# load in the files to be transferred
assert self.directory__ is not None
assert len(self.files_) > 0
#self.copy_files("C:\\Users\\WMurphy\\PycharmProjects\\DataProcessingPipeline\\docx_files")
                # NOTE: build_archive() takes (file_path, archive_name, extension);
                # the archive name here is a placeholder.
                self.build_archive(
                    'C:\\Users\\WMurphy\\PycharmProjects\\DataProcessingPipeline',
                    'data_transfer.zip',
                    self.ext_type__
                )
except:
pass
except:
pass
finally:
pass
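# A minimal usage sketch of the classes above; the host, credentials and
# extension below are placeholders, and connect() only succeeds against a
# real proxy/SFTP server.
if __name__ == "__main__":
    conn = SftpConnection()
    conn.host__ = "sftp.example.com"
    conn.port__ = 22
    conn.username__ = "user"
    conn.password__ = "secret"
    conn.directory__ = "C:\\Users\\WMurphy\\PycharmProjects\\DataProcessingPipeline"
    conn.ext_type__ = "docx"
    conn.files__()   # populate conn.files_ from the chosen directory
    conn.connect()   # open the proxy socket, SFTP transport and client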
|
from itertools import chain
import numpy as np
from c3nav.mapdata.render.engines import register_engine
from c3nav.mapdata.render.engines.base3d import Base3DEngine
@register_engine
class STLEngine(Base3DEngine):
filetype = 'stl'
facet_template = (b' facet normal %f %f %f\n'
b' outer loop\n'
b' vertex %.3f %.3f %.3f\n'
b' vertex %.3f %.3f %.3f\n'
b' vertex %.3f %.3f %.3f\n'
b' endloop\n'
b' endfacet')
def _create_facet(self, facet) -> bytes:
return self.facet_template % tuple(facet.flatten())
def render(self, filename=None) -> bytes:
facets = np.vstack(chain(*(chain(*v.values()) for v in self.vertices.values())))
facets = np.hstack((np.cross(facets[:, 1]-facets[:, 0], facets[:, 2]-facets[:, 1]).reshape((-1, 1, 3)),
facets))
return (b'solid c3nav_export\n' +
b'\n'.join((self._create_facet(facet) for facet in facets)) +
b'\nendsolid c3nav_export\n')
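# For reference, render() produces a plain ASCII STL document of the form
# sketched below (numbers illustrative); each facet's normal is the cross
# product of two triangle edges, as computed in render() above.
#
#   solid c3nav_export
#     facet normal 0.000000 0.000000 1.000000
#       outer loop
#         vertex 0.000 0.000 0.000
#         vertex 1.000 0.000 0.000
#         vertex 0.000 1.000 0.000
#       endloop
#     endfacet
#   endsolid c3nav_export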
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Computes lifts between one feature and a set of categorical features.
We define the feature value lift(x_i, y_i) for features X and Y as:
P(Y=y_i|X=x_i) / P(Y=y_i)
This quantitatively captures the notion of probabilistic independence, such that
when X and Y are independent, the lift will be 1. It also indicates the degree
to which the presence of x_i increases or decreases the probability of the
presence of y_i. When X or Y is multivalent, the expressions `X=x_i` and `Y=y_i`
are interpreted as the set membership checks, `x_i \in X` and `y_i \in Y`.
When Y is a label and Xs are the set of categorical features, lift can be used
to assess feature importance. However, in the presence of correlated features,
because lift is computed independently for each feature, it will not be a
reliable indicator of the expected impact on model quality from adding or
removing that feature.
This TransformStatsGenerator computes feature value lift for all pairs of X and
Y, where Y is a single, user-configured feature and X is either a manually
specified list of features, or all categorical features in the provided schema.
"""
import collections
import datetime
import operator
from typing import Any, Dict, Hashable, Iterator, Iterable, List, Optional, Sequence, Text, Tuple, TypeVar, Union
import apache_beam as beam
from apache_beam.utils import shared
import numpy as np
import pyarrow as pa
import six
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import bin_util
from tensorflow_data_validation.utils import schema_util
from tensorflow_data_validation.utils import stats_util
from tensorflow_data_validation.utils.example_weight_map import ExampleWeightMap
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# TODO(b/170996403): Switch to`collections.namedtuple` or `typing.NamedTuple`
# once the Spark issue is resolved.
from tfx_bsl.types import tfx_namedtuple # pylint: disable=g-bad-import-order
_XType = Union[Text, bytes]
_YType = Union[Text, bytes, int]
_CountType = Union[int, float]
_JoinKeyType = TypeVar('_JoinKeyType')
_LeftJoinValueType = TypeVar('_LeftJoinValueType')
_RightJoinValueType = TypeVar('_RightJoinValueType')
_SlicedYKey = tfx_namedtuple.TypedNamedTuple('_SlicedYKey',
[('slice_key', types.SliceKey),
('y', _YType)])
# TODO(embr,zhuo): FeaturePathTuple is used instead of FeaturePath because:
# - FeaturePath does not have a deterministic coder
# - Even if it does, beam does not automatically derive a coder for a
# NamedTuple.
# Once the latter is supported we can change all FeaturePathTuples back to
# FeaturePaths.
_SlicedXKey = tfx_namedtuple.TypedNamedTuple(
'_SlicedXKey', [('slice_key', types.SliceKey),
('x_path', types.FeaturePathTuple), ('x', _XType)])
_SlicedXYKey = tfx_namedtuple.TypedNamedTuple(
'_SlicedXYKey', [('slice_key', types.SliceKey),
('x_path', types.FeaturePathTuple), ('x', _XType),
('y', _YType)])
_LiftSeriesKey = tfx_namedtuple.TypedNamedTuple(
'_LiftSeriesKey', [('slice_key', types.SliceKey),
('x_path', types.FeaturePathTuple), ('y', _YType),
('y_count', _CountType)])
_SlicedFeatureKey = tfx_namedtuple.TypedNamedTuple(
'_SlicedFeatureKey', [('slice_key', types.SliceKey),
('x_path', types.FeaturePathTuple)])
_ConditionalYRate = tfx_namedtuple.TypedNamedTuple(
'_ConditionalYRate', [('x_path', types.FeaturePathTuple), ('x', _XType),
('xy_count', _CountType), ('x_count', _CountType)])
_YRate = tfx_namedtuple.TypedNamedTuple('_YRate',
[('y_count', _CountType),
('example_count', _CountType)])
_LiftInfo = tfx_namedtuple.TypedNamedTuple('_LiftInfo',
[('x', _XType), ('y', _YType),
('lift', float),
('xy_count', _CountType),
('x_count', _CountType),
('y_count', _CountType)])
_LiftValue = tfx_namedtuple.TypedNamedTuple('_LiftValue',
[('x', _XType), ('lift', float),
('xy_count', _CountType),
('x_count', _CountType)])
_LiftSeries = tfx_namedtuple.TypedNamedTuple(
'_LiftSeries', [('y', _YType), ('y_count', _CountType),
('lift_values', Iterable[_LiftValue])])
_ValuePresence = tfx_namedtuple.TypedNamedTuple(
'_ValuePresence', [('example_indices', np.ndarray), ('values', np.ndarray),
('weights', np.ndarray)])
# Beam counter to track the number of non-utf8 values.
_NON_UTF8_VALUES_COUNTER = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, 'num_non_utf8_values_lift_generator')
def _get_example_value_presence(
record_batch: pa.RecordBatch, path: types.FeaturePath,
boundaries: Optional[Sequence[float]],
weight_column_name: Optional[Text]) -> Optional[_ValuePresence]:
"""Returns information about which examples contained which values.
  This function treats all values for a given path within a single example
  as a set and returns a mapping between each example index and the distinct
  values which are present in that example.
  The result of calling this function for path 'p' on an arrow record batch
  with the two records [{'p': ['a', 'a', 'b']}, {'p': ['a']}] will pair
  example indices [0, 0, 1] with the values ['a', 'b', 'a'].
  If the array retrieved from get_array is null, this function returns None.
Args:
record_batch: The RecordBatch in which to look up the path.
path: The FeaturePath for which to fetch values.
boundaries: Optionally, a set of bin boundaries to use for binning the array
values.
weight_column_name: Optionally, a weight column to return in addition to the
value and example index.
Returns:
A _ValuePresence tuple which contains three numpy arrays: example indices,
values, and weights.
"""
arr, example_indices = arrow_util.get_array(
record_batch, path, return_example_indices=True)
if stats_util.get_feature_type_from_arrow_type(path, arr.type) is None:
return
arr_flat, parent_indices = arrow_util.flatten_nested(
arr, return_parent_indices=True)
is_binary_like = arrow_util.is_binary_like(arr_flat.type)
assert boundaries is None or not is_binary_like, (
'Boundaries can only be applied to numeric columns')
if is_binary_like:
# use dictionary_encode so we can use np.unique on object arrays
dict_array = arr_flat.dictionary_encode()
arr_flat = dict_array.indices
arr_flat_dict = np.asarray(dict_array.dictionary)
example_indices_flat = example_indices[parent_indices]
if boundaries is not None:
element_indices, bins = bin_util.bin_array(arr_flat, boundaries)
rows = np.vstack([example_indices_flat[element_indices], bins])
else:
rows = np.vstack([example_indices_flat, np.asarray(arr_flat)])
if not rows.size:
return
# Deduplicate values which show up more than once in the same example. This
# makes P(X=x|Y=y) in the standard lift definition behave as
# P(x \in Xs | y \in Ys) if examples contain more than one value of X and Y.
unique_rows = np.unique(rows, axis=1)
example_indices = unique_rows[0, :]
values = unique_rows[1, :]
if is_binary_like:
    # map the dictionary-encoded indices back to the original binary-like
    # values before returning them.
values = arr_flat_dict[values].tolist()
else:
values = values.tolist() # converts values to python native types.
if weight_column_name:
weights = arrow_util.get_weight_feature(record_batch, weight_column_name)
weights = np.asarray(weights)[example_indices].tolist()
else:
weights = np.ones(len(example_indices), dtype=int).tolist()
return _ValuePresence(example_indices.tolist(), values, weights)
def _to_partial_copresence_counts(
sliced_record_batch: types.SlicedRecordBatch,
y_path: types.FeaturePath,
x_paths: Iterable[types.FeaturePath],
y_boundaries: Optional[np.ndarray],
example_weight_map: ExampleWeightMap,
num_xy_pairs_batch_copresent: Optional[
beam.metrics.metric.Metrics.DelegatingDistribution] = None
) -> Iterator[Tuple[_SlicedXYKey, _CountType]]:
"""Yields per-(slice, path_x, x, y) counts of examples with x and y.
This method generates the number of times a given pair of y- and x-values
appear in the same record, for a slice_key and x_path. Records in which either
x or y is absent will be skipped.
Args:
sliced_record_batch: A tuple of (slice_key, record_batch) representing a
slice of examples
y_path: The path to use as Y in the lift expression: lift = P(Y=y|X=x) /
P(Y=y).
x_paths: A set of x_paths for which to compute lift.
y_boundaries: Optionally, a set of bin boundaries to use for binning y_path
values.
example_weight_map: an ExampleWeightMap that maps a FeaturePath to its
corresponding weight column.
num_xy_pairs_batch_copresent: A counter tracking the number of different xy
pairs that are copresent within each batch. If the same pair of xy values
are copresent in more than one batch, this counter will be incremented
once for each batch in which they are copresent.
Yields:
Tuples of the form (_SlicedXYKey(slice_key, x_path, x, y), count) for each
combination of x_path, x, and y in the input record batch.
"""
slice_key, record_batch = sliced_record_batch
y_presence = _get_example_value_presence(
record_batch, y_path, y_boundaries, weight_column_name=None)
if y_presence is None:
return
ys_by_example = collections.defaultdict(list)
for example_index, y in zip(y_presence.example_indices, y_presence.values):
ys_by_example[example_index].append(y)
for x_path in x_paths:
weight_column_name = example_weight_map.get(x_path)
x_presence = _get_example_value_presence(
record_batch,
x_path,
boundaries=None,
weight_column_name=weight_column_name)
if x_presence is None:
continue
if weight_column_name is not None:
copresence_counts = collections.defaultdict(float)
else:
copresence_counts = collections.defaultdict(int)
for example_index, x, weight in zip(x_presence.example_indices,
x_presence.values, x_presence.weights):
for y in ys_by_example[example_index]:
copresence_counts[(x, y)] += weight
if num_xy_pairs_batch_copresent:
num_xy_pairs_batch_copresent.update(len(copresence_counts))
for (x, y), count in copresence_counts.items():
sliced_xy_key = _SlicedXYKey(
slice_key=slice_key, x_path=x_path.steps(), x=x, y=y)
yield sliced_xy_key, count
def _to_partial_counts(
sliced_record_batch: types.SlicedRecordBatch, path: types.FeaturePath,
boundaries: Optional[np.ndarray], weight_column_name: Optional[Text]
) -> Iterator[Tuple[Tuple[types.SliceKey, Union[_XType, _YType]], _CountType]]:
"""Yields per-(slice, value) counts of the examples with value in path."""
slice_key, record_batch = sliced_record_batch
value_presence = _get_example_value_presence(record_batch, path, boundaries,
weight_column_name)
if value_presence is None:
return value_presence
if weight_column_name is not None:
grouped_values = collections.defaultdict(float)
else:
grouped_values = collections.defaultdict(int)
for value, weight in zip(value_presence.values, value_presence.weights):
grouped_values[value] += weight
for value, count in grouped_values.items():
yield (slice_key, value), count
def _to_partial_x_counts(
sliced_record_batch: types.SlicedRecordBatch,
x_paths: Iterable[types.FeaturePath], example_weight_map: ExampleWeightMap
) -> Iterator[Tuple[_SlicedXKey, _CountType]]:
"""Yields per-(slice, x_path, x) counts of the examples with x in x_path."""
for x_path in x_paths:
for (slice_key, x), x_count in _to_partial_counts(
sliced_record_batch,
x_path,
boundaries=None,
weight_column_name=example_weight_map.get(x_path)):
yield _SlicedXKey(slice_key, x_path.steps(), x), x_count
def _get_unicode_value(value: Union[Text, bytes]) -> Text:
"""Get feature value decoded as utf-8."""
decoded_value = stats_util.maybe_get_utf8(value)
# Check if we have a valid utf-8 string. If not, assign a placeholder.
if decoded_value is None:
_NON_UTF8_VALUES_COUNTER.inc()
decoded_value = constants.NON_UTF8_PLACEHOLDER
return decoded_value
def _make_dataset_feature_stats_proto(
lifts: Tuple[_SlicedFeatureKey, Iterable[_LiftSeries]],
y_path: types.FeaturePath, y_boundaries: Optional[np.ndarray],
weighted_examples: bool, output_custom_stats: bool
) -> Tuple[types.SliceKey, statistics_pb2.DatasetFeatureStatistics]:
"""Generates DatasetFeatureStatistics proto for a given x_path, y_path pair.
Args:
lifts: The result of two successive group bys of lift values. The innermost
grouping collects all the lift values for a given (slice, x_path and
y_value) tuple (corresponding to a single LiftSeries message). The
outermost grouping collects all the lift values for the same (slice,
x_path) tuple (corresponding to the set of the LiftSeries which share the
same value of y_path). The full structure of lifts is described by:
(slice, x_path), [(y, y_count, [(x, lift, xy_count, x_count)])]
y_path: The path used as Y in the lift expression: lift = P(Y=y|X=x) /
P(Y=y).
y_boundaries: Optionally, a set of bin boundaries used for binning y_path
values.
weighted_examples: Whether lift is computed over weighted examples, in which
case the proto will output weighted counts (as floats) rather than simple
counts (as ints).
output_custom_stats: Whether to output custom stats for use with Facets.
Returns:
The populated DatasetFeatureStatistics proto.
"""
key, lift_series_list = lifts
x_path = types.FeaturePath(key.x_path)
stats = statistics_pb2.DatasetFeatureStatistics()
cross_stats = stats.cross_features.add(
path_x=x_path.to_proto(), path_y=y_path.to_proto())
if output_custom_stats:
feature_stats = stats.features.add(path=x_path.to_proto())
for lift_series in sorted(lift_series_list):
lift_series_proto = (
cross_stats.categorical_cross_stats.lift.lift_series.add())
if weighted_examples:
lift_series_proto.weighted_y_count = lift_series.y_count
else:
lift_series_proto.y_count = lift_series.y_count
y = lift_series.y
if y_boundaries is not None and isinstance(y, int):
low_value, high_value = bin_util.get_boundaries(y, y_boundaries)
lift_series_proto.y_bucket.low_value = low_value
lift_series_proto.y_bucket.high_value = high_value
y_display_fmt = '[{},{}]' if high_value == float('inf') else '[{},{})'
y_display_val = y_display_fmt.format(low_value, high_value)
elif isinstance(y, six.text_type):
lift_series_proto.y_string = y
y_display_val = y
elif isinstance(y, six.binary_type):
y_string = _get_unicode_value(y)
lift_series_proto.y_string = y_string
y_display_val = y_string
else:
lift_series_proto.y_int = y
y_display_val = str(y)
if output_custom_stats:
hist = feature_stats.custom_stats.add(
name='Lift (Y={})'.format(y_display_val)).rank_histogram
# dedupe possibly overlapping top_k and bottom_k x values.
lift_values_deduped = {v.x: v for v in lift_series.lift_values}
# sort by lift DESC, x ASC
lift_values_sorted = sorted(lift_values_deduped.values(),
key=lambda v: (-v.lift, v.x))
for lift_value in lift_values_sorted:
lift_value_proto = lift_series_proto.lift_values.add(lift=lift_value.lift)
if weighted_examples:
lift_value_proto.weighted_x_count = lift_value.x_count
lift_value_proto.weighted_x_and_y_count = lift_value.xy_count
else:
lift_value_proto.x_count = lift_value.x_count
lift_value_proto.x_and_y_count = lift_value.xy_count
x = lift_value.x
if isinstance(x, six.text_type):
lift_value_proto.x_string = x
x_display_val = x
elif isinstance(x, six.binary_type):
x_string = _get_unicode_value(x)
lift_value_proto.x_string = x_string
x_display_val = x_string
else:
lift_value_proto.x_int = x
x_display_val = str(x)
if output_custom_stats:
hist.buckets.add(label=x_display_val, sample_count=lift_value.lift)
return key.slice_key, stats
def _make_placeholder_counts(
join_result: Tuple[types.SliceKey, Tuple[types.FeaturePathTuple, _XType,
_CountType], _YType]
) -> Tuple[_SlicedXYKey, _CountType]:
slice_key, x_path_value_and_count, y = join_result
x_path, x, _ = x_path_value_and_count
return _SlicedXYKey(slice_key=slice_key, x_path=x_path, x=x, y=y), 0
def _make_conditional_y_rates(
join_result: Tuple[_SlicedXKey, Tuple[_YType, _CountType], _CountType],
num_xy_pairs_distinct: beam.metrics.metric.Metrics.DelegatingCounter
) -> Tuple[_SlicedYKey, _ConditionalYRate]:
"""Creates conditional y rates from slice y rates and the per-x y rates."""
sliced_x_key, y_and_xy_count, x_count = join_result
y, xy_count = y_and_xy_count
num_xy_pairs_distinct.inc(1)
sliced_y_key = _SlicedYKey(sliced_x_key.slice_key, y)
conditional_y_rate = _ConditionalYRate(
x_path=sliced_x_key.x_path,
x=sliced_x_key.x,
xy_count=xy_count,
x_count=x_count)
return sliced_y_key, conditional_y_rate
def _make_y_rates(
join_result: Tuple[types.SliceKey, Tuple[_YType, _CountType], _CountType]
) -> Tuple[_SlicedYKey, _YRate]:
slice_key, y_and_count, example_count = join_result
y, y_count = y_and_count
sliced_y_key = _SlicedYKey(slice_key, y)
y_rate = _YRate(y_count=y_count, example_count=example_count)
return sliced_y_key, y_rate
def _compute_lifts(
join_info: Tuple[_SlicedYKey, Dict[Text, Sequence[Any]]]
# TODO(b/147153346) update dict value list element type annotation to:
# Sequence[Union[_YRate, _ConditionalYRate]]
) -> Iterator[Tuple[_SlicedFeatureKey, _LiftInfo]]:
"""Joins y_counts with all x-y pairs for that y and computes lift.
This function expects the result of a CoGroupByKey, in which the key is a
tuple of the form (slice_key, y), one of the grouped streams has just one
element, the y_rate for that value of y, and the other grouped stream is the
set of all conditional_y_rate values for that same value of y. Schematically,
join_info looks like:
(slice_key, y), {'y_rate': [y_count, example_count], 'conditional_y_rate': [
(x_path_1, x_1, x_1_y_count, x_1_count), ...,
(x_path_1, x_k, x_k_y_count, x_k_count)
...
(x_path_m, x_1, x_1_y_count, x_1_count), ...,
(x_path_m, x_k, x_k_y_count, x_k_count)]}
Args:
join_info: A CoGroupByKey result.
Yields:
Per-(slice, x_path) tuples of the form ((slice_key, x_path),
_LiftInfo(x, y, lift, xy_count, x_count, y_count)).
"""
(slice_key, y), join_inputs = join_info
y_rate = join_inputs['y_rate'][0]
for conditional_y_rate in join_inputs['conditional_y_rate']:
lift = ((float(conditional_y_rate.xy_count) / conditional_y_rate.x_count) /
(float(y_rate.y_count) / y_rate.example_count))
yield (_SlicedFeatureKey(slice_key, conditional_y_rate.x_path),
_LiftInfo(
x=conditional_y_rate.x,
y=y,
lift=lift,
xy_count=conditional_y_rate.xy_count,
x_count=conditional_y_rate.x_count,
y_count=y_rate.y_count))
class _WeakRefFrozenMapping(collections.abc.Mapping, object):
"""A weakly-referencable dict, necessary to allow use with shared.Shared.
Note that the mapping will not be frozen until freeze() is called.
"""
def __init__(self):
self._dict = {}
self._is_frozen = False
def __setitem__(self, key: Hashable, value: Any):
assert not self._is_frozen
self._dict[key] = value
def freeze(self):
self._is_frozen = True
def __getitem__(self, key: Hashable) -> Any:
return self._dict[key]
def __iter__(self) -> Iterator[Hashable]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
class _LookupInnerJoinDoFn(beam.DoFn):
"""A DoFn which performs a lookup inner join using a side input."""
def __init__(self):
self._shared_handle = shared.Shared()
    self._right_lookup_construction_seconds_distribution = (
beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,
'right_lookup_construction_seconds'))
# These should be gauges, but not all runners support gauges so they are
# made distributions, which are equivalent.
# TODO(b/130840752): support gauges in the internal runner.
self._right_lookup_num_keys = (
beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,
'right_lookup_num_keys'))
self._right_lookup_num_values = (
beam.metrics.Metrics.distribution(constants.METRICS_NAMESPACE,
'right_lookup_num_values'))
def process(
self, left_element: Tuple[_JoinKeyType, _LeftJoinValueType],
right_iterable: Iterable[Tuple[_JoinKeyType, _RightJoinValueType]]
) -> Iterator[Tuple[_JoinKeyType, _LeftJoinValueType, _RightJoinValueType]]:
def construct_lookup():
start = datetime.datetime.now()
result = _WeakRefFrozenMapping()
num_values = 0
for key, value in right_iterable:
lst = result.get(key, None)
if lst is None:
lst = []
result[key] = lst
lst.append(value)
num_values += 1
result.freeze()
      self._right_lookup_construction_seconds_distribution.update(
int((datetime.datetime.now() - start).total_seconds()))
self._right_lookup_num_keys.update(len(result))
self._right_lookup_num_values.update(num_values)
return result
right_lookup = self._shared_handle.acquire(construct_lookup)
key, left_value = left_element
right_values = right_lookup.get(key)
if right_values is None:
return
for right_value in right_values:
yield key, left_value, right_value
@beam.typehints.with_input_types(Tuple[_SlicedFeatureKey, _LiftInfo])
@beam.typehints.with_output_types(Tuple[_SlicedFeatureKey, _LiftSeries])
class _FilterLifts(beam.PTransform):
"""A PTransform for filtering and truncating lift values."""
def __init__(self, top_k_per_y: Optional[int], bottom_k_per_y: Optional[int]):
self._top_k_per_y = top_k_per_y
self._bottom_k_per_y = bottom_k_per_y
def expand(self, lifts: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
"""Takes top k and bottom k x values (sorted by lift) per slice and y value.
Args:
lifts: A PCollection of tuples of the form: (
_SlicedFeatureKey(slice_key, x_path),
_LiftInfo(x, y, lift, xy_count, x_count, y_count)).
Returns:
A PCollection resulting from a group by with the keys of the form
(slice_key, x_path) and a stream of values of the form
      (y, y_count, [(x, lift, xy_count, x_count)]), in which the stream of values
has been limited to the top k and bottom k elements per key.
"""
def move_y_info_to_key(key, value):
slice_key, x_path = key
lift_series_key = _LiftSeriesKey(
slice_key=slice_key, x_path=x_path, y=value.y, y_count=value.y_count)
lift_value = _LiftValue(
x=value.x,
lift=value.lift,
xy_count=value.xy_count,
x_count=value.x_count)
return lift_series_key, lift_value
# Push y_* into key so that we get per-slice, per-x-path, per-y top and
# bottom k when calling {Largest,Smallest}PerKey.
# (_LiftSequenceKey(slice, x_path, y, y_count),
# _LiftValue(x, lift, xy_count, x_count))
lifts = lifts | 'MoveYToKey' >> beam.MapTuple(move_y_info_to_key)
top_key = operator.attrgetter('lift', 'x')
if self._top_k_per_y:
# (_LiftSequenceKey(slice, x_path, y, y_count),
# [_LiftValue(x, lift, xy_count, x_count)])
top_k = (
lifts
| 'TopK' >> beam.transforms.combiners.Top.PerKey(
n=self._top_k_per_y, key=top_key))
if self._bottom_k_per_y:
# (_LiftSequenceKey(slice, x_path, y, y_count),
# [_LiftValue(x, lift, xy_count, x_count)])
bottom_k = (
lifts
| 'BottomK' >> beam.transforms.combiners.Top.PerKey(
n=self._bottom_k_per_y, reverse=True, key=top_key))
if self._top_k_per_y and self._bottom_k_per_y:
# (_LiftSeriesKey(slice, x_path, y, y_count),
# [_LiftValue(x, lift, xy_count, x_count)])
grouped_lifts = ((top_k, bottom_k)
| 'MergeTopAndBottom' >> beam.Flatten()
| 'FlattenTopAndBottomLifts' >>
beam.FlatMapTuple(lambda k, vs: ((k, v) for v in vs))
| 'ReGroupTopAndBottom' >> beam.CombinePerKey(
beam.combiners.ToListCombineFn()))
elif self._top_k_per_y:
grouped_lifts = top_k
elif self._bottom_k_per_y:
grouped_lifts = bottom_k
else:
grouped_lifts = lifts | 'CombinePerY' >> beam.CombinePerKey(
beam.combiners.ToListCombineFn())
def move_y_info_to_value(
key: _LiftSeriesKey,
lift_values: List[_LiftValue]) -> Tuple[_SlicedFeatureKey, _LiftSeries]:
return (_SlicedFeatureKey(key.slice_key, key.x_path),
_LiftSeries(
y=key.y, y_count=key.y_count, lift_values=lift_values))
# (_SlicedFeatureKey(slice, x_path),
# _LiftSeries(y, y_count, [_LiftValue(x, lift, xy_count, x_count)]))
return (grouped_lifts
| 'MoveYInfoToValue' >> beam.MapTuple(move_y_info_to_value))
class _GetPlaceholderCopresenceCounts(beam.PTransform):
"""A PTransform for computing all possible x-y pairs, to support 0 lifts."""
def __init__(self, x_paths: Iterable[types.FeaturePath], min_x_count: int):
self._x_paths = x_paths
self._min_x_count = min_x_count
def expand(
self, x_counts_and_ys: Tuple[beam.PCollection[Tuple[_SlicedXKey,
_CountType]],
beam.PCollection[_SlicedYKey]]
) -> beam.PCollection[Tuple[_SlicedXYKey, _CountType]]:
x_counts, y_keys = x_counts_and_ys
# slice, y
y_keys_by_slice = (
y_keys
| 'MoveYToValue_YKey' >> beam.Map(lambda k: (k.slice_key, k.y)))
# slice, (x_path, x, x_count)
x_counts_by_slice = (
x_counts
| 'MoveXToValue_XCountsKey' >> beam.MapTuple(
lambda k, v: (k.slice_key, (k.x_path, k.x, v))))
# TODO(b/201480787): consider creating the cross product of all distinct
# x-values and y-values in the entire dataset (rather than per slice)
# _SlicedXYKey(slice, x_path, x, y), 0
return (x_counts_by_slice
| 'JoinWithPlaceholderYRates' >> beam.ParDo(
_LookupInnerJoinDoFn(),
right_iterable=beam.pvalue.AsIter(y_keys_by_slice))
| 'MakePlaceholderCounts' >> beam.Map(_make_placeholder_counts))
class _GetConditionalYRates(beam.PTransform):
"""A PTransform for computing the rate of each y value, given an x value."""
def __init__(self, y_path: types.FeaturePath,
y_boundaries: Optional[np.ndarray],
x_paths: Iterable[types.FeaturePath], min_x_count: int,
example_weight_map: Optional[ExampleWeightMap]):
self._y_path = y_path
self._y_boundaries = y_boundaries
self._x_paths = x_paths
self._min_x_count = min_x_count
self._example_weight_map = example_weight_map
self._num_xy_pairs_distinct = beam.metrics.Metrics.counter(
constants.METRICS_NAMESPACE, 'num_xy_pairs_distinct')
self._num_xy_pairs_batch_copresent = beam.metrics.Metrics.distribution(
constants.METRICS_NAMESPACE, 'num_xy_pairs_batch_copresent')
def expand(
self, sliced_record_batchs_and_ys: Tuple[
beam.PCollection[types.SlicedRecordBatch],
beam.PCollection[_SlicedYKey]]
) -> beam.PCollection[Tuple[_SlicedYKey, _ConditionalYRate]]:
sliced_record_batchs, y_keys = sliced_record_batchs_and_ys
# _SlicedXYKey(slice, x_path, x, y), xy_count
partial_copresence_counts = (
sliced_record_batchs
| 'ToPartialCopresenceCounts' >> beam.FlatMap(
_to_partial_copresence_counts, self._y_path, self._x_paths,
self._y_boundaries, self._example_weight_map,
self._num_xy_pairs_batch_copresent))
# Compute placeholder copresence counts.
# partial_copresence_counts will only include x-y pairs that are present,
# but we would also like to keep track of x-y pairs that never appear, as
# long as x and y independently occur in the slice.
# _SlicedXKey(slice, x_path, x), x_count
x_counts = (
sliced_record_batchs
| 'ToPartialXCounts' >> beam.FlatMap(
_to_partial_x_counts, self._x_paths, self._example_weight_map)
| 'SumXCounts' >> beam.CombinePerKey(sum))
if self._min_x_count:
x_counts = x_counts | 'FilterXCounts' >> beam.Filter(
lambda kv: kv[1] > self._min_x_count)
# _SlicedXYKey(slice, x_path, x, y), 0
placeholder_copresence_counts = (
(x_counts, y_keys)
| 'GetPlaceholderCopresenceCounts' >> _GetPlaceholderCopresenceCounts(
self._x_paths, self._min_x_count))
def move_y_to_value(key, xy_count):
return _SlicedXKey(key.slice_key, key.x_path, key.x), (key.y, xy_count)
# _SlicedXKey(slice, x_path, x), (y, xy_count)
copresence_counts = (
(placeholder_copresence_counts, partial_copresence_counts)
| 'FlattenCopresenceCounts' >> beam.Flatten()
| 'SumCopresencePairs' >> beam.CombinePerKey(sum)
| 'MoveYToValue' >> beam.MapTuple(move_y_to_value))
# _SlicedYKey(slice, y), _ConditionalYRate(x_path, x, xy_count, x_count)
return (
copresence_counts
| 'JoinXCounts' >> beam.ParDo(
_LookupInnerJoinDoFn(), right_iterable=beam.pvalue.AsIter(x_counts))
| 'MakeConditionalYRates' >> beam.Map(
_make_conditional_y_rates,
num_xy_pairs_distinct=self._num_xy_pairs_distinct))
class _GetYRates(beam.PTransform):
"""A PTransform for computing the rate of each y value within each slice."""
def __init__(self, y_path: types.FeaturePath,
y_boundaries: Optional[np.ndarray],
weight_column_name: Optional[Text]):
self._y_path = y_path
self._y_boundaries = y_boundaries
self._weight_column_name = weight_column_name
def expand(
self, sliced_record_batchs: beam.PCollection[types.SlicedRecordBatch]
) -> beam.PCollection[Tuple[_SlicedYKey, _YRate]]:
# slice, example_count
example_counts = (
sliced_record_batchs
| 'ToExampleCounts' >> beam.MapTuple(lambda k, v: (k, v.num_rows))
| 'SumExampleCounts' >> beam.CombinePerKey(sum))
def move_y_to_value(slice_and_y, y_count):
slice_key, y = slice_and_y
return slice_key, (y, y_count)
# slice, (y, y_count)
y_counts = (
sliced_record_batchs
| 'ToPartialYCounts' >>
beam.FlatMap(_to_partial_counts, self._y_path, self._y_boundaries,
self._weight_column_name)
| 'SumYCounts' >> beam.CombinePerKey(sum)
| 'MoveYToValue' >> beam.MapTuple(move_y_to_value))
# _SlicedYKey(slice, y), _YRate(y_count, example_count)
return (y_counts
| 'JoinExampleCounts' >> beam.ParDo(
_LookupInnerJoinDoFn(),
right_iterable=beam.pvalue.AsIter(example_counts))
| 'MakeYRates' >> beam.Map(_make_y_rates))
@beam.typehints.with_input_types(types.SlicedRecordBatch)
@beam.typehints.with_output_types(Tuple[types.SliceKey,
statistics_pb2.DatasetFeatureStatistics]
)
class _LiftStatsGenerator(beam.PTransform):
"""A PTransform implementing a TransformStatsGenerator to compute lift.
This transform computes lift for a set of feature pairs (y, x_1), ... (y, x_k)
for a collection of x_paths, and a single y_path. The y_path must be either
  a categorical feature or a numeric feature (in which case binning boundaries
are also required). The x_paths can be manually provided or will be
automatically inferred as the set of categorical features in the schema
(excluding y_path).
"""
def __init__(self, y_path: types.FeaturePath,
schema: Optional[schema_pb2.Schema],
x_paths: Optional[Iterable[types.FeaturePath]],
y_boundaries: Optional[Sequence[float]], min_x_count: int,
top_k_per_y: Optional[int], bottom_k_per_y: Optional[int],
example_weight_map: ExampleWeightMap,
output_custom_stats: bool, name: Text) -> None:
"""Initializes a lift statistics generator.
Args:
y_path: The path to use as Y in the lift expression: lift = P(Y=y|X=x) /
P(Y=y).
schema: An optional schema for the dataset. If not provided, x_paths must
be specified. If x_paths are not specified, the schema is used to
identify all categorical columns for which Lift should be computed.
      x_paths: An optional list of paths to use as X in the lift expression:
        lift = P(Y=y|X=x) / P(Y=y). If None (default), all categorical
        features, excluding the feature passed as y_path, will be used.
y_boundaries: An optional list of boundaries to be used for binning
y_path. If provided with b boundaries, the binned values will be treated
as a categorical feature with b+1 different values. For example, the
y_boundaries value [0.1, 0.8] would lead to three buckets: [-inf, 0.1),
[0.1, 0.8) and [0.8, inf].
min_x_count: The minimum number of examples in which a specific x value
must appear, in order for its lift to be output.
top_k_per_y: Optionally, the number of top x values per y value, ordered
by descending lift, for which to output lift. If both top_k_per_y and
bottom_k_per_y are unset, all values will be output.
bottom_k_per_y: Optionally, the number of bottom x values per y value,
ordered by descending lift, for which to output lift. If both
top_k_per_y and bottom_k_per_y are unset, all values will be output.
      example_weight_map: Optionally, an ExampleWeightMap that maps a
        FeaturePath to its corresponding weight column. If provided and not an
        empty map (i.e. at least one feature has a corresponding weight
        column), weighted lift stats will be populated; otherwise unweighted
        lift stats will be populated.
output_custom_stats: Whether to output custom stats for use with Facets.
name: An optional unique name associated with the statistics generator.
"""
self._name = name
self._schema = schema
self._y_path = y_path
self._min_x_count = min_x_count
self._top_k_per_y = top_k_per_y
self._bottom_k_per_y = bottom_k_per_y
self._output_custom_stats = output_custom_stats
self._y_boundaries = (
np.array(sorted(set(y_boundaries))) if y_boundaries else None)
self._example_weight_map = example_weight_map
# If a schema is provided, we can do some additional validation of the
# provided y_feature and boundaries.
if self._schema is not None:
y_feature = schema_util.get_feature(self._schema, y_path)
y_is_categorical = schema_util.is_categorical_feature(y_feature)
if self._y_boundaries is not None:
if y_is_categorical:
raise ValueError(
'Boundaries cannot be applied to a categorical y_path')
else:
if not y_is_categorical:
raise ValueError('Boundaries must be provided with a non-categorical '
'y_path.')
if x_paths is not None:
self._x_paths = x_paths
elif self._schema is not None:
self._x_paths = (
set(schema_util.get_categorical_features(schema)) - set([y_path]))
else:
raise ValueError('Either a schema or x_paths must be provided.')
def expand(
self,
sliced_record_batchs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
# Compute P(Y=y)
# _SlicedYKey(slice, y), _YRate(y_count, example_count)
y_rates = sliced_record_batchs | 'GetYRates' >> _GetYRates(
self._y_path, self._y_boundaries,
self._example_weight_map.get(self._y_path))
y_keys = y_rates | 'ExtractYKeys' >> beam.Keys()
# Compute P(Y=y | X=x)
# _SlicedYKey(slice, y), _ConditionalYRate(x_path, x, xy_count, x_count)
conditional_y_rates = ((sliced_record_batchs, y_keys)
| 'GetConditionalYRates' >> _GetConditionalYRates(
self._y_path, self._y_boundaries, self._x_paths,
self._min_x_count, self._example_weight_map))
return (
{
'conditional_y_rate': conditional_y_rates,
'y_rate': y_rates
}
| 'CoGroupByForLift' >> beam.CoGroupByKey()
| 'ComputeLifts' >> beam.FlatMap(_compute_lifts)
| 'FilterLifts' >> _FilterLifts(self._top_k_per_y, self._bottom_k_per_y)
| 'GroupLiftsForOutput' >> beam.GroupByKey()
| 'MakeProtos' >> beam.Map(
_make_dataset_feature_stats_proto, self._y_path, self._y_boundaries,
bool(self._example_weight_map.all_weight_features()),
self._output_custom_stats))
@beam.typehints.with_input_types(types.SlicedRecordBatch)
@beam.typehints.with_output_types(Tuple[types.SliceKey,
statistics_pb2.DatasetFeatureStatistics]
)
class _UnweightedAndWeightedLiftStatsGenerator(beam.PTransform):
"""A PTransform to compute both unweighted and weighted lift.
This simply wraps the logic in _LiftStatsGenerator and, depending on the value
  of example_weight_map, either calls it once to compute unweighted lift, or
twice to compute both the unweighted and weighted lift. The result will be a
PCollection of stats per slice, with possibly two stats protos for the same
slice: one for the unweighted lift and one for the weighted lift.
"""
def __init__(self, example_weight_map: ExampleWeightMap, **kwargs):
"""Initializes a weighted lift statistics generator.
Args:
example_weight_map: an ExampleWeightMap that maps a FeaturePath to its
corresponding weight column.
**kwargs: The set of args to be passed to _LiftStatsGenerator.
"""
self._unweighted_generator = _LiftStatsGenerator(
example_weight_map=ExampleWeightMap(), **kwargs)
self._has_any_weight = bool(example_weight_map.all_weight_features())
if self._has_any_weight:
self._weighted_generator = _LiftStatsGenerator(
example_weight_map=example_weight_map, **kwargs)
def expand(
self,
sliced_record_batchs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
unweighted_protos = (
sliced_record_batchs
| 'ComputeUnweightedLift' >> self._unweighted_generator)
if not self._has_any_weight:
# If no weight column name is given, only compute unweighted lift.
return unweighted_protos
weighted_protos = (
sliced_record_batchs
| 'ComputeWeightedLift' >> self._weighted_generator)
return ((unweighted_protos, weighted_protos)
| 'MergeUnweightedAndWeightedProtos' >> beam.Flatten())
class LiftStatsGenerator(stats_generator.TransformStatsGenerator):
"""A transform stats generator for computing lift between two features."""
def __init__(self,
y_path: types.FeaturePath,
schema: Optional[schema_pb2.Schema] = None,
x_paths: Optional[Iterable[types.FeaturePath]] = None,
y_boundaries: Optional[Sequence[float]] = None,
min_x_count: int = 0,
top_k_per_y: Optional[int] = None,
bottom_k_per_y: Optional[int] = None,
example_weight_map: ExampleWeightMap = ExampleWeightMap(),
output_custom_stats: Optional[bool] = False,
name: Text = 'LiftStatsGenerator') -> None:
super(LiftStatsGenerator, self).__init__(
name,
ptransform=_UnweightedAndWeightedLiftStatsGenerator(
example_weight_map=example_weight_map,
schema=schema,
y_path=y_path,
x_paths=x_paths,
y_boundaries=y_boundaries,
min_x_count=min_x_count,
top_k_per_y=top_k_per_y,
bottom_k_per_y=bottom_k_per_y,
output_custom_stats=output_custom_stats,
name=name),
schema=schema)
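# --- Hedged usage sketch (added for illustration; not part of the original
# module). Shows one way this generator might be constructed directly; the
# 'label' feature name and the k values are assumptions, and in practice the
# generator is typically wired into a stats pipeline (e.g. via StatsOptions)
# rather than instantiated by hand.
def _example_make_lift_generator(schema: schema_pb2.Schema) -> LiftStatsGenerator:
  """Builds a LiftStatsGenerator for a hypothetical categorical 'label' column."""
  return LiftStatsGenerator(
      y_path=types.FeaturePath(['label']),
      schema=schema,
      min_x_count=1,
      top_k_per_y=20,
      bottom_k_per_y=20)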
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^getlink$', views.getlink, name='getlink'),
    url(r'^getanswer', views.getanswer, name='getanswer'),
    url(r'^saveanswer', views.saveanswer, name='saveanswer'),
    url(r'^getblog', views.getblog, name='getblog'),
    url(r'^saveblog', views.saveblog, name='saveblog'),
    url(r'^deleteblog', views.deleteblog, name='deleteblog'),
    url(r'^save-lang1', views.savelang1, name='savelang1'),
    url(r'^save-lang2', views.savelang2, name='savelang2'),
    url(r'^save-lang3', views.savelang3, name='savelang3'),
    url(r'^save-lang4', views.savelang4, name='savelang4'),
    url(r'^cfgetlink', views.cfgetlink, name='cfgetlink'),
    url(r'^cfgetanswer', views.cfgetanswer, name='cfgetanswer'),
    url(r'^cfdone', views.cfdone, name='cfdone'),
    url(r'^saverating1', views.saverating1, name='saverating1'),
    url(r'^saverating2', views.saverating2, name='saverating2'),
    url(r'^saverating3', views.saverating3, name='saverating3'),
    url(r'^saverating4', views.saverating4, name='saverating4'),
]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-11-24 16:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cohapp', '0012_auto_20171013_1521'),
]
operations = [
migrations.AddField(
model_name='textdata',
name='accuracy_draft_global',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AddField(
model_name='textdata',
name='accuracy_draft_local',
field=models.PositiveSmallIntegerField(default=0),
),
]
|
# -*- coding: utf-8 -*-
# * Copyright (c) 2018.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='LandmarkTools',
version='0.0.4b',
description='Tools for the Landmark Detection algorithms.',
long_description=long_description,
long_description_content_type="text/markdown",
packages=['ldmtools'],
url='http://uliege.cytomine.org',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: OS Independent'
],
install_requires=['numpy', 'scipy', 'pillow', 'joblib', 'imageio'],
license='LICENSE',
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Tools for Exponential Descent Algorithm, as discussed in
' The Ordered Subsets Mirror Descent Optimization ... '
by Ben-Tal et al
and
' Mirror descent and nonlinear projected subgradient methods ...'
by Beck & Teboulle
NB: EMDA MUST be initialized with an interior point of the simplex.
"""
import numpy as np
from baseOptimizer import BaseOptimizer
def EMDA(obj_func, obj_func_grad, step_size, x0, num_iters, tol):
'''
Simple version of EDA, mainly for timing purposes. We recommend using the
class based implementation for most purposes.
'''
x = x0
err = obj_func(x)
ii = 0
while err > tol and ii <= num_iters:
grad = obj_func_grad(x)
xtemp = x*np.exp(-step_size*grad)
tempsum = np.sum(xtemp)
x = xtemp/tempsum
err = obj_func(x)
ii += 1
return err, ii
class ExpDescentAlg(BaseOptimizer):
'''
Class for EDA algorithm on simplex.
'''
def __init__(self, objfunc, objfuncGrad, step_size, x0):
self._objfunc = objfunc
self._objfuncGrad = objfuncGrad
self._step_size = step_size
self._x = x0
self._k = 0 # iterate counter
self._fVals = [self._objfunc(self._x)]
self._iterates = [self._x]
def prox_step(self, x, grad):
'''
Apply the MD prox operator
'''
xtemp = x*np.exp(-self._step_size*grad/np.sqrt(self._k+1))
tempsum = np.sum(xtemp)
xnew = xtemp/tempsum
return xnew
def step(self):
'''
Take a single step of MDA
'''
x_k = self._x
grad_k = self._objfuncGrad(x_k)
xnew = self.prox_step(x_k, grad_k)
self._x = xnew
self._iterates.append(xnew)
self._fVals.append(self._objfunc(xnew))
self._k +=1
print(self._objfunc(xnew))
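# --- Hedged usage sketch (illustrative only; not part of the original module).
# Minimizes f(x) = ||x - c||^2 over the probability simplex with the simple
# EMDA function above, starting from the uniform (interior) point as required.
# The target vector, step size, iteration budget and tolerance are arbitrary
# illustrative choices.
def _example_emda_run():
    c = np.array([0.7, 0.2, 0.1])   # minimizer already lies on the simplex
    obj = lambda x: float(np.sum((x - c) ** 2))
    grad = lambda x: 2.0 * (x - c)
    x0 = np.ones(3) / 3.0           # interior starting point, as required
    return EMDA(obj, grad, step_size=0.5, x0=x0, num_iters=1000, tol=1e-8)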
|
import os
import platform
import sys
import warnings
# The following code is copied from
# https://github.com/tornadoweb/tornado/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
from distutils.command.build_ext import build_ext # type: ignore
from setuptools import Extension, find_packages, setup
class custom_build_ext(build_ext):
"""Allow C extension building to fail.
The C extension speeds up (de)serialization, but is not essential.
"""
warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for apischema to run,
although they do result in significant speed improvements for
(de)serialization.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
$ sudo apt-get install build-essential python-dev
RedHat and CentOS users should issue the following command:
$ sudo yum install gcc python-devel
Fedora users should issue the following command:
$ sudo dnf install gcc python-devel
MacOS users should run:
$ xcode-select --install
********************************************************************
"""
def run(self):
try:
build_ext.run(self)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write("%s\n" % str(e))
warnings.warn(
self.warning_message
% (
"Extension modules",
"There was an issue with "
"your platform configuration"
" - see above.",
)
)
def build_extension(self, ext):
name = ext.name
try:
build_ext.build_extension(self, ext)
except Exception:
e = sys.exc_info()[1]
sys.stdout.write("%s\n" % str(e))
warnings.warn(
self.warning_message
% (
"The %s extension " "module" % (name,),
"The output above "
"this warning shows how "
"the compilation "
"failed.",
)
)
README = None
# README cannot be read by older python version run by tox
if "TOX_ENV_NAME" not in os.environ:
with open("README.md") as f:
README = f.read()
ext_modules = None
# Cythonization makes apischema a lot slower using PyPy
if platform.python_implementation() != "PyPy":
ext_modules = [
Extension(
f"apischema.{package}.methods", sources=[f"apischema/{package}/methods.c"]
)
for package in ("deserialization", "serialization")
]
setup(
name="apischema",
version="0.17.0",
url="https://github.com/wyfo/apischema",
author="Joseph Perez",
author_email="joperez@hotmail.fr",
license="MIT",
packages=find_packages(include=["apischema*"]),
package_data={"apischema": ["py.typed"]},
description="JSON (de)serialization, *GraphQL* and JSON schema generation using Python typing.",
long_description=README,
long_description_content_type="text/markdown",
python_requires=">=3.6",
install_requires=["dataclasses>=0.7;python_version<'3.7'"],
extras_require={
"graphql": ["graphql-core>=3.1.2"],
"examples": [
"graphql-core>=3.1.2",
"attrs",
"docstring_parser",
"bson",
"orjson",
"pydantic",
"pytest",
"sqlalchemy",
],
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Software Development :: Libraries :: Python Modules",
],
cmdclass={"build_ext": custom_build_ext},
ext_modules=ext_modules,
)
|
import argparse
def get_arg_parser(parser=None):
"""Parse the command line arguments for merge using argparse
Args:
parser (argparse.ArgumentParser or CompliantArgumentParser):
an argument parser instance
Returns:
ArgumentParser: the argument parser instance
Notes:
if parser is None, creates a new parser instance
"""
# add arguments that are specific to the component
if parser is None:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--value",
required=False,
type=int,
default=100,
help="the value on which we will operate",
)
return parser
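# --- Hedged usage sketch (illustrative only; not part of the original script).
# Shows how get_arg_parser() can extend a parser owned by a larger driver
# script; the driver parser and its --verbose flag are hypothetical.
def _example_extend_driver_parser():
    driver_parser = argparse.ArgumentParser(description="pipeline driver")
    driver_parser.add_argument("--verbose", action="store_true")
    get_arg_parser(driver_parser)  # adds the component-specific --value flag
    return driver_parser.parse_args(["--verbose", "--value", "42"])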
def main():
"""The main function"""
# get the arguments
parser = get_arg_parser()
args = parser.parse_args()
args = vars(args)
# this shows a basic operation on the value passed as parameter
value = args["value"]
operand = 1000
result = value + operand
    print(
        f"The value passed as parameter is: {value}. "
        f"We computed: {value} + {operand} = {result}."
    )
if __name__ == "__main__":
main()
|
import os
from setuptools import setup
package_data = {'': ['*.so*',
'*.cuh',
]}
include_dirs = ['python/akg',
'third_party/incubator-tvm/python/tvm',
'third_party/incubator-tvm/topi/python/topi',
'tests/fuzz',
'tests/common',
'src/akg_reduce',
'src/paris_reduce']
def find_files(where=['.']):
"""
Return a package list
'where' is the root directory list
"""
dirs = [path.replace(os.path.sep, '.') for path in where]
for selected_root in where:
for root, all_dirs, files in os.walk(selected_root, followlinks=True):
for dir in all_dirs:
full_path = os.path.join(root, dir)
package = full_path.replace(os.path.sep, '.')
if '.' in dir:
continue
dirs.append(package)
dirs.append('build')
return dirs
setup(name='akg',
version='1.0',
description='akg python libs',
package_data=package_data,
packages=find_files(include_dirs))
|
# Generated by Django 2.2.14 on 2020-08-06 11:36
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("wallet", "0031_cashoutrequest"),
]
operations = [
migrations.AddField(
model_name="transaction",
name="notes",
field=models.TextField(blank=True, editable=False),
),
migrations.AlterField(
model_name="transaction",
name="from_wallet",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.DO_NOTHING,
related_name="from_transactions",
to="wallet.Wallet",
),
),
migrations.AlterField(
model_name="transaction",
name="operation_hash",
field=models.CharField(blank=True, editable=False, max_length=128),
),
migrations.AlterField(
model_name="transaction",
name="submitted_to_chain_at",
field=models.DateTimeField(blank=True, editable=False, null=True),
),
migrations.AlterField(
model_name="wallet",
name="wallet_id",
field=models.CharField(
blank=True, editable=False, max_length=128, unique=True
),
),
]
|
#compdef wwf-chat-parser.py
local arguments
arguments=(
'(- * :)'{-h,--help}'[show this help message and exit]'
'-d[wordsFramework database input file]'
'-o[chat output in Tab Separated format]'
'*:filename:_files'
)
_arguments -s $arguments
|
import ansible_runner
import pathlib
import json
import time
import uuid
import datetime
import logging
from celery import shared_task
logger = logging.getLogger('django')
def _make_events(p):
if p.exists():
def gen():
for f in sorted(p.iterdir(), key=(lambda x: int(x.name.split('-', 1)[0]))):
print(f)
try:
yield dict(json.load(open(f, 'r', encoding='utf-8')))
except json.decoder.JSONDecodeError:
                    # Partial files were already filtered out above, so reading
                    # an incomplete JSON file here should never raise a
                    # JSONDecodeError. It did happen in practice though, which
                    # is possible if the writer renamed the file before its
                    # buffer was flushed.
                    pass
return gen()
else:
return None
def openFile(p):
return open(p, 'r', encoding='utf-8')
# The shared_task decorator registers run_ansible as a Celery task.
@shared_task(name='run_ansible', bind=True)
def run_ansible(self):
default_path = pathlib.Path(__file__).parent.parent.parent / 'ansible'
params = dict(
private_data_dir=str(default_path),
playbook='playbooks/test.yaml',
inventory=str(default_path / 'hosts.ini'),
quiet=True,
artifact_dir=str(default_path / 'ansible_runner' / 'default'),
rotate_artifacts=200,
ident=datetime.datetime.now().strftime('%Y%m%d-%H%M%S-') + str(uuid.uuid4()),
)
result = ansible_runner.interface.run(**params)
ident = result.config.ident
command_path = default_path / 'ansible_runner' / 'default' / ident / 'command'
job_events_path = default_path / 'ansible_runner' / 'default' / ident / 'job_events'
events = list(_make_events(job_events_path))
new_events = list(map(lambda x: x['stdout'], events))
json_list = json.dumps(new_events)
return json_list
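# --- Hedged usage sketch (illustrative only; not part of the original module).
# Enqueues the task from application code; assumes a configured Celery broker
# and result backend, and the helper name is hypothetical.
def enqueue_run_ansible_example():
    async_result = run_ansible.delay()
    return async_result.id  # can be polled later with celery.result.AsyncResult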
|
"""Constants for mobile_app."""
import voluptuous as vol
from homeassistant.components.device_tracker import SERVICE_SEE_PAYLOAD_SCHEMA
from homeassistant.const import (ATTR_DOMAIN, ATTR_SERVICE, ATTR_SERVICE_DATA)
from homeassistant.helpers import config_validation as cv
DOMAIN = 'mobile_app'
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_CLOUDHOOK_URL = 'cloudhook_url'
CONF_SECRET = 'secret'
CONF_USER_ID = 'user_id'
DATA_DELETED_IDS = 'deleted_ids'
DATA_REGISTRATIONS = 'registrations'
DATA_STORE = 'store'
ATTR_APP_COMPONENT = 'app_component'
ATTR_APP_DATA = 'app_data'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_APP_VERSION = 'app_version'
ATTR_DEVICE_NAME = 'device_name'
ATTR_MANUFACTURER = 'manufacturer'
ATTR_MODEL = 'model'
ATTR_OS_NAME = 'os_name'
ATTR_OS_VERSION = 'os_version'
ATTR_SUPPORTS_ENCRYPTION = 'supports_encryption'
ATTR_EVENT_DATA = 'event_data'
ATTR_EVENT_TYPE = 'event_type'
ATTR_TEMPLATE = 'template'
ATTR_TEMPLATE_VARIABLES = 'variables'
ATTR_WEBHOOK_DATA = 'data'
ATTR_WEBHOOK_ENCRYPTED = 'encrypted'
ATTR_WEBHOOK_ENCRYPTED_DATA = 'encrypted_data'
ATTR_WEBHOOK_TYPE = 'type'
ERR_INVALID_COMPONENT = 'invalid_component'
ERR_RENDER_FAILURE = 'render_failure'
ERR_SAVE_FAILURE = 'save_failure'
WEBHOOK_TYPE_CALL_SERVICE = 'call_service'
WEBHOOK_TYPE_FIRE_EVENT = 'fire_event'
WEBHOOK_TYPE_RENDER_TEMPLATE = 'render_template'
WEBHOOK_TYPE_UPDATE_LOCATION = 'update_location'
WEBHOOK_TYPE_UPDATE_REGISTRATION = 'update_registration'
WEBHOOK_TYPES = [WEBHOOK_TYPE_CALL_SERVICE, WEBHOOK_TYPE_FIRE_EVENT,
WEBHOOK_TYPE_RENDER_TEMPLATE, WEBHOOK_TYPE_UPDATE_LOCATION,
WEBHOOK_TYPE_UPDATE_REGISTRATION]
REGISTRATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_APP_COMPONENT): cv.string,
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_ID): cv.string,
vol.Required(ATTR_APP_NAME): cv.string,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Required(ATTR_OS_NAME): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
vol.Required(ATTR_SUPPORTS_ENCRYPTION, default=False): cv.boolean,
})
UPDATE_REGISTRATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
})
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema({
vol.Required(ATTR_WEBHOOK_TYPE): cv.string, # vol.In(WEBHOOK_TYPES)
vol.Required(ATTR_WEBHOOK_DATA, default={}): dict,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
})
CALL_SERVICE_SCHEMA = vol.Schema({
vol.Required(ATTR_DOMAIN): cv.string,
vol.Required(ATTR_SERVICE): cv.string,
vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
})
FIRE_EVENT_SCHEMA = vol.Schema({
vol.Required(ATTR_EVENT_TYPE): cv.string,
vol.Optional(ATTR_EVENT_DATA, default={}): dict,
})
RENDER_TEMPLATE_SCHEMA = vol.Schema({
vol.Required(ATTR_TEMPLATE): cv.string,
vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
})
WEBHOOK_SCHEMAS = {
WEBHOOK_TYPE_CALL_SERVICE: CALL_SERVICE_SCHEMA,
WEBHOOK_TYPE_FIRE_EVENT: FIRE_EVENT_SCHEMA,
WEBHOOK_TYPE_RENDER_TEMPLATE: RENDER_TEMPLATE_SCHEMA,
WEBHOOK_TYPE_UPDATE_LOCATION: SERVICE_SEE_PAYLOAD_SCHEMA,
WEBHOOK_TYPE_UPDATE_REGISTRATION: UPDATE_REGISTRATION_SCHEMA,
}
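# --- Hedged usage sketch (illustrative only; not part of the original module).
# Validates a made-up incoming webhook payload against the envelope schema and
# then against the per-type schema selected from WEBHOOK_SCHEMAS.
def _example_validate_fire_event_payload():
    envelope = WEBHOOK_PAYLOAD_SCHEMA({
        ATTR_WEBHOOK_TYPE: WEBHOOK_TYPE_FIRE_EVENT,
        ATTR_WEBHOOK_DATA: {ATTR_EVENT_TYPE: 'test_event'},
    })
    data_schema = WEBHOOK_SCHEMAS[envelope[ATTR_WEBHOOK_TYPE]]
    return data_schema(envelope[ATTR_WEBHOOK_DATA])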
|
import urllib
from web import BaseServiceHandler
import json
class ServiceEchoHandler(BaseServiceHandler):
services = ['echo', 'json']
def process_request(self, request, service):
if service == 'echo':
if self.validate_required_args(request, ['message']):
message = request.form['message']
message = urllib.unquote(message)
self.set_result(message, 200)
else:
self.set_result('invalid arguments', 400)
elif service == 'json':
j = self.get_json_param(request)
self.set_result(str(j), 200)
|
from django.shortcuts import render, redirect
from .forms import CustomUserCreationForm
from django.http import HttpResponse
def temporal_home(request):
return render(request, 'account/temporal_home.html')
def register(request):
form = CustomUserCreationForm()
if request.method == "POST":
form = CustomUserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('login')
context = {
'form': form,
}
return render(request, 'account/register.html', context)
|
from selenium import webdriver
import time
import sys
import getpass
# reload(sys)
# sys.setdefaultencoding('utf-8')
input_username = input("enter github username: ")
input_password = getpass.getpass()
time.sleep(1)
# Chrome used
driver = webdriver.Chrome(executable_path='C:/Selenium/chromedriver.exe')
# base url
driver.get("http://github.com/login")
username = driver.find_element_by_id("login_field")
password = driver.find_element_by_id("password")
# password and username need to go into these values
username.send_keys(f"{input_username}")
time.sleep(1)
password.send_keys(f"{input_password}")
time.sleep(1)
login_form = driver.find_element_by_xpath("//input[@value='Sign in']")
time.sleep(1)
login_form.click()
time.sleep(1)
# These are some of the most popular users on github
# prepend = ["jlord", "daimajia", "mdo", "schacon", "mattt",
# "sindresorhus", "defunkt", "douglascrockford", "mbostock", "jeresig",
# "mojombo", "addyosmani", "paulirish", "vczh", "romannurik", "tenderlove", "chriscoyier", "johnpapa",
# "josevalim","charliesome", "CoderMJLee", "ry", "antirez", "muan", "isaacs", "angusshire",
# "hadley", "hakimel", "yyx990803", "fat", "fabpot", "ibireme", "tekkub",
# "BYVoid", "laruence", "onevcat", "tpope", "mrdoob", "LeaVerou", "chrisbanes", "wycats", "lifesinger",
# "cloudwu", "mitsuhiko", "michaelliao", "ryanb", "clowwindy", "JacksonTian", "yinwang0", "Trinea",
# "pjhyett", "dhh", "gaearon"]
prepend=["fabpot",
"andrew",
"taylorotwell",
"egoist",
"HugoGiraudel",
"ornicar",
"bebraw",
"nelsonic",
"alexcrichton",
"jonathanong",
"mikermcneil",
"benbalter",
"jxnblk",
"yegor256",
"orta",
"rstacruz",
"GrahamCampbell",
"afc163",
"kamranahmedse",
"joshaber",
"bkeepers",
"kennethreitz",
"kytrinyx",
"STRML",
"atmos",
"weierophinney",
"agentzh",
"steipete",
"ai",
"mikepenz",
"nvie",
"hadley",
"appleboy",
"Rich-Harris",
"drnic",
"rafaelfranca",
"Ocramius",
"mitchellh",
"stof",
"IgorMinar",
"phodal",
"jwiegley",
"geerlingguy",
"dcramer",
"sebastianbergmann",
"brunocvcunha",
"ljharb",
"jdalton",
"sevilayha",
"paulmillr",
"tmm1",
"c9s",
"zcbenz",
"holman",
"kevinsawicki",
"yihui",
"buckyroberts",
"kbrsh",
"dmalan",
"mhevery",
"mgechev",
"kylef",
"chjj",
"ayende",
"mcollina",
"mdo",
"yoshuawuyts",
"muan",
"kentcdodds",
"jskeet",
"mitsuhiko",
"steveklabnik",
"hzoo",
"Caged",
"dlew",
"technoweenie",
"gaearon",
"soumith",
"feross",
"michalbe",
"brianleroux",
"willdurand",
"alexjlockwood",
"matsko",
"stefanpenner",
"adamwathan",
"Haacked",
"curran",
"rauchg",
"ianstormtaylor",
"KrauseFx",
"tj",
"jgm",
"jverkoey",
"chenglou",
"DataTables",
"SamyPesse",
"mjhea0",
"0x00A",
"tmcw",
"brentvatne",
"carpedm20",
"benjamn",
"notwaldorf",
"miyagawa",
"rnystrom",
"photonstorm",
"mattn",
"dougwilson",
"JakeWharton",
"yyx990803",
"krzysztofzablocki",
"eduardolundgren",
"vjeux",
"mxcl",
"domenic",
"alex",
"mrdoob",
"josegonzalez",
"fzaninotto",
"pissang",
"jamesmontemagno",
"paulcbetts",
"paulirish",
"samdark",
"madskristensen",
"sokra",
"marijnh",
"alanhamlett",
"wesm",
"josevalim",
"jennybc",
"BurntSushi",
"zenorocha",
"contra",
"jaredhanson",
"radar",
"bevacqua",
"xudafeng",
"j2kun",
"dominictarr",
"avelino",
"vinta",
"developit",
"ashleygwilliams",
"ashfurrow",
"f",
"onevcat",
"toddmotto",
"gdi2290",
"EisenbergEffect",
"ankane",
"keijiro",
"nolimits4web",
"davidfowl",
"biezhi",
"LeaVerou",
"davidtmiller",
"odersky",
"vhf",
"soffes",
"mxstbr",
"Jinjiang",
"happypeter",
"yeasy",
"mafintosh",
"vczh",
"Draveness",
"ded",
"vladikoff",
"bnoordhuis",
"jendewalt",
"jessfraz",
"indutny",
"olivergierke",
"i5ting",
"laanwj",
"drakeet",
"thejameskyle",
"ahmetb",
"sdiehl",
"jaywcjlove",
"gitster",
"djspiewak",
"evilsocket",
"mariotaku",
"shama",
"CamDavidsonPilon",
"dsyer",
"winterbe",
"dennybritz",
"arun-gupta",
"maryrosecook",
"killme2008",
"IanLunn",
"ruanyf",
"binux",
"nfultz",
"leah",
"suissa",
"anishathalye",
"pkrumins",
"isaacs",
"staltz",
"KittenYang",
"leebyron",
"fengmk2",
"passy",
"gorhill",
"phuslu",
"mrmrs",
"siddontang",
"daylerees",
"weavejester",
"zce",
"philsturgeon",
"oldratlee",
"josephmisiti",
"atian25",
"ebidel",
"overtrue",
"connors",
"eliben",
"Seldaek",
"joyeecheung",
"bailicangdu",
"nikic",
"codahale",
"amitshekhariitbhu",
"rakyll",
"junyanz",
"simurai",
"nicolasgramlich",
"shiffman",
"purcell",
"evanphx",
"ericelliott",
"matyhtf",
"StephenGrider",
"AdamBien",
"rtomayko",
"tomchristie",
"be5invis",
"amueller",
"dead-horse",
"remy",
"Unknwon",
"jiyinyiyong",
"Shougo",
"arunoda",
"benoitc",
"ask",
"JohnSundell",
"Raynos",
"samuelclay",
"unicodeveloper"]
for user in prepend:
for t in range(1,3):
string = "https://github.com/{}?page={}&tab=followers".format(user,t)
driver.get(string)
time.sleep(1)
# make sure to pick the correct directory to save the files to
# follow_button = driver.find_elements_by_xpath("//button[@type='submit']")
follow_button = driver.find_elements_by_xpath("//input[@aria-label='Follow this person']")
# Once page is loaded this clicks all buttons for follow
try:
for i in follow_button:
i.submit()
except:
pass
time.sleep(1)
driver.close()
|
from unittest import TestCase
from unittest.mock import MagicMock, patch
from dataforseo_sdk.api_client.api_credentials import APICredentials
from dataforseo_sdk.locations.location_mixin import LocationMixin
class TestLocationMixin(TestCase):
def setUp(self) -> None:
self.en_us_locale = "en_us"
self.en_us_location_code = 2840
self.en_us_language_code = "en"
self.en_us_country_iso_code = "US"
self.mock_location_mixer = MockLocationMixer()
self.mock_location_mixer.credentials = APICredentials(**self.api_credentials())
return super().setUp()
def api_credentials(self):
return {"username": "api_username", "password": "api_password"}
def mock_location_service(self, mock_location_service):
self.mock_location_service_instance = MagicMock()
self.mock_location_service_instance.locales = {
self.en_us_locale: (
self.en_us_location_code,
self.en_us_language_code,
self.en_us_country_iso_code,
)
}
mock_location_service.return_value = self.mock_location_service_instance
def test_locale__existing_locale(self):
expected_locale = "en_ca"
self.mock_location_mixer._locale = expected_locale
assert self.mock_location_mixer.locale == expected_locale
@patch("dataforseo_sdk.locations.location_mixin.Config")
def test_locale(self, mock_config):
expected_locale = "en_gb"
mock_config.config = {"locale": expected_locale}
assert self.mock_location_mixer.locale == expected_locale
def test_location_code__existing(self):
expected_location_code = self.en_us_location_code
self.mock_location_mixer._location_code = expected_location_code
assert self.mock_location_mixer.location_code == expected_location_code
@patch("dataforseo_sdk.locations.location_mixin.Config")
@patch("dataforseo_sdk.locations.location_mixin.LocationService")
def test_location_code(self, mock_location_service, mock_config):
mock_config.config = {"locale": self.en_us_locale}
self.mock_location_service(mock_location_service)
assert self.mock_location_mixer.location_code == self.en_us_location_code
def test_language_code__existing(self):
expected_language_code = self.en_us_language_code
self.mock_location_mixer._language_code = expected_language_code
assert self.mock_location_mixer.language_code == expected_language_code
@patch("dataforseo_sdk.locations.location_mixin.Config")
@patch("dataforseo_sdk.locations.location_mixin.LocationService")
def test_language_code(self, mock_location_service, mock_config):
mock_config.config = {"locale": self.en_us_locale}
self.mock_location_service(mock_location_service)
assert self.mock_location_mixer.language_code == self.en_us_language_code
def test_country_iso_code__existing(self):
expected_country_iso_code = self.en_us_country_iso_code
self.mock_location_mixer._country_iso_code = expected_country_iso_code
assert self.mock_location_mixer.country_iso_code == expected_country_iso_code
@patch("dataforseo_sdk.locations.location_mixin.Config")
@patch("dataforseo_sdk.locations.location_mixin.LocationService")
def test_country_iso_code(self, mock_location_service, mock_config):
mock_config.config = {"locale": self.en_us_locale}
self.mock_location_service(mock_location_service)
assert self.mock_location_mixer.country_iso_code == self.en_us_country_iso_code
class MockLocationMixer(LocationMixin):
pass
|
from time import time
import libsysrepoPython3 as sr
# These tests follow the existing performance test implemented in C in
# tests/measure_performance.c. They are meant to compare the Python bindings
# for sysrepo with the original C tests and to serve as an overall measure for
# bindings clients.
# Class stub which bundles test function and additional data for running a test.
class TestContext:
def __init__(self, function, op_name, op_count, setup=None, teardown=None):
self.function = function
self.op_name = op_name
self.op_count = op_count
self.setup = setup
self.teardown = teardown
# Count how many times will some test run its defining function.
OP_COUNT = 50000
# function to populate ietf-interfaces yang model
def clearDataTree(module_name, datastore):
"""
Clear yang model.
"""
conn = sr.Connection("clear")
sess = sr.Session(conn, datastore)
subs = sr.Subscribe(sess)
xpath = "/" + module_name + ":*//*"
values = sess.get_items(xpath)
if values == None:
return
for i in range(values.val_cnt()):
sess.delete_item(values.val(i).xpath())
sess.commit()
# function to populate ietf-interfaces yang model
def createDataTreeLargeIETFinterfacesModule(count, datastore):
"""
Add data to ietf-interfaces.
"""
conn = sr.Connection("load test")
sess = sr.Session(conn, datastore)
subs = sr.Subscribe(sess)
for i in range(count):
xpath = "/ietf-interfaces:interfaces/interface[name='eth" + str(i) + "']"
        xpath_ip = xpath + "/ietf-ip:ipv4/address[ip='192.168.1." + str(i) + "']"
x_name = xpath + "/name"
x_type = xpath + "/type"
x_desc = xpath + "/description"
x_enabled = xpath + "/enabled"
x_ipv4_enabled = xpath + "/ietf-ip:ipv4/enabled"
x_ipv4_mtu = xpath + "/ietf-ip:ipv4/mtu"
x_ipv4_forward = xpath + "/ietf-ip:ipv4/forwarding"
x_prefix_len = xpath_ip + "/prefix-length"
val = sr.Val("Ethernet 0", sr.SR_STRING_T)
sess.set_item(x_desc, val)
val = sr.Val("iana-if-type:ethernetCsmacd", sr.SR_IDENTITYREF_T)
sess.set_item(x_type, val)
val = sr.Val(True, sr.SR_BOOL_T)
sess.set_item(x_enabled, val)
val = sr.Val(True, sr.SR_BOOL_T)
sess.set_item(x_ipv4_enabled, val)
val = sr.Val(1500, sr.SR_UINT16_T)
sess.set_item(x_ipv4_mtu, val)
val = sr.Val(False, sr.SR_BOOL_T)
sess.set_item(x_ipv4_forward, val)
sess.commit()
# function to populate example-module yang model
def createDataTreeLargeExampleModule(count, datastore):
"""
Add data to example-module.
"""
conn = sr.Connection("load test")
sess = sr.Session(conn, datastore)
subs = sr.Subscribe(sess)
for i in range(count):
xpath = "/example-module:container/list[key1='key" + str(i) + "'][key2='key" + str(i) +"']/leaf"
val = sr.Val("leaf" + str(i), sr.SR_STRING_T)
sess.set_item(xpath, val)
sess.commit()
def sysrepo_setup(state):
"""
Initialize sysrepo context which program uses
"""
state['connection'] = sr.Connection("measure performance")
assert state['connection'] is not None
return
def measure(test_f, name, op_count, setup_f, teardown_f):
"""
    Calculate and print the running time of a single test.
    Sets up and tears down resources if necessary (setup_f and teardown_f are
    currently unused, kept for parity with the C implementation).
"""
t_start = time()
items = test_f(state, op_count, 1)
t_end = time()
seconds = t_end - t_start
print("%40s| %10.0f | %10d | %13d | %10.0f | %10.2f\n" % \
(name, (float(op_count))/ seconds, items, op_count, (float(op_count * items))/ seconds, seconds));
def perf_get_item_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list[key1='key0'][key2='key0']/leaf"
for i in range(op_num):
val = sess.get_item(xpath)
assert val.type() is sr.SR_STRING_T, "check value type"
return 1
# All other testing functions are similar, named after corresponding c functions.
def perf_get_item_first_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container"
for i in range(op_num):
val = sess.get_item(xpath)
assert val.type() is sr.SR_CONTAINER_T, "check value type"
return 1
def perf_get_item_with_data_load_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
xpath = "/example-module:container/list[key1='key0'][key2='key0']/leaf"
for i in range(op_num):
sess = sr.Session(conn, state['datastore'])
val = sess.get_item(xpath)
assert val.type() is sr.SR_STRING_T, "check value type"
return 1
def perf_get_items_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list/leaf"
for i in range(op_num):
val = sess.get_items(xpath)
return 1
def perf_get_items_iter_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list/leaf"
count = 0
for i in range(op_num):
it = sess.get_items_iter(xpath)
assert it is not None, "Iterator not found"
while True:
val = sess.get_item_next(it)
if val == None: break
count = count + 1
return count
def perf_get_ietf_interfaces_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/ietf-interfaces:interfaces//*"
count = 0
for i in range(op_num):
it = sess.get_items_iter(xpath)
assert it is not None, "Iterator not found"
while True:
val = sess.get_item_next(it)
if val == None: break
count = count + 1
return count
def perf_get_subtree_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list[key1='key0'][key2='key0']/leaf"
for i in range(op_num):
tree = sess.get_subtree(xpath)
assert tree is not None, "check if empty"
return 1
def perf_get_subtree_with_data_load_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
xpath = "/example-module:container/list[key1='key0'][key2='key0']/leaf"
for i in range(op_num):
sess = sr.Session(conn, state['datastore'])
tree = sess.get_subtree(xpath)
assert tree is not None, "check if empty"
return 1
def perf_get_subtrees_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list/leaf"
for i in range(op_num):
trees = sess.get_subtrees(xpath)
assert trees.tree(0) is not None, "check if empty"
return trees.tree_cnt()
def get_nodes_cnt(trees):
count = 0
for i in range(trees.tree_cnt()):
node = trees.tree(0)
count_children = True
while(True):
if (count_children):
while (node.first_child()):
node = node.first_child()
count = count + 1
if (node.next()):
node = node.next()
count_children = True
else:
node = node.parent();
count_children = False
if node is None:
break
return count
def perf_get_ietf_intefaces_tree_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
count = 0
xpath = "/ietf-interfaces:interfaces/."
for i in range(op_num):
trees = sess.get_subtrees(xpath)
assert trees.tree(0) is not None, "check if empty"
count = count + get_nodes_cnt(trees)
return count
def perf_set_delete_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list[key1='set_del'][key2='set_1']/leaf"
for i in range(op_num):
val = sr.Val("Leaf", sr.SR_STRING_T)
sess.set_item(xpath, val)
sess.delete_item(xpath)
return 1 * 3 * 2
def perf_set_delete_100_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list[key1='set_del'][key2='set_1']/leaf"
for i in range(op_num):
for j in range(100):
xpath = "/example-module:container/list[key1='set_del'][key2='set_" + str(j) + "']/leaf"
val = sr.Val("Leaf", sr.SR_STRING_T)
sess.set_item(xpath, val)
for j in range(100):
xpath = "/example-module:container/list[key1='set_del'][key2='set_" + str(j) + "']/leaf"
sess.delete_item(xpath)
return 100 * 1 * 3 * 2
def perf_commit_test(state, op_num, items):
conn = state["connection"]
assert conn is not None, "Unable to get connection."
sess = sr.Session(conn, state['datastore'])
assert sess is not None, "Unable to get session."
xpath = "/example-module:container/list[key1='key0'][key2='key0']/leaf"
for i in range(op_num):
if (i % 2 == 0):
val = sr.Val("Leaf", sr.SR_STRING_T)
sess.set_item(xpath, val)
else:
sess.delete_item(xpath)
sess.commit()
return 1
def print_measure_header(title):
    print("\n\n\t\t%s" % (title,), end=' ')
    print("\n%-40s| %10s | %10s | %13s | %10s | %10s.\n" % ("Operation", "ops/sec", "items/op", "ops performed", "items/sec", "test time"), end=' ')
    print("---------------------------------------------------------------------------------------------------\n", end=' ')
def test_perf(ts, test_count, title, selection):
print_measure_header(title)
for i in range(test_count):
if -1 == selection or i == selection:
t = ts[i]
measure(t.function, t.op_name, t.op_count, t.setup, t.teardown)
if __name__ == "__main__":
op_count = 5000
tests = [TestContext(perf_get_item_test, "Get item one leaf", op_count),
TestContext(perf_get_item_first_test, "Get item first leaf", op_count),
TestContext(perf_get_item_with_data_load_test, "Get item (including session start)", op_count),
TestContext(perf_get_items_test, "Get all items of a list", op_count),
TestContext(perf_get_items_iter_test, "Get items iter all list", op_count),
TestContext(perf_get_ietf_interfaces_test, "Get subtrees ietf-if config", op_count),
TestContext(perf_get_subtree_test, "Get subtree one leaf", op_count),
TestContext(perf_get_subtree_with_data_load_test, "Get subtree (including session start)", op_count),
TestContext(perf_get_subtrees_test, "Get subtree all leaf", op_count),
TestContext(perf_get_ietf_intefaces_tree_test, "Get subtrees ietf-if config", op_count),
TestContext(perf_set_delete_test, "Set & delete one list", op_count),
TestContext(perf_set_delete_100_test, "Set & delete 100 lists", op_count),
TestContext(perf_commit_test, "Commit one leaf change", op_count),
]
state = {}
state['datastore'] = sr.SR_DS_STARTUP
sysrepo_setup(state)
elements = [1, 20, 100]
datastores = [sr.SR_DS_STARTUP]
try:
for el in elements:
for datastore in datastores:
state['datastore'] = datastore
clearDataTree("ietf-interfaces", state['datastore'])
clearDataTree("example-module", state['datastore'])
createDataTreeLargeIETFinterfacesModule(el, state['datastore'])
createDataTreeLargeExampleModule(el, state['datastore'])
if (state["datastore"] == sr.SR_DS_RUNNING):
test_perf(tests, len(tests), "Data file " + str(el) + " list instance in datastore running", -1)
elif (state["datastore"] == sr.SR_DS_STARTUP):
test_perf(tests, len(tests), "Data file " + str(el) + " list instance in datastore startup", -1)
    except Exception as e:
        print(e)
    # clean
    try:
        clearDataTree("ietf-interfaces", state['datastore'])
    except Exception as e:
        print(e)
    print("End")
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020-2022 by SCICO Developers
# All rights reserved. BSD 3-clause License.
# This file is part of the SCICO package. Details of the copyright and
# user license can be found in the 'LICENSE' file distributed with the
# package.
"""ADMM auxiliary classes."""
# Needed to annotate a class method that returns the encapsulating class;
# see https://www.python.org/dev/peps/pep-0563/
from __future__ import annotations
from functools import reduce
from typing import Any, Optional, Union
import jax
from jax.scipy.sparse.linalg import cg as jax_cg
import scico.numpy as snp
import scico.optimize.admm as soa
from scico.linop import CircularConvolve, Identity, LinearOperator
from scico.loss import SquaredL2Loss
from scico.numpy import BlockArray
from scico.numpy.linalg import norm
from scico.numpy.util import ensure_on_device, is_real_dtype
from scico.solver import cg as scico_cg
from scico.solver import minimize
from scico.typing import JaxArray
class SubproblemSolver:
r"""Base class for solvers for the non-separable ADMM step.
The ADMM solver implemented by :class:`.ADMM` addresses a general
problem form for which one of the corresponding ADMM algorithm
subproblems is separable into distinct subproblems for each of the
:math:`g_i`, and another that is non-separable, involving function
:math:`f` and a sum over :math:`\ell_2` norm terms involving all
operators :math:`C_i`. This class is a base class for solvers of
the latter subproblem
.. math::
\argmin_{\mb{x}} \; f(\mb{x}) + \sum_i \frac{\rho_i}{2}
\norm{\mb{z}^{(k)}_i - \mb{u}^{(k)}_i - C_i \mb{x}}_2^2 \;.
Attributes:
admm (:class:`.ADMM`): ADMM solver object to which the
solver is attached.
"""
def internal_init(self, admm: soa.ADMM):
"""Second stage initializer to be called by :meth:`.ADMM.__init__`.
Args:
admm: Reference to :class:`.ADMM` object to which the
:class:`.SubproblemSolver` object is to be attached.
"""
self.admm = admm
class GenericSubproblemSolver(SubproblemSolver):
"""Solver for generic problem without special structure.
Attributes:
admm (:class:`.ADMM`): ADMM solver object to which the solver is
attached.
minimize_kwargs (dict): Dictionary of arguments for
:func:`scico.solver.minimize`.
"""
def __init__(self, minimize_kwargs: dict = {"options": {"maxiter": 100}}):
"""Initialize a :class:`GenericSubproblemSolver` object.
Args:
minimize_kwargs: Dictionary of arguments for
:func:`scico.solver.minimize`.
"""
self.minimize_kwargs = minimize_kwargs
self.info: dict = {}
def solve(self, x0: Union[JaxArray, BlockArray]) -> Union[JaxArray, BlockArray]:
"""Solve the ADMM step.
Args:
x0: Initial value.
Returns:
Computed solution.
"""
x0 = ensure_on_device(x0)
@jax.jit
def obj(x):
out = 0.0
for rhoi, Ci, zi, ui in zip(
self.admm.rho_list, self.admm.C_list, self.admm.z_list, self.admm.u_list
):
out = out + 0.5 * rhoi * norm(zi - ui - Ci(x)) ** 2
if self.admm.f is not None:
out = out + self.admm.f(x)
return out
res = minimize(obj, x0, **self.minimize_kwargs)
for attrib in ("success", "status", "message", "nfev", "njev", "nhev", "nit", "maxcv"):
self.info[attrib] = getattr(res, attrib, None)
return res.x
class LinearSubproblemSolver(SubproblemSolver):
r"""Solver for quadratic functionals.
Solver for the case in which :code:`f` is a quadratic function of
:math:`\mb{x}`. It is a specialization of :class:`.SubproblemSolver`
for the case where :code:`f` is an :math:`\ell_2` or weighted
:math:`\ell_2` norm, and :code:`f.A` is a linear operator, so that
the subproblem involves solving a linear equation. This requires that
`f.functional` be an instance of :class:`.SquaredL2Loss` and for
the forward operator :code:`f.A` to be an instance of
:class:`.LinearOperator`.
The :math:`\mb{x}`-update step is
.. math::
\mb{x}^{(k+1)} = \argmin_{\mb{x}} \; \frac{1}{2}
\norm{\mb{y} - A \mb{x}}_W^2 + \sum_i \frac{\rho_i}{2}
\norm{\mb{z}^{(k)}_i - \mb{u}^{(k)}_i - C_i \mb{x}}_2^2 \;,
where :math:`W` a weighting :class:`.Diagonal` operator
or an :class:`.Identity` operator (i.e. no weighting).
This update step reduces to the solution of the linear system
.. math::
\left(A^H W A + \sum_{i=1}^N \rho_i C_i^H C_i \right)
\mb{x}^{(k+1)} = \;
A^H W \mb{y} + \sum_{i=1}^N \rho_i C_i^H ( \mb{z}^{(k)}_i -
\mb{u}^{(k)}_i) \;.
Attributes:
admm (:class:`.ADMM`): ADMM solver object to which the solver is
attached.
cg_kwargs (dict): Dictionary of arguments for CG solver.
        cg (func): CG solver function (:func:`scico.solver.cg` or
            :func:`jax.scipy.sparse.linalg.cg`).
        lhs_op (type): Function implementing the linear operator needed for
            the :math:`\mb{x}` update step.
"""
def __init__(self, cg_kwargs: Optional[dict[str, Any]] = None, cg_function: str = "scico"):
"""Initialize a :class:`LinearSubproblemSolver` object.
Args:
cg_kwargs: Dictionary of arguments for CG solver. See
documentation for :func:`scico.solver.cg` or
:func:`jax.scipy.sparse.linalg.cg`,
including how to specify a preconditioner.
Default values are the same as those of
:func:`scico.solver.cg`, except for
`"tol": 1e-4` and `"maxiter": 100`.
cg_function: String indicating which CG implementation to
use. One of "jax" or "scico"; default "scico". If
"scico", uses :func:`scico.solver.cg`. If "jax", uses
:func:`jax.scipy.sparse.linalg.cg`. The "jax" option is
slower on small-scale problems or problems involving
external functions, but can be differentiated through.
The "scico" option is faster on small-scale problems, but
slower on large-scale problems where the forward
operator is written entirely in jax.
"""
default_cg_kwargs = {"tol": 1e-4, "maxiter": 100}
if cg_kwargs:
default_cg_kwargs.update(cg_kwargs)
self.cg_kwargs = default_cg_kwargs
self.cg_function = cg_function
if cg_function == "scico":
self.cg = scico_cg
elif cg_function == "jax":
self.cg = jax_cg
else:
raise ValueError(
f"Parameter cg_function must be one of 'jax', 'scico'; got {cg_function}"
)
self.info = None
def internal_init(self, admm: soa.ADMM):
if admm.f is not None:
if not isinstance(admm.f, SquaredL2Loss):
raise ValueError(
"LinearSubproblemSolver requires f to be a scico.loss.SquaredL2Loss; "
f"got {type(admm.f)}"
)
if not isinstance(admm.f.A, LinearOperator):
raise ValueError(
f"LinearSubproblemSolver requires f.A to be a scico.linop.LinearOperator; "
f"got {type(admm.f.A)}"
)
super().internal_init(admm)
        # Set lhs_op = \sum_i rho_i * Ci.H @ Ci
# Use reduce as the initialization of this sum is messy otherwise
lhs_op = reduce(
lambda a, b: a + b, [rhoi * Ci.gram_op for rhoi, Ci in zip(admm.rho_list, admm.C_list)]
)
if admm.f is not None:
# hessian = A.T @ W @ A; W may be identity
lhs_op = lhs_op + admm.f.hessian
lhs_op.jit()
self.lhs_op = lhs_op
def compute_rhs(self) -> Union[JaxArray, BlockArray]:
r"""Compute the right hand side of the linear equation to be solved.
Compute
.. math::
A^H W \mb{y} + \sum_{i=1}^N \rho_i C_i^H ( \mb{z}^{(k)}_i -
\mb{u}^{(k)}_i) \;.
Returns:
            Computed right hand side of the linear equation.
"""
C0 = self.admm.C_list[0]
rhs = snp.zeros(C0.input_shape, C0.input_dtype)
if self.admm.f is not None:
ATWy = self.admm.f.A.adj(self.admm.f.W.diagonal * self.admm.f.y) # type: ignore
rhs += 2.0 * self.admm.f.scale * ATWy # type: ignore
for rhoi, Ci, zi, ui in zip(
self.admm.rho_list, self.admm.C_list, self.admm.z_list, self.admm.u_list
):
rhs = rhs + rhoi * Ci.adj(zi - ui)
return rhs
def solve(self, x0: Union[JaxArray, BlockArray]) -> Union[JaxArray, BlockArray]:
"""Solve the ADMM step.
Args:
x0: Initial value.
Returns:
Computed solution.
"""
x0 = ensure_on_device(x0)
rhs = self.compute_rhs()
x, self.info = self.cg(self.lhs_op, rhs, x0, **self.cg_kwargs) # type: ignore
return x
class CircularConvolveSolver(LinearSubproblemSolver):
r"""Solver for linear operators diagonalized in the DFT domain.
Specialization of :class:`.LinearSubproblemSolver` for the case
where :code:`f` is an instance of :class:`.SquaredL2Loss`, the
forward operator :code:`f.A` is either an instance of
:class:`.Identity` or :class:`.CircularConvolve`, and the
:code:`C_i` are all instances of :class:`.Identity` or
:class:`.CircularConvolve`. None of the instances of
:class:`.CircularConvolve` may sum over any of their axes.
Attributes:
admm (:class:`.ADMM`): ADMM solver object to which the solver is
attached.
        A_lhs (:class:`.CircularConvolve`): Left hand side, in the DFT
            domain, of the linear equation to be solved.
"""
def __init__(self):
"""Initialize a :class:`CircularConvolveSolver` object."""
def internal_init(self, admm: soa.ADMM):
if admm.f is not None:
if not isinstance(admm.f, SquaredL2Loss):
raise ValueError(
"CircularConvolveSolver requires f to be a scico.loss.SquaredL2Loss; "
f"got {type(admm.f)}"
)
if not isinstance(admm.f.A, (CircularConvolve, Identity)):
raise ValueError(
"CircularConvolveSolver requires f.A to be a scico.linop.CircularConvolve "
f"or scico.linop.Identity; got {type(admm.f.A)}"
)
super().internal_init(admm)
self.real_result = is_real_dtype(admm.C_list[0].input_dtype)
lhs_op_list = [
rho * CircularConvolve.from_operator(C.gram_op)
for rho, C in zip(admm.rho_list, admm.C_list)
]
A_lhs = reduce(lambda a, b: a + b, lhs_op_list)
if self.admm.f is not None:
A_lhs += 2.0 * admm.f.scale * CircularConvolve.from_operator(admm.f.A.gram_op)
self.A_lhs = A_lhs
def solve(self, x0: Union[JaxArray, BlockArray]) -> Union[JaxArray, BlockArray]:
"""Solve the ADMM step.
Args:
x0: Initial value.
Returns:
Computed solution.
"""
x0 = ensure_on_device(x0)
rhs = self.compute_rhs()
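        # A_lhs is diagonal in the DFT domain, so the linear system is solved
        # by transforming the right hand side and dividing pointwise by the
        # DFT of the left hand side kernel (see the class docstring).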
rhs_dft = snp.fft.fftn(rhs, axes=self.A_lhs.x_fft_axes)
x_dft = rhs_dft / self.A_lhs.h_dft
x = snp.fft.ifftn(x_dft, axes=self.A_lhs.x_fft_axes)
if self.real_result:
x = x.real
return x
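if __name__ == "__main__":
    # Illustrative usage sketch only, not part of the solver implementations
    # above. It assumes an ADMM constructor of roughly the form
    # ADMM(f, g_list, C_list, rho_list, x0, maxiter, subproblem_solver);
    # exact import paths and keyword names may differ between scico versions.
    import scico.numpy as snp
    from scico import functional, linop, loss
    from scico.optimize import ADMM
    y = snp.zeros((64, 64))  # placeholder observation; substitute real data
    A = linop.Identity(y.shape)
    f = loss.SquaredL2Loss(y=y, A=A)
    g = functional.L1Norm()  # simple sparsity-promoting regularizer
    C = linop.Identity(y.shape)  # trivial C_i, chosen only for illustration
    solver = ADMM(
        f=f,
        g_list=[g],
        C_list=[C],
        rho_list=[1.0],
        x0=A.adj(y),
        maxiter=10,
        subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-5}),
    )
    x_hat = solver.solve()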
|
# -*- encoding: utf-8 -*-
"""
@File : creator_server.py
@Time : 2021/1/18 15:43
@Author : chise
@Email : chise123@live.com
@Software: PyCharm
@info :
"""
controls = []
|
import datetime
import xbmc
import xbmcgui
import simplecache
from resources.lib import logger, ADDON
from resources.lib.language import get_string as _
cache = simplecache.SimpleCache()
settings_storage = {}
def read_settings():
settings_storage['disable_connection_message'] = ADDON.getSettingBool("disableConnectionMessage")
settings_storage['reloadFlash'] = ADDON.getSettingBool("reloadFlash")
settings_storage['initialFlash'] = ADDON.getSettingBool("initialFlash")
settings_storage['forceOnSunset'] = ADDON.getSettingBool("forceOnSunset")
settings_storage['daylightDisable'] = ADDON.getSettingBool("daylightDisable")
settings_storage['enable_if_already_active'] = ADDON.getSettingBool("enable_if_already_active")
settings_storage['keep_lights_off'] = ADDON.getSettingBool("keep_lights_off")
cache.set("script.service.hue.daylightDisable", ADDON.getSettingBool("daylightDisable"))
settings_storage['enableSchedule'] = ADDON.getSettingBool("enableSchedule")
settings_storage['startTime'] = ADDON.getSetting("startTime") # string HH:MM
settings_storage['endTime'] = ADDON.getSetting("endTime") # string HH:MM
settings_storage['disableConnectionMessage'] = ADDON.getSettingBool("disableConnectionMessage")
settings_storage['videoMinimumDuration'] = ADDON.getSettingInt("video_MinimumDuration") # Setting in Minutes. Kodi library uses seconds, needs to be converted.
settings_storage['video_enableMovie'] = ADDON.getSettingBool("video_Movie")
settings_storage['video_enableMusicVideo'] = ADDON.getSettingBool("video_MusicVideo")
settings_storage['video_enableEpisode'] = ADDON.getSettingBool("video_Episode")
settings_storage['video_enablePVR'] = ADDON.getSettingBool("video_PVR")
settings_storage['video_enableOther'] = ADDON.getSettingBool("video_Other")
settings_storage['ambiEnabled'] = ADDON.getSettingBool("group3_enabled")
settings_storage['show500Error'] = ADDON.getSettingBool("show500Error")
_validate_schedule()
_validate_ambilight()
def _validate_ambilight():
logger.debug("Validate ambilight config. Enabled: {}".format(settings_storage['ambiEnabled']))
if settings_storage['ambiEnabled']:
light_ids = ADDON.getSetting("group3_Lights")
if light_ids == "-1":
logger.error("No ambilights selected")
xbmcgui.Dialog().notification(_("Hue Service"), _("No lights selected for Ambilight."), icon=xbmcgui.NOTIFICATION_ERROR)
ADDON.setSettingBool("group3_enabled", False)
settings_storage['ambiEnabled'] = False
def _validate_schedule():
logger.debug("Validate schedule. Schedule Enabled: {}".format(settings_storage['enableSchedule']))
if settings_storage['enableSchedule']:
try:
convert_time(settings_storage['startTime'])
convert_time(settings_storage['endTime'])
logger.debug("Time looks valid")
except ValueError as e:
logger.error("Invalid time settings: {}".format(e))
xbmcgui.Dialog().notification(_("Hue Service"), _("Invalid start or end time, schedule disabled"), icon=xbmcgui.NOTIFICATION_ERROR)
ADDON.setSettingBool("EnableSchedule", False)
settings_storage['enableSchedule'] = False
def convert_time(time):
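    """Convert an "HH:MM" time string (as used by the startTime/endTime settings) to a datetime.time."""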
hour = int(time.split(":")[0])
minute = int(time.split(":")[1])
return datetime.time(hour, minute)
|
from types import MappingProxyType
from typing import Any, Dict, Iterable, Literal, Mapping, Optional, Sequence, Tuple, Type, Union
import numpy as np
import scanpy as sc
from anndata import AnnData
from leidenalg.VertexPartition import MutableVertexPartition
from scanpy._utils import AnyRandom
from scipy.sparse import spmatrix
from ehrapy.api.preprocessing._scanpy_pp_api import pca # noqa: E402,F403,F401
def tsne(
adata: AnnData,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
perplexity: Union[float, int] = 30,
early_exaggeration: Union[float, int] = 12,
learning_rate: Union[float, int] = 1000,
random_state: AnyRandom = 0,
n_jobs: Optional[int] = None,
copy: bool = False,
metric: str = "euclidean",
) -> Optional[AnnData]: # pragma: no cover
"""Calculates t-SNE [Maaten08]_ [Amir13]_ [Pedregosa11]_.
t-distributed stochastic neighborhood embedding (tSNE) [Maaten08]_ has been
    proposed for visualizing complex data by [Amir13]_. Here, by default, we use the implementation of *scikit-learn* [Pedregosa11]_.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
n_pcs: Use this many PCs. If `n_pcs==0` use `.X` if `use_rep is None`.
use_rep: Use the indicated representation. `'X'` or any key for `.obsm` is valid.
If `None`, the representation is chosen automatically:
For `.n_vars` < 50, `.X` is used, otherwise 'X_pca' is used.
If 'X_pca' is not present, it’s computed with default parameters.
perplexity: The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
Consider selecting a value between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration: Controls how tight natural clusters in the original space are in the
embedded space and how much space will be between them. For larger
values, the space between natural clusters will be larger in the
embedded space. Again, the choice of this parameter is not very
critical. If the cost function increases during initial optimization,
the early exaggeration factor or the learning rate might be too high.
learning_rate: Note that the R-package "Rtsne" uses a default of 200.
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
        random_state: Change this to use different initial states for the optimization.
If `None`, the initial state is not reproducible.
n_jobs: Number of jobs for parallel computation.
`None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.
copy: Return a copy instead of writing to `adata`.
        metric: Distance metric to calculate neighbors on.
    Returns:
        Depending on `copy`, returns or updates `adata` with the following fields.
        **X_tsne** : `adata.obsm` field tSNE coordinates of data.
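    Example:
        A minimal sketch using the MIMIC-II demo dataset:
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.tl.tsne(adata)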
"""
return sc.tl.tsne(
adata=adata,
n_pcs=n_pcs,
use_rep=use_rep,
perplexity=perplexity,
early_exaggeration=early_exaggeration,
learning_rate=learning_rate,
random_state=random_state,
n_jobs=n_jobs,
copy=copy,
metric=metric,
)
_InitPos = Literal["paga", "spectral", "random"]
def umap(
adata: AnnData,
min_dist: float = 0.5,
spread: float = 1.0,
n_components: int = 2,
maxiter: Optional[int] = None,
alpha: float = 1.0,
gamma: float = 1.0,
negative_sample_rate: int = 5,
init_pos: Union[_InitPos, np.ndarray, None] = "spectral",
random_state: AnyRandom = 0,
a: Optional[float] = None,
b: Optional[float] = None,
copy: bool = False,
method: Literal["umap", "rapids"] = "umap",
neighbors_key: Optional[str] = None,
) -> Optional[AnnData]: # pragma: no cover
"""Embed the neighborhood graph using UMAP [McInnes18]_.
UMAP (Uniform Manifold Approximation and Projection) is a manifold learning
technique suitable for visualizing high-dimensional data. Besides tending to
be faster than tSNE, it optimizes the embedding such that it best reflects
the topology of the data, which we represent throughout ehrapy using a
neighborhood graph. tSNE, by contrast, optimizes the distribution of
nearest-neighbor distances in the embedding such that these best match the
distribution of distances in the high-dimensional space. We use the
implementation of `umap-learn <https://github.com/lmcinnes/umap>`__
[McInnes18]_. For a few comparisons of UMAP with tSNE, see this `preprint
<https://doi.org/10.1101/298430>`__.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
min_dist: The effective minimum distance between embedded points. Smaller values
will result in a more clustered/clumped embedding where nearby points on
the manifold are drawn closer together, while larger values will result
on a more even dispersal of points. The value should be set relative to
the ``spread`` value, which determines the scale at which embedded
            points will be spread out. The default in the `umap-learn` package is 0.1.
spread: The effective scale of embedded points.
In combination with `min_dist` this determines how clustered/clumped the embedded points are.
n_components: The number of dimensions of the embedding.
maxiter: The number of iterations (epochs) of the optimization. Called `n_epochs` in the original UMAP.
alpha: The initial learning rate for the embedding optimization.
gamma: Weighting applied to negative samples in low dimensional embedding optimization.
Values higher than one will result in greater weight being given to negative samples.
negative_sample_rate: The number of negative edge/1-simplex samples to use per positive
edge/1-simplex sample in optimizing the low dimensional embedding.
init_pos: How to initialize the low dimensional embedding. Called `init` in the original UMAP. Options are:
* Any key for `adata.obsm`.
* 'paga': positions from :func:`~scanpy.pl.paga`.
* 'spectral': use a spectral embedding of the graph.
* 'random': assign initial embedding positions at random.
* A numpy array of initial embedding positions.
random_state: Random state for the initialization.
* If `int`, `random_state` is the seed used by the random number generator;
* If `RandomState` or `Generator`, `random_state` is the random number generator;
* If `None`, the random number generator is the `RandomState` instance used by `np.random`.
a: More specific parameters controlling the embedding.
If `None` these values are set automatically as determined by `min_dist` and `spread`.
b: More specific parameters controlling the embedding.
If `None` these values are set automatically as determined by `min_dist` and `spread`.
copy: Return a copy instead of writing to adata.
method: Use the original 'umap' implementation, or 'rapids' (experimental, GPU only)
neighbors_key: If not specified, umap looks .uns['neighbors'] for neighbors settings
and .obsp['connectivities'] for connectivities (default storage places for pp.neighbors).
If specified, umap looks .uns[neighbors_key] for neighbors settings and
.obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
Returns:
Depending on `copy`, returns or updates `adata` with the following fields.
**X_umap** : `adata.obsm` field UMAP coordinates of data.
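    Example:
        A minimal sketch using the MIMIC-II demo dataset (UMAP requires a
        precomputed neighbors graph):
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.umap(adata)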
"""
return sc.tl.umap(
adata=adata,
min_dist=min_dist,
spread=spread,
n_components=n_components,
maxiter=maxiter,
alpha=alpha,
gamma=gamma,
negative_sample_rate=negative_sample_rate,
init_pos=init_pos,
random_state=random_state,
a=a,
b=b,
copy=copy,
method=method,
neighbors_key=neighbors_key,
)
_LAYOUTS = ("fr", "drl", "kk", "grid_fr", "lgl", "rt", "rt_circular", "fa")
_Layout = Literal[_LAYOUTS] # type: ignore
def draw_graph(
adata: AnnData,
layout: _Layout = "fa",
init_pos: Union[str, bool, None] = None,
root: Optional[int] = None,
random_state: AnyRandom = 0,
n_jobs: Optional[int] = None,
adjacency: Optional[spmatrix] = None,
key_added_ext: Optional[str] = None,
neighbors_key: Optional[str] = None,
obsp: Optional[str] = None,
copy: bool = False,
**kwds,
) -> Optional[AnnData]: # pragma: no cover
"""Force-directed graph drawing [Islam11]_ [Jacomy14]_ [Chippada18]_.
.. _fa2: https://github.com/bhargavchippada/forceatlas2
.. _Force-directed graph drawing: https://en.wikipedia.org/wiki/Force-directed_graph_drawing
.. _fruchterman-reingold: http://igraph.org/python/doc/igraph.Graph-class.html#layout_fruchterman_reingold
An alternative to tSNE that often preserves the topology of the data
    better. This requires running :func:`~ehrapy.pp.neighbors` first.
The default layout ('fa', `ForceAtlas2`) [Jacomy14]_ uses the package `fa2`_
[Chippada18]_, which can be installed via `pip install fa2`.
`Force-directed graph drawing`_ describes a class of long-established
algorithms for visualizing graphs.
It has been suggested for visualizing single-cell data by [Islam11]_.
Many other layouts as implemented in igraph [Csardi06]_ are available.
Similar approaches have been used by [Zunder15]_ or [Weinreb17]_.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
layout: 'fa' (`ForceAtlas2`) or any valid `igraph layout
<http://igraph.org/c/doc/igraph-Layout.html>`__. Of particular interest
are 'fr' (Fruchterman Reingold), 'grid_fr' (Grid Fruchterman Reingold,
            faster than 'fr'), 'kk' (Kamada Kawai, slower than 'fr'), 'lgl' (Large
Graph, very fast), 'drl' (Distributed Recursive Layout, pretty fast) and
'rt' (Reingold Tilford tree layout).
init_pos: `'paga'`/`True`, `None`/`False`, or any valid 2d-`.obsm` key.
Use precomputed coordinates for initialization.
If `False`/`None` (the default), initialize randomly.
root: Root for tree layouts.
random_state: For layouts with random initialization like 'fr', change this to use
            different initial states for the optimization. If `None`, no seed is set.
n_jobs: Number of jobs for parallel computation.
`None` means using :attr:`scanpy._settings.ScanpyConfig.n_jobs`.
adjacency: Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
key_added_ext: By default, append `layout`.
neighbors_key: If not specified, draw_graph looks .obsp['connectivities'] for connectivities
(default storage place for pp.neighbors).
If specified, draw_graph looks .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
obsp: Use .obsp[obsp] as adjacency. You can't specify both `obsp` and `neighbors_key` at the same time.
copy: Whether to return a copy instead of writing to adata.
**kwds: Parameters of chosen igraph layout. See e.g. `fruchterman-reingold`_
[Fruchterman91]_. One of the most important ones is `maxiter`.
Returns:
Depending on `copy`, returns or updates `adata` with the following field.
**X_draw_graph_layout** : `adata.obsm`
Coordinates of graph layout. E.g. for layout='fa' (the default), the field is called 'X_draw_graph_fa'
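    Example:
        A minimal sketch using the MIMIC-II demo dataset (the default 'fa'
        layout assumes the optional `fa2` package is installed):
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.draw_graph(adata)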
"""
return sc.tl.draw_graph(
adata=adata,
layout=layout,
init_pos=init_pos,
root=root,
random_state=random_state,
n_jobs=n_jobs,
adjacency=adjacency,
key_added_ext=key_added_ext,
neighbors_key=neighbors_key,
obsp=obsp,
copy=copy,
**kwds,
)
def diffmap(
adata: AnnData,
n_comps: int = 15,
neighbors_key: Optional[str] = None,
random_state: AnyRandom = 0,
copy: bool = False,
) -> Optional[AnnData]: # pragma: no cover
"""Diffusion Maps [Coifman05]_ [Haghverdi15]_ [Wolf18]_.
Diffusion maps [Coifman05]_ has been proposed for visualizing single-cell
data by [Haghverdi15]_. The tool uses the adapted Gaussian kernel suggested
by [Haghverdi16]_ in the implementation of [Wolf18]_.
The width ("sigma") of the connectivity kernel is implicitly determined by
the number of neighbors used to compute the single-cell graph in
:func:`~ehrapy.pp.neighbors`. To reproduce the original implementation
using a Gaussian kernel, use `method=='gauss'` in
:func:`~ehrapy.pp.neighbors`. To use an exponential kernel, use the default
`method=='umap'`. Differences between these options shouldn't usually be dramatic.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
n_comps: The number of dimensions of the representation.
neighbors_key: If not specified, diffmap looks .uns['neighbors'] for neighbors settings
and .obsp['connectivities'], .obsp['distances'] for connectivities and
distances respectively (default storage places for pp.neighbors).
If specified, diffmap looks .uns[neighbors_key] for neighbors settings and
.obsp[.uns[neighbors_key]['connectivities_key']],
.obsp[.uns[neighbors_key]['distances_key']] for connectivities and distances respectively.
random_state: Random seed for the initialization.
copy: Whether to return a copy of the :class:`~anndata.AnnData` object.
Returns:
Depending on `copy`, returns or updates `adata` with the following fields.
`X_diffmap` : :class:`numpy.ndarray` (`adata.obsm`)
Diffusion map representation of data, which is the right eigen basis of the transition matrix with eigenvectors as columns.
`diffmap_evals` : :class:`numpy.ndarray` (`adata.uns`)
Array of size (number of eigen vectors). Eigenvalues of transition matrix.
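    Example:
        A minimal sketch using the MIMIC-II demo dataset:
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.diffmap(adata)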
"""
return sc.tl.diffmap(
adata=adata, n_comps=n_comps, neighbors_key=neighbors_key, random_state=random_state, copy=copy
)
def embedding_density(
adata: AnnData,
basis: str = "umap", # was positional before 1.4.5
groupby: Optional[str] = None,
key_added: Optional[str] = None,
    components: Optional[Union[str, Sequence[str]]] = None,
) -> None: # pragma: no cover
"""Calculate the density of observation in an embedding (per condition).
Gaussian kernel density estimation is used to calculate the density of
observations in an embedded space. This can be performed per category over a
categorical observation annotation. The cell density can be plotted using the
`sc.pl.embedding_density()` function.
Note that density values are scaled to be between 0 and 1. Thus, the
density value at each cell is only comparable to other densities in
the same condition category.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
basis: The embedding over which the density will be calculated. This embedded
            representation should be found in ``adata.obsm['X_[basis]']``.
groupby: Keys for categorical observation/cell annotation for which densities
are calculated per category. Columns with up to ten categories are accepted.
key_added: Name of the `.obs` covariate that will be added with the density estimates.
components: The embedding dimensions over which the density should be calculated.
This is limited to two components.
Returns:
Updates `adata.obs` with an additional field specified by the `key_added`
        parameter. This parameter defaults to `[basis]_density_[groupby]`,
where `[basis]` is one of `umap`, `diffmap`, `pca`, `tsne`, or `draw_graph_fa`
and `[groupby]` denotes the parameter input.
Updates `adata.uns` with an additional field `[key_added]_params`.
Example:
.. code-block:: python
import ehrapy.api as ep
adata = ep.data.mimic_2(encode=True)
ep.tl.umap(adata)
ep.tl.embedding_density(adata, basis='umap', groupby='phase')
ep.pl.embedding_density(adata, basis='umap', key='umap_density_phase', group='G1')
"""
sc.tl.embedding_density(adata=adata, basis=basis, groupby=groupby, key_added=key_added, components=components)
def leiden(
adata: AnnData,
resolution: float = 1,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
random_state: AnyRandom = 0,
key_added: str = "leiden",
adjacency: Optional[spmatrix] = None,
directed: bool = True,
use_weights: bool = True,
n_iterations: int = -1,
partition_type: Optional[Type[MutableVertexPartition]] = None,
neighbors_key: Optional[str] = None,
obsp: Optional[str] = None,
copy: bool = False,
**partition_kwargs,
) -> Optional[AnnData]: # pragma: no cover
"""Cluster observations into subgroups [Traag18]_.
Cluster observations using the Leiden algorithm [Traag18]_,
an improved version of the Louvain algorithm [Blondel08]_.
It has been proposed for single-cell analysis by [Levine15]_.
    This requires having run :func:`~ehrapy.pp.neighbors` or :func:`~ehrapy.pp.bbknn` first.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
resolution: A parameter value controlling the coarseness of the clustering. Higher values lead to more clusters.
Set to `None` if overriding `partition_type` to one that doesn’t accept a `resolution_parameter`.
restrict_to: Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain `(obs_key, list_of_categories)`.
random_state: Random seed of the initialization of the optimization.
key_added: `adata.obs` key under which to add the cluster labels.
adjacency: Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
directed: Whether to treat the graph as directed or undirected.
use_weights: If `True`, edge weights from the graph are used in the computation
(placing more emphasis on stronger edges).
n_iterations: How many iterations of the Leiden clustering algorithm to perform.
Positive values above 2 define the total number of iterations to perform,
-1 has the algorithm run until it reaches its optimal clustering.
partition_type: Type of partition to use.
Defaults to :class:`~leidenalg.RBConfigurationVertexPartition`.
For the available options, consult the documentation for
:func:`~leidenalg.find_partition`.
neighbors_key: Use neighbors connectivities as adjacency.
If not specified, leiden looks .obsp['connectivities'] for connectivities
(default storage place for pp.neighbors).
If specified, leiden looks .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
obsp: Use .obsp[obsp] as adjacency. You can't specify both `obsp` and `neighbors_key` at the same time.
copy: Whether to copy `adata` or modify it inplace.
**partition_kwargs: Any further arguments to pass to `~leidenalg.find_partition`
(which in turn passes arguments to the `partition_type`).
Returns:
`adata.obs[key_added]`
Array of dim (number of samples) that stores the subgroup id (`'0'`, `'1'`, ...) for each cell.
`adata.uns['leiden']['params']`
A dict with the values for the parameters `resolution`, `random_state`, and `n_iterations`.
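    Example:
        A minimal sketch using the MIMIC-II demo dataset:
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.leiden(adata, resolution=1.0, key_added="leiden")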
"""
return sc.tl.leiden(
adata=adata,
resolution=resolution,
restrict_to=restrict_to,
random_state=random_state,
key_added=key_added,
adjacency=adjacency,
directed=directed,
use_weights=use_weights,
n_iterations=n_iterations,
partition_type=partition_type,
neighbors_key=neighbors_key,
obsp=obsp,
copy=copy,
**partition_kwargs,
)
def louvain(
adata: AnnData,
resolution: Optional[float] = None,
random_state: AnyRandom = 0,
restrict_to: Optional[Tuple[str, Sequence[str]]] = None,
key_added: str = "louvain",
adjacency: Optional[spmatrix] = None,
flavor: Literal["vtraag", "igraph", "rapids"] = "vtraag",
directed: bool = True,
use_weights: bool = False,
partition_type: Optional[Type[MutableVertexPartition]] = None,
partition_kwargs: Mapping[str, Any] = MappingProxyType({}),
neighbors_key: Optional[str] = None,
obsp: Optional[str] = None,
copy: bool = False,
) -> Optional[AnnData]: # pragma: no cover
"""Cluster observations into subgroups [Blondel08]_ [Levine15]_ [Traag17]_.
Cluster observations using the Louvain algorithm [Blondel08]_ in the implementation of [Traag17]_.
The Louvain algorithm has been proposed for single-cell analysis by [Levine15]_.
    This requires having run :func:`~ehrapy.pp.neighbors` or
:func:`~ehrapy.pp.bbknn` first, or explicitly passing a ``adjacency`` matrix.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
resolution: For the default flavor (``'vtraag'``), you can provide a resolution
(higher resolution means finding more and smaller clusters),
which defaults to 1.0. See “Time as a resolution parameter” in [Lambiotte09]_.
random_state: Random seed of the initialization of the optimization.
restrict_to: Restrict the clustering to the categories within the key for sample
annotation, tuple needs to contain ``(obs_key, list_of_categories)``.
key_added: Key under which to add the cluster labels. (default: ``'louvain'``)
adjacency: Sparse adjacency matrix of the graph, defaults to neighbors connectivities.
        flavor: Choose between two packages for computing the clustering.
``'vtraag'`` is much more powerful, and the default.
directed: Interpret the ``adjacency`` matrix as directed graph?
use_weights: Use weights from knn graph.
partition_type: Type of partition to use. Only a valid argument if ``flavor`` is ``'vtraag'``.
partition_kwargs: Key word arguments to pass to partitioning, if ``vtraag`` method is being used.
neighbors_key: Use neighbors connectivities as adjacency.
If not specified, louvain looks .obsp['connectivities'] for connectivities
(default storage place for pp.neighbors).
If specified, louvain looks .obsp[.uns[neighbors_key]['connectivities_key']] for connectivities.
obsp: Use .obsp[obsp] as adjacency. You can't specify both `obsp` and `neighbors_key` at the same time.
copy: Whether to copy `adata` or modify it inplace.
Returns:
By default (``copy=False``), updates ``adata`` with the following fields:
``adata.obs['louvain']`` (:class:`pandas.Series`, dtype ``category``)
Array of dim (number of samples) that stores the subgroup id (``'0'``, ``'1'``, ...) for each observation.
:class:`~anndata.AnnData`
When ``copy=True`` is set, a copy of ``adata`` with those fields is returned.
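    Example:
        A minimal sketch using the MIMIC-II demo dataset (assumes the optional
        `louvain` package required by the default ``'vtraag'`` flavor is installed):
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.louvain(adata, resolution=1.0)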
"""
return sc.tl.louvain(
adata=adata,
resolution=resolution,
random_state=random_state,
restrict_to=restrict_to,
key_added=key_added,
adjacency=adjacency,
flavor=flavor,
directed=directed,
use_weights=use_weights,
partition_type=partition_type,
partition_kwargs=partition_kwargs,
neighbors_key=neighbors_key,
obsp=obsp,
copy=copy,
)
def dendrogram(
adata: AnnData,
groupby: str,
n_pcs: Optional[int] = None,
use_rep: Optional[str] = None,
var_names: Optional[Sequence[str]] = None,
cor_method: str = "pearson",
linkage_method: str = "complete",
optimal_ordering: bool = False,
key_added: Optional[str] = None,
inplace: bool = True,
) -> Optional[Dict[str, Any]]: # pragma: no cover
"""Computes a hierarchical clustering for the given `groupby` categories.
By default, the PCA representation is used unless `.X` has less than 50 variables.
Alternatively, a list of `var_names` (e.g. genes) can be given.
Average values of either `var_names` or components are used to compute a correlation matrix.
The hierarchical clustering can be visualized using
:func:`ehrapy.pl.dendrogram` or multiple other visualizations that can
include a dendrogram: :func:`~ehrapy.pl.matrixplot`,
:func:`~ehrapy.pl.heatmap`, :func:`~ehrapy.pl.dotplot`,
and :func:`~ehrapy.pl.stacked_violin`.
.. note::
The computation of the hierarchical clustering is based on predefined
        groups and not per observation. The correlation matrix is computed by
        default using Pearson correlation, but other methods are available.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
groupby: Key to group by
n_pcs: Use this many PCs. If `n_pcs==0` use `.X` if `use_rep is None`.
use_rep: Use the indicated representation. `'X'` or any key for `.obsm` is valid.
If `None`, the representation is chosen automatically:
For `.n_vars` < 50, `.X` is used, otherwise 'X_pca' is used.
If 'X_pca' is not present, it’s computed with default parameters.
var_names: List of var_names to use for computing the hierarchical clustering.
            If `var_names` is given, then `use_rep` and `n_pcs` are ignored.
cor_method: correlation method to use.
Options are 'pearson', 'kendall', and 'spearman'
linkage_method: linkage method to use. See :func:`scipy.cluster.hierarchy.linkage` for more information.
optimal_ordering: Same as the optimal_ordering argument of :func:`scipy.cluster.hierarchy.linkage`
which reorders the linkage matrix so that the distance between successive leaves is minimal.
key_added: By default, the dendrogram information is added to
`.uns[f'dendrogram_{{groupby}}']`.
Notice that the `groupby` information is added to the dendrogram.
inplace: If `True`, adds dendrogram information to `adata.uns[key_added]`,
else this function returns the information.
Returns:
If `inplace=False`, returns dendrogram information, else `adata.uns[key_added]` is updated with it.
Example:
.. code-block:: python
import ehrapy.api as ep
adata = ep.data.mimic_2(encode=True)
ep.tl.dendrogram(adata, groupby='service_unit')
ep.pl.dendrogram(adata)
"""
return sc.tl.dendrogram(
adata=adata,
groupby=groupby,
n_pcs=n_pcs,
use_rep=use_rep,
var_names=var_names,
use_raw=False,
cor_method=cor_method,
linkage_method=linkage_method,
optimal_ordering=optimal_ordering,
key_added=key_added,
inplace=inplace,
)
def dpt(
adata: AnnData,
n_dcs: int = 10,
n_branchings: int = 0,
min_group_size: float = 0.01,
allow_kendall_tau_shift: bool = True,
neighbors_key: Optional[str] = None,
copy: bool = False,
) -> Optional[AnnData]: # pragma: no cover
"""Infer progression of observations through geodesic distance along the graph [Haghverdi16]_ [Wolf19]_.
Reconstruct the progression of a biological process from snapshot
data. `Diffusion Pseudotime` has been introduced by [Haghverdi16]_ and
implemented within Scanpy [Wolf18]_. Here, we use a further developed
version, which is able to deal with disconnected graphs [Wolf19]_ and can
be run in a `hierarchical` mode by setting the parameter `n_branchings>1`.
We recommend, however, to only use :func:`~ehrapy.tl.dpt` for computing pseudotime (`n_branchings=0`) and
to detect branchings via :func:`~scanpy.tl.paga`. For pseudotime, you need
to annotate your data with a root cell. For instance `adata.uns['iroot'] = np.flatnonzero(adata.obs['cell_types'] == 'Stem')[0]`
    This requires running :func:`~ehrapy.pp.neighbors` first. In order to
    reproduce the original implementation of DPT, use `method=='gauss'` there.
    Using the default `method=='umap'` only leads to minor quantitative differences, though.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
n_dcs: The number of diffusion components to use.
n_branchings: Number of branchings to detect.
min_group_size: During recursive splitting of branches ('dpt groups') for `n_branchings`
> 1, do not consider groups that contain less than `min_group_size` data
points. If a float, `min_group_size` refers to a fraction of the total number of data points.
allow_kendall_tau_shift: If a very small branch is detected upon splitting, shift away from
maximum correlation in Kendall tau criterion of [Haghverdi16]_ to stabilize the splitting.
neighbors_key: If not specified, dpt looks `.uns['neighbors']` for neighbors settings
and `.obsp['connectivities']`, `.obsp['distances']` for connectivities and
distances respectively (default storage places for pp.neighbors).
If specified, dpt looks .uns[neighbors_key] for neighbors settings and
`.obsp[.uns[neighbors_key]['connectivities_key']]`,
`.obsp[.uns[neighbors_key]['distances_key']]` for connectivities and distances respectively.
copy: Copy instance before computation and return a copy. Otherwise, perform computation in place and return `None`.
Returns:
Depending on `copy`, returns or updates `adata` with the following fields.
If `n_branchings==0`, no field `dpt_groups` will be written.
* `dpt_pseudotime` : :class:`pandas.Series` (`adata.obs`, dtype `float`)
Array of dim (number of samples) that stores the pseudotime of each
observation, that is, the DPT distance with respect to the root observation.
* `dpt_groups` : :class:`pandas.Series` (`adata.obs`, dtype `category`)
Array of dim (number of samples) that stores the subgroup id ('0', '1', ...) for each observation.
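    Example:
        A minimal sketch using the MIMIC-II demo dataset; the root observation
        chosen here is arbitrary and should be replaced by a meaningful one:
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.diffmap(adata)
            adata.uns["iroot"] = 0
            ep.tl.dpt(adata, n_branchings=0)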
"""
return sc.tl.dpt(
adata=adata,
n_dcs=n_dcs,
n_branchings=n_branchings,
min_group_size=min_group_size,
allow_kendall_tau_shift=allow_kendall_tau_shift,
neighbors_key=neighbors_key,
copy=copy,
)
def paga(
adata: AnnData,
groups: Optional[str] = None,
use_rna_velocity: bool = False,
model: Literal["v1.2", "v1.0"] = "v1.2",
neighbors_key: Optional[str] = None,
copy: bool = False,
) -> Optional[AnnData]: # pragma: no cover
"""Mapping out the coarse-grained connectivity structures of complex manifolds [Wolf19]_.
By quantifying the connectivity of partitions (groups, clusters),
partition-based graph abstraction (PAGA) generates a much
simpler abstracted graph (*PAGA graph*) of partitions, in which edge weights
    represent confidence in the presence of connections. By thresholding this
confidence in :func:`~ehrapy.pl.paga`, a much simpler representation of the
manifold data is obtained, which is nonetheless faithful to the topology of
the manifold.
The confidence should be interpreted as the ratio of the actual versus the
expected value of connections under the null model of randomly connecting
partitions. We do not provide a p-value as this null model does not
precisely capture what one would consider "connected" in real data, hence it
strongly overestimates the expected value. See an extensive discussion of this in [Wolf19]_.
.. note::
Note that you can use the result of :func:`~ehrapy.pl.paga` in
:func:`~ehrapy.tl.umap` and :func:`~ehrapy.tl.draw_graph` via
`init_pos='paga'` to get embeddings that are typically more faithful to the global topology.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
groups: Key for categorical in `adata.obs`. You can pass your predefined groups
by choosing any categorical annotation of observations. Default:
The first present key of `'leiden'` or `'louvain'`.
model: The PAGA connectivity model.
neighbors_key: If not specified, paga looks `.uns['neighbors']` for neighbors settings
and `.obsp['connectivities']`, `.obsp['distances']` for connectivities and
distances respectively (default storage places for `pp.neighbors`).
If specified, paga looks `.uns[neighbors_key]` for neighbors settings and
`.obsp[.uns[neighbors_key]['connectivities_key']]`,
`.obsp[.uns[neighbors_key]['distances_key']]` for connectivities and distances respectively.
copy: Copy `adata` before computation and return a copy. Otherwise, perform computation in place and return `None`.
Returns:
**connectivities** : :class:`numpy.ndarray` (adata.uns['connectivities'])
The full adjacency matrix of the abstracted graph, weights correspond to confidence in the connectivities of partitions.
**connectivities_tree** : :class:`scipy.sparse.csr_matrix` (adata.uns['connectivities_tree'])
The adjacency matrix of the tree-like subgraph that best explains the topology.
Notes:
Together with a random walk-based distance measure (e.g. :func:`ehrapy.tl.dpt`)
this generates a partial coordinatization of data useful for exploring and explaining its variation.
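    Example:
        A minimal sketch using the MIMIC-II demo dataset:
        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.pp.neighbors(adata)
            ep.tl.leiden(adata)
            ep.tl.paga(adata, groups="leiden")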
"""
return sc.tl.paga(
adata=adata,
groups=groups,
use_rna_velocity=use_rna_velocity,
model=model,
neighbors_key=neighbors_key,
copy=copy,
)
def ingest(
adata: AnnData,
adata_ref: AnnData,
obs: Optional[Union[str, Iterable[str]]] = None,
embedding_method: Union[str, Iterable[str]] = ("umap", "pca"),
labeling_method: str = "knn",
neighbors_key: Optional[str] = None,
inplace: bool = True,
**kwargs,
) -> Optional[AnnData]: # pragma: no cover
"""Map labels and embeddings from reference data to new data.
Integrates embeddings and annotations of an `adata` with a reference dataset
`adata_ref` through projecting on a PCA (or alternate model) that has been fitted on the reference data.
The function uses a knn classifier for mapping labels and the UMAP package [McInnes18]_ for mapping the embeddings.
.. note::
We refer to this *asymmetric* dataset integration as *ingesting*
annotations from reference data to new data. This is different from
learning a joint representation that integrates both datasets in an
unbiased way, as CCA (e.g. in Seurat) or a conditional VAE (e.g. in
scVI) would do.
You need to run :func:`~ehrapy.pp.neighbors` on `adata_ref` before passing it.
Args:
        adata: :class:`~anndata.AnnData` object containing all observations.
adata_ref: The annotated data matrix of shape `n_obs` × `n_vars`. Rows correspond to observations and columns to features.
Variables (`n_vars` and `var_names`) of `adata_ref` should be the same as in `adata`.
This is the dataset with labels and embeddings which need to be mapped to `adata`.
obs: Labels' keys in `adata_ref.obs` which need to be mapped to `adata.obs` (inferred for observation of `adata`).
embedding_method: Embeddings in `adata_ref` which need to be mapped to `adata`. The only supported values are 'umap' and 'pca'.
labeling_method: The method to map labels in `adata_ref.obs` to `adata.obs`. The only supported value is 'knn'.
neighbors_key: If not specified, ingest looks adata_ref.uns['neighbors'] for neighbors settings and adata_ref.obsp['distances'] for
distances (default storage places for pp.neighbors). If specified, ingest looks adata_ref.uns[neighbors_key] for
neighbors settings and adata_ref.obsp[adata_ref.uns[neighbors_key]['distances_key']] for distances.
inplace: Only works if `return_joint=False`.
Add labels and embeddings to the passed `adata` (if `True`) or return a copy of `adata` with mapped embeddings and labels.
**kwargs: Further keyword arguments for the Neighbor calculation
Returns:
* if `inplace=False` returns a copy of `adata` with mapped embeddings and labels in `obsm` and `obs` correspondingly
* if `inplace=True` returns `None` and updates `adata.obsm` and `adata.obs` with mapped embeddings and labels
Example:
.. code-block:: python
import ehrapy.api as ep
ep.pp.neighbors(adata_ref)
ep.tl.umap(adata_ref)
ep.tl.ingest(adata, adata_ref, obs="service_unit")
"""
return sc.tl.ingest(
adata=adata,
adata_ref=adata_ref,
obs=obs,
embedding_method=embedding_method,
labeling_method=labeling_method,
neighbors_key=neighbors_key,
inplace=inplace,
**kwargs,
)
_rank_features_groups_method = Optional[Literal["logreg", "t-test", "wilcoxon", "t-test_overestim_var"]]
_corr_method = Literal["benjamini-hochberg", "bonferroni"]
def rank_features_groups(
adata: AnnData,
groupby: str,
groups: Union[Literal["all"], Iterable[str]] = "all",
reference: str = "rest",
n_features: Optional[int] = None,
rankby_abs: bool = False,
pts: bool = False,
key_added: Optional[str] = "rank_features_groups",
copy: bool = False,
method: _rank_features_groups_method = None,
corr_method: _corr_method = "benjamini-hochberg",
tie_correct: bool = False,
layer: Optional[str] = None,
**kwds,
) -> None: # pragma: no cover
"""Rank features for characterizing groups.
Expects logarithmized data.
Args:
adata: Annotated data matrix.
groupby: The key of the observations grouping to consider.
groups: Subset of groups, e.g. [`'g1'`, `'g2'`, `'g3'`], to which comparison
shall be restricted, or `'all'` (default), for all groups.
        reference: If `'rest'`, compare each group to the union of the rest of the groups.
If a group identifier, compare with respect to this group.
n_features: The number of features that appear in the returned tables. Defaults to all features.
rankby_abs: Rank genes by the absolute value of the score, not by the score.
The returned scores are never the absolute values.
pts: Compute the fraction of observations containing the features.
key_added: The key in `adata.uns` information is saved to.
copy: Whether to return a copy of the AnnData object.
method: The default method is `'t-test'`,
`'t-test_overestim_var'` overestimates variance of each group,
`'wilcoxon'` uses Wilcoxon rank-sum,
`'logreg'` uses logistic regression.
corr_method: p-value correction method.
Used only for `'t-test'`, `'t-test_overestim_var'`, and `'wilcoxon'`.
tie_correct: Use tie correction for `'wilcoxon'` scores. Used only for `'wilcoxon'`.
layer: Key from `adata.layers` whose value will be used to perform tests on.
**kwds: Are passed to test methods. Currently this affects only parameters that
are passed to :class:`sklearn.linear_model.LogisticRegression`.
For instance, you can pass `penalty='l1'` to try to come up with a
minimal set of genes that are good predictors (sparse solution meaning few non-zero fitted coefficients).
Returns:
*names*: structured `np.ndarray` (`.uns['rank_features_groups']`)
Structured array to be indexed by group id storing the gene
names. Ordered according to scores.
*scores*: structured `np.ndarray` (`.uns['rank_features_groups']`)
Structured array to be indexed by group id storing the z-score
underlying the computation of a p-value for each gene for each group.
Ordered according to scores.
*logfoldchanges*: structured `np.ndarray` (`.uns['rank_features_groups']`)
Structured array to be indexed by group id storing the log2
fold change for each gene for each group. Ordered according to scores.
Only provided if method is 't-test' like.
Note: this is an approximation calculated from mean-log values.
*pvals*: structured `np.ndarray` (`.uns['rank_features_groups']`)
p-values.
*pvals_adj* : structured `np.ndarray` (`.uns['rank_features_groups']`)
Corrected p-values.
*pts*: `pandas.DataFrame` (`.uns['rank_features_groups']`)
Fraction of cells expressing the genes for each group.
*pts_rest*: `pandas.DataFrame` (`.uns['rank_features_groups']`)
Only if `reference` is set to `'rest'`.
Fraction of observations from the union of the rest of each group containing the features.
Example:
.. code-block:: python
import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
ep.tl.rank_features_groups(adata, "service_unit")
ep.pl.rank_features_groups(adata)
"""
return sc.tl.rank_genes_groups(
adata=adata,
groupby=groupby,
use_raw=False,
groups=groups,
reference=reference,
n_genes=n_features,
rankby_abs=rankby_abs,
pts=pts,
key_added=key_added,
copy=copy,
method=method,
corr_method=corr_method,
tie_correct=tie_correct,
layer=layer,
**kwds,
)
def filter_rank_features_groups(
adata: AnnData,
key="rank_features_groups",
groupby=None,
key_added="rank_features_groups_filtered",
min_in_group_fraction=0.25,
min_fold_change=1,
max_out_group_fraction=0.5,
) -> None: # pragma: no cover
"""Filters out features based on fold change and fraction of features containing the feature within and outside the `groupby` categories.
See :func:`~ehrapy.tl.rank_features_groups`.
Results are stored in `adata.uns[key_added]`
    (default: 'rank_features_groups_filtered').
    To preserve the original structure of adata.uns['rank_features_groups'],
    filtered features are set to `NaN`.
Args:
adata: Annotated data matrix.
key: Key previously added by :func:`~ehrapy.tl.rank_features_groups`
groupby: The key of the observations grouping to consider.
key_added: The key in `adata.uns` information is saved to.
min_in_group_fraction: Minimum in group fraction (default: 0.25).
        min_fold_change: Minimum fold change (default: 1).
max_out_group_fraction: Maximum out group fraction (default: 0.5).
Returns:
Same output as :func:`ehrapy.tl.rank_features_groups` but with filtered feature names set to `nan`
Example:
.. code-block:: python
import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
ep.tl.rank_features_groups(adata, "service_unit")
ep.pl.rank_features_groups(adata)
"""
return sc.tl.filter_rank_genes_groups(
adata=adata,
key=key,
groupby=groupby,
use_raw=False,
key_added=key_added,
min_in_group_fraction=min_in_group_fraction,
min_fold_change=min_fold_change,
max_out_group_fraction=max_out_group_fraction,
)
_marker_feature_overlap_methods = Literal["overlap_count", "overlap_coef", "jaccard"]
def marker_feature_overlap(
adata: AnnData,
reference_markers: Union[Dict[str, set], Dict[str, list]],
*,
key: str = "rank_features_groups",
method: _marker_feature_overlap_methods = "overlap_count",
normalize: Optional[Literal["reference", "data"]] = None,
top_n_markers: Optional[int] = None,
adj_pval_threshold: Optional[float] = None,
key_added: str = "feature_overlap",
inplace: bool = False,
): # pragma: no cover
"""Calculate an overlap score between data-deriven features and provided marker features.
Marker feature overlap scores can be quoted as overlap counts, overlap
coefficients, or jaccard indices. The method returns a pandas dataframe
which can be used to annotate clusters based on feature overlaps.
Args:
adata: Annotated data matrix.
reference_markers: A marker gene dictionary object. Keys should be strings with the
cell identity name and values are sets or lists of strings which match format of `adata.var_name`.
key: The key in `adata.uns` where the rank_features_groups output is stored (default: rank_features_groups).
method: Method to calculate marker gene overlap. `'overlap_count'` uses the
intersection of the feature set, `'overlap_coef'` uses the overlap
coefficient, and `'jaccard'` uses the Jaccard index (default: `overlap_count`).
normalize: Normalization option for the feature overlap output. This parameter
can only be set when `method` is set to `'overlap_count'`. `'reference'`
normalizes the data by the total number of marker features given in the
reference annotation per group. `'data'` normalizes the data by the
total number of marker genes used for each cluster.
top_n_markers: The number of top data-derived marker genes to use. By default the top
100 marker features are used. If `adj_pval_threshold` is set along with
`top_n_markers`, then `adj_pval_threshold` is ignored.
adj_pval_threshold: A significance threshold on the adjusted p-values to select marker features.
This can only be used when adjusted p-values are calculated by `ep.tl.rank_features_groups`.
If `adj_pval_threshold` is set along with `top_n_markers`, then `adj_pval_threshold` is ignored.
key_added: Name of the `.uns` field that will contain the marker overlap scores.
inplace: Return a marker gene dataframe or store it inplace in `adata.uns`.
Returns:
A pandas dataframe with the marker gene overlap scores if `inplace=False`.
For `inplace=True` `adata.uns` is updated with an additional field
        specified by the `key_added` parameter (default: 'feature_overlap').
Example:
        A minimal sketch; the reference marker dictionary below is hypothetical
        and should be replaced by domain-specific marker features:
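        .. code-block:: python
            import ehrapy.api as ep
            adata = ep.data.mimic_2(encode=True)
            ep.tl.rank_features_groups(adata, "service_unit")
            reference_markers = {"group_a": {"feature_1", "feature_2"}}
            overlap_df = ep.tl.marker_feature_overlap(adata, reference_markers)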
"""
return sc.tl.marker_gene_overlap(
adata=adata,
reference_markers=reference_markers,
key=key,
method=method,
normalize=normalize,
top_n_markers=top_n_markers,
adj_pval_threshold=adj_pval_threshold,
key_added=key_added,
inplace=inplace,
)
|
# Print a right-aligned staircase of '#' characters of height n.
n = int(input())
for i in range(1, n + 1):
    print(("#" * i).rjust(n))
|
"""Prepare test environment and provide useful fixtures.
Set all environment variables needed and provide some fixture useful for different tests in this package.
"""
import os
import shutil
import pytest
from _pytest.fixtures import FixtureRequest
from dynaconf import settings
from jobs.models import Base
def get_test_data_folder() -> str:
"""Return absolute path to test data folder."""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
def get_dags_folder() -> str:
"""Return absolute path to test dags folder."""
return os.path.join(get_test_data_folder(), 'dags')
def get_sync_results_folder() -> str:
"""Return absolute path to sync-results folder."""
return os.path.join(get_test_data_folder(), 'sync-results')
def get_test_job_folder() -> str:
"""Return path to specific job folder."""
return os.path.join(get_test_data_folder(), 'jb-12345')
os.environ["ENV_FOR_DYNACONF"] = "unittest"
os.environ["OEO_OPENEO_VERSION"] = "v1.0"
os.environ["OEO_AIRFLOW_HOST"] = "http://airflow-webserver:8080"
os.environ["OEO_AIRFLOW_OUTPUT"] = get_test_data_folder()
os.environ["OEO_AIRFLOW_DAGS"] = get_dags_folder()
os.environ["OEO_SYNC_DEL_DELAY"] = "5"
os.environ["OEO_SYNC_RESULTS_FOLDER"] = get_sync_results_folder()
os.environ["OEO_CSW_SERVER"] = "http://localhost:8000"
os.environ["OEO_JOB_FOLDER"] = get_test_job_folder()
os.environ["OEO_WEKEO_STORAGE"] = "/usr/local/airflow/wekeo_storage"
@pytest.fixture()
def dag_folder(request: FixtureRequest) -> None:
"""Create dag folder and add finalizer to remove it again after running the test."""
folder = get_dags_folder()
if not os.path.isdir(folder):
os.mkdir(folder)
def fin() -> None:
shutil.rmtree(folder)
request.addfinalizer(fin)
@pytest.fixture(scope='session')
def model_base() -> Base:
"""Return database model Base."""
return Base
@pytest.fixture()
def sync_result(request: FixtureRequest) -> None:
"""Create sync-results and add finalizer to remove the folders after running the test."""
# For sync job test
if not os.path.isdir(settings.SYNC_RESULTS_FOLDER):
os.makedirs(settings.SYNC_RESULTS_FOLDER)
def fin() -> None:
shutil.rmtree(settings.SYNC_RESULTS_FOLDER)
request.addfinalizer(fin)
@pytest.fixture()
def job_folder(request: FixtureRequest) -> None:
"""Create job folder and add finalizer to remove the folders after running the test."""
if not os.path.isdir(settings.JOB_FOLDER):
os.makedirs(settings.JOB_FOLDER)
def fin() -> None:
shutil.rmtree(settings.JOB_FOLDER)
request.addfinalizer(fin)
@pytest.fixture()
def job_output(request: FixtureRequest) -> None:
"""Create job folder with output and add finalizer to remove the folders after running the test."""
job_results = os.path.join(settings.JOB_FOLDER, "result")
if not os.path.isdir(job_results):
os.makedirs(job_results)
# Create empty file (mock job output)
open(os.path.join(job_results, "sample-output.tif"), 'w').close()
shutil.copyfile(
os.path.join(get_test_data_folder(), "results_metadata.json"),
os.path.join(job_results, "results_metadata.json")
)
def fin() -> None:
shutil.rmtree(settings.JOB_FOLDER)
request.addfinalizer(fin)
|
##########################################################################
#
# Copyright (c) 2013-2014, John Haddon. All rights reserved.
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreScene
import Gaffer
import GafferUI
import GafferOSL
import imath
import functools
_primitiveVariableNamesOptions = {
"P" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Point ),
"Pref" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Point ),
"N" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Normal ),
"velocity" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Vector ),
"uv" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.UV ),
"width" : IECore.FloatData(),
"Cs" : IECore.Color3fData(),
"customInt" : IECore.IntData(),
"customFloat" : IECore.FloatData(),
"customVector" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Vector ),
"customNormal" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Normal ),
"customPoint" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.Point ),
"customUV" : IECore.V3fData( imath.V3f(0), IECore.GeometricData.Interpretation.UV ),
"customColor" : IECore.Color3fData(),
"customMatrix" : IECore.M44fData(),
"customString" : IECore.StringData(),
"closure" : None,
}
##########################################################################
# _PrimitiveVariablesFooter
##########################################################################
class _PrimitiveVariablesFooter( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
GafferUI.PlugValueWidget.__init__( self, row, plug )
with row :
GafferUI.Spacer( imath.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) )
menuButton = GafferUI.MenuButton(
image = "plus.png",
hasFrame = False,
menu = GafferUI.Menu(
Gaffer.WeakMethod( self.__menuDefinition ),
title = "Add Input"
),
toolTip = "Add Input"
)
menuButton.setEnabled( not Gaffer.MetadataAlgo.readOnly( plug ) )
GafferUI.Spacer( imath.V2i( 1 ), imath.V2i( 999999, 1 ), parenting = { "expand" : True } )
def _updateFromPlug( self ) :
self.setEnabled( self._editable() )
def __menuDefinition( self ) :
result = IECore.MenuDefinition()
usedNames = set()
for p in self.getPlug().children():
# TODO - this method for checking if a plug variesWithContext should probably live in PlugAlgo
# ( it's based on Switch::variesWithContext )
sourcePlug = p["name"].source()
			variesWithContext = sourcePlug.direction() == Gaffer.Plug.Direction.Out and isinstance( sourcePlug.node(), Gaffer.ComputeNode )
if not variesWithContext:
usedNames.add( p["name"].getValue() )
categories = { "Standard" : [], "Custom" : [], "Advanced" : [] }
for label, defaultData in sorted( _primitiveVariableNamesOptions.items() ):
if label.startswith( "custom" ):
primVarName = label
if primVarName in usedNames:
suffix = 2
while True:
primVarName = label + str( suffix )
						if primVarName not in usedNames:
break
suffix += 1
categories["Custom"].append( ( label[6:], primVarName, defaultData ) )
elif label == "closure":
categories["Advanced"].append( ( label, label, defaultData ) )
else:
if label in usedNames:
continue
categories["Standard"].append( ( label, label, defaultData ) )
for category in [ "Standard", "Custom", "Advanced" ]:
for ( menuLabel, primVarName, defaultData ) in categories[category]:
result.append(
"/" + category + "/" + menuLabel,
{
"command" : functools.partial( Gaffer.WeakMethod( self.__addPlug ), primVarName, defaultData ),
}
)
return result
def __addPlug( self, name, defaultData ) :
		if defaultData is None:
plugName = "closure"
name = ""
valuePlug = GafferOSL.ClosurePlug( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
else:
plugName = "primitiveVariable"
valuePlug = Gaffer.PlugAlgo.createPlugFromData( "value", Gaffer.Plug.Direction.In, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultData )
plug = Gaffer.NameValuePlug( name, valuePlug, True, plugName )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().addChild( plug )
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferOSL.OSLObject,
"description",
"""
Executes OSL shaders to perform object processing. Use the shaders from
the OSL/ObjectProcessing menu to read primitive variables from the input
object and then write primitive variables back to it.
""",
"plugAdderOptions", IECore.CompoundData( _primitiveVariableNamesOptions ),
plugs = {
"primitiveVariables" : [
"description",
"""
			Define primitive variables to output by adding child plugs and connecting
corresponding OSL shaders. Supported plug types are :
- FloatPlug
- IntPlug
- ColorPlug
- V3fPlug ( outputting vector, normal or point )
- M44fPlug
- StringPlug
If you want to add multiple outputs at once, you can also add a closure plug,
which can accept a connection from an OSLCode with a combined output closure.
""",
"layout:customWidget:footer:widgetType", "GafferOSLUI.OSLObjectUI._PrimitiveVariablesFooter",
"layout:customWidget:footer:index", -1,
"nodule:type", "GafferUI::CompoundNodule",
"noduleLayout:section", "left",
"noduleLayout:spacing", 0.2,
"plugValueWidget:type", "GafferUI.LayoutPlugValueWidget",
# Add + button for showing and hiding parameters in the GraphEditor
"noduleLayout:customGadget:addButton:gadgetType", "GafferOSLUI.OSLObjectUI.PlugAdder",
],
"primitiveVariables.*" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"nodule:type", "GafferUI::CompoundNodule",
"nameValuePlugPlugValueWidget:ignoreNamePlug", lambda plug : isinstance( plug["value"], GafferOSL.ClosurePlug ),
],
"primitiveVariables.*.name" : [
"nodule:type", "",
],
"primitiveVariables.*.enabled" : [
"nodule:type", "",
],
"primitiveVariables.*.value" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"noduleLayout:section", "left",
"nodule:type", "GafferUI::StandardNodule",
"noduleLayout:label", lambda plug : plug.parent().getName() if plug.typeId() == GafferOSL.ClosurePlug.staticTypeId() else plug.parent()["name"].getValue(),
"ui:visibleDimensions", lambda plug : 2 if hasattr( plug, "interpretation" ) and plug.interpretation() == IECore.GeometricData.Interpretation.UV else None,
],
"interpolation" : [
"description",
"""
The interpolation type of the primitive variables created by this node.
For instance, Uniform interpolation means that the shader is run once per face on a mesh, allowing it to output primitive variables with a value per face.
All non-constant input primitive variables are resampled to match the selected interpolation so that they can be accessed from the shader.
""",
"preset:Uniform", IECoreScene.PrimitiveVariable.Interpolation.Uniform,
"preset:Vertex", IECoreScene.PrimitiveVariable.Interpolation.Vertex,
"preset:FaceVarying", IECoreScene.PrimitiveVariable.Interpolation.FaceVarying,
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
]
}
)
#########################################################################
# primitiveVariable plug menu
##########################################################################
def __deletePlug( plug ) :
with Gaffer.UndoScope( plug.ancestor( Gaffer.ScriptNode ) ) :
plug.parent().removeChild( plug )
def __plugPopupMenu( menuDefinition, plugValueWidget ) :
plug = plugValueWidget.getPlug()
if not isinstance( plug.node(), GafferOSL.OSLObject ):
return
relativeName = plug.relativeName( plug.node() ).split( "." )
if relativeName[0] != "primitiveVariables" or len( relativeName ) < 2:
return
primVarPlug = plug.node()["primitiveVariables"][relativeName[1]]
menuDefinition.append( "/DeleteDivider", { "divider" : True } )
menuDefinition.append(
"/Delete",
{
"command" : functools.partial( __deletePlug, primVarPlug ),
"active" : not plugValueWidget.getReadOnly() and not Gaffer.MetadataAlgo.readOnly( primVarPlug ),
}
)
GafferUI.PlugValueWidget.popupMenuSignal().connect( __plugPopupMenu, scoped = False )
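##########################################################################
# Editor's sketch (illustrative only)
##########################################################################

# __menuDefinition above derives a unique name for a new custom primitive
# variable by appending an integer suffix ("customFloat", "customFloat2",
# "customFloat3", ...). The helper below is a minimal, Gaffer-independent
# restatement of that naming scheme; `__uniqueSuffixName` is a hypothetical
# name and is not part of GafferOSL.

def __uniqueSuffixName( label, usedNames ) :

    # Return `label` unchanged if it is free, otherwise append the first
    # integer suffix (starting at 2) that yields an unused name.
    if label not in usedNames :
        return label

    suffix = 2
    while True :
        candidate = label + str( suffix )
        if candidate not in usedNames :
            return candidate
        suffix += 1

# Example : with "customFloat" and "customFloat2" already in use, the next
# custom float plug would be named "customFloat3".
assert __uniqueSuffixName( "customFloat", { "customFloat", "customFloat2" } ) == "customFloat3"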
|
# from rest_framework import serializers
# from django.contrib.auth.models import User
# from django.contrib.auth import authenticate
# from django.contrib import auth
#
# class ResetPasswordSerializer(serializers.Serializer):
# email = serializers.EmailField(min_length=2)
# class Meta:
# fields = ['email']
# class ChangePasswordSerializer(serializers.Serializer):
# model :User
#
# # old_password = serializers.CharField(required=True)
# # new_password = serializers.CharField(required=True)
# #
# def save(self):
# username = self.validated_data['username']
# password = self.validated_data['password']
# if username and password:
# user = authenticate(username=username, password=password)
# if user:
# if user.is_active:
# return user
# else:
# raise serializers.ValidationError({'user': 'user is not active'})
# else:
# raise serializers.ValidationError({'user': 'please enter valid user credentials'})
# else:
# raise serializers.ValidationError({'error': 'username and password not to be blank'})
# class resetpasswordSerializer(serializers.ModelSerializer):
# username = serializers.CharField(max_length=100)
# password = serializers.CharField(max_length=100)
# class Meta:
# model = User
# fields = '__all__'
#
# def save(self):
# username = self.validated_data['username']
# password = self.validated_data['password']
# check whether the username exists; if it does, the if condition lets it through
# if User.objects.filter(username=username).exists():
# # if your username is existing get the query of your specific username
# user = User.objects.get(username=username)
# # then set the new password for your username
# user.set_password(password)
# user.save()
# return user
# else:
# raise serializers.ValidationError({'error': 'please enter valid credentials'})
#
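# Editor's sketch (not part of the original module): a minimal, working version
# of the password-reset idea in the commented-out code above. It assumes a
# configured Django project with djangorestframework installed; the class name
# `SetNewPasswordSerializer` is illustrative and kept commented out, matching
# the rest of this file.
#
# from rest_framework import serializers
# from django.contrib.auth.models import User
#
# class SetNewPasswordSerializer(serializers.Serializer):
#     username = serializers.CharField(max_length=150)
#     password = serializers.CharField(max_length=128, write_only=True)
#
#     def validate_username(self, value):
#         # Reject unknown usernames early with a field-level error
#         if not User.objects.filter(username=value).exists():
#             raise serializers.ValidationError('please enter valid credentials')
#         return value
#
#     def save(self):
#         # Look up the user and store the new password hashed
#         user = User.objects.get(username=self.validated_data['username'])
#         user.set_password(self.validated_data['password'])
#         user.save()
#         return user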
|
__copyright__ = "Copyright 2017-2019, http://radical.rutgers.edu"
__author__ = "RADICAL Team <radical@rutgers.edu>"
__license__ = "MIT"
import os
import json
import pika
import time
import threading
import radical.utils as ru
# EnTK imports
from .. import states, Task
# ------------------------------------------------------------------------------
#
class WFprocessor(object):
"""
    A WFprocessor (workflow processor) is responsible for dispatching tasks
    from the various pipelines of the workflow to the TaskManager in their
    relative order. All state updates are reflected in the AppManager, since
    we operate on a reference to the same workflow object. The WFprocessor
    also retrieves completed tasks from the TaskManager and updates the
    states of the PST (Pipeline, Stage, Task) objects accordingly.
:Arguments:
:sid: (str) session id used by the profiler and loggers
:workflow: (set) REFERENCE of the AppManager's workflow
:pending_queue: (list) queues to hold pending tasks
:completed_queue: (list) queues to hold completed tasks
:resubmit_failed: (bool) True if failed tasks should be resubmitted
:rmq_conn_params: (pika.connection.ConnectionParameters) object of
parameters necessary to connect to RabbitMQ
"""
# --------------------------------------------------------------------------
#
def __init__(self,
sid,
workflow,
pending_queue,
completed_queue,
resubmit_failed,
rmq_conn_params):
# Mandatory arguments
self._sid = sid
self._pending_queue = pending_queue
self._completed_queue = completed_queue
self._resubmit_failed = resubmit_failed
self._rmq_conn_params = rmq_conn_params
# Assign validated workflow
self._workflow = workflow
# Create logger and profiler at their specific locations using the sid
self._path = os.getcwd() + '/' + self._sid
self._uid = ru.generate_id('wfprocessor.%(item_counter)04d',
ru.ID_CUSTOM, namespace=self._sid)
name = 'radical.entk.%s' % self._uid
self._logger = ru.Logger (name, path=self._path)
self._prof = ru.Profiler(name, path=self._path)
self._report = ru.Reporter(name)
# Defaults
self._wfp_process = None
self._enqueue_thread = None
self._dequeue_thread = None
        self._rmq_ping_interval = int(os.getenv('RMQ_PING_INTERVAL', 10))
self._logger.info('Created WFProcessor object: %s' % self._uid)
self._prof.prof('create_wfp', uid=self._uid)
# --------------------------------------------------------------------------
#
def _advance(self, obj, obj_type, new_state):
'''
transition `obj` of type `obj_type` into state `new_state`
'''
        # NOTE: this is a local operation, no queue communication is involved
        # (other than the `_advance()` in the TaskManager classes, which
        # do publish their state updates over the queue)
if obj_type == 'Task' : msg = obj.parent_stage['uid']
elif obj_type == 'Stage': msg = obj.parent_pipeline['uid']
else : msg = None
obj.state = new_state
self._prof.prof('advance', uid=obj.uid, state=obj.state, msg=msg)
self._report.ok('Update: ')
self._report.info('%s state: %s\n' % (obj.luid, obj.state))
self._logger.info('Transition %s to state %s' % (obj.uid, new_state))
# --------------------------------------------------------------------------
# Getter
#
@property
def workflow(self):
return self._workflow
# --------------------------------------------------------------------------
# Private Methods
#
def _create_workload(self):
# We iterate through all pipelines to collect tasks from
# stages that are pending scheduling. Once collected, these tasks
# will be communicated to the tmgr in bulk.
# Initial empty list to store executable tasks across different
# pipelines
workload = list()
# The executable tasks can belong to different pipelines, and
# hence different stages. Empty list to store the stages so that
# we can update the state of stages accordingly
scheduled_stages = list()
for pipe in self._workflow:
with pipe.lock:
# If Pipeline is in the final state or suspended, we
# skip processing it.
if pipe.state in states.FINAL or \
pipe.completed or \
pipe.state == states.SUSPENDED:
continue
if pipe.state == states.INITIAL:
# Set state of pipeline to SCHEDULING if it is in INITIAL
self._advance(pipe, 'Pipeline', states.SCHEDULING)
# Get the next stage of this pipeline to process
exec_stage = pipe.stages[pipe.current_stage - 1]
if not exec_stage.uid:
# TODO: Move parent uid, name assignment to assign_uid()
exec_stage.parent_pipeline['uid'] = pipe.uid
exec_stage.parent_pipeline['name'] = pipe.name
exec_stage._assign_uid(self._sid)
# If its a new stage, update its state
if exec_stage.state == states.INITIAL:
self._advance(exec_stage, 'Stage', states.SCHEDULING)
# Get all tasks of a stage in SCHEDULED state
exec_tasks = list()
if exec_stage.state == states.SCHEDULING:
exec_tasks = exec_stage.tasks
for exec_task in exec_tasks:
state = exec_task.state
if state == states.INITIAL or \
(state == states.FAILED and self._resubmit_failed):
# Set state of Tasks in current Stage
# to SCHEDULING
self._advance(exec_task, 'Task', states.SCHEDULING)
# Store the tasks from different pipelines
# into our workload list. All tasks will
# be submitted in bulk and their states
# will be updated accordingly
workload.append(exec_task)
# We store the stages since the stages the
# above tasks belong to also need to be
# updated. If its a task that failed, the
# stage is already in the correct state
if exec_task.state == states.FAILED:
continue
if exec_stage not in scheduled_stages:
scheduled_stages.append(exec_stage)
return workload, scheduled_stages
# --------------------------------------------------------------------------
#
def _execute_workload(self, workload, scheduled_stages):
# Tasks of the workload need to be converted into a dict
# as pika can send and receive only json/dict data
wl_json = json.dumps([task.to_dict() for task in workload])
# Acquire a connection+channel to the rmq server
mq_connection = pika.BlockingConnection(self._rmq_conn_params)
mq_channel = mq_connection.channel()
# Send the workload to the pending queue
mq_channel.basic_publish(exchange = '',
routing_key=self._pending_queue[0],
body=wl_json
# TODO: Make durability parameters
# as a config parameter and then
# enable the following accordingly
# properties=pika.BasicProperties(
# make message persistent
# delivery_mode = 2)
)
self._logger.debug('Workload submitted to Task Manager')
# Update the state of the tasks in the workload
for task in workload:
# Set state of Tasks in current Stage to SCHEDULED
self._advance(task, 'Task', states.SCHEDULED)
# Update the state of the stages from which tasks have
# been scheduled
if scheduled_stages:
for executable_stage in scheduled_stages:
self._advance(executable_stage, 'Stage', states.SCHEDULED)
# --------------------------------------------------------------------------
#
def _enqueue(self):
"""
**Purpose**: This is the function that is run in the enqueue thread.
This function extracts Tasks from the workflow that exists in
the WFprocessor object and pushes them to the queues in the pending_q
list.
"""
try:
self._prof.prof('enq_start', uid=self._uid)
self._logger.info('enqueue-thread started')
while not self._enqueue_thread_terminate.is_set():
workload, scheduled_stages = self._create_workload()
# If there are tasks to be executed
if workload:
self._execute_workload(workload, scheduled_stages)
self._logger.info('Enqueue thread terminated')
self._prof.prof('enq_stop', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you \
probably hit Ctrl+C), trying to cancel \
enqueuer thread gracefully...')
raise KeyboardInterrupt
except Exception:
self._logger.exception('Error in enqueue-thread')
raise
# --------------------------------------------------------------------------
#
def _update_dequeued_task(self, deq_task):
# Traverse the entire workflow to find out the correct Task
# TODO: Investigate whether we can change the DS of the
# workflow so that we don't have this expensive search
# for each task.
# First search across all pipelines
# Note: deq_task is not the same as the task that exists in this process,
# they are different objects and have different state histories.
for pipe in self._workflow:
with pipe.lock:
# Skip pipelines that have completed or are
# currently suspended
if pipe.completed or pipe.state == states.SUSPENDED:
continue
# Skip pipelines that don't match the UID
# There will be only one pipeline that matches
if pipe.uid != deq_task.parent_pipeline['uid']:
continue
self._logger.debug('Found parent pipeline: %s' %
pipe.uid)
# Next search across all stages of a matching
# pipelines
for stage in pipe.stages:
# Skip stages that don't match the UID
# There will be only one stage that matches
if stage.uid != deq_task.parent_stage['uid']:
continue
self._logger.debug('Found parent stage: %s' %
stage.uid)
# Search across all tasks of matching stage
for task in stage.tasks:
# Skip tasks that don't match the UID
# There will be only one task that matches
if task.uid != deq_task.uid:
continue
# If there is no exit code, we assume success
# We are only concerned about state of task and not
# deq_task
if not deq_task.exit_code:
task_state = states.DONE
else:
task_state = states.FAILED
if task.state == states.FAILED and \
self._resubmit_failed:
task_state = states.INITIAL
self._advance(task, 'Task', task_state)
# Found the task and processed it -- no more
# iterations needed
break
# Found the stage and processed it -- no more
# iterations needed for the current task
break
# Check if current stage has completed
# If yes, we need to (i) check for post execs to
# be executed and (ii) check if it is the last
# stage of the pipeline -- update pipeline
# state if yes.
if stage._check_stage_complete():
self._advance(stage, 'Stage', states.DONE)
# Check if the current stage has a post-exec
# that needs to be executed
if stage.post_exec:
self._execute_post_exec(pipe, stage)
# If pipeline has completed, make state
# change
if pipe.completed:
self._advance(pipe, 'Pipeline', states.DONE)
# Found the pipeline and processed it -- no more
# iterations needed for the current task
break
# --------------------------------------------------------------------------
#
def _execute_post_exec(self, pipe, stage):
try:
self._logger.info('Executing post-exec for stage %s' % stage.uid)
self._prof.prof('post_exec_start', uid=self._uid)
resumed_pipe_uids = stage.post_exec()
self._logger.info('Post-exec executed for stage %s' % stage.uid)
self._prof.prof('post_exec_stop', uid=self._uid)
except Exception:
self._logger.exception('post_exec of stage %s failed' % stage.uid)
self._prof.prof('post_exec_fail', uid=self._uid)
raise
if resumed_pipe_uids:
for r_pipe in self._workflow:
if r_pipe == pipe:
continue
with r_pipe.lock:
if r_pipe.uid in resumed_pipe_uids:
# Resumed pipelines already have the correct state,
# they just need to be synced with the AppMgr.
r_pipe._increment_stage()
if r_pipe.completed:
self._advance(r_pipe, 'Pipeline', states.DONE)
else:
self._advance(r_pipe, 'Pipeline', r_pipe.state)
if pipe.state == states.SUSPENDED:
self._advance(pipe, 'Pipeline', states.SUSPENDED)
else:
pipe._increment_stage()
if pipe.completed:
self._advance(pipe, 'Pipeline', states.DONE)
# --------------------------------------------------------------------------
#
def _dequeue(self):
"""
**Purpose**: This is the function that is run in the dequeue thread.
This function extracts Tasks from the completed queues and updates the
workflow.
"""
try:
self._prof.prof('deq_start', uid=self._uid)
self._logger.info('Dequeue thread started')
# Acquire a connection+channel to the rmq server
mq_connection = pika.BlockingConnection(self._rmq_conn_params)
mq_channel = mq_connection.channel()
last = time.time()
while not self._dequeue_thread_terminate.is_set():
method_frame, header_frame, body = mq_channel.basic_get(
queue=self._completed_queue[0])
# When there is no msg received, body is None
if not body:
continue
# Acknowledge the received message
mq_channel.basic_ack(delivery_tag=method_frame.delivery_tag)
# Create a task from the received msg
deq_task = Task()
deq_task.from_dict(json.loads(body))
self._logger.info('Got finished task %s from queue'
% (deq_task.uid))
self._update_dequeued_task(deq_task)
                # Process data events periodically so pika does not consider the connection dead
now = time.time()
if now - last >= self._rmq_ping_interval:
mq_connection.process_data_events()
last = now
self._logger.info('Terminated dequeue thread')
self._prof.prof('deq_stop', uid=self._uid)
except KeyboardInterrupt:
self._logger.exception('Execution interrupted by user (you \
probably hit Ctrl+C), trying to exit \
gracefully...')
raise KeyboardInterrupt
except Exception:
self._logger.exception('Error in dequeue-thread')
raise
finally:
try:
mq_connection.close()
except Exception as ex:
self._logger.warning('mq_connection close failed, %s' % ex)
self._logger.debug('closed mq_connection')
# --------------------------------------------------------------------------
#
# Public Methods
#
def initialize_workflow(self):
"""
**Purpose**: Initialize the PST of the workflow with a uid and type
checks
"""
try:
self._prof.prof('wf_init_start', uid=self._uid)
for p in self._workflow:
p._assign_uid(self._sid)
self._prof.prof('wf_init_stop', uid=self._uid)
except Exception:
self._logger.exception('Fatal error when initializing workflow')
raise
# --------------------------------------------------------------------------
#
def start_processor(self):
"""
**Purpose**: Method to start the wfp process. The wfp function
is not to be accessed directly. The function is started in a separate
process using this method.
"""
try:
self._logger.info('Starting WFprocessor')
self._prof.prof('wfp_start', uid=self._uid)
self._enqueue_thread_terminate = threading.Event()
self._dequeue_thread_terminate = threading.Event()
# Start dequeue thread
self._dequeue_thread = threading.Thread(target=self._dequeue,
name='dequeue-thread')
self._logger.info('Starting dequeue-thread')
self._prof.prof('starting dequeue-thread', uid=self._uid)
self._dequeue_thread.start()
# Start enqueue thread
self._enqueue_thread = threading.Thread(target=self._enqueue,
name='enqueue-thread')
self._logger.info('Starting enqueue-thread')
self._prof.prof('starting enqueue-thread', uid=self._uid)
self._enqueue_thread.start()
self._logger.info('WFprocessor started')
self._prof.prof('wfp_started', uid=self._uid)
except:
self._logger.exception('WFprocessor failed')
self.terminate_processor()
raise
# --------------------------------------------------------------------------
#
def terminate_processor(self):
"""
**Purpose**: Method to terminate the wfp process. This method is
blocking as it waits for the wfp process to terminate (aka join).
"""
try:
if self._enqueue_thread:
if not self._enqueue_thread_terminate.is_set():
self._logger.info('Terminating enqueue-thread')
self._enqueue_thread_terminate.set()
self._enqueue_thread.join()
self._enqueue_thread = None
if self._dequeue_thread:
if not self._dequeue_thread_terminate.is_set():
self._logger.info('Terminating dequeue-thread')
self._dequeue_thread_terminate.set()
self._dequeue_thread.join()
self._dequeue_thread = None
self._logger.info('WFprocessor terminated')
self._prof.prof('wfp_stop', uid=self._uid)
self._prof.close()
except:
self._logger.exception('Could not terminate wfprocessor process')
raise
# --------------------------------------------------------------------------
#
def workflow_incomplete(self):
"""
**Purpose**: Method to check if the workflow execution is incomplete.
"""
try:
for pipe in self._workflow:
with pipe.lock:
if pipe.completed:
pass
else:
return True
return False
        except Exception as ex:
self._logger.exception(
'Could not check if workflow is incomplete, error:%s' % ex)
raise
# --------------------------------------------------------------------------
#
def check_processor(self):
if self._enqueue_thread is None or self._dequeue_thread is None:
return False
if not self._enqueue_thread.is_alive() or \
not self._dequeue_thread.is_alive():
return False
return True
# ------------------------------------------------------------------------------
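# ------------------------------------------------------------------------------
# Editor's sketch (illustrative, not part of radical.entk): the WFprocessor above
# runs an enqueue thread and a dequeue thread that are started and stopped via
# threading.Event flags (see start_processor()/terminate_processor()). The
# stand-alone example below reproduces that lifecycle with a queue.Queue in
# place of RabbitMQ; all names in it are hypothetical.
# ------------------------------------------------------------------------------
if __name__ == '__main__':

    import queue

    work_q = queue.Queue()          # stands in for the RMQ "pending" queue
    done_q = queue.Queue()          # stands in for the RMQ "completed" queue
    stop_enqueue = threading.Event()
    stop_dequeue = threading.Event()

    def enqueue():
        # Push a few dummy "tasks" until asked to stop
        i = 0
        while not stop_enqueue.is_set() and i < 5:
            work_q.put('task.%04d' % i)
            i += 1
            time.sleep(0.01)

    def dequeue():
        # Drain the work queue and mark tasks as completed until asked to stop
        while not stop_dequeue.is_set():
            try:
                task = work_q.get(timeout=0.1)
            except queue.Empty:
                continue
            done_q.put((task, 'DONE'))

    t_enq = threading.Thread(target=enqueue, name='enqueue-thread')
    t_deq = threading.Thread(target=dequeue, name='dequeue-thread')
    t_enq.start()
    t_deq.start()

    time.sleep(0.2)

    # Mirror terminate_processor(): set the events, then join both threads
    stop_enqueue.set()
    stop_dequeue.set()
    t_enq.join()
    t_deq.join()

    print('completed: %d tasks' % done_q.qsize())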
|
from django.contrib import messages
from django.core.paginator import Paginator
from django.shortcuts import redirect, render
from .models import EventPost, Skill
from .utils import filter_events
from .forms import EventPostForm
from account.models import Account
def expert_event_list_view(request):
expert_skills = set(request.user.profile.skills.all())
skills = Skill.objects.all()
events = EventPost.objects.all()
matched_events = list()
# filtering events base on request
events, filtered = filter_events(events, request)
    # keep only events that share at least one skill with the expert
for event in events:
event_skills = set(event.skills.all())
count = len(expert_skills.intersection(event_skills))
if count > 0:
matched_events.append(event)
# pagination
paginator = Paginator(matched_events, 5)
page = request.GET.get("page")
jobs = paginator.get_page(page)
context = {
"page_data": jobs,
"skills": skills,
"filtered": False,
"last_page": paginator.num_pages,
}
return render(request, "events/event_list.html", context)
def event_post_view(request):
if request.method == "POST":
form = EventPostForm(request.POST)
if form.is_valid():
data = form.save(commit=False)
data.posted_by = request.user
data.save()
form.save_m2m()
return redirect("event_posts")
messages.error(
request, "There was some error while posting the event, please try again!!"
)
return redirect("event_posts")
events = request.user.event_posts.all()
form = EventPostForm()
context = {"events": events, "form": form}
return render(request, "events/event_posts.html", context)
def delete_event_post_view(request):
event_pk = request.POST.get("event_pk")
EventPost.objects.get(pk=event_pk).delete()
return redirect("event_posts")
def matching_expert_view(request, pk):
event_skills = set(EventPost.objects.get(pk=pk).skills.all())
experts = Account.objects.filter(type="expert")
matching_expert = list()
for expert in experts:
try:
expert_skills = set(expert.profile.skills.all())
        except Exception:
continue
count = len(expert_skills.intersection(event_skills))
if count > 0:
matching_expert.append(expert)
context = {"experts": matching_expert}
return render(request, "events/event_candidates.html", context)
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
# frontend settings
import os
from platform import node
PRODUCTION = False
RUNNING_TEST = False
SERVER_PORT = 9500
FRONT_ROOT_URI = 'http://%s:%s' % (node().split('.')[0], SERVER_PORT)
CENTRAL_REDIS = {
'HOST': 'localhost',
'PORT': 6379,
'TEST_PORT': 6279,
'PASSWORD': 'setpassforsafety',
}
BRAND_ID = "1000001"
BRAND_NAME = "BREUER"
TEMPLATE_PATH = ['views/templates/breuer']
#TEMPLATE_PATH = ['views/templates/vessel', 'views/templates/breuer']
DEFAULT_TEMPLATE = 'default.html'
DEFAULT_LOCALE = 'fr_FR'
SUPPORTED_LOCALES = ['fr_FR']
LOCALE_DIR = '../locale'
LOCALE_DOMAIN = 'front'
STATIC_FILES_PATH = 'static'
LOG_CONFIG_FILE = 'logging.cfg'
BUGZ_SCOUT_REPORT = {
'url': 'http://dragondollar.fogbugz.com/scoutSubmit.asp',
'user_name': 'bugwatch@dragondollar.com',
'project': 'Dragon Dollar',
'area': 'front',
}
USR_ROOT_URI = "http://localhost:8100"
SERVER_APIKEY_URI_MAP = {
'USR': os.path.join(USR_ROOT_URI,
'webservice/1.0/pub/apikey.pem'),
}
PRIVATE_KEY_PATH = 'static/keys/front_pri.key'
PUB_KEY_PATH = 'static/keys/front_pub.key'
PP_SUCCESS = "http://localhost:9500/paypal/%(id_trans)s/success"
PP_FAILURE = "http://localhost:9500/paypal/%(id_trans)s/failure"
PB_SUCCESS = "http://localhost:9500/paybox/%(id_trans)s/success"
PB_ERROR = "http://localhost:9500/paybox/%(id_trans)s/error"
PB_CANCEL = "http://localhost:9500/paybox/%(id_trans)s/cancel"
PB_WAITING = "http://localhost:9500/paybox/%(id_trans)s/waiting"
SP_SUCCESS = "http://localhost:9500/stripe/%(id_trans)s/success"
SP_FAILURE = "http://localhost:9500/stripe/%(id_trans)s/failure"
NUM_OF_RANDOM_SALES = 4
SEND_EMAILS = False
SERVICE_EMAIL = 'serviceclients@breuer.fr'
MAIL_HOST = 'smtp.gmail.com:587'
MAIL_FROM_USER = 'xxx'
MAIL_FROM_PASS = 'xxx'
ORDERS_COUNT_IN_MY_ACCOUNT = 5
ORDERS_COUNT_PER_PAGE = 10
SESSION_EXP_TIME = 1800
TIMEZONE = 'Europe/Paris'
DRAGON_FEED_CACHE_PATH = None
DRAGON_BLOG_ADDR = 'www.dragondollar.com'
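# ------------------------------------------------------------------------------
# Editor's sketch (illustrative only): a consumer of this settings module would
# typically pick PORT or TEST_PORT from CENTRAL_REDIS depending on RUNNING_TEST.
# The snippet below shows that selection with the third-party `redis` package,
# which is an assumption and not imported anywhere in this file, so it is kept
# commented out.
#
# import redis
#
# def get_redis_client():
#     port = CENTRAL_REDIS['TEST_PORT'] if RUNNING_TEST else CENTRAL_REDIS['PORT']
#     return redis.StrictRedis(host=CENTRAL_REDIS['HOST'],
#                              port=port,
#                              password=CENTRAL_REDIS['PASSWORD'])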
|
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
from django.utils.translation import ugettext as _
from iondb.rundb.plan.page_plan.abstract_step_data import AbstractStepData
from django.conf import settings
from iondb.utils import validation
from iondb.rundb.models import AnalysisArgs
from iondb.rundb.plan.page_plan.step_names import StepNames
from iondb.rundb.plan.page_plan.application_step_data import ApplicationFieldNames
from iondb.rundb.plan.page_plan.kits_step_data import KitsFieldNames
from iondb.rundb.plan.page_plan.step_helper_types import StepHelperType
import logging
logger = logging.getLogger(__name__)
class AnalysisParamsFieldNames:
# AP_DICT = "analysisParamsDict"
AP_ENTRIES = "analysisParamsEntries"
AP_DISPLAYED_NAMES = "analysisParamsNames"
AP_ENTRY_SELECTED = "analysisParamsEntrySelected"
AP_CUSTOM = "analysisParamsCustom"
AP_ENTRY_PREVIOUSLY_SELECTED = "analysisParamsEntryPreviouslySelected"
AP_ENTRY_SYSTEM_SELECTED = "analysisParamsEntrySystemSelected"
AP_ENTRY_SELECTED_VALUE = _(
"workflow.step.analysisparams.fields.analysis_params.choice.AP_ENTRY_SELECTED_VALUE"
) # "<Previous custom selection>"
AP_ENTRY_PREVIOUSLY_SELECTED_VALUE = _(
"workflow.step.analysisparams.fields.analysis_params.choice.AP_ENTRY_PREVIOUSLY_SELECTED_VALUE"
) # "<Selection before chip/kits selection change>"
AP_ENTRY_BEST_MATCH_PLAN_VALUE = _(
"workflow.step.analysisparams.fields.analysis_params.choice.AP_ENTRY_BEST_MATCH_PLAN_VALUE"
) # "System default for this plan"
AP_ENTRY_BEST_MATCH_TEMPLATE_VALUE = _(
"workflow.step.analysisparams.fields.analysis_params.choice.AP_ENTRY_BEST_MATCH_TEMPLATE_VALUE"
) # "System default for this template"
AP_BEADFIND_SELECTED = "beadFindSelected"
AP_ANALYSISARGS_SELECTED = "analysisArgsSelected"
AP_PREBASECALLER_SELECTED = "preBaseCallerSelected"
AP_CALIBRATE_SELECTED = "calibrateSelected"
AP_BASECALLER_SELECTED = "baseCallerSelected"
AP_ALIGNMENT_SELECTED = "alignmentSelected"
AP_IONSTATS_SELECTED = "ionStatsSelected"
AP_THUMBNAIL_BEADFIND_SELECTED = "thumbnailBeadFindSelected"
AP_THUMBNAIL_ANALYSISARGS_SELECTED = "thumbnailAnalysisArgsSelected"
AP_THUMBNAIL_PREBASECALLER_SELECTED = "thumbnailPreBaseCallerSelected"
AP_THUMBNAIL_CALIBRATE_SELECTED = "thumbnailCalibrateSelected"
AP_THUMBNAIL_BASECALLER_SELECTED = "thumbnailBaseCallerSelected"
AP_THUMBNAIL_ALIGNMENT_SELECTED = "thumbnailAlignmentSelected"
AP_THUMBNAIL_IONSTATS_SELECTED = "thumbnailIonStatsSelected"
APPL_PRODUCT = "applProduct"
RUN_TYPE = "runType"
APPLICATION_GROUP_NAME = "applicationGroupName"
CHIP_TYPE = "chipType"
SAMPLE_PREPARATION_KIT = "samplePreparationKit"
LIBRARY_KIT_NAME = "librarykitname"
TEMPLATE_KIT_NAME = "templatekitname"
SEQUENCE_KIT_NAME = "sequencekitname"
CATEGORIES = "categories"
class AnalysisParamsStepData(AbstractStepData):
def __init__(self, sh_type):
super(AnalysisParamsStepData, self).__init__(sh_type)
self.resourcePath = "rundb/plan/page_plan/page_plan_analysis_params.html"
self._dependsOn.append(StepNames.APPLICATION)
self._dependsOn.append(StepNames.KITS)
analysisParamsEntries = list(AnalysisArgs.objects.all().filter(active=True))
self.prepopulatedFields[
AnalysisParamsFieldNames.AP_ENTRIES
] = analysisParamsEntries
self.prepopulatedFields[AnalysisParamsFieldNames.AP_DISPLAYED_NAMES] = [
ap.description for ap in analysisParamsEntries
]
# self.prepopulatedFields[AnalysisParamsFieldNames.AP_ENTRY_BEST_MATCH] = None
self.savedObjects[AnalysisParamsFieldNames.AP_ENTRY_SELECTED] = ""
self.savedObjects[AnalysisParamsFieldNames.AP_ENTRY_PREVIOUSLY_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_BEADFIND_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_ANALYSISARGS_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_PREBASECALLER_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_CALIBRATE_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_BASECALLER_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_ALIGNMENT_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_IONSTATS_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_THUMBNAIL_BEADFIND_SELECTED] = ""
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ANALYSISARGS_SELECTED
] = ""
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_PREBASECALLER_SELECTED
] = ""
self.savedFields[AnalysisParamsFieldNames.AP_THUMBNAIL_CALIBRATE_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_THUMBNAIL_BASECALLER_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_THUMBNAIL_ALIGNMENT_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_THUMBNAIL_IONSTATS_SELECTED] = ""
self.savedFields[AnalysisParamsFieldNames.AP_CUSTOM] = "False"
self.prepopulatedFields[AnalysisParamsFieldNames.APPL_PRODUCT] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.RUN_TYPE] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.APPLICATION_GROUP_NAME] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.CATEGORIES] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.CHIP_TYPE] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.SAMPLE_PREPARATION_KIT] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.LIBRARY_KIT_NAME] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.TEMPLATE_KIT_NAME] = ""
self.prepopulatedFields[AnalysisParamsFieldNames.SEQUENCE_KIT_NAME] = ""
self.sh_type = sh_type
def getStepName(self):
return StepNames.ANALYSIS_PARAMS
def updateSavedObjectsFromSavedFields(self):
pass
def validate(self):
pass
def hasErrors(self):
"""
        This step is a section of another step. It is crucial not to advertise having errors,
        or the user will be re-directed to the analysis args' resourcePath for error correction.
Let the parent step take care of the error broadcasting.
"""
return False
def validateField(self, field_name, new_field_value):
pass
def validateField_in_section(self, field_name, new_field_value):
"""
field validation for a step that acts as a section to another step
"""
logger.debug(
"at analysis_params_step_data.validateField_in_section field_name=%s; new_field_value=%s"
% (field_name, new_field_value)
)
pass
def updateFromStep(self, updated_step):
if updated_step.getStepName() not in self._dependsOn:
return
# reset best match if key attributes on other chevrons have changed
needToRefreshSelectionList = False
if (
updated_step.getStepName() == StepNames.APPLICATION
and updated_step.savedObjects[ApplicationFieldNames.APPL_PRODUCT]
):
applProduct = updated_step.savedObjects[ApplicationFieldNames.APPL_PRODUCT]
logger.debug(
"analysis_params_step_data - APPL CHEVRON... applProduct %s; runType=%s; applicationGroupName=%s"
% (
applProduct.productCode,
updated_step.savedObjects[ApplicationFieldNames.RUN_TYPE].runType,
updated_step.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
],
)
)
if (
self.prepopulatedFields[AnalysisParamsFieldNames.RUN_TYPE]
!= updated_step.savedObjects[ApplicationFieldNames.RUN_TYPE].runType
or self.prepopulatedFields[
AnalysisParamsFieldNames.APPLICATION_GROUP_NAME
]
!= updated_step.savedFields[
ApplicationFieldNames.APPLICATION_GROUP_NAME
]
):
needToRefreshSelectionList = True
self.prepopulatedFields[AnalysisParamsFieldNames.APPL_PRODUCT] = ""
self.prepopulatedFields[
AnalysisParamsFieldNames.RUN_TYPE
] = updated_step.savedObjects[ApplicationFieldNames.RUN_TYPE].runType
self.prepopulatedFields[
AnalysisParamsFieldNames.APPLICATION_GROUP_NAME
] = updated_step.savedFields[ApplicationFieldNames.APPLICATION_GROUP_NAME]
elif updated_step.getStepName() == StepNames.KITS:
if (
self.prepopulatedFields[AnalysisParamsFieldNames.CHIP_TYPE]
!= updated_step.savedFields[KitsFieldNames.CHIP_TYPE]
or self.prepopulatedFields[
AnalysisParamsFieldNames.SAMPLE_PREPARATION_KIT
]
!= updated_step.savedFields[KitsFieldNames.SAMPLE_PREPARATION_KIT]
or self.prepopulatedFields[AnalysisParamsFieldNames.LIBRARY_KIT_NAME]
!= updated_step.savedFields[KitsFieldNames.LIBRARY_KIT_NAME]
or self.prepopulatedFields[AnalysisParamsFieldNames.TEMPLATE_KIT_NAME]
!= updated_step.savedFields[KitsFieldNames.TEMPLATE_KIT_NAME]
or self.prepopulatedFields[AnalysisParamsFieldNames.SEQUENCE_KIT_NAME]
!= updated_step.savedFields[KitsFieldNames.SEQUENCE_KIT_NAME]
):
needToRefreshSelectionList = True
self.prepopulatedFields[AnalysisParamsFieldNames.CHIP_TYPE] = (
updated_step.savedFields[KitsFieldNames.CHIP_TYPE]
if updated_step.savedFields[KitsFieldNames.CHIP_TYPE]
else ""
)
self.prepopulatedFields[
AnalysisParamsFieldNames.SAMPLE_PREPARATION_KIT
] = updated_step.savedFields[KitsFieldNames.SAMPLE_PREPARATION_KIT]
self.prepopulatedFields[
AnalysisParamsFieldNames.LIBRARY_KIT_NAME
] = updated_step.savedFields[KitsFieldNames.LIBRARY_KIT_NAME]
self.prepopulatedFields[
AnalysisParamsFieldNames.TEMPLATE_KIT_NAME
] = updated_step.savedFields[KitsFieldNames.TEMPLATE_KIT_NAME]
self.prepopulatedFields[
AnalysisParamsFieldNames.SEQUENCE_KIT_NAME
] = updated_step.savedFields[KitsFieldNames.SEQUENCE_KIT_NAME]
if needToRefreshSelectionList:
self._update_analysisParamsData_selection_list(
self.prepopulatedFields[AnalysisParamsFieldNames.CHIP_TYPE],
self.prepopulatedFields[AnalysisParamsFieldNames.SEQUENCE_KIT_NAME],
self.prepopulatedFields[AnalysisParamsFieldNames.TEMPLATE_KIT_NAME],
self.prepopulatedFields[AnalysisParamsFieldNames.LIBRARY_KIT_NAME],
self.prepopulatedFields[
AnalysisParamsFieldNames.SAMPLE_PREPARATION_KIT
],
self.prepopulatedFields[AnalysisParamsFieldNames.RUN_TYPE],
self.prepopulatedFields[
AnalysisParamsFieldNames.APPLICATION_GROUP_NAME
],
self.prepopulatedFields[AnalysisParamsFieldNames.CATEGORIES],
)
def _update_analysisParamsData_selection_list(
self,
chipType,
sequenceKitName,
templatingKitName,
libraryKitName,
samplePrepKitName,
applicationType,
applicationGroupName,
applicationCategories,
):
possible_match_entries = AnalysisArgs.possible_matches(
chipType,
sequenceKitName,
templatingKitName,
libraryKitName,
samplePrepKitName,
None,
applicationType,
applicationGroupName,
applicationCategories,
)
logger.debug(
"_update_analysisParamsData_selection_list() applicationType=%s; applicationGroupName=%s; applicationCategories=%s"
% (applicationType, applicationGroupName, applicationCategories)
)
best_match_entry = AnalysisArgs.best_match(
chipType,
sequenceKitName,
templatingKitName,
libraryKitName,
samplePrepKitName,
None,
applicationType,
applicationGroupName,
applicationCategories,
)
if best_match_entry:
isTemplate = self.sh_type in StepHelperType.TEMPLATE_TYPES
for ap in possible_match_entries:
if ap.name == best_match_entry.name:
ap.name = (
AnalysisParamsFieldNames.AP_ENTRY_BEST_MATCH_TEMPLATE_VALUE
if isTemplate
else AnalysisParamsFieldNames.AP_ENTRY_BEST_MATCH_PLAN_VALUE
)
ap.best_match = True
if self.savedFields[AnalysisParamsFieldNames.AP_CUSTOM] == "False":
previously_selected_analysisArgs = {
"description": AnalysisParamsFieldNames.AP_ENTRY_PREVIOUSLY_SELECTED_VALUE,
"name": "",
"beadfindargs": self.savedFields[
AnalysisParamsFieldNames.AP_BEADFIND_SELECTED
],
"analysisargs": self.savedFields[
AnalysisParamsFieldNames.AP_ANALYSISARGS_SELECTED
],
"prebasecallerargs": self.savedFields[
AnalysisParamsFieldNames.AP_PREBASECALLER_SELECTED
],
"calibrateargs": self.savedFields[
AnalysisParamsFieldNames.AP_CALIBRATE_SELECTED
],
"basecallerargs": self.savedFields[
AnalysisParamsFieldNames.AP_BASECALLER_SELECTED
],
"alignmentargs": self.savedFields[
AnalysisParamsFieldNames.AP_ALIGNMENT_SELECTED
],
"ionstatsargs": self.savedFields[
AnalysisParamsFieldNames.AP_IONSTATS_SELECTED
],
"thumbnailbeadfindargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BEADFIND_SELECTED
],
"thumbnailanalysisargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ANALYSISARGS_SELECTED
],
"prethumbnailbasecallerargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_PREBASECALLER_SELECTED
],
"thumbnailcalibrateargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_CALIBRATE_SELECTED
],
"thumbnailbasecallerargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BASECALLER_SELECTED
],
"thumbnailalignmentargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ALIGNMENT_SELECTED
],
"thumbnailionstatsargs": self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_IONSTATS_SELECTED
],
}
self.savedObjects[
AnalysisParamsFieldNames.AP_ENTRY_PREVIOUSLY_SELECTED
] = previously_selected_analysisArgs
best_match_entry.description = (
AnalysisParamsFieldNames.AP_ENTRY_SELECTED_VALUE
)
self.savedObjects[
AnalysisParamsFieldNames.AP_ENTRY_SELECTED
] = best_match_entry
self.savedFields[
AnalysisParamsFieldNames.AP_BEADFIND_SELECTED
] = best_match_entry.beadfindargs
self.savedFields[
AnalysisParamsFieldNames.AP_ANALYSISARGS_SELECTED
] = best_match_entry.analysisargs
self.savedFields[
AnalysisParamsFieldNames.AP_PREBASECALLER_SELECTED
] = best_match_entry.prebasecallerargs
self.savedFields[
AnalysisParamsFieldNames.AP_CALIBRATE_SELECTED
] = best_match_entry.calibrateargs
self.savedFields[
AnalysisParamsFieldNames.AP_BASECALLER_SELECTED
] = best_match_entry.basecallerargs
self.savedFields[
AnalysisParamsFieldNames.AP_ALIGNMENT_SELECTED
] = best_match_entry.alignmentargs
self.savedFields[
AnalysisParamsFieldNames.AP_IONSTATS_SELECTED
] = best_match_entry.ionstatsargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BEADFIND_SELECTED
] = best_match_entry.thumbnailbeadfindargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ANALYSISARGS_SELECTED
] = best_match_entry.thumbnailanalysisargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_PREBASECALLER_SELECTED
] = best_match_entry.prethumbnailbasecallerargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_CALIBRATE_SELECTED
] = best_match_entry.thumbnailcalibrateargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_BASECALLER_SELECTED
] = best_match_entry.thumbnailbasecallerargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_ALIGNMENT_SELECTED
] = best_match_entry.thumbnailalignmentargs
self.savedFields[
AnalysisParamsFieldNames.AP_THUMBNAIL_IONSTATS_SELECTED
] = best_match_entry.thumbnailionstatsargs
else:
logger.debug(
"analysis_params_step_data._update_analysisParamsData_selection_list() BEST MATCH NOT FOUND!!! chipType=%s;"
% (chipType)
)
self.prepopulatedFields[
AnalysisParamsFieldNames.AP_ENTRIES
] = possible_match_entries
self.prepopulatedFields[AnalysisParamsFieldNames.AP_DISPLAYED_NAMES] = [
ap.description for ap in possible_match_entries
]
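# Editor's sketch (illustrative, independent of Torrent Suite): updateFromStep()
# above refreshes the selection list only when one of the tracked chip/kit
# fields actually changed. The helper below isolates that "compare, copy, and
# flag" pattern on plain dictionaries; all names here are hypothetical.


def refresh_needed(tracked_fields, incoming_fields):
    """Copy incoming values over the tracked ones; return True if any changed."""
    changed = False
    for key, new_value in incoming_fields.items():
        if tracked_fields.get(key, "") != new_value:
            changed = True
        tracked_fields[key] = new_value
    return changed


if __name__ == "__main__":
    tracked = {"chipType": "", "sequencekitname": ""}
    print(refresh_needed(tracked, {"chipType": "550", "sequencekitname": ""}))  # True
    print(refresh_needed(tracked, {"chipType": "550", "sequencekitname": ""}))  # False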
|
# -*- coding: utf-8 -*-
'''
Challenge #19 will use The Adventures of Sherlock Holmes [https://www.gutenberg.org/cache/epub/1661/pg1661.txt] from
Project Gutenberg [https://www.gutenberg.org/].
Write a program that will build and output a word index for The Adventures of Sherlock Holmes. Assume one page contains
40 lines of text as formatted from Project Gutenberg's site. There are common words like "the", "a", "it" that will
probably appear on almost every page, so do not display words that occur more than 100 times.
Example Output: the word "abhorrent" appears once on page 1, and the word "medical" appears on multiple pages, so the
output for this word would look like:
abhorrent: 1
medical: 34, 97, 98, 130, 160
Exclude the Project Gutenberg header and footer, book title, story titles, and chapters.
'''
import re
with open('20120307.txt', 'r') as f:
text = f.read()
# Remove header and footer
window_strip = re.compile(r"(?<=[\n]{10}).*(?=[\n]{10})", re.DOTALL)
txt = window_strip.search(text)
# Remove glossary
content_strip = re.compile(r"(?<=[\n]{5}).*", re.DOTALL)
txt = content_strip.search(txt.group())
# Remove story and chapter titles
chapter_strip = re.compile(r"([IVX]+\. THE ADVENTURE.*?\n)|(ADVENTURE [IVX]+\..*?\n)|(\n[IVX]+\.\n)", re.DOTALL)
txt = chapter_strip.sub('', txt.group())
# split into 40 line strings
txt = txt.split('\n')
span = 40
txt = ["\n".join(txt[i:i+span]) for i in range(0, len(txt), span)]
#remove everything except letters and spaces
alpha_strip = re.compile(r"[^a-zA-Z\s]+", re.DOTALL)
for p in range(len(txt)):
txt[p] = txt[p].replace('\n', ' ')
txt[p] = alpha_strip.sub('', txt[p])
# blacklist holds words that occur more than 100 times and need not be counted
# whitelist is a dictionary that holds the word as the key and a list of page numbers as values
blacklist = []
whitelist = {}
for n, p in enumerate(txt, start=1):
# split pages into words
working_string = p.split(' ')
for w in working_string:
# check if word is in blacklist
if w.lower() in blacklist:
continue
        # if the word is already in the whitelist, record this page; once it exceeds 100 occurrences, move it to the blacklist
elif w.lower() in whitelist:
# print(w.lower(), n)
whitelist[w.lower()].append(n)
if len(whitelist[w.lower()]) > 100:
blacklist.append(w.lower())
del whitelist[w.lower()]
# if not in whitelist, add to whitelist
else:
whitelist[w.lower()] = [n]
# print the result as "word: page, page, ...", listing each page once
for word in sorted(whitelist):
    print('%s: %s' % (word, ', '.join(str(page) for page in sorted(set(whitelist[word])))))
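# ------------------------------------------------------------------------------
# Editor's sketch (an alternative, not a replacement): the same page index can be
# built with collections.Counter/defaultdict, filtering the >100-occurrence words
# once at the end instead of maintaining a running blacklist. The function below
# works on any list of page strings; the name `build_word_index` is made up.
# ------------------------------------------------------------------------------
from collections import Counter, defaultdict


def build_word_index(pages, max_occurrences=100):
    """Map each word to the sorted list of page numbers it appears on."""
    counts = Counter()
    pages_by_word = defaultdict(set)
    for page_number, page in enumerate(pages, start=1):
        for word in page.lower().split():
            counts[word] += 1
            pages_by_word[word].add(page_number)
    return {w: sorted(p) for w, p in pages_by_word.items()
            if counts[w] <= max_occurrences}


if __name__ == '__main__':
    index = build_word_index(["abhorrent case", "medical case", "medical notes"])
    print(index)  # {'abhorrent': [1], 'case': [1, 2], 'medical': [2, 3], 'notes': [3]}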
|
from typing import List
import numpy as np
from numpy import zeros
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import OES_Object
from pyNastran.f06.f06_formatting import write_floats_13e, _eigenvalue_header
class RealNonlinearRodArray(OES_Object): # 89-CRODNL, 92-CONRODNL
"""
::
ELEMENT-ID = 102
N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )
TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL
STRESS PLASTIC/NLELAST STRAIN STRESS
2.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
3.000E-02 1.941367E+01 1.941367E+01 1.941367E-04 0.0 0.0 0.0
"""
def __init__(self, data_code, is_sort1, isubcase, dt):
"""tested by elements/loadstep_elements.op2"""
OES_Object.__init__(self, data_code, isubcase, apply_data_code=True)
#self.code = [self.format_code, self.sort_code, self.s_code]
self.nelements = 0 # result specific
@property
def is_real(self) -> bool:
return True
@property
def is_complex(self) -> bool:
return False
@property
def nnodes_per_element(self) -> int:
return 1
def _reset_indices(self):
self.itotal = 0
self.ielement = 0
def _get_msgs(self):
raise NotImplementedError()
def get_headers(self) -> List[str]:
headers = ['axial_stress', 'equiv_stress', 'total_strain',
'effective_plastic_creep_strain', 'effective_creep_strain',
'linear_torsional_stress']
return headers
def build(self):
"""sizes the vectorized attributes of the RealNonlinearRodArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
self.is_built = True
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype = 'float32'
if isinstance(self.nonlinear_factor, integer_types):
dtype = 'int32'
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype='int32')
#[axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
# effective_creep_strain, linear_torsional_stress]
self.data = zeros((self.ntimes, self.nelements, 6), dtype='float32')
def build_dataframe(self):
"""creates a pandas dataframe"""
import pandas as pd
headers = self.get_headers()
if self.nonlinear_factor not in (None, np.nan):
#Time 0.02 0.04
#ElementID Item
#102 axial_stress 19.413668 76.139496
# equiv_stress 19.413668 76.139496
# total_strain 0.000194 0.000761
# effective_plastic_creep_strain 0.000000 0.000000
# effective_creep_strain 0.000000 0.000000
# linear_torsional_stress 0.000000 0.000000
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
else:
df1 = pd.DataFrame(self.element).T
df1.columns = ['ElementID']
df2 = pd.DataFrame(self.data[0])
df2.columns = headers
self.data_frame = df1.join([df2])
#print(self.data_frame)
def __eq__(self, table): # pragma: no cover
self._eq_header(table)
assert self.is_sort1 == table.is_sort1
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
ntimes = self.data.shape[0]
i = 0
if self.is_sort1:
for itime in range(ntimes):
for ieid, eid, in enumerate(self.element):
t1 = self.data[itime, ieid, :]
t2 = table.data[itime, ieid, :]
(axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1) = t1
(axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2) = t2
if not np.allclose(t1, t2):
#if not np.array_equal(t1, t2):
msg += '%s\n (%s, %s, %s, %s, %s, %s)\n (%s, %s, %s, %s, %s, %s)\n' % (
eid,
axial_stress1, equiv_stress1, total_strain1, effective_plastic_creep_strain1, effective_creep_strain1, linear_torsional_stress1,
axial_stress2, equiv_stress2, total_strain2, effective_plastic_creep_strain2, effective_creep_strain2, linear_torsional_stress2)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
else:
raise NotImplementedError(self.is_sort2)
if i > 0:
print(msg)
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, axial_stress, equiv_stress, total_strain,
effective_plastic_creep_strain, effective_creep_strain, linear_torsional_stress):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, :] = [
axial_stress, equiv_stress, total_strain, effective_plastic_creep_strain,
effective_creep_strain, linear_torsional_stress
]
self.ielement += 1
def get_stats(self, short=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
' ntimes: %i\n' % self.ntimes,
' ntotal: %i\n' % self.ntotal,
]
ntimes, nelements, _ = self.data.shape
assert self.ntimes == ntimes, 'ntimes=%s expected=%s' % (self.ntimes, ntimes)
assert self.nelements == nelements, 'nelements=%s expected=%s' % (self.nelements, nelements)
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i\n'
% (self.__class__.__name__, ntimes, nelements))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i\n'
% (self.__class__.__name__, nelements))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (ntimes_word, n, n, str(', '.join(headers))))
msg.append(' data.shape = %s\n' % str(self.data.shape).replace('L', ''))
msg.append(' element type: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num=1, is_mag_phase=False, is_sort1=True):
if header is None:
header = []
if is_sort1:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' ELEMENT-ID AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
else:
msg = [
' N O N L I N E A R S T R E S S E S I N R O D E L E M E N T S ( C R O D )\n',
' \n',
' TIME AXIAL STRESS EQUIVALENT TOTAL STRAIN EFF. STRAIN EFF. CREEP LIN. TORSIONAL\n',
' STRESS PLASTIC/NLELAST STRAIN STRESS\n'
]
if self.is_sort1:
page_num = self._write_sort1_as_sort1(header, page_stamp, page_num, f06_file, msg)
else:
raise NotImplementedError('RealNonlinearRodArray')
return page_num
def _write_sort1_as_sort1(self, header, page_stamp, page_num, f06_file, msg_temp):
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
for itime in range(ntimes):
dt = self._times[itime]
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
axial = self.data[itime, :, 0]
eqs = self.data[itime, :, 1]
total = self.data[itime, :, 2]
epcs = self.data[itime, :, 3]
ecs = self.data[itime, :, 4]
lts = self.data[itime, :, 5]
#print "dt=%s axials=%s eqs=%s ts=%s epcs=%s ecs=%s lts=%s" %(dt,axial,eqs,ts,epcs,ecs,lts)
#msgE[eid] = ' ELEMENT-ID = %8i\n' % (eid)
#if eid not in msgT:
#msgT[eid] = []
#msgT[eid].append(' %9.3E %13.6E %13.6E %13.6E %13.6E %13.6E %13.6E\n' % (dt, axial, eqs, ts, epcs, ecs, lts))
for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids, axial, eqs, total, epcs, ecs, lts):
([saxial, seqs, stotal, sepcs, secs, slts]) = write_floats_13e(
[axiali, eqsi, totali, epcsi, ecsi, ltsi])
f06_file.write(
' %8i %-13s %-13s %-13s %-13s %-13s %s\n' % (
eid, saxial, seqs, stotal, sepcs, secs, slts))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2, op2_ascii, itable, new_result, date,
is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write('%s.write_op2: %s\n' % (self.__class__.__name__, call_frame[1][3]))
if itable == -1:
self._write_table_header(op2, op2_ascii, date)
itable = -3
#if isinstance(self.nonlinear_factor, float):
#op2_format = '%sif' % (7 * self.ntimes)
#raise NotImplementedError()
#else:
#op2_format = 'i21f'
#s = Struct(op2_format)
eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
device_code = self.device_code
op2_ascii.write(' ntimes = %s\n' % self.ntimes)
eids_device = self.element * 10 + self.device_code
#fmt = '%2i %6f'
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i6f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('nelements=%i\n' % nelements)
for itime in range(self.ntimes):
#print('3, %s' % itable)
self._write_table_3(op2, op2_ascii, new_result, itable, itime)
# record 4
#print('stress itable = %s' % itable)
itable -= 1
#print('4, %s' % itable)
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write('r4 [4, %s, 4]\n' % (itable))
op2_ascii.write('r4 [4, %i, 4]\n' % (4 * ntotal))
axial = self.data[itime, :, 0]
eqs = self.data[itime, :, 1]
total = self.data[itime, :, 2]
epcs = self.data[itime, :, 3]
ecs = self.data[itime, :, 4]
lts = self.data[itime, :, 5]
for eid, axiali, eqsi, totali, epcsi, ecsi, ltsi in zip(eids_device, axial, eqs, total, epcs, ecs, lts):
data = [eid, axiali, eqsi, totali, epcsi, ecsi, ltsi]
                op2_ascii.write(' eid=%s data=%s\n' % (eid, str(data)))
op2.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
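# ------------------------------------------------------------------------------
# Editor's sketch (illustrative only): the result object above stores its values
# in a (ntimes, nelements, 6) float array and slices one column per quantity at
# each time step (see _write_sort1_as_sort1). The snippet below reproduces that
# layout with plain numpy; the element ids and values are made up.
# ------------------------------------------------------------------------------
if __name__ == '__main__':  # pragma: no cover
    ntimes, nelements = 2, 3
    element = np.array([101, 102, 103], dtype='int32')
    # columns: axial_stress, equiv_stress, total_strain,
    #          effective_plastic_creep_strain, effective_creep_strain,
    #          linear_torsional_stress
    data = zeros((ntimes, nelements, 6), dtype='float32')
    data[0, :, 0] = [19.4, 20.1, 21.7]   # axial stress at the first time step

    itime = 0
    axial = data[itime, :, 0]
    for eid, axiali in zip(element, axial):
        print('eid=%-4i axial=%s' % (eid, axiali))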
|
from copy import deepcopy
from flask import render_template, url_for, request, flash, redirect
from flask_jwt_extended import jwt_required, get_current_user
from wtforms import BooleanField
from . import projects
from .forms import EditProject, ManageProjectAccessForm
from seamm_datastore.database.models import Project, User, UserProjectAssociation
from seamm_dashboard import authorize, db
def _bind_users_to_form(form, current_user, project_id):
actions = ["read", "update", "create", "delete", "manage"]
users = User.query.all()
user_names = []
if len(users) > 1:
# Put current user first
users.remove(current_user)
reorder_users = [current_user]
reorder_users.extend(users)
users = reorder_users
for user in users:
field_name = f"user_{user.id}"
permissions = []
assoc = UserProjectAssociation.query.filter_by(
resource_id=project_id, entity_id=user.id
).one_or_none()
if assoc:
permissions = assoc.permissions
for action in actions:
checked = action in permissions
setattr(form, f"{field_name}_{action}", BooleanField(default=checked))
user_names.append({"username": user.username, "id": user.id})
return form, user_names
@projects.route("/views/projects")
def project_list():
return render_template("projects/project_list.html")
@projects.route("/views/projects/<id>/jobs")
@projects.route("/views//projects/<id>/jobs")
@jwt_required(optional=True)
def project_jobs_list(id):
project = Project.query.get(id)
manage_project = authorize.manage(project)
edit_project = authorize.update(project)
# Build the url ourselves.
base_url = url_for("main.index")
edit_url = base_url + f"projects/{id}/edit"
manage_url = base_url + f"projects/{id}/manage"
return render_template(
"jobs/jobs_list.html",
project=True,
manage_project=manage_project,
edit_project=edit_project,
edit_url=edit_url,
manage_url=manage_url,
)
@projects.route("/projects/<project_id>/edit", methods=["GET", "POST"])
@jwt_required(optional=True)
def edit_project(project_id):
project = Project.query.get(project_id)
if not authorize.update(project):
return render_template("401.html")
form = EditProject()
# Build the url ourselves.
base_url = url_for("main.index")
project_url = base_url + f"#projects/{project_id}/jobs"
if form.validate_on_submit():
project.name = form.name.data
project.description = form.notes.data
db.session.commit()
flash("Project updated successfully.", "successs")
return redirect(project_url)
elif request.method == "GET":
form.name.data = project.name
form.notes.data = project.description
return render_template(
"jobs/edit_job.html",
title=f"Edit Project {project_id}",
form=form,
back_url=project_url,
)
@projects.route("/projects/<project_id>/manage", methods=["GET", "POST"])
@jwt_required(optional=True)
def manage_project(project_id):
project = Project.query.get(project_id)
if not project:
return render_template("404.html")
if not authorize.manage(project):
return render_template("401.html")
form = deepcopy(ManageProjectAccessForm)
form, usernames = _bind_users_to_form(
form, current_user=get_current_user(), project_id=project.id
)
form = form()
# Build the url ourselves.
base_url = url_for("main.index")
project_url = base_url + f"#projects/{project_id}/jobs"
if request.method == "POST":
if form.validate_on_submit():
user_keys = [
x for x in form.data.keys() if "user" in x if form.data[x] is True
]
permissions_dict = {}
for key in user_keys:
split = key.split("_")
user_id = int(split[1])
permission = split[2]
try:
permissions_dict[user_id].append(permission)
except KeyError:
permissions_dict[user_id] = [permission]
            # Find any entries for special user permissions that exist for this project
users = User.query.all()
for entry in users:
user_id = entry.id
try:
permissions = permissions_dict[user_id]
except KeyError:
permissions = []
assoc = UserProjectAssociation.query.filter_by(
entity_id=user_id, resource_id=project_id
).one_or_none()
if not assoc:
assoc = UserProjectAssociation(
entity_id=user_id,
resource_id=project_id,
permissions=permissions,
)
else:
assoc.permissions = permissions
db.session.add(assoc)
db.session.commit()
flash(f"Permissions for {project.name} successfully updated.")
return redirect(project_url)
return render_template(
"projects/project_access.html",
form=form,
users=usernames,
project=project,
back_url=project_url,
)
|
import os
from concurrent.futures import Future, ThreadPoolExecutor, as_completed, TimeoutError
import time
from typing import Optional, List
GLOBAL_EXECUTOR = ThreadPoolExecutor(max_workers=float(os.environ.get("HIVEMIND_THREADS", float('inf'))))
def run_in_background(func: callable, *args, **kwargs) -> Future:
""" run func(*args, **kwargs) in background and return Future for its outputs """
return GLOBAL_EXECUTOR.submit(func, *args, **kwargs)
def run_forever(func: callable, *args, **kwargs):
""" A function that runs a :func: in background forever. Returns a future that catches exceptions """
def repeat():
while True:
func(*args, **kwargs)
return run_in_background(repeat)
def run_and_await_k(jobs: List[callable], k: int,
timeout_after_k: Optional[float] = 0, timeout_total: Optional[float] = None):
"""
Runs all :jobs: asynchronously, awaits for at least k of them to finish
:param jobs: functions to call asynchronously
:param k: how many functions should finish for call to be successful
    :param timeout_after_k: after reaching k finished jobs, wait at most this long for the remaining jobs before cancelling
    :param timeout_total: if specified, cancel all unfinished jobs after this many seconds in total
:returns: a list of either results or exceptions for each job
"""
jobs = list(jobs)
assert k <= len(jobs), f"Can't await {k} out of {len(jobs)} jobs."
start_time = time.time()
future_to_ix = {run_in_background(job): i for i, job in enumerate(jobs)}
outputs = [None] * len(jobs)
success_count = 0
try:
# await first k futures for as long as it takes
for future in as_completed(list(future_to_ix.keys()), timeout=timeout_total):
success_count += int(not future.exception())
outputs[future_to_ix.pop(future)] = future.result() if not future.exception() else future.exception()
if success_count >= k:
break # we have enough futures to succeed
            if success_count + len(future_to_ix) < k:
                failed = len(jobs) - success_count - len(future_to_ix)
                raise ValueError(f"Couldn't get enough results: too many jobs failed ({failed} / {len(jobs)})")
        # await stragglers for at most timeout_after_k or whatever time is left
if timeout_after_k is not None and timeout_total is not None:
time_left = min(timeout_after_k, timeout_total - time.time() + start_time)
else:
time_left = timeout_after_k if timeout_after_k is not None else timeout_total
for future in as_completed(list(future_to_ix.keys()), timeout=time_left):
success_count += int(not future.exception())
outputs[future_to_ix.pop(future)] = future.result() if not future.exception() else future.exception()
except TimeoutError:
        if success_count < k:
            raise TimeoutError(f"Couldn't get enough results: time limit exceeded (got {success_count} of {k})")
finally:
for future, index in future_to_ix.items():
            future.cancel()
            # only read from futures that actually finished; cancelled or still-running
            # futures keep their default None entry in outputs
            if future.done() and not future.cancelled():
                outputs[index] = future.result() if not future.exception() else future.exception()
return outputs
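

# A minimal usage sketch (illustrative only; the job function and timing values
# below are assumptions, not part of this module's API):
if __name__ == "__main__":
    def _slow_square(i):
        time.sleep(0.01 * i)  # simulate work of varying duration
        return i * i

    # Launch five jobs and return once at least three have finished, waiting up to
    # one extra second for stragglers and five seconds in total.
    results = run_and_await_k([lambda i=i: _slow_square(i) for i in range(5)],
                              k=3, timeout_after_k=1.0, timeout_total=5.0)
    print(results)  # results (or exceptions) per job; unfinished jobs stay None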
|
from django import forms
from django.core.validators import MinLengthValidator
from ddcz.notifications import Audience
class News(forms.Form):
text = forms.CharField(
label="",
widget=forms.Textarea(
attrs={"class": "comment__textarea", "cols": 80, "rows": 30},
),
validators=[MinLengthValidator(80)],
)
audience = forms.ChoiceField(
choices=((i.name, i.value) for i in Audience), label="Komu poslat e-mail"
)
|
from typing import List


class Solution:
def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
horizontalCuts = sorted(horizontalCuts)
verticalCuts = sorted(verticalCuts)
max_w = max(horizontalCuts[0],h-horizontalCuts[-1])
for i in range(len(horizontalCuts)):
max_w = max(horizontalCuts[i] - horizontalCuts[i-1],max_w)
max_h = 0
max_h = max(verticalCuts[0],w-verticalCuts[-1])
for i in range(len(verticalCuts)):
max_h = max(verticalCuts[i] - verticalCuts[i-1],max_h)
return (max_w * max_h) % ((10 ** 9) + 7)
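
# Worked example: h=5, w=4, horizontalCuts=[1, 2, 4], verticalCuts=[1, 3] gives a
# largest gap of 2 between horizontal cuts and 2 between vertical cuts, so
# Solution().maxArea(5, 4, [1, 2, 4], [1, 3]) returns 2 * 2 = 4.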
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union, List, Tuple
import paddle
from paddle import nn
import paddle.nn.functional as F
import numpy as np
from paddlehub.module.module import moduleinfo
import paddlehub.vision.segmentation_transforms as T
from paddlehub.module.cv_module import ImageSegmentationModule
import unet_cityscapes.layers as layers
@moduleinfo(
name="unet_cityscapes",
type="CV/semantic_segmentation",
author="paddlepaddle",
author_email="",
summary="Unet is a segmentation model.",
version="1.0.0",
meta=ImageSegmentationModule)
class UNet(nn.Layer):
"""
The UNet implementation based on PaddlePaddle.
The original article refers to
Olaf Ronneberger, et, al. "U-Net: Convolutional Networks for Biomedical Image Segmentation"
(https://arxiv.org/abs/1505.04597).
Args:
num_classes (int): The unique number of target classes.
align_corners (bool): An argument of F.interpolate. It should be set to False when the output size of feature
is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
use_deconv (bool, optional): A bool value indicates whether using deconvolution in upsampling.
If False, use resize_bilinear. Default: False.
pretrained (str, optional): The path or url of pretrained model for fine tuning. Default: None.
"""
def __init__(self,
num_classes: int = 19,
align_corners: bool = False,
use_deconv: bool = False,
pretrained: str = None):
super(UNet, self).__init__()
self.encode = Encoder()
self.decode = Decoder(align_corners, use_deconv=use_deconv)
self.cls = self.conv = nn.Conv2D(in_channels=64, out_channels=num_classes, kernel_size=3, stride=1, padding=1)
self.transforms = T.Compose([T.Normalize()])
if pretrained is not None:
model_dict = paddle.load(pretrained)
self.set_dict(model_dict)
print("load custom parameters success")
else:
checkpoint = os.path.join(self.directory, 'model.pdparams')
model_dict = paddle.load(checkpoint)
self.set_dict(model_dict)
print("load pretrained parameters success")
def transform(self, img: Union[np.ndarray, str]) -> Union[np.ndarray, str]:
return self.transforms(img)
def forward(self, x: paddle.Tensor) -> List[paddle.Tensor]:
logit_list = []
x, short_cuts = self.encode(x)
x = self.decode(x, short_cuts)
logit = self.cls(x)
logit_list.append(logit)
return logit_list
class Encoder(nn.Layer):
def __init__(self):
super().__init__()
self.double_conv = nn.Sequential(layers.ConvBNReLU(3, 64, 3), layers.ConvBNReLU(64, 64, 3))
down_channels = [[64, 128], [128, 256], [256, 512], [512, 512]]
self.down_sample_list = nn.LayerList([self.down_sampling(channel[0], channel[1]) for channel in down_channels])
def down_sampling(self, in_channels: int, out_channels: int) -> nn.Layer:
modules = []
modules.append(nn.MaxPool2D(kernel_size=2, stride=2))
modules.append(layers.ConvBNReLU(in_channels, out_channels, 3))
modules.append(layers.ConvBNReLU(out_channels, out_channels, 3))
return nn.Sequential(*modules)
def forward(self, x: paddle.Tensor) -> Tuple:
short_cuts = []
x = self.double_conv(x)
for down_sample in self.down_sample_list:
short_cuts.append(x)
x = down_sample(x)
return x, short_cuts
class Decoder(nn.Layer):
def __init__(self, align_corners: bool, use_deconv: bool = False):
super().__init__()
up_channels = [[512, 256], [256, 128], [128, 64], [64, 64]]
self.up_sample_list = nn.LayerList(
[UpSampling(channel[0], channel[1], align_corners, use_deconv) for channel in up_channels])
def forward(self, x: paddle.Tensor, short_cuts: List) -> paddle.Tensor:
for i in range(len(short_cuts)):
x = self.up_sample_list[i](x, short_cuts[-(i + 1)])
return x
class UpSampling(nn.Layer):
def __init__(self, in_channels: int, out_channels: int, align_corners: bool, use_deconv: bool = False):
super().__init__()
self.align_corners = align_corners
self.use_deconv = use_deconv
if self.use_deconv:
self.deconv = nn.Conv2DTranspose(in_channels, out_channels // 2, kernel_size=2, stride=2, padding=0)
in_channels = in_channels + out_channels // 2
else:
in_channels *= 2
self.double_conv = nn.Sequential(
layers.ConvBNReLU(in_channels, out_channels, 3), layers.ConvBNReLU(out_channels, out_channels, 3))
def forward(self, x: paddle.Tensor, short_cut: paddle.Tensor) -> paddle.Tensor:
if self.use_deconv:
x = self.deconv(x)
else:
x = F.interpolate(x, paddle.shape(short_cut)[2:], mode='bilinear', align_corners=self.align_corners)
x = paddle.concat([x, short_cut], axis=1)
x = self.double_conv(x)
return x
|
# Generated by Django 4.0 on 2021-12-28 23:15
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('choice_type', models.CharField(choices=[('t', 'Text'), ('r', 'Radio'), ('c', 'Checkbox')], default='r', help_text='Вид ответов на вопрос', max_length=1)),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='Дата публикации')),
('require_login', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='SurveyResult',
fields=[
('id', models.UUIDField(default=uuid.uuid4, help_text='Unique id of Survey result', primary_key=True, serialize=False)),
('user', models.CharField(blank=True, max_length=200, null=True)),
('created', models.DateTimeField(auto_now_add=True)),
('survey', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='survey.survey')),
],
),
migrations.CreateModel(
name='ResultChoice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_result', models.TextField(blank=True, null=True)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.question')),
('refer_survey_result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.surveyresult')),
],
),
migrations.AddField(
model_name='question',
name='survey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.survey'),
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice', models.CharField(max_length=200)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='survey.question')),
],
),
]
|
from ssd.models import registry
from .efficient_net import EfficientNet
__all__ = ['efficient_net_b3', 'EfficientNet']
@registry.BACKBONES.register('efficient_net-b3')
def efficient_net_b3(cfg, pretrained=True):
if pretrained:
model = EfficientNet.from_pretrained('efficientnet-b3')
else:
model = EfficientNet.from_name('efficientnet-b3')
return model
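
# A minimal usage sketch (an assumption for illustration: the project's registry is
# expected to behave like a mapping from name to factory, and `cfg` is a valid SSD
# config object):
#
#   backbone = registry.BACKBONES['efficient_net-b3'](cfg, pretrained=False)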
|
from typing import List


class Solution:
def largestPerimeter(self, nums: List[int]) -> int:
nums.sort(reverse=True)
for i in range(len(nums)-2):
if nums[i]-nums[i+1]<nums[i+2]:
return nums[i]+nums[i+1]+nums[i+2]
return 0
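
# Worked examples: Solution().largestPerimeter([2, 1, 2]) returns 5 because the
# sorted sides 2, 2, 1 satisfy the triangle inequality, while
# Solution().largestPerimeter([1, 2, 1]) returns 0 because no three sides do.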
|
import matplotlib.pyplot as plt
import numpy as np
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# prepare some coordinates
x, y, z = np.indices((11,9,8))
#
a = (x < 6) & (x >= 4 ) & (y < 4 ) & (y >= 3) & (z < 7) & (z >= 1)
b = (x < 9) & (x >= 2) & (y < 2) & (y >= 0) & (z < 5 ) & (z >= 3)
c = (x < 11) & (x >= 0) & (y < 9) & (y >= 0 ) & (z < 2 ) & (z >= 0 )
d = (x < 7) & (x >=5 ) & (y <8 ) & (y >=1 ) & (z <5 ) & (z >=3 )
e = (x < 3) & (x >= 1) & (y <6 ) & (y >= 1) & (z <5 ) & (z >=1 )
A = (x < 11) & (x >=0 ) & (y <9 ) & (y >=0 ) & (z < 8) & (z >=6 )
B = (x < 11) & (x >=8 ) & (y <4 ) & (y >=1 ) & (z <7 ) & (z >=3 )
C = (x < 10) & (x >=9 ) & (y <8 ) & (y >=3 ) & (z <5 ) & (z >=1 )
D = (x < 11) & (x >=1 ) & (y <9 ) & (y >= 7) & (z <5 ) & (z >=3 )
E = (x < 2) & (x >=0 ) & (y <8 ) & (y >=3 ) & (z <7 ) & (z >=3 )
# combine the objects into a single boolean array
voxels = a | b | c | d | e | A | B | C | D | E
# set the colors of each object
colors = np.empty(voxels.shape, dtype=object)
colors[a] = 'red'
colors[b] = 'blue'
colors[c] = 'green'
colors[d] = 'yellow'
colors[e] = 'cyan'
colors[B] = 'red'
colors[C] = 'blue'
colors[D] = 'green'
colors[E] = 'yellow'
colors[A] = 'cyan'
## for 2D verification
voxels2D = a | b | d | e | B | C | D | E
colors2D = np.empty(voxels2D.shape, dtype=object)
colors2D[a] = 'red'
colors2D[b] = 'blue'
colors2D[d] = 'yellow'
colors2D[e] = 'cyan'
colors2D[B] = 'red'
colors2D[C] = 'blue'
colors2D[D] = 'green'
colors2D[E] = 'yellow'
# and plot everything (2D)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.voxels(voxels2D, facecolors=colors2D)
plt.title('2D reduction (A & c removed)')
plt.show()
# and plot everything (3Ds)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.voxels(voxels, facecolors=colors)
plt.title('3D version ')
plt.show()
|
from flask_restful import Resource, fields, marshal_with, abort, reqparse
from models import Station
import db
station_fields = {
'id': fields.Integer,
'name': fields.String,
'url': fields.String,
'is_favorite': fields.Boolean
}
status_fields = {
'url': fields.String,
'state': fields.String(attribute='state.name'),
'title': fields.String,
'name': fields.String,
'volume': fields.Integer,
'bitrate': fields.String,
}
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
parser.add_argument('url', type=str)
parser.add_argument('is_favorite', type=bool)
class StationResource(Resource):
@marshal_with(station_fields)
def get(self, id):
# Get
station = db.session.query(Station).filter(Station.id == id).first()
if not station:
abort(404, message="Station {} doesn't exist".format(id))
return station
@marshal_with(station_fields)
def put(self, id):
# Update
parsed_args = parser.parse_args()
station = db.session.query(Station).filter(Station.id == id).first()
if not station:
abort(404, message="Station {} doesn't exist".format(id))
station.name = parsed_args['name']
station.url = parsed_args['url']
station.is_favorite = parsed_args['is_favorite']
try:
db.session.add(station)
db.session.commit()
except:
db.session.rollback()
raise
return station, 201
def delete(self, id):
station = db.session.query(Station).filter(Station.id == id).first()
if not station:
abort(404, message="Station {} doesn't exist".format(id))
try:
db.session.delete(station)
db.session.commit()
except:
db.session.rollback()
raise
return {}, 204
class StationListResource(Resource):
@marshal_with(station_fields)
def get(self):
# Get
parsed_args = parser.parse_args()
if parsed_args['is_favorite']:
stations = db.session.query(Station).filter(Station.is_favorite == parsed_args['is_favorite']).all()
else:
stations = db.session.query(Station).all()
return stations
@marshal_with(station_fields)
def post(self):
# Create
parsed_args = parser.parse_args()
station = Station()
station.name = parsed_args['name']
station.url = parsed_args['url']
station.is_favorite = parsed_args['is_favorite']
try:
db.session.add(station)
db.session.commit()
except:
db.session.rollback()
raise
return station, 201
class StatusResource(Resource):
def __init__(self, radio):
self.radio = radio
@marshal_with(status_fields)
def get(self):
return self.radio.get_status()
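

# A minimal registration sketch (illustrative only; the URL paths and the `radio`
# object are assumptions, not defined in this module):
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(StationListResource, "/stations")
#   api.add_resource(StationResource, "/stations/<int:id>")
#   api.add_resource(StatusResource, "/status", resource_class_kwargs={"radio": radio})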
|
# -*- coding: utf-8 -*-
from pyramid.session import SignedCookieSessionFactory
from h.security import derive_key
def model(request):
session = {}
session['csrf'] = request.session.get_csrf_token()
session['userid'] = request.authenticated_userid
session['groups'] = _current_groups(request)
session['features'] = request.feature.all()
session['preferences'] = _user_preferences(request.authenticated_user)
return session
def profile(request):
"""
Return a representation of the current user's information and settings.
"""
profile = {}
profile['userid'] = request.authenticated_userid
profile['groups'] = _current_groups(request)
profile['features'] = request.feature.all()
profile['preferences'] = _user_preferences(request.authenticated_user)
return profile
def pop_flash(request):
return {k: request.session.pop_flash(k)
for k in ['error', 'info', 'warning', 'success']}
def _group_sort_key(group):
"""Sort private groups for the session model list"""
# groups are sorted first by name but also by ID
# so that multiple groups with the same name are displayed
# in a consistent order in clients
return (group.name.lower(), group.pubid)
def _current_groups(request):
"""Return a list of the groups the current user is a member of.
This list is meant to be returned to the client in the "session" model.
"""
groups = [
{'name': 'Public', 'id': '__world__', 'public': True},
]
user = request.authenticated_user
if user is None:
return groups
for group in sorted(user.groups, key=_group_sort_key):
groups.append({
'name': group.name,
'id': group.pubid,
'url': request.route_url('group_read',
pubid=group.pubid,
slug=group.slug),
})
return groups
def _user_preferences(user):
preferences = {}
if user and not user.sidebar_tutorial_dismissed:
preferences['show_sidebar_tutorial'] = True
return preferences
def includeme(config):
settings = config.registry.settings
# By default, derive_key generates a 64-byte (512 bit) secret, which is the
# correct length for SHA512-based HMAC as specified by the `hashalg`.
factory = SignedCookieSessionFactory(
secret=derive_key(settings['secret_key'], b'h.session.cookie_secret'),
hashalg='sha512',
httponly=True,
timeout=3600,
)
config.set_session_factory(factory)
|
def chess_triangle(n, m):
|
import datetime
import json
from typing import Dict, List, Optional
from uuid import UUID
from django.db.models.query import QuerySet
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.utils.timezone import now
from rest_framework import serializers
from ee.clickhouse.client import sync_execute
from ee.clickhouse.sql.person import (
DELETE_PERSON_BY_ID,
DELETE_PERSON_EVENTS_BY_ID,
INSERT_PERSON_DISTINCT_ID,
INSERT_PERSON_SQL,
)
from ee.kafka_client.client import ClickhouseProducer
from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_UNIQUE_ID
from posthog import settings
from posthog.ee import is_clickhouse_enabled
from posthog.models.person import Person, PersonDistinctId
from posthog.models.utils import UUIDT
if settings.EE_AVAILABLE and is_clickhouse_enabled():
@receiver(post_save, sender=Person)
def person_created(sender, instance: Person, created, **kwargs):
create_person(
team_id=instance.team.pk,
properties=instance.properties,
uuid=str(instance.uuid),
is_identified=instance.is_identified,
)
@receiver(post_save, sender=PersonDistinctId)
def person_distinct_id_created(sender, instance: PersonDistinctId, created, **kwargs):
create_person_distinct_id(instance.team.pk, instance.distinct_id, str(instance.person.uuid))
@receiver(post_delete, sender=Person)
def person_deleted(sender, instance: Person, **kwargs):
delete_person(instance.uuid, instance.properties, instance.is_identified, team_id=instance.team_id)
@receiver(post_delete, sender=PersonDistinctId)
def person_distinct_id_deleted(sender, instance: PersonDistinctId, **kwargs):
create_person_distinct_id(instance.team.pk, instance.distinct_id, str(instance.person.uuid), sign=-1)
def create_person(
team_id: int,
uuid: Optional[str] = None,
properties: Optional[Dict] = {},
sync: bool = False,
is_identified: bool = False,
timestamp: Optional[datetime.datetime] = None,
) -> str:
if uuid:
uuid = str(uuid)
else:
uuid = str(UUIDT())
if not timestamp:
timestamp = now()
data = {
"id": str(uuid),
"team_id": team_id,
"properties": json.dumps(properties),
"is_identified": int(is_identified),
"created_at": timestamp.strftime("%Y-%m-%d %H:%M:%S.%f"),
"_timestamp": timestamp.strftime("%Y-%m-%d %H:%M:%S"),
}
p = ClickhouseProducer()
p.produce(topic=KAFKA_PERSON, sql=INSERT_PERSON_SQL, data=data, sync=sync)
return uuid
def create_person_distinct_id(team_id: int, distinct_id: str, person_id: str, sign=1) -> None:
data = {"distinct_id": distinct_id, "person_id": person_id, "team_id": team_id, "sign": sign}
p = ClickhouseProducer()
p.produce(topic=KAFKA_PERSON_UNIQUE_ID, sql=INSERT_PERSON_DISTINCT_ID, data=data)
def get_persons_by_distinct_ids(team_id: int, distinct_ids: List[str]) -> QuerySet:
return Person.objects.filter(
team_id=team_id, persondistinctid__team_id=team_id, persondistinctid__distinct_id__in=distinct_ids
)
def get_persons_by_uuids(team_id: int, uuids: List[str]) -> QuerySet:
return Person.objects.filter(team_id=team_id, uuid__in=uuids)
def delete_person(
person_id: UUID, properties: Dict, is_identified: bool, delete_events: bool = False, team_id: int = False
) -> None:
timestamp = now()
data = {
"id": person_id,
"team_id": team_id,
"properties": json.dumps(properties),
"is_identified": int(is_identified),
"created_at": timestamp.strftime("%Y-%m-%d %H:%M:%S"),
"_timestamp": timestamp.strftime("%Y-%m-%d %H:%M:%S"),
}
try:
if delete_events:
sync_execute(DELETE_PERSON_EVENTS_BY_ID, {"id": person_id, "team_id": team_id})
except:
pass # cannot delete if the table is distributed
sync_execute(DELETE_PERSON_BY_ID, data)
class ClickhousePersonSerializer(serializers.Serializer):
id = serializers.SerializerMethodField()
created_at = serializers.SerializerMethodField()
team_id = serializers.SerializerMethodField()
properties = serializers.SerializerMethodField()
is_identified = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
distinct_ids = serializers.SerializerMethodField()
def get_name(self, person):
props = json.loads(person[3])
email = props.get("email", None)
return email or person[0]
def get_id(self, person):
return person[0]
def get_created_at(self, person):
return person[1]
def get_team_id(self, person):
return person[2]
def get_properties(self, person):
return json.loads(person[3])
def get_is_identified(self, person):
return person[4]
# all queries might not retrieve distinct_ids
def get_distinct_ids(self, person):
return person[5] if len(person) > 5 else []
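

# A minimal usage sketch (requires a configured Django/ClickHouse/Kafka environment;
# the team id, e-mail and distinct id below are illustrative only):
#
#   person_uuid = create_person(team_id=1, properties={"email": "user@example.com"})
#   create_person_distinct_id(team_id=1, distinct_id="user@example.com", person_id=person_uuid)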
|
from .verb import Verb
from .exceptions import WorkloadExceededError
from .interpreter import Interpreter, build_node_tree
|
import sys
from STDF import STDR, STDFError
class FTR(STDR):
def __init__(self, version=None, endian=None, record=None):
self.id = 'FTR'
self.local_debug = False
if version==None or version=='V4':
self.version = 'V4'
self.info = '''
Functional Test Record
----------------------
Function:
Contains the results of the single execution of a functional test in the test program. The
first occurrence of this record also establishes the default values for all semi-static
information about the test. The FTR is related to the Test Synopsis Record (TSR) by test
number, head number, and site number.
Frequency:
* Obligatory, one or more for each execution of a functional test.
Location:
Anywhere in the data stream after the corresponding Part Information Record (PIR)
and before the corresponding Part Result Record (PRR).
'''
self.fields = {
'REC_LEN' : {'#' : 0, 'Type' : 'U*2', 'Ref' : None, 'Value' : None, 'Text' : 'Bytes of data following header ', 'Missing' : None},
'REC_TYP' : {'#' : 1, 'Type' : 'U*1', 'Ref' : None, 'Value' : 15, 'Text' : 'Record type ', 'Missing' : None},
'REC_SUB' : {'#' : 2, 'Type' : 'U*1', 'Ref' : None, 'Value' : 20, 'Text' : 'Record sub-type ', 'Missing' : None},
'TEST_NUM' : {'#' : 3, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Test number ', 'Missing' : None}, # Obligatory!
'HEAD_NUM' : {'#' : 4, 'Type' : 'U*1', 'Ref' : None, 'Value' : None, 'Text' : 'Test head number ', 'Missing' : 1},
'SITE_NUM' : {'#' : 5, 'Type' : 'U*1', 'Ref' : None, 'Value' : None, 'Text' : 'Test site number ', 'Missing' : 1},
'TEST_FLG' : {'#' : 6, 'Type' : 'B*1', 'Ref' : None, 'Value' : None, 'Text' : 'Test flags (fail, alarm, etc.) ', 'Missing' : ['0']*8},
'OPT_FLAG' : {'#' : 7, 'Type' : 'B*1', 'Ref' : None, 'Value' : None, 'Text' : 'Optional data flag ', 'Missing' : ['1']*8},
'CYCL_CNT' : {'#' : 8, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Cycle count of vector ', 'Missing' : 0}, # OPT_FLAG bit0 = 1
'REL_VADR' : {'#' : 9, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Relative vector address ', 'Missing' : 0}, # OPT_FLAG bit1 = 1
'REPT_CNT' : {'#' : 10, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Repeat count of vector ', 'Missing' : 0}, # OPT_FLAG bit2 = 1
'NUM_FAIL' : {'#' : 11, 'Type' : 'U*4', 'Ref' : None, 'Value' : None, 'Text' : 'Number of pins with 1 or more failures', 'Missing' : 0}, # OPT_FLAG bit3 = 1
'XFAIL_AD' : {'#' : 12, 'Type' : 'I*4', 'Ref' : None, 'Value' : None, 'Text' : 'X logical device failure address ', 'Missing' : 0}, # OPT_FLAG bit4 = 1
'YFAIL_AD' : {'#' : 13, 'Type' : 'I*4', 'Ref' : None, 'Value' : None, 'Text' : 'Y logical device failure address ', 'Missing' : 0}, # OPT_FLAG bit4 = 1
'VECT_OFF' : {'#' : 14, 'Type' : 'I*2', 'Ref' : None, 'Value' : None, 'Text' : 'Offset from vector of interest ', 'Missing' : 0}, # OPT_FLAG bit5 = 1
'RTN_ICNT' : {'#' : 15, 'Type' : 'U*2', 'Ref' : None, 'Value' : None, 'Text' : 'Count (j) of return data PMR indexes ', 'Missing' : 0},
'PGM_ICNT' : {'#' : 16, 'Type' : 'U*2', 'Ref' : None, 'Value' : None, 'Text' : 'Count (k) of programmed state indexes ', 'Missing' : 0},
'RTN_INDX' : {'#' : 17, 'Type' : 'xU*2', 'Ref' : 'RTN_ICNT', 'Value' : None, 'Text' : 'Array of j return data PMR indexes ', 'Missing' : []}, # RTN_ICNT = 0
'RTN_STAT' : {'#' : 18, 'Type' : 'xN*1', 'Ref' : 'RTN_ICNT', 'Value' : None, 'Text' : 'Array of j returned states ', 'Missing' : []}, # RTN_ICNT = 0
'PGM_INDX' : {'#' : 19, 'Type' : 'xU*2', 'Ref' : 'PGM_ICNT', 'Value' : None, 'Text' : 'Array of k programmed state indexes ', 'Missing' : []}, # PGM_ICNT = 0
'PGM_STAT' : {'#' : 20, 'Type' : 'xN*1', 'Ref' : 'PGM_ICNT', 'Value' : None, 'Text' : 'Array of k programmed states ', 'Missing' : []}, # PGM_ICNT = 0
'FAIL_PIN' : {'#' : 21, 'Type' : 'D*n', 'Ref' : None, 'Value' : None, 'Text' : 'Failing pin bitfield ', 'Missing' : []},
'VECT_NAM' : {'#' : 22, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Vector module pattern name ', 'Missing' : ''},
'TIME_SET' : {'#' : 23, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Time set name ', 'Missing' : ''},
'OP_CODE' : {'#' : 24, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Vector Op Code ', 'Missing' : ''},
'TEST_TXT' : {'#' : 25, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Descriptive text or label ', 'Missing' : ''},
'ALARM_ID' : {'#' : 26, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Name of alarm ', 'Missing' : ''},
'PROG_TXT' : {'#' : 27, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Additional programmed information ', 'Missing' : ''},
'RSLT_TXT' : {'#' : 28, 'Type' : 'C*n', 'Ref' : None, 'Value' : None, 'Text' : 'Additional result information ', 'Missing' : ''},
'PATG_NUM' : {'#' : 29, 'Type' : 'U*1', 'Ref' : None, 'Value' : None, 'Text' : 'Pattern generator number ', 'Missing' : 0xFF},
'SPIN_MAP' : {'#' : 30, 'Type' : 'D*n', 'Ref' : None, 'Value' : None, 'Text' : 'Bit map of enabled comparators ', 'Missing' : []}
}
else:
raise STDFError("%s object creation error: unsupported version '%s'" % (self.id, version))
self._default_init(endian, record)
def to_atdf(self):
'''
        Return the ATDF (ASCII STDF) representation of this record.
'''
sequence = {}
header = ''
body = ''
header = self.id + ':'
        # The order of fields is different in STDF and ATDF for the FTR record
        # (STDF spec page 57 | ATDF spec page 51).
        # OPT_FLAG (7), RTN_ICNT (15) and PGM_ICNT (16) are not written to ATDF.
        #
        # STDF order :  3 TEST_NUM,  4 HEAD_NUM,  5 SITE_NUM,  6 TEST_FLG,  7 OPT_FLAG,
        #               8 CYCL_CNT,  9 REL_VADR, 10 REPT_CNT, 11 NUM_FAIL, 12 XFAIL_AD,
        #              13 YFAIL_AD, 14 VECT_OFF, 15 RTN_ICNT, 16 PGM_ICNT, 17 RTN_INDX,
        #              18 RTN_STAT, 19 PGM_INDX, 20 PGM_STAT, 21 FAIL_PIN, 22 VECT_NAM,
        #              23 TIME_SET, 24 OP_CODE,  25 TEST_TXT, 26 ALARM_ID, 27 PROG_TXT,
        #              28 RSLT_TXT, 29 PATG_NUM, 30 SPIN_MAP
        #
        # ATDF order :  3 TEST_NUM,  4 HEAD_NUM,  5 SITE_NUM,
        #               6 TEST_FLG bits 6 & 7 (pass/fail),
        #               6 TEST_FLG bits 0, 2, 3, 4 & 5 (alarm/unreliable/timeout/not executed/abort),
        #              22 VECT_NAM, 23 TIME_SET,  8 CYCL_CNT,  9 REL_VADR, 10 REPT_CNT,
        #              11 NUM_FAIL, 12 XFAIL_AD, 13 YFAIL_AD, 14 VECT_OFF, 17 RTN_INDX,
        #              18 RTN_STAT, 19 PGM_INDX, 20 PGM_STAT, 21 FAIL_PIN, 24 OP_CODE,
        #              25 TEST_TXT, 26 ALARM_ID, 27 PROG_TXT, 28 RSLT_TXT, 29 PATG_NUM,
        #              30 SPIN_MAP
# 3 TEST_NUM
body += self.gen_atdf(3)
# 4 HEAD_NUM
body += self.gen_atdf(4)
# 5 SITE_NUM
body += self.gen_atdf(5)
# 6 TEST_FLG bits 6 & 7
# bit 6: Pass/fail flag (bit 7) is valid
v = self.get_fields(6)[3]
if v != None and v[6] == '0':
# bit 7:
# 0 = Part passed
if self.get_fields(6)[3][7] == '0':
body += 'P|'
# 1 = Part failed
elif self.get_fields(6)[3][7] == '1':
body += 'F|'
# 6 TEST_FLG bits 0, 2, 3, 4, & 5
# bit 0:
# 0 = No alarm
if self.get_fields(6)[3][0] == '0':
body += ''
# 1 = Alarm detected during testing
elif self.get_fields(6)[3][0] == '1':
body += 'A'
# bit 2:
        # 0 = Test result is reliable
if self.get_fields(6)[3][2] == '0':
body += ''
# 1 = Test result is unreliable
elif self.get_fields(6)[3][2] == '1':
body += 'U'
# bit 3:
# 0 = No timeout
if self.get_fields(6)[3][3] == '0':
body += ''
# 1 = Timeout occurred
elif self.get_fields(6)[3][3] == '1':
body += 'T'
# bit 4:
# 0 = Test was executed
if self.get_fields(6)[3][4] == '0':
body += ''
        # 1 = Test not executed
elif self.get_fields(6)[3][4] == '1':
body += 'N'
# bit 5:
# 0 = No abort
if self.get_fields(6)[3][5] == '0':
body += '|'
        # 1 = Test aborted
elif self.get_fields(6)[3][5] == '1':
body += 'X|'
else:
body += '|'
# 22 VECT_NAM
body += self.gen_atdf(22)
# 23 TIME_SET
body += self.gen_atdf(23)
# 8 CYCL_CNT
body += self.gen_atdf(8)
# 9 REL_VADR
body += self.gen_atdf(9)
# 10 REPT_CNT
body += self.gen_atdf(10)
# 11 NUM_FAIL
body += self.gen_atdf(11)
# 12 XFAIL_AD
body += self.gen_atdf(12)
# 13 YFAIL_AD
body += self.gen_atdf(13)
# 14 VECT_OFF
body += self.gen_atdf(14)
# 17 RTN_INDX
body += self.gen_atdf(17)
# 18 RTN_STAT
body += self.gen_atdf(18)
# 19 PGM_INDX
body += self.gen_atdf(19)
# 20 PGM_STAT
body += self.gen_atdf(20)
        # 21 FAIL_PIN
body += self.gen_atdf(21)
# 24 OP_CODE
body += self.gen_atdf(24)
# 25 TEST_TXT
body += self.gen_atdf(25)
# 26 ALARM_ID
body += self.gen_atdf(26)
# 27 PROG_TXT
body += self.gen_atdf(27)
# 28 RSLT_TXT
body += self.gen_atdf(28)
# 29 PATG_NUM
body += self.gen_atdf(29)
# 30 SPIN_MAP
body += self.gen_atdf(30)
body = body[:-1]
# assemble the record
retval = header + body
        if self.local_debug: print("%s.to_atdf()\n   '%s'\n" % (self.id, retval))
return retval
|
from random import randint
def random(array_size):
array = []
for _ in range (0, array_size):
array.append(randint(1, 101))
return array
def population():
lines = open("data/population.csv").readlines()
x, y = [], []
for line in lines:
if line == lines[0]:
continue
data = line.split(";")
x.append(int(data[0]))
y.append(int(data[1]))
return {
"x": x, "y": y, "color": "#20bf6b"
}
# Chart Data
datasets = (
{
"x": random(15),
"y": random(15),
"name": "Set-A",
"color": "#20bf6b"
},
{
"x": random(15),
"y": random(15),
"name": "Set-A",
"color": "#3867d6"
}
)
|
"""
Simple Highlighted Bar Chart
============================
This example shows a basic bar chart with a single bar singled out for a highlight.
"""
# category: bar charts
import altair as alt
from vega_datasets import data
population = data.population()
alt.Chart(population).mark_bar().encode(
x="year:O",
y="sum(people):Q",
# The highlight will be set on the result of a conditional statement
color=alt.condition(
alt.datum.year == 1970, # If the year is 1970 this test returns True,
alt.value('orange'), # which sets the bar orange.
alt.value('steelblue') # And if it's not true it sets the bar steelblue.
)
)
|
"""
This module illustrates how to generate a three-dimensional plot and a
contour plot.
For the example, the f(x,y) = x**2 * y**3 will be reproduced.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
X = np.linspace(-2, 2)
Y = np.linspace(-1, 1)
x, y = np.meshgrid(X, Y)
z = x**2 * y**3
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.plot_surface(x, y, z)
ax.set_xlabel("$x$", usetex=True)
ax.set_ylabel("$y$", usetex=True)
ax.set_zlabel("$z$", usetex=True)
ax.set_title("Graph of the function $f(x,y) = x^2y^3$", usetex=True)
plt.show()
fig, ax = plt.subplots()
ax.contour(x, y, z)
ax.set_title("Contour of $f(x) = x^2y^3$", usetex=True)
ax.set_xlabel("$x$", usetex=True)
ax.set_ylabel("$y$", usetex=True)
plt.show()
|
import numpy as np
import matplotlib.pyplot as plt
from . import stats_util
from .logging_util import get_slug, debug, error
import scipy.stats
import xarray as xr
class Distribution:
"""
An object for storing Cumulative Distribution Function information.
Used primarily for calculating the Continuous Ranked Probability Score.
The object is instantiated by passing in a 'sample' vector of data for
which to determine a CDF. This sample is used to construct either a
empirical or theoretical CDF, depending on the cdf_type argument.
"""
def __init__(self, sample):
"""Initialisation of CDF object.
Args:
sample (array): Data sample over which to estimate CDF
Returns:
New CDF object.
"""
debug(f"Creating a new {get_slug(self)}")
self.sample = sample
self.sample_size = len(sample)
self.mu = np.nanmean(sample)
self.sigma = np.nanstd(sample)
self.plot_xmin, self.plot_xmax = self.set_x_bounds()
debug(f"{get_slug(self)} initialised")
def set_x_bounds(self):
"""Calculate x bounds for CDF plotting"""
# Is input a single value (st. dev == 0)
single_value = self.sigma == 0
if single_value:
self.cdf_type = "empirical"
# Calculate x bounds as 5 std dev. either side of the mean
debug(f"Calculating x bounds for {get_slug(self)}")
if single_value:
x_min = self.mu - 1
x_max = self.mu + 1
else:
x_min = self.mu - 5 * self.sigma
x_max = self.mu + 5 * self.sigma
return x_min, x_max
def build_discrete_cdf(self, x: np.ndarray = None, n_pts: int = 1000):
"""Builds a discrete CDF for plotting and direct comparison.
Args:
x (array): x-values over which to calculate discrete CDF values.
If none, these will be determined using mu and sigma.
n_pts (int): n_pts to use for x if x=None.
Returns:
x and y arrays for discrete CDF.
"""
debug(f"Discrete CDF will be built for {get_slug(self)}")
# Build discrete X bounds
if x is None:
debug(f"Building discrete X bounds for {get_slug(self)}")
x = np.linspace(self.plot_xmin, self.plot_xmax, n_pts)
if self.cdf_type == "empirical":
y = stats_util.empirical_distribution(x, self.sample) # TODO Should this come from distribution.py?
else:
error(f"CDF type for {get_slug(self)} is , which is not acceptable, raising exception!")
return x, y # TODO What if we haven't defined y? (i.e. if cdf_type is not "empirical")
@staticmethod
def normal_distribution(mu: float = 0, sigma: float = 1, x: np.ndarray = None, n_pts: int = 1000):
"""Generates a discrete normal distribution.
Keyword arguments:
x -- Arbitrary array of x-values
mu -- Distribution mean
sigma -- Distribution standard deviation
return: Array of len(x) containing the normal values calculated from
the elements of x.
"""
if x is None:
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, n_pts)
debug(f"Generating normal distribution for {get_slug(x)}")
term1 = sigma * np.sqrt(2 * np.pi)
term1 = 1 / term1
exponent = -0.5 * ((x - mu) / sigma) ** 2
return term1 * np.exp(exponent)
@staticmethod
def cumulative_distribution(mu: float = 0, sigma: float = 1, x: np.ndarray = None, cdf_func: str = "gaussian"):
"""Integrates under a discrete PDF to obtain an estimated CDF.
Keyword arguments:
x -- Arbitrary array of x-values
        mu -- Distribution mean
        sigma -- Distribution standard deviation
        cdf_func -- Type of distribution to integrate under ("gaussian" only, for now)
return: Array of len(x) containing the discrete cumulative values
estimated using the integral under the provided PDF.
"""
debug(f"Estimating CDF using {get_slug(x)}")
if cdf_func == "gaussian": # If Gaussian, integrate under pdf
pdf = Distribution.normal_distribution(mu=mu, sigma=sigma, x=x)
cdf = [np.trapz(pdf[:ii], x[:ii]) for ii in range(0, len(x))]
else:
raise NotImplementedError
return np.array(cdf)
@staticmethod
def empirical_distribution(x, sample):
"""Estimates a CDF empirically.
Keyword arguments:
x -- Array of x-values over which to generate distribution
sample -- Sample to use to generate distribution
return: Array of len(x) containing corresponding EDF values
"""
debug(f"Estimating empirical distribution with {get_slug(x)}")
sample = np.array(sample)
sample = sample[~np.isnan(sample)]
sample = np.sort(sample)
edf = np.zeros(len(x))
n_sample = len(sample)
for ss in sample:
edf[x > ss] = edf[x > ss] + 1 / n_sample
return xr.DataArray(edf)
def get_common_x(self, other, n_pts=2000):
"""Generates a common x vector for two CDF objects."""
debug(f"Generating common X vector for {get_slug(self)} and {get_slug(other)}")
xmin = min(self.plot_xmin, other.plot_xmin)
xmax = max(self.plot_xmax, other.plot_xmax)
common_x = np.linspace(xmin, xmax, n_pts)
return common_x
def plot_cdf(self):
"""A quick plot showing the CDF contained in this object."""
debug(f"Generating quick plot for {get_slug(self)}")
ax = plt.subplot(111)
x, y = self.build_discrete_cdf()
ax.plot(x, y)
ax.grid()
return
def integrate_cdf(self, other=None, plot=False):
"""Returns the integral under CDF or between two CDFs. This is
equivalent to the first order Wasserstein metric for two
probability distributions"""
debug(f"Generating diff plot for {get_slug(self)} and {get_slug(other)}")
if other is None:
integral = scipy.stats.wasserstein_distance(self.sample, [0])
if plot:
x = self.get_common_x(other)
x, y1 = self.build_discrete_cdf(x)
dum, y2 = other.build_discrete_cdf(x)
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(x, y1, c="k", linestyle="--")
ax.plot(x, y2, linestyle="--")
ax.fill_between(x, y1, y2, alpha=0.5)
ax.set_title("Area: " + str(integral))
plt.legend(("1", "2"))
else:
integral = scipy.stats.wasserstein_distance(self.sample, other.sample)
if plot:
x = self.get_common_x(other)
x, y1 = self.build_discrete_cdf(x)
dum, y2 = other.build_discrete_cdf(x)
fig = plt.figure()
ax = plt.subplot(111)
ax.plot(x, y1, c="k", linestyle="--")
ax.plot(x, y2, linestyle="--")
ax.fill_between(x, y1, y2, alpha=0.5)
ax.set_title("Area: " + str(integral))
plt.legend(("1", "2"))
if plot:
return integral, fig, ax # TODO fig and ax might not be defined
else:
return integral
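

# A minimal usage sketch (illustrative values only): build empirical CDFs from two
# random samples and compute the first-order Wasserstein distance between them.
if __name__ == "__main__":
    sample_a = np.random.normal(0.0, 1.0, 500)
    sample_b = np.random.normal(0.5, 1.2, 500)
    dist_a = Distribution(sample_a)
    dist_b = Distribution(sample_b)
    print("Wasserstein distance:", dist_a.integrate_cdf(dist_b))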
|
"""Faça um Programa que peça o raio de um círculo, calcule e mostre sua área.
"""
print('Cálculo da Área de um Círculo\n')
raio = float(input('Informe o valor do raio: '))
# area = pi * raio
area = 3.14 * raio
print(f'A área do círculo é: {area:.2f}.')
|
from os import system
from pymongo import MongoClient
from pprint import pprint
import math
import csv
import re
import random as rand
from operator import itemgetter
def clear(): system("cls")
def pause(): system("pause")
def gen_menu(options, comment=None, cls=True):
c = 0
if cls:
clear()
else:
print("")
if len(options) == 0:
print("There were no options passed to the gen_menu function")
return None
elif len(options) == 1:
print("Forced option [{}] because it was the only one.".format(options[0]))
return 0
if comment:
print(comment)
for o_num in range(len(options)):
print("{}> {}".format(str(o_num + 1), options[o_num]))
option = None
while option is None:
try:
option = int(input(">> "))
if option < 1 or option > (len(options)):
c += 1
option = None
if c == 3:
option = 0
break
print("Choose one of the options above. [{}/3]".format(c))
except Exception as e:
print("Please input a number.")
return option - 1
def roll(s, mod=0):
if re.match(re.compile("^[0-9]+d[0-9]+$"), s.lower()):
p = s.lower().split("d")
rolls = [rand.randint(1, int(p[1])) for i in range(int(p[0]))]
elif re.match(re.compile("^[0-9]+$"), s):
rolls = [int(s)]
else:
print("You fucked up")
rolls = [0]
pause()
return rolls, sum(rolls) + mod
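

# Example usage (interactive; the values shown are illustrative only):
#
#   choice = gen_menu(["New game", "Load game", "Quit"], comment="Main menu")
#   rolls, total = roll("2d6", mod=3)   # e.g. rolls == [4, 1], total == 8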
|
import argparse
import os
import torch
import torch.optim as optim
import torch.nn as nn
from AdversarialLoss import MobileImprovingAdversarialLoss,MultiSigmaAdversarialLoss, PhotoRealisticAdversarialLoss, WassersteinAdversarialLoss
from DeepImageDenoiser import DeepImageDenoiser , LEARNING_RATE
from DenoiserDataset import DenoiserDataset
from RSGUNetGenerator import RSGUNetGenerator
from NeuralModels import SILU, UpsampleDeConv, TransposedDeConv, PixelDeConv
from PerceptualLoss import AdaptivePerceptualLoss, FastNeuralStylePerceptualLoss, SpectralAdaptivePerceptualLoss, WassersteinAdaptivePerceptualLoss , SimplePerceptualLoss, SqueezeAdaptivePerceptualLoss
from ResidualGenerator import ResidualGenerator
from UNetGenerator import UNetGenerator
parser = argparse.ArgumentParser()
parser.add_argument('--image_dir', type = str, default='./BSDS500/', help='path to dataset')
parser.add_argument('--generator', type = str, default='RSGUNet', help='type of image generator')
parser.add_argument('--criterion', type = str, default='MobileImproving', help='type of criterion')
parser.add_argument('--deconv', type = str, default='Upsample', help='type of deconv')
parser.add_argument('--activation', type = str, default='Leaky', help='type of activation')
parser.add_argument('--optimizer', type = str, default='Adam', help='type of optimizer')
parser.add_argument('--batch_size', type = int, default=32)
parser.add_argument('--epochs', type = int, default=128)
parser.add_argument('--resume_train', type = bool, default=True)
args = parser.parse_args()
print(torch.__version__)
criterion_types = {
'MobileImproving' : MobileImprovingAdversarialLoss(),
'MultiSigma' : MultiSigmaAdversarialLoss(),
'PhotoRealistic' : PhotoRealisticAdversarialLoss(),
'Wasserstein' : WassersteinAdversarialLoss(),
'FastNeuralStyle' : FastNeuralStylePerceptualLoss(),
'PAN' : AdaptivePerceptualLoss(),
'SimplePerceptual' : SimplePerceptualLoss(),
'SpectralPAN' : SpectralAdaptivePerceptualLoss(),
'SqueezeAdaptive' : SqueezeAdaptivePerceptualLoss(),
'WassersteinAdaptive' : WassersteinAdaptivePerceptualLoss(),
'MSE' : nn.MSELoss(),
}
generator_types = {
'UNet' : UNetGenerator,
'RSGUNet' : RSGUNetGenerator,
'Residual' : ResidualGenerator,
}
deconv_types = {
'Transposed' : TransposedDeConv,
'Upsample' : UpsampleDeConv,
'Pixel' : PixelDeConv
}
activation_types = {
'ReLU' : nn.ReLU(),
'Leaky': nn.LeakyReLU(),
'PReLU': nn.PReLU(),
'ELU' : nn.ELU(),
'SELU' : nn.SELU(),
'SILU' : SILU()
}
optimizer_types = {
'Adam' : optim.Adam,
'RMSprop': optim.RMSprop,
'SGD' : optim.SGD
}
model = generator_types[args.generator]
deconvLayer = (deconv_types[args.deconv] if args.deconv in deconv_types else deconv_types['Upsample'])
function = (activation_types[args.activation] if args.activation in activation_types else activation_types['Leaky'])
generator = model(deconv=deconvLayer, activation=function)
optimizer =(optimizer_types[args.optimizer] if args.optimizer in optimizer_types else optimizer_types['Adam'])(generator.parameters(), lr = LEARNING_RATE)
criterion = criterion_types[args.criterion]
augmentations = {'train' : True, 'val' : False}
shuffles = {'train' : True, 'val' : False}
batch_sizes = {'train' : args.batch_size, 'val' : args.batch_size if args.batch_size < 8 else 8}
image_datasets = {x: DenoiserDataset(os.path.join(args.image_dir, x), augmentation = augmentations[x])
for x in ['train', 'val']}
imageloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_sizes[x],
                                               shuffle=shuffles[x], num_workers=4)
for x in ['train', 'val']}
test_dataset = DenoiserDataset(args.image_dir+'/test/', augmentation = False)
testloader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=4)
framework = DeepImageDenoiser(generator = generator, criterion = criterion, optimizer = optimizer)
framework.approximate(dataloaders = imageloaders, num_epochs=args.epochs, resume_train=args.resume_train)
framework.estimate(testloader)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Perkasa JoB and Contributors
# License: QL. See license.txt
from __future__ import unicode_literals
import frappe, erpnext, json, math
import frappe.defaults
from frappe.utils import nowdate, cstr, flt, cint, now, getdate
from frappe import throw, _
from frappe.utils import formatdate, get_number_format_info
from six import iteritems, string_types
from datetime import datetime
@frappe.whitelist()
def get_latest_stock_qty(item_code, warehouse=None, project="", work_order=""):
values, condition = [item_code], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where wh.name = tabBin.warehouse\
and wh.lft >= %s and wh.rgt <= %s)"
else:
values.append(warehouse)
condition += " AND warehouse = %s"
actual_qty = frappe.db.sql("""select sum(actual_qty) from tabBin
where item_code=%s {0}""".format(condition), values)[0][0]
return actual_qty
@frappe.whitelist()
def get_unique_item_code(prefix=""):
interval = 10
maxnr = 10000
for i in range(interval,maxnr,interval):
print("{}, {}".format(i+1-interval, i))
res = frappe.db.sql('''SELECT * FROM (SELECT LPAD(seq, 4, "0") AS seq FROM seq_{}_to_{}) s WHERE s.seq NOT IN (select DISTINCT REGEXP_SUBSTR(name,"[0-9]+") as item_code_nr from `tabItem` WHERE item_code LIKE "{}%") LIMIT 1'''.format(i+1-interval, i, prefix))
if res:
return prefix + str(res[0][0])
return []
def query_permission(doc):
user = frappe.session.user
if user == "Administrator":
return True
match_role = list(set(['FIN','Finance Manager', 'CSD', 'Accounts Manager']) & frappe.get_roles(user))
if match_role:
return True
for approver in ['verifier', 'approver_1', 'approver_2', 'approver_3' ]:
if getattr(doc, approver, None) == user:
return True
return False
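

# A minimal usage sketch (must run inside a configured Frappe site; the item code,
# warehouse name and prefix below are illustrative only):
#
#   qty = get_latest_stock_qty("ITEM-0001", warehouse="Stores - PJ")
#   new_code = get_unique_item_code(prefix="ITM")   # e.g. "ITM0042"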
|
from pydci import *
import enchant
from StringIO import StringIO
class SpellCheck(Context):
class CurrentSelection(Role):
pass
class SpellChecker(Role):
        def do_spellcheck(self):
for word in self.context.CurrentSelection.words():
if not self.check(word):
self.context.Ouput.write('The text has spelling errors \r\n')
suggestions = self.suggest(word)
if suggestions:
self.context.Ouput.write('Here are suggestions for `{}`: \r\n'.format(word.strip()))
self.context.Ouput.write('-'.join(suggestions))
class Ouput(Role):
pass
def __init__(self, text, dictionary, output):
self.CurrentSelection = text
self.SpellChecker = dictionary
self.Ouput = output
def check(self):
        self.SpellChecker.do_spellcheck()
return self.Ouput
class TextBuffer(StringIO):
def words(self):
for line in self.readlines():
if len(line) > 1:
for word in line.strip().rstrip('.,-').split(" "):
yield word
self.seek(0)
class LanguageDictionary(enchant.Dict):
pass
text = """
Eye have a spelling chequer,
It came with my Pea Sea.
It plane lee marks four my revue
Miss Steaks I can knot sea.
"""
buff = TextBuffer(text)
dict = LanguageDictionary('en_US')
out = TextBuffer()
spell_check = SpellCheck(buff, dict, out)
spell_check.check()
out.seek(0)
print(out.read())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Adam Townsend, adam@adamtownsend.com, 17/10/2014
import numpy as np
from functions_shared import add_sphere_rotations_to_positions, same_setup_as
from os import sys
import subprocess
import sys as sys2
def pos_setup(n):
desc = ""
if n == 1:
# Test case 1
# Durlofsky, Brady & Bossis, 1987. Dynamic simulation of hydrodynamically interacting particles. Figure 1.
# This test case looks at horizontal chains of 5, 9 and 15 spheres sedimenting vertically.
# The instantaneous drag coefficient, F/(6*pi*mu*a*U), is measured for each sphere in the chain, in each case,
# i.e. it runs for 1 timestep. Here we set up the chain of length 15.
num_spheres = 15
sphere_sizes = np.array([1 for i in range(num_spheres)])
sphere_positions = np.array([[4*i,0,0] for i in range(num_spheres)])
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([])
dumbbell_positions = np.empty([0,3])
dumbbell_deltax = np.empty([0,3])
elif n == 2:
# Test case 2
# Durlofsky, Brady & Bossis, 1987. Dynamic simulation of hydrodynamically interacting particles. Figure 5.
# This test case considers three particles sedimenting vertically, and looks at their interesting paths over
# a large number of timesteps.
sphere_sizes = np.array([1,1,1])
sphere_positions = np.array([[-5,0,0],[0,0,0],[7,0,0]])
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([])
dumbbell_positions = np.empty([0,3])
dumbbell_deltax = np.empty([0,3])
elif n == 3:
# Test case 3
# Brady, Phillips, Jester, Bossis 1988. Dynamic simulation of hydrodynamically interacting suspensions. Figure 1.
# Figure corrected by:
# Sierou & Brady 2001. Accelerated Stokesian Dynamics simulations. Figure 9.
# This test case is periodic and measures the velocity of a sedimenting, simple cubic array for different particle
# concentrations.
num_spheres = 8
cube_side_length = 8
sphere_sizes = np.array([1 for i in range(num_spheres)])
sphere_positions,box_bottom_left,box_top_right = simple_cubic_8(cube_side_length)
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([])
dumbbell_positions = np.empty([0,3])
dumbbell_deltax = np.empty([0,3])
elif n == 4:
# Test case 4
# Two spheres, two dumbbells
sphere_sizes = np.array([1,1])
sphere_positions = np.array([[0,0,0],[4.5,0,4.5]])
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([0.1,0.1])
dumbbell_positions = np.array([[4.5,0,0],[0,0,4.5]])
dumbbell_deltax = np.array([[np.sqrt(2),0,np.sqrt(2)],[np.sqrt(2),0,np.sqrt(2)]])
elif n == 5:
# Test case 5
# Randomly arranged spheres
num_spheres = 40
sphere_sizes = np.array([1 for i in range(num_spheres)])
        L = 17 # This is how wide you want the box to be for all the particles to fit inside (not just putting the centres inside this box)
# This will put the centres in a given box size
sphere_positions = randomise_spheres([-L/2.+1,0,-L/2.+1],[L/2.-1,0,L/2.-1],sphere_sizes,np.array([]), np.empty([0,3]))
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([])
dumbbell_positions = np.empty([0,3])
dumbbell_deltax = np.empty([0,3])
elif n == 6:
        # Test case 6
# Two walls of spheres with dumbbells randomly distributed between them.
num_lid_particles_each_lid = 45
num_random_dumbbells = 100*2
sphere_sizes = np.array([1 for n in range(num_lid_particles_each_lid*2)])
sep = 2.00001
sphere_positions = np.array([[sep*i-(num_lid_particles_each_lid//2)*sep,0,0] for i in range(num_lid_particles_each_lid)] + [[sep*i-(num_lid_particles_each_lid//2)*sep,0,11] for i in range(num_lid_particles_each_lid)])
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([0.1 for n in range (num_random_dumbbells)])
random_box_bottom_left = [-17,0,1+2*dumbbell_sizes[0]]
random_box_top_right = [17,0,10-2*dumbbell_sizes[0]]
(dumbbell_positions, dumbbell_deltax) = randomise_dumbbells(random_box_bottom_left,random_box_top_right,dumbbell_sizes,dx=2,phi=0)
elif n == 7:
# To replicate setup of an existing output file
(sphere_sizes, sphere_positions, sphere_rotations, dumbbell_sizes, dumbbell_positions, dumbbell_deltax) = same_setup_as('FILENAME', frameno=0)
elif n == 8:
# Two sphere
sphere_sizes = np.array([1, 1])
sphere_positions = np.array([[-1.1,0,0],[1.1,0,0]])
sphere_rotations = add_sphere_rotations_to_positions(sphere_positions,sphere_sizes,np.array([[1,0,0],[0,0,1]]))
dumbbell_sizes = np.array([])
dumbbell_positions = np.empty([0,3])
dumbbell_deltax = np.empty([0,3])
try:
sphere_sizes
except NameError:
print "ERROR: You have not inputted a valid position setup number."
posdata = (sphere_sizes, sphere_positions, sphere_rotations, dumbbell_sizes, dumbbell_positions, dumbbell_deltax)
return posdata, desc
def randomise_spheres(random_box_bottom_left,random_box_top_right,random_sphere_sizes,current_sphere_sizes, current_sphere_positions):
sphere_positions_out = current_sphere_positions
num_current_spheres = current_sphere_sizes.shape[0]
all_sphere_sizes = np.concatenate([current_sphere_sizes, random_sphere_sizes])
print "Randomly distributing " + str(len(random_sphere_sizes)) + " spheres (using a naive method in Python)... ",
for i in range(len(random_sphere_sizes)):
while 1 == 1:
proposed_sphere_position = np.array([np.random.rand()*(random_box_top_right[0]-random_box_bottom_left[0])+random_box_bottom_left[0], np.random.rand()*(random_box_top_right[1]-random_box_bottom_left[1])+random_box_bottom_left[1], np.random.rand()*(random_box_top_right[2]-random_box_bottom_left[2])+random_box_bottom_left[2]])
too_close = 0
for j in range(num_current_spheres+i):
if np.linalg.norm(proposed_sphere_position - sphere_positions_out[j]) < (random_sphere_sizes[i]+all_sphere_sizes[j]):
too_close = too_close + 1
if too_close == 0:
symbol = '|'
if (i+1)%10 == 0:
symbol = 'X'
if (i+1)%100 == 0:
symbol = 'C'
sys.stdout.write(symbol)
sys.stdout.flush()
break
sphere_positions_out = np.append(sphere_positions_out,[proposed_sphere_position],axis=0)
print " succeeded."
random_sphere_positions = sphere_positions_out[current_sphere_positions.shape[0]:all_sphere_sizes.shape[0]]
distance_matrix = np.linalg.norm(random_sphere_positions-random_sphere_positions[:,None],axis=2)
min_element_distance = np.min(distance_matrix[np.nonzero(distance_matrix)])
two_closest_elements = np.where(distance_matrix == min_element_distance)
print "Min added sphere s': " + str(min_element_distance/random_sphere_sizes[0])
print "Closest two spheres: " + str(two_closest_elements[0])
box_dimensions = abs(np.asarray(random_box_top_right) - np.asarray(random_box_bottom_left))
if box_dimensions[1] == 0: #2D
box_volume = box_dimensions[0]*box_dimensions[2]
sphere_volumes = np.pi*np.dot(random_sphere_sizes,random_sphere_sizes)
else: #3D
box_volume = box_dimensions[0]*box_dimensions[1]*box_dimensions[2]
sphere_volumes = 4./3. * np.pi * np.sum(np.asarray(random_sphere_sizes)**3)
volume_fraction = sphere_volumes/box_volume
print "Volume fraction: " + str("{:.1f}".format(volume_fraction * 100)) + "%"
return random_sphere_positions
def randomise_dumbbells(random_box_bottom_left,random_box_top_right,dumbbell_sizes,dx=1,theta='r',phi='r',current_sphere_sizes=np.array([]),current_sphere_positions=np.empty([0,3])):
dumbbell_positions = np.zeros([len(dumbbell_sizes),3])
dumbbell_deltax = np.zeros([len(dumbbell_sizes),3])
bead_positions = np.zeros([2*len(dumbbell_sizes),3])
num_fails=0
if theta =='r': random_theta = True
else: random_theta = False
if phi == 'r': random_phi = True
else: random_phi = False
print "Randomly distributing " + str(len(dumbbell_sizes)) + " dumbbells... ",
for i in range(len(dumbbell_sizes)):
while 1 == 1:
proposed_bead1_position = np.array([np.random.rand()*(random_box_top_right[0]-random_box_bottom_left[0])+random_box_bottom_left[0], np.random.rand()*(random_box_top_right[1]-random_box_bottom_left[1])+random_box_bottom_left[1], np.random.rand()*(random_box_top_right[2]-random_box_bottom_left[2])+random_box_bottom_left[2]])
too_close = 0
for j in range(len(current_sphere_sizes)):
if np.linalg.norm(proposed_bead1_position - current_sphere_positions[j]) < (dumbbell_sizes[i]+current_sphere_sizes[j]):
too_close = too_close + 1
if too_close == 0:
for j in range(2*i):
if np.linalg.norm(proposed_bead1_position - bead_positions[j]) < (dumbbell_sizes[i]+dumbbell_sizes[j//2]):
too_close = too_close + 1
if too_close == 0:
bingo = 0
for tries in xrange(100):
if random_theta: theta = np.random.rand()*np.pi*2
if random_phi: phi = np.random.rand()*np.pi
proposed_bead2_position = proposed_bead1_position + np.array([dx*np.sin(theta)*np.cos(phi),dx*np.sin(theta)*np.sin(phi),dx*np.cos(theta)])
if (proposed_bead2_position >= random_box_bottom_left).all() and (proposed_bead2_position <= random_box_top_right).all():
for j in range(len(current_sphere_sizes)):
if np.linalg.norm(proposed_bead2_position - current_sphere_positions[j]) < (dumbbell_sizes[i]+current_sphere_sizes[j]):
too_close = too_close + 1
if too_close == 0:
for j in xrange(2*i):
if np.linalg.norm(proposed_bead2_position - bead_positions[j]) < (dumbbell_sizes[i]+dumbbell_sizes[j//2]):
too_close = too_close + 1
if too_close == 0:
bingo = 1
break
if bingo == 1:
#print "G"
break
sys.stdout.write('|')
sys.stdout.flush()
bead_positions[2*i] = proposed_bead1_position
bead_positions[2*i+1] = proposed_bead2_position
dumbbell_positions[i] = 0.5*(proposed_bead1_position+proposed_bead2_position)
dumbbell_deltax[i] = (proposed_bead2_position-proposed_bead1_position)
print " succeeded."
bead_positions = np.concatenate((dumbbell_positions + 0.5*dumbbell_deltax, dumbbell_positions - 0.5*dumbbell_deltax),axis = 0)
distance_matrix = np.linalg.norm(bead_positions-bead_positions[:,None],axis=2)
min_element_distance = np.min(distance_matrix[np.nonzero(distance_matrix)])
two_closest_elements = np.where(distance_matrix == min_element_distance)
print "Min dumbbell s': " + str(min_element_distance/dumbbell_sizes[0])
print "Closest two elements: " + str(two_closest_elements[0])
box_dimensions = abs(np.asarray(random_box_top_right) - np.asarray(random_box_bottom_left))
if box_dimensions[1] == 0: #2D
box_area = box_dimensions[0]*box_dimensions[2]
box_volume = box_dimensions[0]*box_dimensions[2]*dumbbell_sizes[0]*2
sphere_areas = np.pi*np.dot(dumbbell_sizes,dumbbell_sizes)*2
sphere_volumes = 4./3. * np.pi * np.sum(np.asarray(dumbbell_sizes)**3)*2
area_fraction = sphere_areas/box_area
volume_fraction = sphere_volumes/box_volume
print "Area fraction: " + str("{:.1f}".format(area_fraction * 100)) + "%"
print "Effective volume fraction: " + str("{:.1f}".format(volume_fraction * 100)) + "%"
else: #3D
box_volume = box_dimensions[0]*box_dimensions[1]*box_dimensions[2]
sphere_volumes = 4./3. * np.pi * np.sum(np.asarray(dumbbell_sizes)**3)*2
volume_fraction = sphere_volumes/box_volume
print "Volume fraction: " + str("{:.1f}".format(volume_fraction * 100)) + "%"
return dumbbell_positions, dumbbell_deltax
def not_in_spheres(position,current_spheres_positions,current_sphere_size, dumbbell_size):
flag = 0
for centre in current_spheres_positions:
if np.linalg.norm(position - centre) < current_sphere_size + dumbbell_size:
return False
return True
def randomise_dumbbells_periodic(random_box_bottom_left,random_box_top_right,dumbbell_sizes,dx=1,theta='r',phi='r',current_sphere_sizes=np.array([]),current_sphere_positions=np.empty([0,3])):
dumbbell_positions = np.zeros([len(dumbbell_sizes),3])
dumbbell_deltax = np.zeros([len(dumbbell_sizes),3])
bead_positions = np.zeros([2*len(dumbbell_sizes),3])
num_fails=0
if theta =='r': random_theta = True
else: random_theta = False
if phi == 'r': random_phi = True
else: random_phi = False
Dx,Dy,Dz = np.array(random_box_top_right) - np.array(random_box_bottom_left)
print "Randomly distributing " + str(len(dumbbell_sizes)) + " dumbbells... ",
for i in range(len(dumbbell_sizes)):
while 1 == 1:
proposed_bead1_position = np.array([np.random.rand()*(random_box_top_right[0]-random_box_bottom_left[0])+random_box_bottom_left[0], np.random.rand()*(random_box_top_right[1]-random_box_bottom_left[1])+random_box_bottom_left[1], np.random.rand()*(random_box_top_right[2]-random_box_bottom_left[2])+random_box_bottom_left[2]])
too_close = 0
for m in [-1,0,1]: # NEW
for n in [-1,0,1]: # NEW
dxy = np.array([m*Dx,0,n*Dz]) # NEW
for j in range(len(current_sphere_sizes)):
if np.linalg.norm(proposed_bead1_position - current_sphere_positions[j]-dxy) < (dumbbell_sizes[i]+current_sphere_sizes[j]):
too_close = too_close + 1
if too_close == 0:
for j in range(2*i):
if np.linalg.norm(proposed_bead1_position - bead_positions[j]-dxy) < (dumbbell_sizes[i]+dumbbell_sizes[j//2]):
too_close = too_close + 1
if too_close == 0:
bingo = 0
for tries in xrange(100):
if random_theta: theta = np.random.rand()*np.pi*2
if random_phi: phi = np.random.rand()*np.pi
proposed_bead2_position = proposed_bead1_position + np.array([dx*np.sin(theta)*np.cos(phi),dx*np.sin(theta)*np.sin(phi),dx*np.cos(theta)])
# NOTE: I have turned off checking whether the second bead is in the box.
if 1==1: # NEW
for m in [-1,0,1]: # NEW
for n in [-1,0,1]: # NEW
dxy = np.array([m*Dx,0,n*Dz]) # NEW
for j in range(len(current_sphere_sizes)):
if np.linalg.norm(proposed_bead2_position - current_sphere_positions[j] - dxy) < (dumbbell_sizes[i]+current_sphere_sizes[j]):
too_close = too_close + 1
if too_close == 0:
for j in xrange(2*i):
if np.linalg.norm(proposed_bead2_position - bead_positions[j] - dxy) < (dumbbell_sizes[i]+dumbbell_sizes[j//2]):
too_close = too_close + 1
if too_close == 0:
bingo = 1
break
if bingo == 1:
break
q = 1000
qq = 1000
for m in [-1,0,1]: # NEW
for n in [-1,0,1]: # NEW
dxy = np.array([m*Dx,0,n*Dz]) # NEW
for p in bead_positions[:i]:
q = min(q,np.linalg.norm(proposed_bead2_position - (p+dxy)))
qq = min(qq,np.linalg.norm(proposed_bead1_position - (p+dxy)))
if min(q, qq) < 0.2:
print min(q,qq)
from IPython import embed
embed()
sys.stdout.write('|')
sys.stdout.flush()
bead_positions[2*i] = proposed_bead1_position
bead_positions[2*i+1] = proposed_bead2_position
dumbbell_positions[i] = 0.5*(proposed_bead1_position+proposed_bead2_position)
dumbbell_deltax[i] = (proposed_bead2_position-proposed_bead1_position)
# Move the dumbbell_positions (centre) to inside the periodic box
dumbbell_positions[i] = np.mod(dumbbell_positions[i]-random_box_bottom_left,[Dx,1e6,Dz])+random_box_bottom_left
print " succeeded."
bead_positions = np.concatenate((dumbbell_positions + 0.5*dumbbell_deltax, dumbbell_positions - 0.5*dumbbell_deltax),axis = 0)
distance_matrix = np.linalg.norm(bead_positions-bead_positions[:,None],axis=2)
min_element_distance = np.min(distance_matrix[np.nonzero(distance_matrix)])
two_closest_elements = np.where(distance_matrix == min_element_distance)
print "Min dumbbell s': " + str(min_element_distance/dumbbell_sizes[0])
print "Closest two elements: " + str(two_closest_elements[0])
print "Mean dumbbell pitch: " + "%3.1f"%(np.mean(np.arccos(np.abs(np.dot(dumbbell_deltax/np.linalg.norm(dumbbell_deltax,axis=1)[:,None],np.array([1,0,0]))))*180/np.pi,axis=0)) + "°"
box_dimensions = abs(np.asarray(random_box_top_right) - np.asarray(random_box_bottom_left))
if box_dimensions[1] == 0: #2D
box_area = box_dimensions[0]*box_dimensions[2]
box_volume = box_dimensions[0]*box_dimensions[2]*dumbbell_sizes[0]*2
sphere_areas = np.pi*np.dot(dumbbell_sizes,dumbbell_sizes)*2
sphere_volumes = 4./3. * np.pi * np.sum(np.asarray(dumbbell_sizes)**3)*2
area_fraction = sphere_areas/box_area
volume_fraction = sphere_volumes/box_volume
print "Area fraction: " + str("{:.1f}".format(area_fraction * 100)) + "%"
print "Effective volume fraction: " + str("{:.1f}".format(volume_fraction * 100)) + "%"
else: #3D
box_volume = box_dimensions[0]*box_dimensions[1]*box_dimensions[2]
sphere_volumes = 4./3. * np.pi * np.sum(np.asarray(dumbbell_sizes)**3)*2
volume_fraction = sphere_volumes/box_volume
print "Volume fraction: " + str("{:.1f}".format(volume_fraction * 100)) + "%"
return dumbbell_positions, dumbbell_deltax
def randomise_beads_inside_quadrilateral(quadrilateral, dumbbell_sizes, current_sphere_positions, current_sphere_size):
# Works best for low densities
print "Randomly distributing dumbbells... "
a = dumbbell_sizes[0]
num_dumbbells = len(dumbbell_sizes)
num_current_spheres = len(current_sphere_positions)
random_box_bottom_left = np.array([np.min(quadrilateral[:,0]),0,np.min(quadrilateral[:,1])])
random_box_top_right = np.array([np.max(quadrilateral[:,0]),0,np.max(quadrilateral[:,1])])
dumbbell_positions = np.zeros([num_dumbbells,3])
dumbbell_deltax = np.zeros([num_dumbbells,3])
bead_positions = np.zeros([2*num_dumbbells,3])
for i in range(num_dumbbells*2):
fail = True
while fail:
fail = False
proposed_bead1_position = np.array([np.random.rand()*(random_box_top_right[0]-random_box_bottom_left[0])+random_box_bottom_left[0], np.random.rand()*(random_box_top_right[1]-random_box_bottom_left[1])+random_box_bottom_left[1], np.random.rand()*(random_box_top_right[2]-random_box_bottom_left[2])+random_box_bottom_left[2]])
if not point_inside_polygon(proposed_bead1_position[0],proposed_bead1_position[2],quadrilateral):
fail = True
if not fail:
for j in range(num_current_spheres):
if not fail and np.linalg.norm(proposed_bead1_position - current_sphere_positions[j]) < (dumbbell_sizes[i//2]+current_sphere_size):
fail = True
if not fail:
for j in range(i):
if not fail and np.linalg.norm(proposed_bead1_position - bead_positions[j]) < (dumbbell_sizes[i//2]+dumbbell_sizes[j//2]):
fail = True
sys.stdout.write('|')
sys.stdout.flush()
bead_positions[i] = proposed_bead1_position
pos3 = bead_positions.reshape([2,bead_positions.shape[0]/2,3])
pos3_dumbbell = 0.5*(pos3[0] + pos3[1])
pos3_deltax = 0.5*(pos3[1] - pos3[0])
print "... succeeded."
return pos3_dumbbell, pos3_deltax
def point_inside_polygon(x,y,poly):
n = len(poly)
inside =False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
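# Quick sketch of the ray-casting test above (illustrative values, not from the original):
# >>> square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
# >>> point_inside_polygon(0.5, 0.5, square)
# True
# >>> point_inside_polygon(2.0, 0.5, square)
# False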
def simple_cubic_8(side_length):
s = side_length
sphere_positions = np.array([[np.float(i),np.float(j),np.float(k)] for i in [-s/4,s/4] for j in [-s/4,s/4] for k in [-s/4,s/4]])
box_bottom_left = np.array([-s/2,-s/2,-s/2])
box_top_right = np.array([s/2,s/2,s/2])
return sphere_positions,box_bottom_left,box_top_right
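# Usage sketch (illustrative values): simple_cubic_8(4.0) returns the 8 sphere centres at
# every combination of (+/-1, +/-1, +/-1), with box corners at (-2,-2,-2) and (2,2,2).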
|
"""Auxiliary functions."""
from IPython.display import display_html
def display_side_by_side(*args):
"""Displays pd.DataFrames side by side in Jupyter notebooks"""
html_str = ""
for df in args:
html_str += df.to_html()
    display_html(html_str.replace("table", 'table style="display:inline"'), raw=True)
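# Minimal usage sketch (assuming pandas is available; the HTML only renders inside a Jupyter notebook):
# import pandas as pd
# df_a = pd.DataFrame({"x": [1, 2]})
# df_b = pd.DataFrame({"y": [3, 4]})
# display_side_by_side(df_a, df_b)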
|
"""
"""
# Handle imports
from PIL import Image
import numpy as np
import os
import json
# Define constants
tileset_info_file = os.path.join("resources", "tileset_info.txt")
precompute_tileset_info_files = [
os.path.join("resources", "tmp_tileset_info.txt"),
os.path.join("resources", "default_tileset_info.txt")
]
tileset_info = None
max_checks = 4 # Max number of color guesses
min_tile_length = 2 # Anything smaller causes subtle bugs in code >.>
max_tile_length = 32 # They're slooowwww
epsilon = 0.001
# Define methods
def load_tileset_info():
"""
Loads tileset info files and associated images.
Does some precomputation.
Uses the module-level tileset info file paths; there are no parameters.
:return: Nothing; the merged info is stored in the module-level tileset_info.
"""
global tileset_info_file, precompute_tileset_info_files, min_tile_length, max_tile_length
print("Loading in tileset info.")
recompute_part1 = False
if not os.path.exists(tileset_info_file):
# The tileset info file doesn't exist.
recompute_part1 = True
else:
for file in precompute_tileset_info_files:
if os.path.getmtime(file) > os.path.getmtime(tileset_info_file):
# A file was updated.
recompute_part1 = True
if recompute_part1:
precompute_tilesets_part1(precompute_tileset_info_files, tileset_info_file)
precompute_tilesets_part2(min_tile_length, max_tile_length, tileset_info_file)
def precompute_tilesets_part1(precompute_tileset_info_files, tileset_info_file):
"""
Compute non-numpy, easily cacheable data about the tilesets.
:param precompute_tileset_info_files: Tileset info files from TilesetScrapper.py
:param tileset_info_file: Where to output computed info to.
:return: Nothing, really. *sweats*
"""
to_return = []
for file in precompute_tileset_info_files:
with open(file, "r") as f:
to_return.extend(json.loads(f.read()))
for info in to_return:
local_filepath = info["local_filepath"]
image = image_to_array(local_filepath)
use_alpha = check_image_alpha(local_filepath)
tile_shape = [image.shape[0] // 16, image.shape[1] // 16]
combined_color_guesses = []
for y in range(16):
for x in range(16):
tile = image[
y * tile_shape[0]: (y + 1) * tile_shape[0],
x * tile_shape[1]: (x + 1) * tile_shape[1]
]
combined_color_guesses.append(
tile_color_guesses(tile, use_alpha))
info["alpha"] = use_alpha
info["shape"] = tile_shape
info["size"] = tile_shape[0] * tile_shape[1]
info["color_guesses"] = combined_color_guesses
with open(tileset_info_file, "w+") as f:
f.write(json.dumps(to_return, indent=2))
print("Finished precomputing part 1.")
def precompute_tilesets_part2(min_tile_length, max_tile_length, tileset_info_file):
"""
Compute more information about the tilesets.
:return: Nothing.
"""
global tileset_info
to_return = []
with open(tileset_info_file, "r") as f:
to_return.extend(json.loads(f.read()))
for i in range(len(to_return)-1, -1, -1):
info = to_return[i]
image = image_to_array(info["local_filepath"])
tiles = []
tile_shape = info["shape"]
if tile_shape[0] < min_tile_length or tile_shape[1] < min_tile_length:
del to_return[i]
continue
if tile_shape[0] > max_tile_length or tile_shape[1] > max_tile_length:
del to_return[i]
continue
for y in range(16):
for x in range(16):
tile = image[
y * tile_shape[0]: (y + 1) * tile_shape[0],
x * tile_shape[1]: (x + 1) * tile_shape[1]
]
tile_info = {}
tile_info["image"] = tile
tile_info["color_guesses"] = info["color_guesses"][x + y*16]
tile_info["pink_mask"] = tile_pink_mask(tile)
tiles.append(tile_info)
info["tiles"] = np.array(tiles)
info["hashes"] = calculate_tileset_info(info["tiles"], info["alpha"])
tileset_info = to_return
print("Finished precomputing part 2.")
def calculate_tileset_info(tiles, use_alpha):
"""
Calculate additional info for an entire tileset.
:param tiles: Tileset tiles
:param use_alpha: If the tileset uses alpha
:return: Additional tileset info
"""
hashes = []
for i in range(len(tiles)):
tile = tiles[i]
hashes.append(hash_tile(tile["image"]))
return np.array(hashes)
def tile_pink_mask(tile):
"""
Precompute which parts of a pink tile are background.
:param tile: A tileset tile
:return: The tile's pink mask
"""
# A heuristic
is_pink = (tile[:, :, 0:1] > 250) * \
(tile[:, :, 1:2] < 5) * \
(tile[:, :, 2:3] > 250)
return is_pink.astype(int)
def hash_tile(tile):
"""
Quickly calculate a vector of a tile that is color invariant and noise resistant.
:param tile: A tileset tile
:return: A numpy vector
"""
global epsilon
# Make Hue invariant
tile = np.sum(tile, axis=2)
# Make 1D
tile.resize([tile.shape[0] * tile.shape[1]])
# Calculate diff between consecutive pixels
diff = np.convolve(tile, np.array([1, -1]), 'valid')
np.abs(diff, out=diff)
diff -= diff.min()
return diff / (np.linalg.norm(diff) + epsilon)
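# Property sketch (illustrative, not from the original): because the hash works on differences of
# channel-summed neighbouring pixels, adding a uniform brightness offset leaves it unchanged, e.g.
# tile = np.random.rand(8, 8, 4) * 200
# np.allclose(hash_tile(tile), hash_tile(tile + 40.0))  # -> True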
def entropy_image(image):
"""
Calculates a colour-diversity measure of an image.
:param image: Image as a (height, width, channels) array
:return: sum of p*log(p) over the distinct colours (the negative Shannon entropy)
"""
# np.resize does not accept -1 dimensions; reshape to a flat list of pixels instead
prob = np.sort(
np.unique(image.reshape(-1, image.shape[2]), return_counts=True,
axis=0)[1]) / (
image.shape[0] * image.shape[1])
# Note: this returns sum(p*log(p)), i.e. the negative of the Shannon entropy
return np.sum(prob * np.log(prob))
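# Worked example (illustrative): an image whose pixels are split evenly between two colours gives
# probabilities [0.5, 0.5], so the function returns 2 * 0.5 * log(0.5) ~= -0.693.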
def tile_color_guesses(tile, use_alpha):
"""
Precompute pixels to check for obtaining foreground and background colors of a tile.
:param tile: A tileset tile as an RGBA array
:param use_alpha: Whether the tileset uses the alpha channel for transparency
:return: A list of pixel checks, each a dict with a "type" and a "pos"
"""
global max_checks
# We have to track foreground and background colors for checking purposes
tile_shape = tile.shape
checks = []
foreground_c = False
background_c = False
reset = False
done = False
# Check each and every pixel
x = 0
while x < tile_shape[1] and not done:
y = 0
while y < tile_shape[0] and not done:
tileset_c = tile[y, x]
alpha = tileset_c[3] / 255
tileset_c = tileset_c[:3]
is_pink = tileset_c[0] > 250 and tileset_c[1] < 5 \
and tileset_c[2] > 250
if is_pink and (not use_alpha):
if not background_c:
# The background color and sample color should be the same
# Limit to one check, because bleh otherwise
checks.append({"type": "p_b", "pos": (y, x)})
reset = True # Reset scan with new information
background_c = True
else:
average = np.max(
tileset_c) # Aka value (HSV) // 255 = fore, 0 = back
transparency = average / 255
"""
We need to guess the foreground color.
If the background color is known, this is easy.
If alpha = 1.0, we can ignore the background color.
"""
if alpha == 1 or (
background_c is True and alpha != 0.0):
if len(checks) < max_checks - 1 or background_c:
checks.append({"type": "f", "pos": (y, x)})
if not foreground_c:
reset = True # Reset scan with new information
foreground_c = True
"""
We need to guess the background color.
It's pretty much the same as guessing the foreground color.
"""
if (alpha == 0) or (
foreground_c is True and alpha != 1) or (
transparency == 0):
if len(checks) < max_checks - 1 or foreground_c:
checks.append({"type": "b", "pos": (y, x)})
if not background_c:
reset = True # Reset scan with new information
background_c = True
if (foreground_c and background_c) and len(checks) == max_checks:
done = True
y += 1
if reset:
y = tile_shape[0]
x += 1
if reset:
x, y = 0, 0
reset = False
return checks
def check_image_alpha(image_path):
"""
Check if an image uses alpha.
:param image_path: Path to the image file
:return: True if the image has an alpha channel
"""
with Image.open(image_path) as image:
im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)
im_arr = im_arr.reshape((image.size[0], image.size[1], -1)).astype(int)
return im_arr.shape[2] == 4
def image_to_array(image_path):
"""
Loads image into 3D Numpy array of shape
(height, width, 4)
where 4 represents RGBA
:param image_path: The location of the image
:return: 3d numpy array
"""
with Image.open(image_path) as image:
image = image.convert('RGBA')
im_arr = np.fromstring(image.tobytes(), dtype=np.uint8)
im_arr = im_arr.reshape((image.size[1], image.size[0], -1))
return im_arr
def get_tileset(name):
"""
Get info for a given tileset.
:param name: Tileset name
:return: Tileset info
"""
global tileset_info
for info in tileset_info:
if name.lower() in info["local_filename"].lower():
return info
else:
return None
def get_tileset_by_id(i):
"""
Get a tileset by an id.
:param i: An id.
:return: A corresponding tileset.
"""
global tileset_info
return tileset_info[i]
def get_id_of_tileset(name):
"""
Get the id of a tileset. Returns -1 if nothing matches.
:param name: Tileset name.
:return: The corresponding tileset id, or -1.
"""
global tileset_info
id = 0
for info in tileset_info:
if name.lower() in info["local_filename"].lower():
return id
id += 1
else:
return -1
def num_tilesets():
"""
Get the number of tilesets.
:return: The number of tilesets
"""
global tileset_info
return len(tileset_info)
def largest_tile_dims(tileset_ids = None):
"""
Get the largest tileset tile dimensions.
:return: A [height, width] list.
"""
global tileset_info
max_x = 0
max_y = 0
if tileset_ids is None:
tileset_ids = list(range(num_tilesets()))
for tileset_id in tileset_ids:
info = tileset_info[tileset_id]
if info["shape"][0] > max_y:
max_y = info["shape"][0]
if info["shape"][1] > max_x:
max_x = info["shape"][1]
largest_dims = [max_y, max_x]
return largest_dims
smallest_dims = None
def smallest_tile_dims(tileset_ids = None):
"""
Get the smallest tileset tile dimensions.
:return: A [height, width] list.
"""
global tileset_info
min_x = 1000
min_y = 1000
if tileset_ids is None:
tileset_ids = list(range(num_tilesets()))
for tileset_id in tileset_ids:
info = tileset_info[tileset_id]
if info["shape"][0] < min_y:
min_y = info["shape"][0]
if info["shape"][1] < min_x:
min_x = info["shape"][1]
smallest_dims = [min_y, min_x]
return smallest_dims
# Temp code
# tileset = image_to_array("resources/test/Curses 640x300diag.png")
# np.save('resources/test/Curses', tileset)
|
from app import DB
class Ip(DB.Model):
""" Ip table model """
__tablename__ = "ip"
id = DB.Column(DB.Integer, primary_key=True)
address = DB.Column(DB.String(), nullable=False)
def __init__(self, address):
""" Constructor for Ip model."""
self.address = address
def __repr__(self):
""" String representation of Ip model object."""
return self.address
|
from random import randint
from markupsafe import escape
from pymongo import ASCENDING
from flask import Blueprint, render_template, session, url_for,redirect,flash,request,abort
from functools import wraps
from bson.objectid import ObjectId
import json
from datetime import datetime, timedelta
from app.users.forms import LoginForm,RegistrationForm,StaffForm,getAnimalForm,MammalForm
from app.users.helper import Helper,checkSession,assignRole,setSession,getAnimal,InsertAnimal
from app.db.claybrookZoo import log,users,categories,compound,aquarium,hothouse,aviary,contract
from app import bcrypt
from app.models.user import User
from app.models.roles import Sponsor
from app.models.mammals import Mammals
from app.models.birds import Birds
from app.models.category import Category
import secrets
import os
from app import app
userDB = Helper(users)
logs = Helper(log)
sponsor_contract = Helper(contract)
insertAnimal = InsertAnimal({'mammals':Helper(compound),'birds':Helper(aviary),'reptiles':Helper(hothouse),'amphibians':Helper(hothouse),'fishes':Helper(aquarium)})
user = Blueprint('user',__name__)
severity = {'healthy':"healthy.png",'moderate':"moderate.png",'moderately severe':"moderateS.png",'severe':'severe.png'}
newSponsors = sponsor_contract.count_items({'approved':False})
newUsers=userDB.get_new_users()
def uploadImage(image_data,animalType):
random_hex = secrets.token_hex(16)
_, fext = os.path.splitext(image_data.filename)
newFilename = random_hex + fext
picture_path = os.path.join(app.root_path,'static','images',animalType,newFilename)
image_data.save(picture_path)
return newFilename
@user.route("/login",methods=["GET","POST"])
def loginUser():
logForm = LoginForm()
loginFail = False
color = ""
print(request.method)
if 'current_user' in session:
return redirect(url_for('home.index'))
if request.method == "POST":
if logForm.validate_on_submit():
userInst = userDB.login({'email':logForm.email.data,'password':logForm.password.data})
if userInst != None:
flash(f'Welcome! {escape(userInst["username"])}.','notification is-success')
setSession(session,userInst)
return redirect(url_for('home.index' if session['current_user']['role'] in ["Sponsor","Visitor"] else "user.staffHome"))
else:
loginFail = True
color = "#b70606"
return render_template('login.html',form=logForm,notification=6,fail=loginFail,color=color)
@user.route("/staff/<string:category>/<string:animalID>/archive",methods=["GET"])
def archiveAnimal(category,animalID):
if checkSession(session,["Admin","Manager"]):
_id = insertAnimal.archive(category,animalID)
if _id:
flash("Animal Archived Succesfully")
log.insert_one({
"inserted_by": session['current_user']['username'],
"animal": _id,
'category':category,
'operation':'archive',
"date_added":datetime.now()
})
else:
flash("Archive failed")
return redirect(f"/staff/{category}")
else:
return redirect(url_for("home.index"))
@user.route('/register',methods=["GET","POST"])
def register():
if 'current_user' in session:
return redirect(url_for('home.index'))
regForm = RegistrationForm()
if regForm.validate_on_submit():
userID = userDB.register({'username':regForm.username.data,'password':regForm.password.data,'email':regForm.email.data,'checked': False})
setSession(session,userID)
return redirect(url_for('home.index'))
elif request.method == "POST":
flash(f'Failed to create account {regForm.username.data}! ','notification is-danger')
return render_template('register.html',form=regForm)
@user.route("/logout",methods=["GET","POST"])
def logout():
if request.method == "GET":
if "current_user" in session:
session.pop('current_user',None)
flash("You have now been logged out","notification is-danger")
return redirect(url_for('home.index'))
else:
flash("You must first Login","notification is-warning")
return redirect(url_for('home.index'))
@user.route("/staff",methods=["GET"])
def staffHome():
if checkSession(session,["Admin","Manager","Staff"]):
global newSponsors
latest = insertAnimal.getLastAdded()
animal_count = insertAnimal.getAllCount()
return render_template('staff/staff.html',total=randint(1,100),newUsers=userDB.get_new_users(),latest=latest,animal_count=animal_count,newSponsors=newSponsors)
else:
return redirect(url_for("home.index"))
@user.route("/staff/animals",methods=["GET","POST"])
@user.route("/staff/animals/<string:category>",methods=["GET","POST"])
def animalOperation(category=None):
print(app.root_path)
if checkSession(session,["Admin","Manager","Staff"]):
global newUsers
global newSponsors
global severity
# newUsers = userDB.get_new_users()
form = getAnimalForm(category) if category != None else None
cats = Category.get_all()
addForm = True
severAnimals = insertAnimal.health_count()
if request.method == "GET":
if category == None:
form = None
addForm = False
else:
addForm = True
if form and form.validate_on_submit():
imagePath = uploadImage(form.image.data,category)
newAnimal = getAnimal(form.__dict__['_fields'],category)
newAnimal.update({'image':[imagePath],"dateAdded":datetime.utcnow(),'status':'healthy'})
_id = insertAnimal.insert_animal(newAnimal.__dict__,category)
log.insert_one({
"inserted_by": session['current_user']['username'],
"animal": _id,
'operation':'add',
'category': category,
"date_added":datetime.now()
})
return redirect(url_for("user.staffHome"))
return render_template("staff/animals.html",profile="profile.png",categories=cats,addForm=addForm,category=category,form=form,newUsers=newUsers,newSponsors=newSponsors,severity=severity,health_dist=severAnimals)
else:
return redirect(url_for("animals.zooAnimals"))
@user.route("/staff/<string:category>", methods=["GET"])
def displayCategory(category):
if checkSession(session,["Admin","Manager","Staff"]):
category = escape(category)
global newUsers
# newUsers = userDB.get_new_users()
if category.lower() == "sick":
animals = insertAnimal.get_animals("total",{'status':{'$exists':'true'}})
else:
animals = insertAnimal.get_animals(category)
return render_template("staff/animalinfo.html",category=category,newUsers=newUsers,animals=animals)
return redirect(url_for("animals.zooAnimals"))
@user.route("/staff/evaluate",methods=["GET"])
def staffEvaluate():
if checkSession(session,["Admin"]):
global newSponsors
newUsers = userDB.get_new_users()
# global newUsers
return render_template("staff/evaluate.html",evaluateUsers=newUsers,newUsers=newUsers,profile="ssg-goku.jpg",newSponsors=newSponsors)
else:
return redirect(url_for("user.staffHome"))
@user.route("/evaluate/<string:email>",methods=["GET","POST"])
def updateStaff(email):
if checkSession(session,["Admin"]):
global newUsers
global newSponsors
user = userDB.find_by_email(email)
_id = user['_id']
currVis = User({'email':user['email'],'username':user['username']})
editForm = StaffForm()
if editForm.validate_on_submit():
currVis = assignRole(editForm.role.data,currVis.__dict__,{'username': editForm.username.data,'role':editForm.role.data,'checked':True})
if currVis.saveToDb(userDB):
return redirect(url_for("user.staffEvaluate"))
return render_template("staff/editStaff.html",profile="ssg-goku.jpg",form=editForm,user=currVis,newUsers=newUsers,newSponsors=newSponsors)
else:
return render_template("staff/staff.html")
@user.route("/eval",methods=["POST"])
def approveRole():
if checkSession(session,["Admin"]):
criteria = request.get_json()
print(criteria)
if userDB.find_and_update({'email':criteria['email']},{"checked":True,"role":criteria['role']}):
return json.dumps({'approve':True})
@user.route("/staff/sponsor",methods=["GET","POST"])
def approveStaff():
global newUsers
global newSponsors
if checkSession(session,['Admin',"Manager","Staff"]):
sponsors = sponsor_contract.getProp({})
return render_template("staff/sponsors.html",newSponsors=newSponsors,newUsers=newUsers,role=session['current_user']['role'],sponsors=sponsors)
else:
return render_template("sponsor.applySponsor")
@user.route("/staff/health",methods=["GET","POST"])
def assignHealth():
if checkSession(session,["Admin","Staff","Manager"]):
global newUsers
global newSponsors
global severity
animals = insertAnimal.get_animals("total")
return render_template("staff/sickAnimal.html",newUsers=newUsers,newSponsors=newSponsors,animals=animals,severity=severity)
else:
return redirect(url_for(""))
@user.route("/staff/approve",methods=["POST"])
def approve():
if checkSession(session,["Admin"]):
payload = request.form
dateApproved = datetime.now() + timedelta(int(payload['months']) * 30)
sponsor_contract.find_and_update({'_id':ObjectId(payload['id'])},{'end_date':dateApproved,'approved':True})
newSponsors -= 1
userDB.find_and_update({'_id':ObjectId(payload['sponsor_id'])},{"role":"Sponsor"})
return redirect(url_for("user.approveStaff"))
@user.route("/staff/<string:category>/<string:_id>/<string:status>",methods=["GET"])
def updateHealth(category,_id,status):
if checkSession(session,["Admin","Manager"]):
global severity
if status.lower() in severity:
anim = insertAnimal.dbs.get(category)
if anim:
if anim.find_and_update({'_id':ObjectId(_id)},{'status':status}):
return redirect(url_for("user.assignHealth"))
else:
return "Animal Not found"
else:
return "Invalid Status"
else:
return redirect(url_for("user.staff"))
@user.route("/staff/collect",methods=["GET"])
def returnCount():
return insertAnimal.getAllCount()
@user.route("/staff/logs",methods=["GET"])
def staffLog():
if checkSession(session,["Admin"]):
global newUsers
global newSponsors
userLogs = logs.getProp({}).sort([('date_added',ASCENDING)])
orderd = {}
for i in userLogs:
datekey = str(i['date_added']).split(' ')[0]
if datekey == str(datetime.now()).split(' ')[0]:
k = "Today"
else:
k = datekey
curr = orderd.get(k,None)
if not curr:
orderd[k] = []
orderd[k].append(i)
return render_template("staff/activityLog.html",newUsers=newUsers,newSponsors=newSponsors,log=orderd)
|
# import sys
# def main(input_file):
# with open(input_file, 'r') as data:
# for line in data:
# print first_non_repeated_char(line.strip())
# def first_non_repeated_char(string):
# output = None
# for char in string:
# if string.count(char) == 1:
# output = char
# # for first non-repeated char need to break loop
# break
# return output
# if __name__ == "__main__":
# try:
# main(sys.argv[1])
# except Exception as e:
# print 'First argument must be a text file!\nError: {0}'.format(e)
board = [
['A', 'S', 'A', 'D', 'B'],
['A', 'B', 'C', 'C', 'E', 'D' ],
['A', 'B', 'C', 'F'],
]
for index, element in enumerate(board):
print index, element
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import hashlib
import os
from django.conf import settings
from django.core import urlresolvers
from django.core.files.base import ContentFile
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.utils.compatibility import python_2_unicode_compatible
from filer.utils.files import get_valid_filename
from leonardo.module.media import mixins
from .. import settings as filer_settings
from ..fields.multistorage_file import MultiStorageFileField
try:
from polymorphic.models import PolymorphicModel
from polymorphic.managers import PolymorphicManager
except ImportError:
# django-polymorphic < 0.8
from polymorphic import PolymorphicModel, PolymorphicManager
class FileManager(PolymorphicManager):
def find_all_duplicates(self):
r = {}
for file_obj in self.all():
if file_obj.sha1:
q = self.filter(sha1=file_obj.sha1)
if len(q) > 1:
r[file_obj.sha1] = q
return r
def find_duplicates(self, file_obj):
return [i for i in self.exclude(pk=file_obj.pk).filter(sha1=file_obj.sha1)]
@python_2_unicode_compatible
class File(PolymorphicModel, mixins.IconsMixin):
file_type = 'File'
_icon = "file"
folder = models.ForeignKey('media.Folder', verbose_name=_('folder'),
null=True, blank=True, related_name="%(app_label)s_%(class)s_files")
file = MultiStorageFileField(
_('file'), null=True, blank=True, max_length=255)
_file_size = models.IntegerField(_('file size'), null=True, blank=True)
sha1 = models.CharField(_('sha1'), max_length=40, blank=True, default='')
has_all_mandatory_data = models.BooleanField(
_('has all mandatory data'), default=False, editable=False)
original_filename = models.CharField(
_('original filename'), max_length=255, blank=True, null=True)
name = models.CharField(max_length=255, default="", blank=True,
verbose_name=_('name'))
description = models.TextField(null=True, blank=True,
verbose_name=_('description'))
owner = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
related_name='owned_%(class)ss',
null=True, blank=True, verbose_name=_('owner'))
uploaded_at = models.DateTimeField(_('uploaded at'), auto_now_add=True)
modified_at = models.DateTimeField(_('modified at'), auto_now=True)
is_public = models.BooleanField(
default=filer_settings.FILER_IS_PUBLIC_DEFAULT,
verbose_name=_('Permissions disabled'),
help_text=_('Disable any permission checking for this ' +
'file. File will be publicly accessible ' +
'to anyone.'))
objects = FileManager()
@classmethod
def matches_file_type(cls, iname, ifile=None, request=None):
return True # I match all files...
def __init__(self, *args, **kwargs):
super(File, self).__init__(*args, **kwargs)
self._old_is_public = self.is_public
def _move_file(self):
"""
Move the file from src to dst.
"""
src_file_name = self.file.name
dst_file_name = self._meta.get_field('file').generate_filename(
self, self.original_filename)
if self.is_public:
src_storage = self.file.storages['private']
dst_storage = self.file.storages['public']
else:
src_storage = self.file.storages['public']
dst_storage = self.file.storages['private']
# delete the thumbnail
# We are toggling the is_public to make sure that easy_thumbnails can
# delete the thumbnails
self.is_public = not self.is_public
self.file.delete_thumbnails()
self.is_public = not self.is_public
# This is needed because most remote file storage backends do not
# open the file.
src_file = src_storage.open(src_file_name)
src_file.open()
self.file = dst_storage.save(dst_file_name,
ContentFile(src_file.read()))
src_storage.delete(src_file_name)
def _copy_file(self, destination, overwrite=False):
"""
Copies the file to the given destination and returns the stored file name.
"""
if overwrite:
# If the destination file already exists, the default storage backend
# does not overwrite it but generates another filename.
# TODO: Find a way to override this behavior.
raise NotImplementedError
src_file_name = self.file.name
storage = self.file.storages['public' if self.is_public else 'private']
# This is needed because most remote file storage backends do not
# open the file.
src_file = storage.open(src_file_name)
src_file.open()
return storage.save(destination, ContentFile(src_file.read()))
def generate_sha1(self):
sha = hashlib.sha1()
self.file.seek(0)
while True:
buf = self.file.read(104857600)  # read in 100 MiB chunks
if not buf:
break
sha.update(buf)
self.sha1 = sha.hexdigest()
# to make sure later operations can read the whole file
self.file.seek(0)
def save(self, *args, **kwargs):
# check if this is a subclass of "File" or not and set
# _file_type_plugin_name
if self.__class__ == File:
# what should we do now?
# maybe this has a subclass, but is being saved as a File instance
# anyway. do we need to go check all possible subclasses?
pass
elif issubclass(self.__class__, File):
self._file_type_plugin_name = self.__class__.__name__
# cache the file size
# TODO: only do this if needed (depending on the storage backend the whole
# file will be downloaded)
try:
self._file_size = self.file.size
except:
pass
if self._old_is_public != self.is_public and self.pk:
self._move_file()
self._old_is_public = self.is_public
# generate SHA1 hash
# TODO: only do this if needed (depending on the storage backend the whole
# file will be downloaded)
try:
self.generate_sha1()
except Exception:
pass
super(File, self).save(*args, **kwargs)
save.alters_data = True
def delete(self, *args, **kwargs):
# Delete the model before the file
super(File, self).delete(*args, **kwargs)
# Delete the file if there are no other Files referencing it.
if not File.objects.filter(file=self.file.name, is_public=self.is_public).exists():
self.file.delete(False)
delete.alters_data = True
@property
def canonical_url(self):
url = ''
if self.file and self.is_public:
try:
url = urlresolvers.reverse('canonical', kwargs={
'uploaded_at': self.uploaded_at.strftime('%s'),
'file_id': self.id
})
except urlresolvers.NoReverseMatch:
pass # No canonical url, return empty string
return url
@property
def label(self):
if self.name in ['', None]:
text = self.original_filename or 'unnamed file'
else:
text = self.name
text = "%s" % (text,)
return text
def __lt__(self, other):
return self.label.lower() < other.label.lower()
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return True if the current user has permission on this
file. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
def __str__(self):
if self.name in ('', None):
text = "%s" % (self.original_filename,)
else:
text = "%s" % (self.name,)
return text
def get_admin_url_path(self):
model_name = self._meta.model_name
return urlresolvers.reverse(
'admin:%s_%s_change' % (self._meta.app_label,
model_name,),
args=(self.pk,)
)
@property
def url(self):
"""
to make the model behave like a file field
"""
try:
r = self.file.url
except:
r = ''
return r
@property
def path(self):
try:
return self.file.path
except:
return ""
@property
def size(self):
return self._file_size or 0
@property
def extension(self):
filetype = os.path.splitext(self.file.name)[1].lower()
if len(filetype) > 0:
filetype = filetype[1:]
return filetype
@property
def get_logical_path(self):
'''returns logical path like /directory/file.jpg'''
return os.path.join(self.folder.quoted_logical_path if self.folder else '',
get_valid_filename(self.original_filename))
@property
def pretty_logical_path(self):
'''returns pretty logical path like /directory/File.jpg'''
return os.path.join(self.folder.quoted_logical_path if self.folder else '',
self.label)
def relocate_file(self):
'''relocate file to new directory'''
old_path = self.file.path
self.file.name = self.get_logical_path[1:]
if self.file.path != old_path:
try:
os.makedirs(os.path.dirname(self.file.path))
except:
pass
os.rename(old_path, self.file.path)
@property
def logical_folder(self):
"""
if this file is not in a specific folder return the Special "unfiled"
Folder object
"""
if not self.folder:
from filer.models.virtualitems import UnfiledImages
return UnfiledImages()
else:
return self.folder
@property
def logical_path(self):
"""
Gets logical path of the folder in the tree structure.
Used to generate breadcrumbs
"""
folder_path = []
if self.folder:
folder_path.extend(self.folder.get_ancestors())
folder_path.append(self.logical_folder)
return folder_path
@property
def duplicates(self):
return File.objects.find_duplicates(self)
class Meta:
app_label = 'media'
verbose_name = _('file')
verbose_name_plural = _('files')
|
#!/usr/bin/env python3
import hashlib
import itertools
import multiprocessing
from multiprocessing import freeze_support
def brute(worker_function, data_list, processes=8):
pool = multiprocessing.Pool(processes=processes)
result = pool.map(worker_function, data_list)
pool.close()
return result
def worker(f):
prefix = "AC34BFB5683"
for option in itertools.product(map(str, range(10)), repeat=6):
potential = f + "".join(option)
if hashlib.sha256(potential.encode()).hexdigest().upper().startswith(prefix):
print("found", potential)
return
def main():
brute(worker, map(str, range(10)), processes=6)
if __name__ == "__main__":
freeze_support()
main()
|
from datetime import datetime
from vnpy.trader.object import HistoryRequest
from vnpy.trader.database import database_manager
from vnpy.trader.constant import Interval, Exchange
from vnpy.trader.rqdata import rqdata_client
from vnpy.trader.setting import SETTINGS
from vnpy_pro.config import load_futures
from vnpy_pro.data.source.tdxdata import tdxdata_client
# Set configuration parameters
FUTURES = load_futures()
# P99.DCE
future = "P"
interval = Interval.MINUTE
symbol = future.upper() + "99"
exchange = Exchange.DCE
# Query the latest data in the database
start = datetime(2010, 1, 1)
# bar = database_manager.get_newest_bar_data(symbol, exchange, interval)
# if bar:
# start = bar.datetime
# else:
# start = datetime(2017, 1, 1)
if tdxdata_client.init():
print("数据服务器登录成功")
else:
print("数据服务器登录失败")
# Download data
req = HistoryRequest(
symbol,
exchange,
start,
datetime.now(),
interval=interval
)
data = tdxdata_client.query_history(req)
# Write to the database
if data:
database_manager.save_bar_data(data)
print(f"数据更新完成:{data[0].datetime} -- {data[-1].datetime}")
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
fullname = StringField('Full Name', validators=[Length(min=1,max=50)])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField('Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
class EditProfileForm(FlaskForm):
fullname = StringField('Full Name', validators=[Length(max=50)])
occupation = StringField('Occupation', validators=[Length(max=50)])
hobby = StringField('Your Hobbies', validators=[Length(max=50)])
about_me = TextAreaField('About Myself', validators=[Length(min=0, max=150)])
submit = SubmitField('Submit')
class ContactForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
company = StringField('Company / Website')
message = TextAreaField('Message')
submit = SubmitField('Submit')
class ArticleForm(FlaskForm):
heading = StringField('Title', validators=[DataRequired(), Length(min=1, max=250)])
body = TextAreaField('Body', validators=[DataRequired(), Length(min=30)])
submit = SubmitField('Submit')
class NewsLetterForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email(), Length(min=1)])
submit = SubmitField('Subscribe')
class CommentForm(FlaskForm):
comment = TextAreaField('Comment', validators=[DataRequired(), Length(min=1)])
submit = SubmitField('Comment')
class FileUploadForm(FlaskForm):
submit = SubmitField('Upload')
class SearchForm(FlaskForm):
search = StringField('search', validators=[DataRequired()])
|
#!/usr/bin/env python
# -*- vim set ft=python
#
# @author kchr
from PIL import Image as pil, ImageOps as pilops
import jinja2
import argparse
import os
import glob
import shutil
import time
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--force', action="store_true",
help='Force overwrite of existing files')
parser.add_argument('-o', '--output', type=str, required=True,
help='Output directory (required)')
parser.add_argument('-t', '--title', type=str,
help='Title for the output index page (default "gallery")')
parser.add_argument('-s', '--size', type=int,
help='Thumbnail width/height (default 256)')
parser.add_argument('-c', '--cols', type=int, choices=[2, 3, 4, 6, 8, 12],
help='Columns on index page (default 8)')
parser.add_argument('sources', metavar='SOURCE', type=str, nargs='+',
help='Directory or filename to include')
args = parser.parse_args()
env = jinja2.Environment(loader=jinja2.FileSystemLoader('./templates'))
tpl_list = env.get_template('list.html')
tpl_list_images = []
out_dir = args.output or 'www'
out_title = args.title or 'gallery'
out_size = args.size or 256
out_cols = args.cols or 8
out_force = args.force or False
# Thumbnail options
tn_size = (out_size, out_size) # thumbnail width x height
tn_canvas = 'black' # canvas bleed color (upsizing bg)
tn_dir = os.path.join(out_dir, 'thumbs') # thumbnail subdir
s_list = []
s_failed = False
for d in [out_dir, tn_dir]:
if not os.path.isdir(d):
os.mkdir(d)
print "[*] Created output directory: %s" % d
else:
print "[*] Using output directory: %s" % d
def wdir(files, dirname, names):
for bn in names:
f = os.path.join(dirname, bn)
if os.path.isdir(f):
pass
elif os.path.isfile(f):
if not f in files:
files.append(f)
else:
print 'Unknown file: %s' % f
for s_glob in args.sources:
for s in glob.glob(s_glob):
ab = os.path.abspath(s)
if not os.path.exists(s):
s_failed = True
print "Could not read source: %s" % s
else:
os.path.walk(s, wdir, s_list)
print "[*] Creating gallery '%s' in %s (%d files)..." \
% (out_title, out_dir, len(s_list))
print "[*] Creating thumbnails (%dx%d)..." % tn_size
for infile in sorted(s_list):
filename, ext = os.path.splitext(infile)
basename = os.path.basename(filename)
tn_name = '%s_tn%s' % (basename, ext)
tn_path = '%s/%s' % (tn_dir, tn_name)
im = pil.open(infile)
(width, height) = im.size
# Both sides are at least the thumbnail size, so fit (downsize) the image
if min(im.size) > min(tn_size):
bleed = 0
center = (0.5, 0.5)
im_t = pilops.fit(im, tn_size, pil.NEAREST, bleed, center)
# At least one side is smaller than the thumbnail, so paste onto a solid canvas
else:
im_t = pil.new(im.mode, tn_size, tn_canvas)
box_ul = (tn_size[0] - width) / 2
box_lr = (tn_size[1] - height) / 2
im_t.paste(im, (box_ul, box_lr))
try:
# Read metadata from file
created = time.ctime(os.path.getctime(infile))
modified = time.ctime(os.path.getmtime(infile))
target = os.path.join(out_dir, basename + ext)
# Check if file exists, skip if not forced
if os.path.isfile(target):
if not out_force:
print "[/] File exists, skipping: %s" % target
continue
# Copy infile to output directory
shutil.copy2(infile, target)
try:
# Save thumbnail
im_t.save(tn_path)
# Image dict for template output
image = {
'url': basename + ext,
'title': basename,
'date_created': created,
'date_modified': modified,
'thumb_url': os.path.join('thumbs', tn_name)
}
tpl_list_images.append(image)
except (IOError, os.error) as why:
print "[x] Failed to save thumbnail: %s (%s)" % (tn_path, why)
except (IOError, os.error) as why:
print "[x] Failed to copy source: %s -> %s (%s)" \
% (infile, out_dir, why)
try:
print "[*] Rendering %d-column view from template..." % out_cols
index = os.path.join(out_dir, 'index.html')
# Render output from template
output = tpl_list.render(title=out_title,
cols=out_cols,
images=tpl_list_images)
# Save the rendered index page
with open(index, 'wb') as fh:
fh.write(output)
# Copy CSS to output directory
for f in ['templates/style.css', 'templates/grid.css']:
bn = os.path.basename(f)
try:
shutil.copy2(f, os.path.join(out_dir, bn))
print "[*] Copied media asset: %s" % bn
except (IOError, os.error) as why:
print "[x] Failed to copy media: %s (%s)" % (bn, why)
print "[*] Wrote index file: %s" % index
except (IOError, os.error) as why:
print "[x] Failed to write output: %s (%s)" % (index, why)
except (Exception, os.error) as why:
print "Failed to compile output (%s)" % (why)
|
'''
Markers:
'1' represents X
'-1' represents O
Flags:
for CURRENT_PLAYER: '1' represents Player 1 and '-1' represents Player 2
'''
import os
from random import randint
import numpy as np
CURRENT_PLAYER=0
def clear_screen():
os.system('clear')
print 'Welcome to Tic Tac Toe'
print '\nThe Board Positions correspond to the Numeric Keypad\n\n'
def get_Input(board,player_marker):
'''
Gets a move from the current player and places their marker on the board.
'''
global CURRENT_PLAYER
if CURRENT_PLAYER==1:
while True:
x=int(raw_input("Player A: Enter Move: "))
print x
if x>=1 and x<=9 and board[x-1]==0:
board[x-1]=player_marker['A']
break
else:
print 'Invalid Position.'
continue
elif CURRENT_PLAYER==-1:
while True:
x=int(raw_input("Player B: Enter Move: "))
print x
if x>=1 and x<=9 and board[x-1]==0:
board[x-1]=player_marker['B']
break
else:
print 'Invalid move'
continue
else:
pass
return board
def choose_Player():
'''
Randomly chooses which Player goes First
'''
global CURRENT_PLAYER
player_number=randint(0,1)
if player_number==0:
CURRENT_PLAYER+=1
return 'A'
else:
CURRENT_PLAYER+=-1
return 'B'
def getPlayerMarker():
'''
Keeps Track of Markers associated with the Players
'''
player=choose_Player()
while True:
player__Marker=raw_input("Player %s, Choose a Marker: 'X' , 'O': " %player)
if player__Marker=='X' or player__Marker=='O':
break
else:
print 'Invalid Marker. Choose again'
continue
if player=='A':
if player__Marker=='X':
marker_list={'A':1, 'B':-1}
else:
marker_list={'A':-1, 'B':1}
else:
if player__Marker=='X':
marker_list={'A':-1, 'B':1}
else:
marker_list={'A':1, 'B':-1}
return marker_list
def win_Check(board,playerMarker,win_flag):
'''
Check for winning Player
Returns: "1" if Player "A" wins or "-1" if Player "B" wins else "0" for No Win
'''
if board[0]==board[1]==board[2]==1 or board[0]==board[1]==board[2]==-1:
if (playerMarker['A']==1 and board[0]==1) or (playerMarker['A']==-1 and board[0]==-1):
win_flag=1
else:
win_flag=-1
elif board[3]==board[4]==board[5]==1 or board[3]==board[4]==board[5]==-1:
if (playerMarker['A']==1 and board[3]==1) or (playerMarker['A']==-1 and board[3]==-1):
win_flag=1
else:
win_flag=-1
elif board[6]==board[7]==board[8]==1 or board[6]==board[7]==board[8]==-1:
if (playerMarker['A']==1 and board[6]==1) or (playerMarker['A']==-1 and board[6]==-1):
win_flag=1
else:
win_flag=-1
elif board[0]==board[3]==board[6]==1 or board[0]==board[3]==board[6]==-1:
if (playerMarker['A']==1 and board[0]==1) or (playerMarker['A']==-1 and board[0]==-1):
win_flag=1
else:
win_flag=-1
elif board[1]==board[4]==board[7]==1 or board[1]==board[4]==board[7]==-1:
if (playerMarker['A']==1 and board[1]==1) or (playerMarker['A']==-1 and board[1]==-1):
win_flag=1
else:
win_flag=-1
elif board[2]==board[5]==board[8]==1 or board[2]==board[5]==board[8]==-1:
if (playerMarker['A']==1 and board[2]==1) or (playerMarker['A']==-1 and board[2]==-1):
win_flag=1
else:
win_flag=-1
elif board[0]==board[4]==board[8]==1 or board[0]==board[4]==board[8]==-1:
if (playerMarker['A']==1 and board[0]==1) or (playerMarker['A']==-1 and board[0]==-1):
win_flag=1
else:
win_flag=-1
elif board[2]==board[4]==board[6]==1 or board[2]==board[4]==board[6]==-1:
if (playerMarker['A']==1 and board[2]==1) or (playerMarker['A']==-1 and board[2]==-1):
win_flag=1
else:
win_flag=-1
else:
win_flag=0
return win_flag
def checkBoard(board,flags,playerMarker):
'''
Checks for Status of the game
'''
flags['Winner']=win_Check(board,playerMarker,flags['Winner'])
if board.count(0)==0:
flags['FullBoard']=1
if flags['Winner']==0:
flags['Tie']=1
else:
if flags['Winner']!=0:
flags['Ongoing']=0
return flags
def normalize_board(board):
'''
This function makes the board correspond to the numeric keypad layout
'''
x=[]
for i in xrange(0,9,3):
x.append(board[i:i+3])
x2=[]
for i in xrange(2,-1,-1):
for j in xrange(3):
x2.append(x[i][j])
return x2
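# Worked example: normalize_board([1,2,3,4,5,6,7,8,9]) returns [7,8,9,4,5,6,1,2,3],
# so the printed top row shows the squares selected by numpad keys 7, 8 and 9.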
def printBoard(board):
pos=0
boardpos=normalize_board(board)
for i in xrange(13):
for j in xrange(13):
if i%4==0:
if j%4==0:
print '+',
else:
print '-',
elif i%2==0 and i%4!=0:
if j%2==0 and j%4!=0:
markerValue=boardpos[pos]
if markerValue==1:
print 'X',
elif markerValue==-1:
print 'O',
else:
print ' ',
pos+=1
elif j%4==0:
print '|',
else:
print ' ',
else:
if j%4==0:
print '|',
else:
print ' ',
print ''
while True:
clear_screen()
board=[0]*9
Flags={'Ongoing':1,'Tie':0,'Winner':0,'FullBoard':0}
player_Marker=getPlayerMarker()
printBoard(board)
while Flags['Ongoing']==1 and Flags['FullBoard']==0:
board=get_Input(board,player_Marker)
clear_screen()
printBoard(board)
Flags=checkBoard(board,Flags,player_Marker)
if Flags['Tie']:
print 'Board Full!! The Game is Tied'
break
elif Flags['Winner']==1:
print 'Player A Wins!!'
break
elif Flags['Winner']==-1:
print 'Player B Wins!!'
break
CURRENT_PLAYER=np.negative(CURRENT_PLAYER)
next_Play=raw_input("Do you want to Play again? Yes/No ")
if next_Play.lower()=='yes' or next_Play=='y':
CURRENT_PLAYER=0
continue
else:
break
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='mysql2csv',
version='1.0',
description='Convert some or all tables in a MySQL database into CSV files.',
author='Elcio Ferreira',
author_email='elcio@visie.com.br',
maintainer='Ricardo Lafuente',
maintainer_email='r@manufacturaindependente.org',
url='http://github.com/rlafuente/mysql2csv/',
download_url='https://github.com/rlafuente/mysql2csv/tarball/master',
scripts=['mysql2csv'],
keywords=['mysql', 'csv'],
license="MIT"
)
|
# Generated by Django 3.1.1 on 2020-10-01 14:50
import app_users.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofileinfo',
name='profile_pic',
field=models.ImageField(blank=True, upload_to=app_users.models.path_and_rename, verbose_name='Profile Picture'),
),
]
|
import sys
import traceback
from flask import Flask, jsonify
class MyException(Exception):
@classmethod
def __call__(cls, message=None):
cls.error_message = message
return cls
@classmethod
def response(cls):
traceback.print_exc()
return jsonify({'error': str(cls.error_message)}), cls.status_code
class MethodNotAllowedException(MyException):
status_code = 405
error_message = 'parse error. json format is not correct.'
class InternalServerException(MyException):
status_code = 500
error_message = 'unknown error.'
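# Usage sketch (assumed wiring, not part of the original module): these classes are meant to be
# returned from Flask error handlers, e.g.
# @app.errorhandler(405)
# def handle_405(e):
#     return MethodNotAllowedException.response()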
|
from flask import Flask
from flask import request
from co2.co2_db import Co2DB
from datetime import datetime, timedelta
app = Flask(__name__)
DBConfig = {"user":"myusr", "password":"myusrpass", "host":"localhost", "port":"3306", "database":"mydb"}
co2DB = Co2DB(DBConfig)
try:
co2DB.writeCo2ValuesToDB()
except Exception:
pass
@app.route('/')
def index():
return 'Co2 Service'
@app.route('/updateco2values', methods=['POST'])
def updateCo2Values():
if request.method == 'POST':
co2DB.writeCo2ValuesToDB()
return "success", 200
return "HTTP-Method not supported", 404
if __name__ == '__main__':
app.run(debug=True, port=5005) #run app in debug mode on port 5005
|
from django.urls import path
from .views import ArticleListView, ArticleDetail
app_name = 'article'
urlpatterns = [
path('list', ArticleListView.as_view(), name="article_list"),
path('detail/<int:article_id>/', ArticleDetail.as_view(), name='article_detail'),
]
|
from django.shortcuts import render
from Nucleo.models import Chofer,Viaje,User
# Create your views here.
def CancelarViaje(request,id):
print("cancelo",id)
try:
chofer = Chofer.objects.get(user=User(id=request.user.id))
viaje = Viaje.objects.get(id=id)
viajes = Viaje.objects.filter(conductor=chofer)
if viaje.conductor!=chofer:
return render(request, 'listarviajes.html', {"viajes": viajes, "mensaje":
"No estás autorizado a realizar esa acción"})
viaje.limpiar_reservas()
viaje.delete()
return render(request, 'listarviajes.html', {"viajes": viajes,"mensaje":"Viaje eliminado exitosamente"})
except Viaje.DoesNotExist:
viajes = Viaje.objects.filter(conductor=chofer)
return render(request, 'listarviajes.html', {"viajes": viajes,"mensaje":"El viaje no existe"})
def ConfirmarCancelacion(request,id):
try:
viaje = Viaje.objects.get(id=id)
except Viaje.DoesNotExist:
return render(request, 'error.html',
{"mensaje": "El viaje no existe",
"redirection": "/listarviajes/"})
try:
chofer = Chofer.objects.get(user=User(id=request.user.id))
except Chofer.DoesNotExist:
return render(request, 'error.html',
{"mensaje": "No has ingresado al sistema como conductor",
"redirection": "/"})
chofer_viaje = Chofer.objects.get(user=User(id=viaje.conductor.id))
if chofer!=chofer_viaje:
return render(request, 'error.html',
{"mensaje": "No tienes permiso para realizar esta acción",
"redirection": "/listarviajes/"})
return render(request, 'confirmacion.html', {"viaje_cancelado": viaje})
|
from typing import Iterable
from dl.graph.variable import Variable
from dl.utils.fitShape import fit_shape
import numpy as np
from scipy.special import erf
class Operator(object):
"""
Basic class for all the operator objects.
"""
def __call__(self, *var) -> Variable:
"""
Perform the computation.
Parameters
----------
var: Variable..
Variables to perform the computation.
Returns
-------
out: Variable
            A Variable object containing the computation result.
"""
return self.compute(*var)
def compute(self, *var) -> Variable:
"""
Perform the computation.
Parameters
----------
var: Variable..
Operands of the computation.
Returns
-------
out: Variable
            A Variable object containing the computation result.
"""
raise NotImplementedError
def gradient(self, input_variables: list, prev_grad: np.ndarray) -> Iterable:
"""
Calculate the gradient through chain rules.
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
The gradient of all operands in this computation.
"""
raise NotImplementedError
class Add(Operator):
"""
Operator object of add.
"""
def compute(self, *var) -> Variable:
"""
Perform the add computation.
Parameters
----------
var: Variable..
Variables to be added.
Returns
-------
out: Variable
            The resulting Variable object.
"""
return Variable(var[0].item + var[1].item,
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables: list, prev_grad: np.ndarray) -> Iterable:
"""
Calculate the gradient through chain rules.
        For the add operation, the local gradient is 1 for each operand.
        z = x + y, w = f(z), then
        dw/dx = dw/dz and dw/dy = dw/dz
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            A tuple of gradients of the two operands.
"""
# Gradient is reshaped to fit the operand.
return fit_shape(prev_grad, input_variables[0]), fit_shape(prev_grad, input_variables[1])
class Mul(Operator):
"""
Operator object of multiplication.
"""
def compute(self, *var):
"""
Perform the multiply computation.
Parameters
----------
var: Variable..
Variables to be multiplied.
Returns
-------
out: Variable
            The resulting Variable object.
"""
return Variable(var[0].item * var[1].item,
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        For the multiply operation, the gradient for each operand is the previous gradient times the value of the other operand.
        z = x * y, w = f(z), then
        dw/dx = y * dw/dz and dw/dy = x * dw/dz
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            A tuple of gradients of the two operands.
"""
return fit_shape(input_variables[1].item * prev_grad, input_variables[0].item),\
fit_shape(input_variables[0].item * prev_grad, input_variables[1].item)
class MatMul(Operator):
"""
Operator object of matrix multiplication.
"""
def compute(self, *var):
"""
Perform the matrix multiplication.
Parameters
----------
var: Variable..
Operands of matrix multiplication.
Returns
-------
out: Variable
Result variable object.
"""
return Variable(np.matmul(var[0].item, var[1].item),
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        For matrix multiplication, the gradient is the previous gradient matrix-multiplied with the transpose of the other operand.
        z = x @ y, w = f(z), then
        dw/dx = dw/dz @ y.T
        dw/dy = x.T @ dw/dz
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            A tuple of gradients of the two operands.
"""
return np.matmul(prev_grad, input_variables[1].item.T), \
np.matmul(input_variables[0].item.T, prev_grad)
class ReLU(Operator):
"""
Operator object of ReLU operation.
"""
def compute(self, *var) -> Variable:
"""
Perform the ReLU operation.
Parameters
----------
var: Variable..
The operand.
Returns
-------
out: Variable
Result Variable object.
"""
return Variable(np.where(var[0].item > 0, var[0].item, 0),
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        For ReLU, the local gradient is 1 if the operand is greater than 0, else 0.
        z = relu(x), w = f(z), then
        dw/dx = dw/dz if x > 0 else 0
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            The gradient of the operand.
"""
grad = np.where(input_variables[0].item > 0, 1, 0) * prev_grad
return [grad]
class SoftMax(Operator):
"""
Operator object of SoftMax operation.
"""
def compute(self, *var):
"""
Perform the softmax operation.
Parameters
----------
var: Variable..
The vector or matrix.
Returns
-------
out: Variable
A Variable object of Softmax result.
"""
max_val = np.max(var[0].item, axis=0)
"""
avoid underflow/overflow of exp operation.
e^x1 / (e^x1 + ... + e^xi) = (e^(x1 - xmax)) / (e^(x1-xmax) +...+ e^(xi-xmax))
"""
return Variable(np.exp(var[0].item-max_val) / np.sum(np.exp(var[0].item-max_val), axis=0),
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        The gradient of the softmax operation is omitted here and integrated into the loss function (CrossEntropy).
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            The gradients of the given vector or matrix.
"""
return [prev_grad] # No grad needed for output
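# Illustrative worked example (added for clarity, not part of the original module):
# the max-subtraction in SoftMax.compute is what keeps np.exp from overflowing.
# For logits [1000.0, 1001.0], np.exp would overflow to inf, but subtracting the
# max gives np.exp([-1.0, 0.0]) = [0.3679, 1.0], so softmax = [0.2689, 0.7311].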
class CrossEntropy(Operator):
"""
Operator object of Cross Entropy operation.
"""
def __init__(self, eps=1e-8):
self.eps = eps
def compute(self, *var): # var = [y, yhat]
"""
Perform the CrossEntropy computation.
Parameters
----------
var: Variable..
[y, yhat]
Returns
-------
out: Variable
Cross Entropy loss.
"""
softmax_input = var[1].input_vars[0].item
input_max = np.max(softmax_input, axis=0)
reduced_input = softmax_input - input_max
exp_res = np.exp(reduced_input)
exp_sum_other = np.sum(exp_res, axis=0)
log_res_of_softmax = reduced_input - np.log(exp_sum_other)
"""
        Avoid log(0) error (log-sum-exp trick).
        log(e^x1 / (e^x1 + ... + e^xi)) = log(e^(x1-xmax) / (e^(x1-xmax) + ... + e^(xi-xmax)))
                                        = log(e^(x1-xmax)) - log(e^(x1-xmax) + ... + e^(xi-xmax))
                                        = x1 - xmax - log(e^(x1-xmax) + ... + e^(xi-xmax))
"""
return Variable(
np.mean(-np.sum(var[0].item * log_res_of_softmax, axis=0) / var[0].item.shape[0]),
input_vars=var,
operator=self,
no_grad=True
)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        The gradient of Cross Entropy is combined with the gradient of the softmax.
        y = softmax(x), z = CE(y, ground truth), then
        dz/dx = y - ground truth
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            The gradients of the variables before the softmax.
"""
return 0, input_variables[1].item - input_variables[0].item # No gradients needed for ground truth
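# Illustrative worked example (added for clarity): with one-hot target y = [0, 1, 0]
# and softmax output p = [0.2, 0.7, 0.1], the gradient w.r.t. the logits returned
# above is p - y = [0.2, -0.3, 0.1], i.e. the correct class is pushed up and the
# other classes are pushed down.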
class Sub(Operator):
"""
    Operator object of subtraction. This operator is not used by the current functions, but can be used in
    general Variable calculations.
"""
def compute(self, *var):
"""
        Perform the subtraction.
Parameters
----------
var: Variable..
            Two operands of the subtraction.
Returns
-------
out: Variable
A Variable object of the difference of two operands.
"""
return Variable(var[0].item - var[1].item,
input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient through chain rules.
        The gradient of subtraction is 1 and -1 for the two operands, respectively.
        z = x - y, w = f(z), then
        dw/dx = dw/dz
        dw/dy = -dw/dz
Parameters
----------
input_variables: Variable..
Operands of this computation.
prev_grad: np.ndarray
Gradient from previous computation.
Returns
-------
out: Iterable
            The gradients of the operands.
"""
# Gradient is reshaped to fit the operand.
return fit_shape(prev_grad, input_variables[0]), fit_shape(-prev_grad, input_variables[1])
class Dropout(Operator):
"""
Operator object of Dropout operation.
"""
def __init__(self, rate: float) -> None:
"""
Dropout object. Recording the mask used to perform the dropout operation.
Parameters
----------
rate: float
Dropout rate.
"""
self.rate = rate
self.mask = None
self.eval = False
def maskGen(self, shape: tuple) -> None:
"""
Generate mask for dropout.
Parameters
----------
shape: tuple
Returns
-------
out:
None
"""
        self.mask = np.ones(shape)  # if the model is being evaluated, dropout is not performed.
        if not self.eval:
            input_neurons = shape[0]
            dropout = int(input_neurons * self.rate)
            choice = np.random.choice(input_neurons, size=dropout, replace=False)
            for i in choice:
                self.mask[i] = np.zeros_like(self.mask[i])
            self.mask *= 1 / (1 - self.rate)  # Scale the surviving entries up so the expected activation is unchanged (inverted dropout).
def compute(self, *var):
"""
Perform Dropout operation.
Parameters
----------
var:
Variable to be dropout-ed.
Returns
-------
out:
Variable has been dropout-ed
"""
self.maskGen(var[0].shape) # generate masks.
return Variable(var[0].item * self.mask, input_vars=var, operator=self, no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
        Calculate the gradient of the variable that has been dropped out.
        The gradient of the input is the previous gradient multiplied by the mask used in the forward dropout pass.
        y = dropout(x, mask), z = f(y), then
        dz/dx = dz/dy * mask
Parameters
----------
input_variables:
Input of the dropout operation.
prev_grad:
Gradient from previous computation.
Returns
-------
out:
Gradient of the operand.
"""
return [prev_grad * self.mask]
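# Illustrative worked example (added for clarity): with rate = 0.2 the surviving
# entries of the mask are scaled by 1 / (1 - 0.2) = 1.25, so the expected value of
# each activation is unchanged and no rescaling is needed at evaluation time
# (the "inverted dropout" convention used by maskGen above).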
class BatchNorm(Operator):
"""
Operator object for batch-normalization.
"""
def compute(self, *var): # var = [val, mean, stddev, eps]
"""
Perform the batch-norm operation.
        The mean and variance are calculated in another module, as is the shifting.
        output = (value - mean) / sqrt(var + eps)  # epsilon avoids division by zero.
Parameters
----------
var: Variable..
Value, mean, stddev and epsilon
Returns
-------
out: Variable
The batchnorm result.
"""
return Variable((var[0].item - var[1]) / np.sqrt(var[2] + var[3]),
input_vars=[var[0], Variable(np.sqrt(var[2] + var[3]), no_grad=True)],
operator=self,
no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Calculate the gradient of batchnorm.
        Basically the batchnorm operation is:
        bn(x) = (x - mean) / sqrt(var + eps)
        thus:
        y = bn(x), z = f(y), then
        dz/dx = (dz/dy) / sqrt(var + eps)
Parameters
----------
input_variables
prev_grad
Returns
-------
out:
Gradient of the input.
"""
return prev_grad / input_variables[1].item, 0
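# Illustrative worked example (added for clarity): for a batch x = [1, 2, 3] with
# mean = 2 and variance = 2/3, sqrt(var + eps) is about 0.8165, so the normalized
# output is approximately [-1.2247, 0.0, 1.2247] (zero mean, unit variance).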
class GELU(Operator):
"""
Estimating GELU Operator.
"""
def compute(self, *var):
"""
        Estimates the GELU operation using the tanh approximation:
        gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
Parameters
----------
var:
input
Returns
-------
out:
GELU result
"""
x = var[0].item
return Variable(0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3)))),
input_vars=var,
operator=self,
no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Estimates the gradient of gelu operation.
Parameters
----------
input_variables:
Input
prev_grad:
Gradient from previous operations.
Returns
-------
out:
Gradient of input.
"""
x = input_variables[0].item
return (0.5 * np.tanh(0.0356774 * np.power(x, 3) + 0.797885 * x) +
(0.0535161 * np.power(x, 3) + 0.398942 * x) *
np.power((1 / np.cosh(0.0356774 * np.power(x, 3) + 0.797885 * x)), 2) + 0.5) * prev_grad
def _p(var):
"""Caculate Bernoulli distribution"""
return .5 * (1. + erf(var / np.sqrt(2.)))
class _GELU(Operator):
"""
Accurate GELU computation.
"""
def compute(self, *var):
"""
Accurate GELU computation.
        GELU(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
Parameters
----------
var:
Input
Returns
-------
out:
Result.
"""
return Variable(var[0].item * _p(var[0].item),
input_vars=var,
operator=self,
no_grad=True)
def gradient(self, input_variables, prev_grad):
"""
Compute the accurate gradient of GELU operation.
        y = GELU(x), z = f(y), then
        dz/dx = (Phi(x) + x * phi(x)) * dz/dy, where phi(x) = e^(-x^2 / 2) / sqrt(2 * pi)
Parameters
----------
input_variables:
Input of GELU
prev_grad
Gradient from previous operations
Returns
-------
out:
Gradient of input.
"""
x = input_variables[0].item
        # phi(x) = exp(-x^2 / 2) / sqrt(2 * pi) is the standard normal PDF.
        return (_p(x) + x / np.sqrt(2 * np.pi) * np.exp(-np.power(x, 2) / 2)) * prev_grad
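# Illustrative check (added for clarity): at x = 1.0 the exact GELU above gives
# 1.0 * Phi(1.0), about 0.8413, while the tanh approximation in GELU gives about
# 0.8412, so the two variants agree to roughly three decimal places.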
def relu(var: Variable) -> Variable:
"""
    ReLU activation.
    relu(x) = x if x > 0 else 0
Parameters
----------
var:
Input
Returns
-------
out:
Result of ReLU.
"""
return ReLU()(var)
def softmax(var: Variable) -> Variable:
"""
Softmax activation.
    softmax(x)_i = e^(x_i) / sum_j(e^(x_j))
Parameters
----------
var:
Input
Returns
-------
out:
Result of Softmax.
"""
return SoftMax()(var)
def batchNorm(x, mean, var, eps) -> Variable:
"""
    BatchNorm operation. The mean, variance and eps must be computed beforehand.
Parameters
----------
x:
Input
mean:
Mean of input.
var:
        Variance of the input.
eps:
a very small value to avoid divide by zero.
Returns
-------
out:
Result of BatchNorm
"""
return BatchNorm()(x, mean, var, eps)
def gelu(var, estimate=True):
"""
GELU activation.
Parameters
----------
var:
Input
estimate:
        Whether to use the tanh approximation instead of the exact erf-based GELU.
Returns
-------
out:
GELU result.
"""
return GELU()(var) if estimate else _GELU()(var)
def do_nothing(var):
return var
|
# https://www.hackerrank.com/challenges/big-sorting/problem?isFullScreen=true
def myFunc(a):
return int(a)
def bigSorting(unsorted):
# Write your code here
unsorted.sort(key=myFunc)
return unsorted
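# Hedged usage sketch (added for illustration; the sample values are made up):
# sorting by the integer value of each string orders arbitrarily large numbers
# correctly, whereas a plain lexicographic sort would place '10' before '3'.
if __name__ == '__main__':
    sample = ['31415926535897932384626433832795', '1', '3', '10', '3', '5']
    print(bigSorting(sample))
    # expected: ['1', '3', '3', '5', '10', '31415926535897932384626433832795']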
|
import os
import argparse
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-data_path", help="Path to your dataset", type=str, default='')
parser.add_argument("-print_string", action='store_true')
params = parser.parse_args()
main_func(params)
def main_func(params):
classes = [d.name for d in os.scandir(params.data_path) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
print('Classes:')
print('', class_to_idx)
if params.print_string:
print('\nParam string:')
class_string = ','.join(classes)
print('', class_string)
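    # Illustrative worked example (added for clarity; the folder names are hypothetical):
    # for a data_path containing the subfolders cat/, dog/ and horse/, this prints
    # {'cat': 0, 'dog': 1, 'horse': 2} and, with -print_string, the param string
    # "cat,dog,horse".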
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
CONTAINED_IN_RELATIONSHIP_NAME = 'cloudify.relationships.contained_in'
def is_contained_in(context, relationship_or_relationship_template):
"""
Whether we are a contained-in relationship or relationship template.
"""
if relationship_or_relationship_template is None:
return False
return context.modeling.relationship_types.is_descendant(CONTAINED_IN_RELATIONSHIP_NAME, relationship_or_relationship_template.type_name)
|
"""Device Schema."""
import re
import fnmatch
import PyTango
from operator import attrgetter
from graphene import Interface, String, Int, List, Boolean, Field, ObjectType
from tangogql.schema.base import db, proxies
from tangogql.schema.types import TypeConverter
from tangogql.schema.attribute import DeviceAttribute
from tangogql.schema.attribute import ScalarDeviceAttribute
from tangogql.schema.attribute import ImageDeviceAttribute
from tangogql.schema.attribute import SpectrumDeviceAttribute
from tangogql.schema.log import UserAction, user_actions
class DeviceProperty(ObjectType, Interface):
""" This class represents a property of a device. """
name = String()
device = String()
value = List(String)
def resolve_value(self, info):
""" This method fetch the value of the property by its name.
:return: A list of string contains the values corespond to the name of
the property.
:rtype: str
"""
device = self.device
name = self.name
value = db.get_device_property(device, name)
if value:
return [line for line in value[name]]
class DeviceCommand(ObjectType, Interface):
"""This class represents an command and its properties."""
name = String()
tag = Int()
displevel = String()
intype = String()
intypedesc = String()
outtype = String()
outtypedesc = String()
class DeviceInfo(ObjectType, Interface):
""" This class represents info of a device. """
id = String() # server id
host = String() # server host
class Device(ObjectType, Interface):
"""This class represent a device."""
name = String()
state = String()
connected = Boolean()
properties = List(DeviceProperty, pattern=String())
attributes = List(DeviceAttribute, pattern=String())
commands = List(DeviceCommand, pattern=String())
server = Field(DeviceInfo)
user_actions = List(UserAction, skip=Int(), first=Int())
device_class = String()
# server = String()
pid = Int()
started_date = String()
stopped_date = String()
exported = Boolean()
def resolve_user_actions(self, info, skip=None, first=None):
result = user_actions.get(self.name)
if skip:
result = result[skip:]
if first:
result = result[:first]
return result
async def resolve_state(self, info):
"""This method fetch the state of the device.
:return: State of the device.
:rtype: str
"""
try:
proxy = self._get_proxy()
return await proxy.state()
except (PyTango.DevFailed, PyTango.ConnectionFailed,
PyTango.CommunicationFailed, PyTango.DeviceUnlocked):
return "UNKNOWN"
except Exception as e:
return str(e)
def resolve_properties(self, info, pattern="*"):
"""This method fetch the properties of the device.
:param pattern: Pattern for filtering the result.
Returns only properties that matches the pattern.
:type pattern: str
:return: List of properties for the device.
:rtype: List of DeviceProperty
"""
#TODO:Db calls are not asynchronous in tango
props = db.get_device_property_list(self.name, pattern)
return [DeviceProperty(name=p, device=self.name) for p in props]
async def resolve_attributes(self, info, pattern="*"):
"""This method fetch all the attributes and its' properties of a device.
:param pattern: Pattern for filtering the result.
Returns only properties that match the pattern.
:type pattern: str
:return: List of attributes of the device.
:rtype: List of DeviceAttribute
"""
# TODO: Ensure that result is passed properly, refresh mutable
# arguments copy or pointer ...? Tests are passing ...
def append_to_result(result, klass, attr_info):
if attr_info.writable == PyTango._tango.AttrWriteType.WT_UNKNOWN:
wt = 'READ_WITH_WRITE'
else:
wt = attr_info.writable
data_type = PyTango.CmdArgType.values[attr_info.data_type]
result.append(klass(
name=attr_info.name,
device=self.name,
writable=wt,
datatype=data_type,
dataformat=attr_info.data_format,
label=attr_info.label,
unit=attr_info.unit,
description=attr_info.description,
displevel=attr_info.disp_level,
minvalue=None if attr_info.min_value == "Not specified" else TypeConverter.convert(data_type,attr_info.min_value),
maxvalue=None if attr_info.max_value == "Not specified" else TypeConverter.convert(data_type,attr_info.max_value),
minalarm=None if attr_info.min_alarm == "Not specified" else TypeConverter.convert(data_type,attr_info.min_alarm),
maxalarm=None if attr_info.max_alarm == "Not specified" else TypeConverter.convert(data_type,attr_info.max_alarm)
)
)
result = []
if await self._get_connected():
proxy = self._get_proxy()
attr_infos = proxy.attribute_list_query()
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
sorted_info = sorted(attr_infos, key=attrgetter("name"))
for attr_info in sorted_info:
if rule.match(attr_info.name):
if str(attr_info.data_format) == "SCALAR":
append_to_result(result,
ScalarDeviceAttribute, attr_info)
if str(attr_info.data_format) == "SPECTRUM":
append_to_result(result,
SpectrumDeviceAttribute, attr_info)
if str(attr_info.data_format) == "IMAGE":
append_to_result(result,
ImageDeviceAttribute, attr_info)
return result
async def resolve_commands(self, info, pattern="*"):
"""This method fetch all the commands of a device.
:param pattern: Pattern for filtering of the result.
Returns only commands that match the pattern.
:type pattern: str
:return: List of commands of the device.
:rtype: List of DeviceCommand
"""
if await self._get_connected():
proxy = self._get_proxy()
cmd_infos = proxy.command_list_query()
rule = re.compile(fnmatch.translate(pattern), re.IGNORECASE)
def create_device_command(cmd_info):
return DeviceCommand(name=cmd_info.cmd_name,
tag=cmd_info.cmd_tag,
displevel=cmd_info.disp_level,
intype=cmd_info.in_type,
intypedesc=cmd_info.in_type_desc,
outtype=cmd_info.out_type,
outtypedesc=cmd_info.out_type_desc
)
return [create_device_command(a)
for a in sorted(cmd_infos, key=attrgetter("cmd_name"))
if rule.match(a.cmd_name)]
else:
return []
async def resolve_server(self, info):
""" This method fetch the server infomation of a device.
:return: List server info of a device.
:rtype: List of DeviceInfo
"""
if await self._get_connected():
proxy = self._get_proxy()
dev_info = proxy.info()
return DeviceInfo(id=dev_info.server_id,
host=dev_info.server_host)
def resolve_exported(self, info):
""" This method fetch the infomation about the device if it is exported or not.
:return: True if exported, False otherwise.
:rtype: bool
"""
return self._get_info().exported
def resolve_device_class(self, info):
return self._get_info().class_name
def resolve_pid(self, info):
return self._get_info().pid
def resolve_started_date(self, info):
return self._get_info().started_date
def resolve_stopped_date(self, info):
return self._get_info().stopped_date
async def resolve_connected(self, info):
return await self._get_connected()
def _get_proxy(self):
if not hasattr(self, "_proxy"):
self._proxy = proxies.get(self.name)
return self._proxy
async def _get_connected(self):
if not hasattr(self, "_connected"):
try:
proxy = self._get_proxy()
await proxy.state()
self._connected = True
except (PyTango.DevFailed, PyTango.ConnectionFailed):
self._connected = False
return self._connected
def _get_info(self):
"""This method fetch all the information of a device."""
if not hasattr(self, "_info"):
self._info = db.get_device_info(self.name)
return self._info
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Purpose: find the index of the last element in a given list that satisfies the provided test function.
Explanation:
Use a generator expression with enumerate() and next() on the reversed list to return the index of the last element in lst for which fn returns True.
"""
def find_last_index(lst, fn):
return len(lst) - 1 - next(i for i, x in enumerate(lst[::-1]) if fn(x))
# Examples
print(find_last_index([1, 2, 3, 4], lambda n: n % 2 == 1))
# output:
# 2
|
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
import moderation
def _get_content_types(self):
content_types = []
for model in sorted(moderation.moderation._registered_models.keys(),
key=lambda obj: obj.__name__):
content_types.append(ContentType.objects.get_for_model(model))
return content_types
try:
from django.contrib.admin.filters import FieldListFilter
except ImportError:
pass
else:
#Django 1.4 filter
class RegisteredContentTypeListFilter(FieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
self.lookup_kwarg = '%s' % field_path
self.lookup_val = request.GET.get(self.lookup_kwarg)
self._types = _get_content_types(self)
super(RegisteredContentTypeListFilter, self).__init__(
field, request, params, model, model_admin, field_path)
def expected_parameters(self):
return [self.lookup_kwarg]
def choices(self, cl):
yield {
'selected': self.lookup_val is None,
'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
'display': _('All')
}
for type in self._types:
yield {
'selected': smart_unicode(type.id) == self.lookup_val,
'query_string': cl.get_query_string({
self.lookup_kwarg: type.id}),
'display': unicode(type),
}
try:
from django.contrib.admin.filterspecs import FilterSpec, RelatedFilterSpec
except ImportError:
pass
else:
#Django 1.3 Filterspec
#Untested.
class ContentTypeFilterSpec(RelatedFilterSpec):
def __init__(self, *args, **kwargs):
super(ContentTypeFilterSpec, self).__init__(*args, **kwargs)
            self.content_types = _get_content_types(self)
self.lookup_choices = [(ct.id, ct.name.capitalize())\
for ct in self.content_types]
FilterSpec.filter_specs.insert(0, (lambda f: getattr(f, 'content_type_filter',
False),
ContentTypeFilterSpec))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""mz3 module
This module is reformatted from https://github.com/neurolabusc/surf-ice/blob/master/mz3/mz3.py
"""
__all__ = ['load_mz3_mesh']
##====================================================================================
## dependent libraries
##====================================================================================
import struct
import gzip
import numpy as np
import os
##====================================================================================
##
##====================================================================================
def read(fnm, isVerbose):
invalid_mz3 = (None, None, None, None)
faces = []
verts = []
rbga = []
scalar = []
with open(fnm, 'rb') as f:
MAGIC = np.fromfile(f, '<u2', 1)[0]
isGz = False
if MAGIC != 23117: # incorrect magic: assume gzip
isGz = True
fz = bytearray(gzip.open(fnm, 'r').read())
MAGIC = np.frombuffer(fz, '<u2', 1, 0)[0]
if MAGIC != 23117:
print('Not a valid MZ3 file')
return invalid_mz3
ATTR = np.frombuffer(fz, '<u2', 1, 2)[0]
NFACE = np.frombuffer(fz, '<u4', 1, 4)[0]
NVERT = np.frombuffer(fz, '<u4', 1, 8)[0]
NSKIP = np.frombuffer(fz, '<u4', 1, 16)[0]
isFACE = ATTR & 1 != 0
isVERT = ATTR & 2 != 0
isRGBA = ATTR & 4 != 0
isSCALAR = ATTR & 8 != 0
# quit if file does not make sense
if ATTR > 15:
print('Unable to read future version of MZ3 file')
return invalid_mz3
if NVERT < 1:
print('Unable to read MZ3 files without vertices')
return invalid_mz3
if (NFACE < 1) & isFACE:
print('MZ3 files with isFACE must specify NFACE')
return invalid_mz3
pos = 16 + NSKIP # data
if isFACE: # each face is 3 UINT32 vertex indices
faces = np.frombuffer(fz, '<u4', NFACE * 3, pos)
pos += NFACE * 12
if isVERT: # each vertex is 3 FLOAT32 (xyz)
verts = np.frombuffer(fz, '<f4', NVERT * 3, pos)
pos += NVERT * 12
if isRGBA: # each vertex has UINT32 RGBA value
rbga = np.frombuffer(fz, '<u4', NVERT, pos)
pos += NVERT * 4
            if isSCALAR:
                # bytearrays have no seek(); the total size is simply len(fz).
                NSCALAR = np.floor((len(fz) - pos) / (NVERT
                                   * 4)).astype(int)
                scalar = np.frombuffer(fz, '<f4', NVERT * NSCALAR, pos)
                pos += NVERT * NSCALAR * 4
else:
# read attributes
ATTR = np.fromfile(f, '<u2', 1)[0]
NFACE = np.fromfile(f, '<u4', 1)[0]
NVERT = np.fromfile(f, '<u4', 1)[0]
NSKIP = np.fromfile(f, '<u4', 1)[0]
isFACE = ATTR & 1 != 0
isVERT = ATTR & 2 != 0
isRGBA = ATTR & 4 != 0
isSCALAR = ATTR & 8 != 0
# quit if file does not make sense
if ATTR > 15:
print('Unable to read future version of MZ3 file')
return invalid_mz3
if NVERT < 1:
print('Unable to read MZ3 files without vertices')
return invalid_mz3
if (NFACE < 1) & isFACE:
print('MZ3 files with isFACE must specify NFACE')
return invalid_mz3
pos = 16 + NSKIP # data
if NSKIP > 0: # skip bytes
                skip = np.fromfile(f, '<u1', NSKIP)  # read NSKIP single bytes so the data starts at offset 16 + NSKIP
if isFACE: # each face is 3 UINT32 vertex indices
faces = np.fromfile(f, '<u4', NFACE * 3)
pos += NFACE * 12
if isVERT: # each vertex is 3 FLOAT32 (xyz)
verts = np.fromfile(f, '<f4', NVERT * 3)
pos += NVERT * 12
if isRGBA: # each vertex has UINT32 RGBA value
rbga = np.fromfile(f, '<u4', NVERT)
pos += NVERT * 4
            if isSCALAR:
                f.seek(0, os.SEEK_END)
                NSCALAR = np.floor((f.tell() - pos) / (NVERT
                                   * 4)).astype(int)
                f.seek(pos)  # rewind to the start of the scalar block before reading
                scalar = np.fromfile(f, '<f4', NVERT * NSCALAR)
                pos += NVERT * NSCALAR * 4
# Optional verbose reporting
# report contents
if isVerbose:
print('MAGIC %d ATTR %d' % (MAGIC, ATTR))
print('NFACE %d NVERT %d NSKIP %d' % (NFACE, NVERT, NSKIP))
print(' isFACE %r isVERT %r' % (isFACE, isVERT))
print(' isRGBA %r isSCALAR %r' % (isRGBA, isSCALAR))
if isVerbose & (len(faces) > 0):
NFACE = len(faces) // 3
j = 0
for i in range(NFACE):
print('%d face %d %d %d' % (i, faces[j], faces[j + 1],
faces[j + 2]))
j = j + 3
if isVerbose & (len(verts) > 0):
NVERT = len(verts) // 3
j = 0
for i in range(NVERT):
print('%d vert %g %g %g' % (i, verts[j], verts[j + 1],
verts[j + 2]))
j = j + 3
if isVerbose & (len(rbga) > 0):
for i in range(len(rbga)):
rgba = struct.unpack('4B', struct.pack('I', rbga[i]))
print('%d rgba %d %d %d %d' % (i, rgba[0], rgba[1],
rgba[2], rgba[3]))
if isVerbose & (len(scalar) > 0):
for i in range(len(scalar)):
print('%d scalar %g' % (i, scalar[i]))
return (faces, verts, rbga, scalar)
def load_mz3_mesh(filepath, isVerbose):
(faces, verts, rbga, scalar) = read(filepath, isVerbose)
if verts is None:
print('Invalid file')
return
return (verts, faces)
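# Hedged usage sketch (added for illustration; the file name is hypothetical):
#
#     verts, faces = load_mz3_mesh('cortex.mz3', isVerbose=False)
#
# verts is a flat float32 array holding x, y, z per vertex and faces is a flat
# uint32 array holding three vertex indices per triangle, as read() stores them.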
|
#------------------------------------------------------------------------------
# Copyright (c) 2007, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Riverbank Computing Limited
# Description: <Enthought permissions package component>
#------------------------------------------------------------------------------
# Enthought library imports.
from traits.etsconfig.api import ETSConfig
from traitsui.api import Handler
# Local imports.
from .adapter_base import AdapterBase
from .package_globals import get_permissions_manager
# Register the bundled adapters.
from .adapters import pyface_action
if ETSConfig.toolkit == 'wx':
from .adapters import wx_window
elif ETSConfig.toolkit == 'qt4':
from .adapters import qt4_widget
class SecureProxy(object):
"""The SecureProxy class is a wrapper for an object whose enabled and
visible states can be managed. It attaches one or more permissions to the
object and enables and shows the object only if those permissions allow it.
In all other respects it behaves exactly like the object. It is based on
Tomer Filiba's Object Proxy cookbook recipe."""
__slots__ = ('_ets', '__weakref__')
def __init__(self, proxied, permissions, show=True):
"""Initialise the instance. proxied is the object whose enabled and
visible states are managed according to the permissions of the current
user. permissions is a list of permissions to attach to the object.
show is set if the proxied object should be visible when it is
disabled."""
adapter = object.__getattribute__(self, '_ets')
# Correct the current values.
if not show:
adapter.update_visible(adapter.get_visible())
adapter.update_enabled(adapter.get_enabled())
# Proxying (special cases).
def __getattribute__(self, name):
return getattr(object.__getattribute__(self, '_ets').proxied, name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, '_ets').proxied, name)
def __setattr__(self, name, value):
object.__getattribute__(self, '_ets').setattr(name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, '_ets').proxied)
def __str__(self):
return str(object.__getattribute__(self, '_ets').proxied)
def __repr__(self):
return repr(object.__getattribute__(self, '_ets').proxied)
# Factories.
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__',
'__irshift__', '__isub__', '__iter__', '__itruediv__', '__ixor__',
'__le__', '__len__', '__long__', '__lshift__', '__lt__', '__mod__',
'__mul__', '__ne__', '__neg__', '__oct__', '__or__', '__pos__',
'__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__',
'__reduce__', '__reduce_ex__', '__repr__', '__reversed__',
        '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__',
'__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
'__rxor__', '__setitem__', '__setslice__', '__sub__', '__truediv__',
'__xor__', 'next',
]
@classmethod
def _ets_class_proxy(cls, theclass):
"""Creates a proxy for the given class."""
def make_method(name):
def method(self, *args, **kw):
return getattr(object.__getattribute__(self, '_ets').proxied, name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name) and not hasattr(cls, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,),
namespace)
def __new__(cls, proxied, permissions, show=True):
"""Apply a set of permissions to an object. This may be done by
creating a proxy or by modifying the object in situ depending on its
type.
"""
# Create the adapter.
adapter = AdapterBase.factory(proxied, permissions, show)
# Try and adapt the object itself.
adapted = adapter.adapt()
if adapted is None:
# Create a wrapper for the object. The cache is unique per
# deriving class to ensure there are no clashes with any
# sub-classes with the same name.
try:
cache = cls.__dict__['_ets_cache']
except KeyError:
cls._ets_cache = cache = {}
pclass = proxied.__class__
try:
theclass = cache[pclass]
except KeyError:
cache[pclass] = theclass = cls._ets_class_proxy(pclass)
adapted = object.__new__(theclass)
# Save the adapter in the adapted object.
object.__setattr__(adapted, '_ets', adapter)
else:
# Correct the current values.
if not show:
adapter.update_visible(adapter.get_visible())
adapter.update_enabled(adapter.get_enabled())
# Save the adapter in the adapted object.
adapted._ets = adapter
return adapted
class SecureHandler(Handler):
"""The SecureHandler class is a sub-class of the TraitsUI Handler class
that ensures that the enabled and visible state of the items of a TraitsUI
view are updated when the user's authorisation state changes.
"""
def __init__(self, **traits):
"""Initialise the object."""
super(SecureHandler, self).__init__(**traits)
get_permissions_manager().user_manager.on_trait_event(self._refresh,
'user_authenticated')
def init_info(self, info):
"""Reimplemented to save the UIInfo object."""
self._info = info
def _refresh(self):
"""Invoked whenever the current user's authorisation state changes."""
# FIXME: This is (currently) an internal method.
self._info.ui._evaluate_when()
|
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import rospy
# utils_ops is referenced in get_classification for mask reframing; it comes from
# the TensorFlow Object Detection API (assumed to be available on the PYTHONPATH).
from object_detection.utils import ops as utils_ops
class TLClassifier(object):
def __init__(self, path_to_frozen_path):
self._detection_graph = tf.Graph()
with self._detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(path_to_frozen_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
@staticmethod
def _load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#image = self._load_image_into_numpy_array(image)
with self._detection_graph.as_default():
with tf.Session() as sess:
ops = tf.get_default_graph().get_operations()
all_tensor_names = { output.name for op in ops for output in op.outputs }
tensor_dict = {}
# get all tensors graph
for key in [
'num_detections', 'detection_boxes',
'detection_scores', 'detection_classes',
'detection_masks'
]:
tensor_name = key + ":0"
if tensor_name in all_tensor_names:
tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
# rospy.logwarn(tensor_dict)
if 'detection_masks' in tensor_dict:
# The following processing is only for single image
detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
# Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
# Follow the convention by adding back the batch dimension
tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
# all outputs are float32 numpy arrays, so convert types as appropriate
scores = output_dict['detection_scores'][0]
rospy.logwarn(scores)
idx = np.where(np.array(scores[0:3]) > 0.10)
                if len(idx[0]) == 0:  # no detection scored above the threshold
idx = -1
else:
idx = int(output_dict['detection_classes'][idx[0][0]][0])
#rospy.logwarn(idx)
return {1: TrafficLight.RED, 2: TrafficLight.GREEN, 3: TrafficLight.YELLOW, -1: TrafficLight.UNKNOWN}[idx]
|