"""Log likelihood constructors: the heart of blueice.
"""
from collections import OrderedDict
from copy import deepcopy
from functools import wraps
import numpy as np
from multihist import Histdd
from scipy import stats
from scipy.special import gammaln
from tqdm import tqdm
from .exceptions import NotPreparedException, InvalidParameterSpecification, InvalidParameter
from .model import Model
from .parallel import create_models_ipyparallel, compute_many
from .pdf_morphers import MORPHERS
from .utils import combine_dicts, inherit_docstring_from
from . import inference
__all__ = ['LogLikelihoodBase', 'BinnedLogLikelihood', 'UnbinnedLogLikelihood', 'LogLikelihoodSum']
##
# Decorators for methods which have to be run after prepare or set_data
##
def _needs_preparation(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.is_prepared:
if not len(self.shape_parameters):
# preparation is going to be trivial, just do it
self.prepare()
else:
raise NotPreparedException("%s requires you to first prepare the likelihood function using prepare()" %
f.__name__)
return f(self, *args, **kwargs)
return wrapper
def _needs_data(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if not self.is_data_set:
raise NotPreparedException("%s requires you to first set the data using set_data()" % f.__name__)
return f(self, *args, **kwargs)
return wrapper
class LogLikelihoodBase:
"""Log likelihood function with several rate and/or shape parameters
likelihood_config options:
unphysical_behaviour
outlier_likelihood
parallelize_models: True (default) or False
block_during_paralellization: True or False (default)
"""
def __init__(self, pdf_base_config, likelihood_config=None, **kwargs):
"""
:param pdf_base_config: dictionary with configuration passed to the Model
:param likelihood_config: dictionary with options for LogLikelihood itself
:param kwargs: Overrides for pdf_base_config, not likelihood config!
:return:
"""
self.pdf_base_config = combine_dicts(pdf_base_config, kwargs, deep_copy=True)
if likelihood_config is None:
likelihood_config = {}
self.config = likelihood_config
self.config.setdefault('morpher', 'GridInterpolator')
# Base model: no variations of any settings
self.base_model = Model(self.pdf_base_config)
self.source_name_list = [s.name for s in self.base_model.sources]
self.source_allowed_negative = [s.config.get("allow_negative",False)
for s in self.base_model.sources]
self.source_apply_efficiency = np.array([s.config.get("apply_efficiency", False)
for s in self.base_model.sources])
self.source_efficiency_names = np.array([s.config.get("efficiency_name", "efficiency")
for s in self.base_model.sources])
self.rate_parameters = OrderedDict() # sourcename_rate -> logprior
        self.shape_parameters = OrderedDict()  # setting_name -> (anchors, log_prior, base_value).
# where anchors is dict: representative number -> actual setting
# From here on representative number will be called 'z-score'.
# base_value is the default z-score that will be used.
# We'll take care of sorting the keys in self.prepare()
self.is_prepared = False
self.is_data_set = False
self._has_non_numeric = False
# If there are NO shape parameters:
self.ps = None # ps of the data
# If there are shape parameters:
self.anchor_models = OrderedDict() # dictionary mapping model zs -> actual model
# Interpolators created by morphers. These map zs to...
self.mus_interpolator = None # rates for each source
        self.ps_interpolator = None  # (source, event) pdf values (unbinned), or pmf grid (binned)
# number of events per bin observed in Monte Carlo / calibration data that gave rise to the model.
self.n_model_events_interpolator = lambda x: None
self.n_model_events = None
def prepare(self, n_cores=1, ipp_client=None):
"""Prepares a likelihood function with shape parameters for use.
This will compute the models for each shape parameter anchor value combination.
"""
if len(self.shape_parameters):
self.morpher = MORPHERS[self.config['morpher']](self.config.get('morpher_config', {}),
self.shape_parameters)
zs_list = self.morpher.get_anchor_points(bounds=self.get_bounds())
# Create the configs for each new model
configs = []
for zs in zs_list:
config = deepcopy(self.pdf_base_config)
for i, (setting_name, (anchors, _, _)) in enumerate(self.shape_parameters.items()):
# Translate from zs to settings using the anchors dict. Maybe not all settings are numerical.
config[setting_name] = anchors[zs[i]]
if ipp_client is None and n_cores != 1:
# We have to compute in parallel: must have delayed computation on
config['delay_pdf_computation'] = True
configs.append(config)
# Create the new models
if n_cores == 1:
models = [Model(c) for c in tqdm(configs, desc="Computing/loading models on one core")]
elif ipp_client is not None:
models = create_models_ipyparallel(configs, ipp_client,
block=self.config.get('block_during_paralellization', False))
else:
models = [Model(c) for c in tqdm(configs, desc="Preparing model computation tasks")]
hashes = set()
for m in models:
for s in m.sources:
hashes.add(s.hash)
compute_many(hashes, n_cores)
# Reload models so computation takes effect
models = [Model(c) for c in tqdm(configs, desc="Loading computed models")]
# Add the new models to the anchor_models dict
for zs, model in zip(zs_list, models):
self.anchor_models[tuple(zs)] = model
# Build the interpolator for the rates of each source.
self.mus_interpolator = self.morpher.make_interpolator(f=lambda m: m.expected_events(),
extra_dims=[len(self.source_name_list)],
anchor_models=self.anchor_models)
self.is_data_set = False
self.is_prepared = True
@_needs_preparation
def set_data(self, d):
"""Prepare the dataset d for likelihood function evaluation
:param d: Dataset, must be an indexable object that provides the measurement dimensions
For example, if your models are on 's1' and 's2', d must be something for which d['s1'] and d['s2'] give
the s1 and s2 values of your events as numpy arrays.
"""
self._data = d
self.is_data_set = True
def add_rate_parameter(self, source_name, log_prior=None):
"""Add a rate parameter names source_name + "_rate_multiplier" to the likelihood function..
The values of this parameter will MULTIPLY the expected rate of events for the source.
The rates of sources can also vary due to shape parameters.
:param source_name: Name of the source for which you want to vary the rate
:param log_prior: prior logpdf function on rate multiplier (not on rate itself!)
"""
self.rate_parameters[source_name] = log_prior
def add_shape_parameter(self, setting_name, anchors, log_prior=None, base_value=None):
"""Add a shape parameter to the likelihood function
:param setting_name: Name of the setting to vary
:param anchors: a list/tuple/array of setting values (if they are numeric)
OR a dictionary with some numerical value -> setting values (for non-numeric settings).
:param base_value: for non-numeric settings, the number which represents the base model value of the setting.
For example, if you have LCE maps with varying reflectivities, use
        add_shape_parameter('s1_relative_ly_map', {0.98: 'lce_98%.pklz', 0.99: 'lce_99%.pklz', ...})
then the argument s1_relative_ly_map of the likelihood function takes values between 0.98 and 0.99.
"""
is_numeric = isinstance(self.pdf_base_config.get(setting_name), (float, int))
if not isinstance(anchors, dict):
# Convert anchors list to a dictionary
if not is_numeric:
raise InvalidParameterSpecification("When specifying anchors only by setting values, "
"base setting must have a numerical default.")
anchors = {z: z for z in anchors}
if not is_numeric:
self._has_non_numeric = True
if not is_numeric and base_value is None:
raise InvalidParameterSpecification("For non-numeric settings, you must specify what number will represent "
"the default value (the base model setting)")
if is_numeric and base_value is not None:
raise InvalidParameterSpecification("For numeric settings, base_value is an unnecessary argument.")
self.shape_parameters[setting_name] = (anchors, log_prior, base_value)
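    # Hedged usage sketch (not from the original source; the config dict, source and
    # setting names below are hypothetical, and 'some_setting' is assumed to have a
    # numeric default in the pdf config):
    #
    #   lf = UnbinnedLogLikelihood(my_pdf_base_config)
    #   lf.add_rate_parameter('er_background')              # adds er_background_rate_multiplier
    #   lf.add_shape_parameter('some_setting', (8, 10, 12))
    #   lf.prepare()                                        # builds models at all anchor combinations
    #   lf.set_data(my_dataset)
    #   ll = lf(er_background_rate_multiplier=1.2, some_setting=11)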
def get_bounds(self, parameter_name=None):
"""Return bounds on the parameter parameter_name"""
if parameter_name is None:
return [self.get_bounds(p) for p in self.shape_parameters.keys()]
if parameter_name in self.shape_parameters:
anchor_settings = list(self.shape_parameters[parameter_name][0].keys())
return min(anchor_settings), max(anchor_settings)
elif parameter_name.endswith('_rate_multiplier'):
for source_name, allow_negative in zip(self.source_name_list,self.source_allowed_negative):
if parameter_name.startswith(source_name) and allow_negative==True:
return float('-inf'), float('inf')
return 0, float('inf')
else:
raise InvalidParameter("Non-existing parameter %s" % parameter_name)
@_needs_data
def __call__(self, livetime_days=None, compute_pdf=False, full_output=False, **kwargs):
"""Evaluate the likelihood function. Pass any values for parameters as keyword arguments.
For values not passed, their base values will be assumed.
For rate uncertainties, pass sourcename_rate_multiplier.
        :param livetime_days: livetime in days to use; affects the rates of all sources.
        :param full_output: instead of returning just the loglikelihood, also return the adjusted mus and ps.
:param compute_pdf: compute new PDFs instead of interpolating the PDF at the requested parameters.
"""
result = 0
rate_multipliers, shape_parameter_settings = self._kwargs_to_settings(**kwargs)
if len(self.shape_parameters):
if compute_pdf:
if self._has_non_numeric:
raise NotImplementedError("compute_pdf only works for numerical values")
mus, ps, n_model_events = self._compute_single_pdf(**kwargs)
else:
# We can use the interpolators. They require the settings to come in order:
zs = []
for setting_name, (_, log_prior, _) in self.shape_parameters.items():
z = shape_parameter_settings[setting_name]
zs.append(z)
# Test if the z value is out of range; if so, return -inf (since we can't extrapolate)
minbound, maxbound = self.get_bounds(setting_name)
if not minbound <= z <= maxbound:
return -float('inf')
if log_prior is not None:
result += log_prior(z)
# The RegularGridInterpolators want numpy arrays: give it to them...
zs = np.asarray(zs)
mus = self.mus_interpolator(zs)
ps = self.ps_interpolator(zs)
n_model_events = self.n_model_events_interpolator(zs)
else:
# No shape parameters
mus = self.base_model.expected_events()
ps = self.ps
n_model_events = self.n_model_events
# Apply the rate multipliers
for source_i, source_name in enumerate(self.source_name_list):
mult = rate_multipliers[source_i]
mus[source_i] *= mult
log_prior = self.rate_parameters.get(source_name, None)
if log_prior is not None:
result += log_prior(mult)
        # Apply the livetime scaling
if livetime_days is not None:
mus *= livetime_days / self.pdf_base_config['livetime_days']
# Apply efficiency to those sources that use it:
if True in self.source_apply_efficiency:
effs = []
for sae, sen in zip(self.source_apply_efficiency,
self.source_efficiency_names):
if sae:
# if that particular efficiency is not in the shape parameters, apply 1
effs.append(shape_parameter_settings.get(sen, 1))
mus[self.source_apply_efficiency] *= np.array(effs)
# Check for negative rates. Depending on the config, either error or return -float('inf') as loglikelihood
# If any source is allowed to be negative, check the sources one by one
if not any(self.source_allowed_negative):
if not np.all((mus >= 0) & (mus < float('inf'))):
if self.config.get('unphysical_behaviour') == 'error':
raise ValueError("Unphysical rates: %s" % str(mus))
else:
return -float('inf')
else:
            if (not np.all(mus < float('inf'))) or (np.sum(mus) < 0):
if self.config.get('unphysical_behaviour') == 'error':
raise ValueError("Unphysical rates: %s" % str(mus))
else:
return -float('inf')
for mu,allowed_negative in zip(mus,self.source_allowed_negative):
if not (0 <= mu) and (not allowed_negative):
if self.config.get('unphysical_behaviour') == 'error':
raise ValueError("Unphysical rates: %s" % str(mus))
else:
return -float('inf')
# Perform fits to background calibration data if needed:
# Currently only performed (analytically) for Binned likelihood via the Beeston-Barlow method
mus, ps = self.adjust_expectations(mus, ps, n_model_events)
# Get the loglikelihood. At last!
result += self._compute_likelihood(mus, ps)
if full_output:
return result, mus, ps
else:
return result
def adjust_expectations(self, mus, ps, n_model_events):
"""Adjust uncertain (mus, pmfs) based on the observed data.
If the density is derived from a finite-statistics sample (n_model_events array of events per bin),
we can take into account this uncertainty by modifying the likelihood function.
For a binned likelihood, this means adding the expected number of events for each bin for each source as
nuisance parameters constrained by Poisson terms around the number of events observed in the model.
While these nuisance parameters could be optimized numerically along with the main parameters,
for a given value of the main parameters these per-bin nuisance parameters can often be estimated analytically,
as shown by Beeston & Barlow (1993).
"""
return mus, ps
def _kwargs_to_settings(self, **kwargs):
"""Return shape parameters, rate_multipliers from kwargs.
shape_parmeters is a dict mapping setting name -> value | representative number
rate_multipliers is a list of rate multipliers for each source in self.source_name_list
"""
# Validate the kwargs: must be either shape parameters, or <known_source>_rate_multiplier
for k in kwargs.keys():
if k in self.shape_parameters:
continue
if k.endswith('_rate_multiplier'):
s_name = k[:-16]
if s_name in self.source_name_list:
continue
raise InvalidParameter("%s is not a known shape or rate parameter!" % k)
shape_parameter_settings = dict()
for setting_name, (_, _, base_value) in self.shape_parameters.items():
z = kwargs.get(setting_name)
if z is None:
# Parameter was not given: get the default value of (the number representing) this shape parameter
base_setting = self.pdf_base_config.get(setting_name)
is_numeric = isinstance(base_setting, (float, int))
if is_numeric:
assert base_value is None
z = base_setting
else:
z = base_value
if not isinstance(z, (int, float)):
raise ValueError("Arguments to likelihood function must be numeric, not %s" % type(z))
shape_parameter_settings[setting_name] = z
rate_multipliers = []
for source_i, source_name in enumerate(self.source_name_list):
rate_multipliers.append(kwargs.get(source_name + '_rate_multiplier', 1))
return rate_multipliers, shape_parameter_settings
##
# Convenience functions for uncertainties.
# Adding more general priors is the user's responsibility
# (either provide prior argument to add_x_parameter, or wrap the loglikelihood function)
##
def add_rate_uncertainty(self, source_name, fractional_uncertainty):
"""Adds a rate parameter to the likelihood function with Gaussian prior"""
self.add_rate_parameter(source_name, log_prior=stats.norm(1, fractional_uncertainty).logpdf)
def add_shape_uncertainty(self, setting_name, fractional_uncertainty, anchor_zs=(-2, -1, 0, 1, 2), base_value=None):
"""Adds a shape parameter to the likelihood function, with Gaussian prior around the default value.
:param fractional_uncertainty: Relative uncertainty on the default value.
Other parameters as in add_shape_parameter.
"""
# Call add_shape_parameter without a prior first, then inject the prior later.
# It's a bit of a hack, but there is some validation / default-setting code for base_value we don't want to
# replicate.
self.add_shape_parameter(setting_name, anchor_zs, base_value=base_value)
anchors, log_prior, base_value = self.shape_parameters[setting_name]
self.shape_parameters[setting_name] = (anchors,
stats.norm(base_value, base_value * fractional_uncertainty).logpdf,
base_value)
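    # Hedged examples (source/setting names hypothetical):
    #   lf.add_rate_uncertainty('er_background', 0.1)
    #       -> rate multiplier with Gaussian prior N(mean=1, sigma=0.1)
    #   lf.add_shape_uncertainty('some_setting', 0.05)
    #       -> shape parameter with Gaussian prior N(base_value, 0.05 * base_value)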
def _compute_single_model(self, **kwargs):
"""Return a model formed using the base config, using kwargs as overrides"""
_, shape_parameter_settings = self._kwargs_to_settings(**kwargs)
config = combine_dicts(self.pdf_base_config, shape_parameter_settings, deep_copy=True)
config['never_save_to_cache'] = True
return Model(config, **shape_parameter_settings)
##
# Methods to override
##
def _compute_likelihood(self, *args, **kwargs):
raise NotImplementedError
def _compute_single_pdf(self, **kwargs):
"""Return likelihood arguments for a single newly computed model,
formed using the base config, using kwargs as overrides.
Returns mus, ps, n_model_events
"""
raise NotImplementedError
class UnbinnedLogLikelihood(LogLikelihoodBase):
@inherit_docstring_from(LogLikelihoodBase)
def set_data(self, d):
LogLikelihoodBase.set_data(self, d)
if len(self.shape_parameters):
self.ps_interpolator = self.morpher.make_interpolator(f=lambda m: m.score_events(d),
extra_dims=[len(self.source_name_list), len(d)],
anchor_models=self.anchor_models)
else:
self.ps = self.base_model.score_events(d)
@inherit_docstring_from(LogLikelihoodBase)
def _compute_single_pdf(self, **kwargs):
model = self._compute_single_model(**kwargs)
mus = model.expected_events()
ps = model.score_events(self._data)
return mus, ps, None
def _compute_likelihood(self, mus, pdf_values_at_events):
return extended_loglikelihood(mus, pdf_values_at_events,
outlier_likelihood=self.config.get('outlier_likelihood', 1e-12))
class BinnedLogLikelihood(LogLikelihoodBase):
def __init__(self, pdf_base_config, likelihood_config=None, **kwargs):
LogLikelihoodBase.__init__(self, pdf_base_config, likelihood_config, **kwargs)
pdf_base_config['pdf_interpolation_method'] = 'piecewise'
self.model_statistical_uncertainty_handling = self.config.get('model_statistical_uncertainty_handling')
@inherit_docstring_from(LogLikelihoodBase)
def prepare(self, *args):
LogLikelihoodBase.prepare(self, *args)
self.ps, self.n_model_events = self.base_model.pmf_grids()
if len(self.shape_parameters):
self.ps_interpolator = self.morpher.make_interpolator(f=lambda m: m.pmf_grids()[0],
extra_dims=list(self.ps.shape),
anchor_models=self.anchor_models)
if self.model_statistical_uncertainty_handling is not None:
self.n_model_events_interpolator = self.morpher.make_interpolator(f=lambda m: m.pmf_grids()[1],
extra_dims=list(self.ps.shape),
anchor_models=self.anchor_models)
@inherit_docstring_from(LogLikelihoodBase)
def set_data(self, d):
LogLikelihoodBase.set_data(self, d)
# Bin the data in the analysis space
dimnames, bins = zip(*self.base_model.config['analysis_space'])
self.data_events_per_bin = Histdd(bins=bins, axis_names=dimnames)
self.data_events_per_bin.add(*self.base_model.to_analysis_dimensions(d))
@inherit_docstring_from(LogLikelihoodBase)
def _compute_single_pdf(self, **kwargs):
model = self._compute_single_model(**kwargs)
mus = model.expected_events()
ps, n_model_events = model.pmf_grids()
return mus, ps, n_model_events
@_needs_data
@inherit_docstring_from(LogLikelihoodBase)
def adjust_expectations(self, mus, pmfs, n_model_events):
# Avoid mutating the arrays we're also going to return
mus = mus.copy()
pmfs = pmfs.copy()
if self.model_statistical_uncertainty_handling == 'bb_single':
source_i = self.config.get('bb_single_source')
if source_i is None:
raise ValueError("You need to specify bb_single_source to use bb_single_source expectation adjustment")
source_i = self.base_model.get_source_i(source_i)
assert pmfs.shape == n_model_events.shape
# Get the number of events expected for the sources we will NOT adjust
counts_per_bin = pmfs.copy()
for i, (mu, _x) in enumerate(zip(mus, counts_per_bin)):
if i != source_i:
_x *= mu
else:
_x *= 0.
u_bins = np.sum(counts_per_bin, axis=0)
a_bins = n_model_events[source_i]
p_calibration = mus[source_i] / n_model_events[source_i].sum()
w_calibration = pmfs[source_i] / a_bins * n_model_events[source_i].sum()
A_bins_1, A_bins_2 = beeston_barlow_roots(a_bins, w_calibration * p_calibration, u_bins, self.data_events_per_bin.histogram)
assert np.all(A_bins_1 <= 0) # it seems(?) the 1st root is always negative
# For U=0, the solution above is singular; we need to use a special case instead
A_bins_special = (self.data_events_per_bin.histogram + a_bins) / (1. + p_calibration)
A_bins = np.choose(u_bins == 0, [A_bins_2, A_bins_special])
assert np.all(0 <= A_bins)
pmfs[source_i] = A_bins * w_calibration
pmfs[source_i] /= pmfs[source_i].sum()
mus[source_i] = (A_bins * w_calibration).sum() * p_calibration
return mus, pmfs
def _compute_likelihood(self, mus, pmfs):
"""Return binned Poisson log likelihood
:param mus: numpy array with expected rates for each source
:param pmfs: array (sources, *analysis_space) of PMFs for each source in each bin
"""
expected_counts = pmfs.copy()
for mu, _p_bin_source in zip(mus, expected_counts):
_p_bin_source *= mu # Works because of numpy view magic...
expected_total = np.sum(expected_counts, axis=0)
observed_counts = self.data_events_per_bin.histogram
ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real
return np.sum(ret)
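# The value returned by BinnedLogLikelihood._compute_likelihood above is the binned Poisson
# log likelihood: for each bin b with observed count n_b and total expectation
# mu_b = sum_s mus[s] * pmfs[s, b],
#     ln L = sum_b ( n_b * ln(mu_b) - mu_b - ln(n_b!) )
# with ln(n_b!) evaluated as gammaln(n_b + 1).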
def extended_loglikelihood(mu, ps, outlier_likelihood=0.0):
"""Evaluate an extended unbinned likelihood function
:param mu: array of n_sources: expected number of events
:param ps: array of (n_sources, n_events): pdf value for each source and event
:param outlier_likelihood: if an event has p=0, give it this likelihood (instead of 0, which makes the whole
loglikelihood infinite)
:return: ln(likelihood)
"""
p_events = np.nansum(mu[:, np.newaxis] * ps, axis=0)
if outlier_likelihood != 0:
# Replace all likelihoods which are not positive numbers (i.e. 0, negative, or nan) with outlier_likelihood
        p_events[~(p_events > 0)] = outlier_likelihood
return -mu.sum() + np.sum(np.log(p_events))
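# Worked sketch (values hypothetical): for mu = [3., 5.] expected events and per-event
# pdf values ps[source, event], the extended log likelihood is
#     ln L = -sum_s mu_s + sum_e ln( sum_s mu_s * ps[s, e] )
# so e.g.
#     extended_loglikelihood(np.array([3., 5.]),
#                            np.array([[0.10, 0.20],
#                                      [0.05, 0.30]]))
# gives -8 + ln(3*0.10 + 5*0.05) + ln(3*0.20 + 5*0.30).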
def beeston_barlow_root1(a, p, U, d):
"""Solution to the Beeston-Barlow equations for a single finite-statics source and several infinite-statistics
sources. This is the WRONG root, as far as we can tell -- DO NOT USE IT!!
We retained it only to keep checking that it is the wrong root. It will be removed soon, when we are more confident.
"""
return ((-U*p - U + a*p + d*p -
np.sqrt(U**2*p**2 + 2*U**2*p + U**2 + 2*U*a*p**2 + 2*U*a*p -
2*U*d*p**2 - 2*U*d*p + a**2*p**2 + 2*a*d*p**2 + d**2*p**2))/(2*p*(p + 1)))
def beeston_barlow_root2(a, p, U, d):
"""Solution to the Beeston-Barlow equations for a single finite-statics source and several infinite-statistics
sources. This is the 'right' root, as far as we can tell anyway."""
return ((-U*p - U + a*p + d*p +
np.sqrt(U**2*p**2 + 2*U**2*p + U**2 + 2*U*a*p**2 + 2*U*a*p -
2*U*d*p**2 - 2*U*d*p + a**2*p**2 + 2*a*d*p**2 + d**2*p**2))/(2*p*(p + 1)))
def beeston_barlow_roots(a, p, U, d):
return beeston_barlow_root1(a, p, U, d), beeston_barlow_root2(a, p, U, d)
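# Descriptive note (not from the original source): as used in adjust_expectations above,
# a and d are the per-bin calibration (model) and observed counts, U is the per-bin
# expectation from the other, infinite-statistics sources, and p is the per-bin ratio of
# expected events from the adjusted source to its calibration events. The two functions
# above are the two roots of the per-bin quadratic that the Beeston & Barlow (1993)
# maximum-likelihood condition reduces to for a single finite-statistics source;
# adjust_expectations uses the second root, with a separate special case for bins
# where U == 0.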
class LogLikelihoodSum(object):
"""Class that takes a list of likelihoods to be minimized together, and
provides an interface to the inference methods and evaluation similar to likelihoods.
    Note that the pdf_base_config is a bit of a fudge: it only stores guesses from the last likelihood.
As different guesses for different likelihoods should be a cause for concern, the safest method is to pass
manual guesses to the minimization.
    The likelihood_weights argument lets you weight terms in the likelihood -- useful for Asimov estimation (I think)
    as well as when you have multiple subvolumes, in which case each subvolume's constraint term gets a (1/N) weight
    to avoid overconstraining the total likelihood.
"""
def __init__(self, likelihood_list, likelihood_weights=None):
self.likelihood_list = []
self.rate_parameters = dict()
self.shape_parameters = dict()
self.source_list = [] # DOES NOT EXIST IN LF!
# in order to pass to confidence interval:
self.pdf_base_config = {} # might also have to be fudged
# These weights are useful if you need to split the constraints
# among multiple sub-volumes in analysis space.
self.likelihood_weights = likelihood_weights
if likelihood_weights is None:
self.likelihood_weights = [1 for _ in likelihood_list]
self.likelihood_parameters = []
for ll in likelihood_list:
self.likelihood_list.append(ll)
self.rate_parameters.update(ll.rate_parameters)
self.shape_parameters.update(ll.shape_parameters)
parameter_names = []
for rate_parameter_name in ll.rate_parameters.keys():
parameter_names.append(rate_parameter_name + '_rate_multiplier')
base_value = ll.pdf_base_config.get(rate_parameter_name)
if base_value is not None:
self.pdf_base_config[rate_parameter_name] = base_value
for shape_parameter_name in ll.shape_parameters.keys():
parameter_names.append(shape_parameter_name)
base_value = ll.pdf_base_config.get(shape_parameter_name)
if base_value is not None:
self.pdf_base_config[shape_parameter_name] = base_value
self.likelihood_parameters.append(parameter_names)
def __call__(self, compute_pdf=False, livetime_days=None, **kwargs):
ret = 0.
for i, (ll, parameter_names, ll_weight) in enumerate(
zip(self.likelihood_list,
self.likelihood_parameters,
self.likelihood_weights)):
pass_kwargs = {k: v
for k, v in kwargs.items()
if k in parameter_names}
livetime = livetime_days
if isinstance(livetime_days, list):
livetime = livetime_days[i]
ret += ll_weight * ll(compute_pdf=compute_pdf,
livetime_days=livetime,
**pass_kwargs)
return ret
def split_results(self, result_dict):
ret = []
for i,parameter_names in enumerate(self.likelihood_parameters):
ret.append({k: v for k, v in result_dict.items() if k in parameter_names})
return ret
def get_bounds(self, parameter_name=None):
"""Return bounds on the parameter parameter_name,"""
if parameter_name is None:
return [self.get_bounds(p)
for p in self.shape_parameters]
if parameter_name in self.shape_parameters.keys():
bounds = []
for ll in self.likelihood_list:
if parameter_name in ll.shape_parameters.keys():
bounds.append(ll.get_bounds(parameter_name))
bounds = np.array(bounds)
            ret = np.max(bounds[:, 0]), np.min(bounds[:, 1])
            if ret[1] <= ret[0]:
                raise InvalidParameterSpecification("Lower bound of %s is higher than its upper bound!" % parameter_name)
return ret
elif parameter_name.endswith('_rate_multiplier'):
return 0, float('inf')
else:
raise InvalidParameter("Non-existing parameter %s" % parameter_name)
class LogAncillaryLikelihood(object):
"""Ancillary (constraint) analytical likelihoods"""
def __init__(self, func, parameter_list, config=None, func_kwargs=None):
"""
:param func: python function taking an _OrderedDict_ of (named) input values, plus func_kwargs extra arguments.
i.e. func({parameters:config[parameter]}, **func_kwargs)
:param parameter_list: list of names of parameters for which a dict is pulled from the config.
        :param config: config dict containing default values for the parameters
:param func_kwargs: other parameters to pass to function
"""
if config is None:
config = dict()
if func_kwargs is None:
func_kwargs = dict()
self.rate_parameters = dict()
self.shape_parameters = dict()
self.source_list = [] # DOES NOT EXIST IN LF!
# in order to pass to confidence interval
self.pdf_base_config = config # might also have to be fudged
self.func = func
self.func_kwargs = func_kwargs
for parameter_name in parameter_list:
self.shape_parameters.update(OrderedDict([(parameter_name,(None,None,None))]))
def get_bounds(self, parameter_name=None):
if parameter_name is None:
return [self.get_bounds(p) for p in self.shape_parameters]
if parameter_name in self.shape_parameters.keys():
# other likelihoods can be more constrictive.
return -np.inf, np.inf
else:
raise InvalidParameter("Non-existing parameter %s" % parameter_name)
def __call__(self, **kwargs):
# Use an ordered dict here, so function can rely on order of arguments
pass_kwargs = OrderedDict()
for parameter_name in self.shape_parameters:
pass_kwargs[parameter_name] = self.pdf_base_config[parameter_name]
pass_kwargs.update(kwargs)
return self.func(pass_kwargs, **self.func_kwargs)
# Add the inference methods from .inference
for methodname in inference.__all__:
for q in (LogLikelihoodBase, LogLikelihoodSum, LogAncillaryLikelihood):
setattr(q, methodname, getattr(inference, methodname))
|
# -*- coding: utf-8 -*-
import os
import io
import unittest
from testfixtures import TempDirectory
from .. import fs
from .. import localfs
from ..utils import TemporaryFilename
class test_localfs(unittest.TestCase):
def setUp(self):
self.dir = TempDirectory()
def tearDown(self):
self.dir.cleanup()
def test_mode(self):
# label exist not exist read write position
# r - error - error start
# r+ - error - - start
# w truncate create error - -
# w+ truncate create - - -
# x error create error - -
# x+ error create - - -
# a - create error - end
# a+ - create - - end
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self.assertRaises(fs.Missing, self._check_r, filename, "123")
self._check_w(filename, "123")
# Exist
self._check_r(filename, "456")
self._check_content(filename, "123")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self.assertRaises(fs.Missing, self._check_rp, filename, "123")
self._check_w(filename, "123")
# Exist
self._check_rp(filename, "456")
self._check_content(filename, "456")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_w(filename, "123")
self._check_content(filename, "123")
# Exist
self._check_w(filename, "456")
self._check_content(filename, "456")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_wp(filename, "123")
self._check_content(filename, "123")
# Exist
self._check_wp(filename, "456")
self._check_content(filename, "456")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_a(filename, "123")
self._check_content(filename, "123")
# Exist
self._check_a(filename, "456")
self._check_content(filename, "123456")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_ap(filename, "123")
self._check_content(filename, "123")
# Exist
self._check_ap(filename, "456")
self._check_content(filename, "123456")
try:
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_x(filename, "123")
self._check_content(filename, "123")
# Exist
self.assertRaises(IOError, self._check_x, filename, "456")
with TemporaryFilename(self.dir.path, suffix=".txt") as filename:
# Doesn't exist
self._check_xp(filename, "123")
self._check_content(filename, "123")
# Exist
self.assertRaises(IOError, self._check_xp, filename, "456")
except ValueError:
pass # Python 2 does not have x
def _check_w(self, filename, word):
with localfs.Path(filename, mode="w").open() as f:
f.write(word)
f.seek(0)
self.assertRaises(IOError, f.read)
def _check_wp(self, filename, word):
with localfs.Path(filename, mode="w+").open() as f:
f.write(word)
f.seek(0)
self.assertEqual(word, f.read())
def _check_x(self, filename, word):
with localfs.Path(filename, mode="x").open() as f:
f.write(word)
f.seek(0)
self.assertRaises(IOError, f.read)
def _check_xp(self, filename, word):
with localfs.Path(filename, mode="x+").open() as f:
f.write(word)
f.seek(0)
self.assertEqual(word, f.read())
def _check_a(self, filename, word):
with localfs.Path(filename, mode="a").open() as f:
f.write(word)
f.seek(0)
self.assertRaises(IOError, f.read)
def _check_ap(self, filename, word):
with localfs.Path(filename, mode="a+").open() as f:
fptr = f.tell()
n = len(word)
f.write(word)
try:
# Python 2: seek from current
f.seek(-n, 1)
except io.UnsupportedOperation:
# Python 3: seek from beginning
f.seek(fptr, 0)
self.assertEqual(word, f.read(n))
def _check_r(self, filename, word):
with localfs.Path(filename, mode="r").open() as f:
self.assertRaises(IOError, f.write, word)
f.read()
def _check_rp(self, filename, word):
with localfs.Path(filename, mode="r+").open() as f:
f.write(word)
f.seek(0)
self.assertEqual(word, f.read(len(word)))
def _check_content(self, filename, word):
filename = str(filename)
if os.path.isfile(filename):
with localfs.Path(filename, mode="r").open() as f:
b = f.read() == word
else:
            b = word is None
self.assertTrue(b)
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_localfs("test_mode"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
from django import forms
from django.utils.translation import ugettext as _
from corehq import toggles
from corehq.apps.userreports.models import DataSourceConfiguration, \
StaticDataSourceConfiguration
from corehq.apps.userreports.ui.widgets import JsonWidget
from corehq.util.python_compatibility import soft_assert_type_text
import six
class ReportDataSourceField(forms.ChoiceField):
def __init__(self, domain, *args, **kwargs):
self.domain = domain
standard_sources = DataSourceConfiguration.by_domain(self.domain)
custom_sources = list(StaticDataSourceConfiguration.by_domain(domain))
available_data_sources = standard_sources + custom_sources
if toggles.AGGREGATE_UCRS.enabled(domain):
from corehq.apps.aggregate_ucrs.models import AggregateTableDefinition
available_data_sources += AggregateTableDefinition.objects.filter(domain=self.domain)
super(ReportDataSourceField, self).__init__(
choices=[(src.data_source_id, src.display_name) for src in available_data_sources],
*args, **kwargs
)
class JsonField(forms.CharField):
widget = JsonWidget
expected_type = None
default_null_values = (None, '', ())
def __init__(self, expected_type=None, null_values=None, *args, **kwargs):
self.expected_type = expected_type
self.null_values = null_values if null_values is not None else self.default_null_values
super(JsonField, self).__init__(*args, **kwargs)
def prepare_value(self, value):
if isinstance(value, six.string_types):
soft_assert_type_text(value)
try:
return json.loads(value)
except ValueError:
return value
else:
return value
def to_python(self, value):
val = super(JsonField, self).to_python(value)
try:
return json.loads(val)
        except (TypeError, ValueError):
raise forms.ValidationError(_('Please enter valid JSON. This is not valid: {}'.format(value)))
def validate(self, value):
if value in self.null_values and self.required:
raise forms.ValidationError(self.error_messages['required'])
if self.expected_type and not isinstance(value, self.expected_type):
raise forms.ValidationError(_('Expected {} but was {}'.format(self.expected_type, type(value))))
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# Import Salt Libs
import salt.states.boto_asg as boto_asg
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoAsgTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.states.boto_asg
'''
# 'present' function tests: 1
def setup_loader_modules(self):
return {boto_asg: {}}
    maxDiff = None
def test_present(self):
'''
Test to ensure the autoscale group exists.
'''
name = 'myasg'
launch_config_name = 'mylc'
availability_zones = ['us-east-1a', 'us-east-1b']
min_size = 1
max_size = 1
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[False, {'min_size': 2}, ['']])
with patch.dict(boto_asg.__salt__, {'boto_asg.get_config': mock}):
with patch.dict(boto_asg.__opts__, {'test': True}):
comt = 'Autoscale group set to be created.'
ret.update({'comment': comt})
with patch.dict(boto_asg.__salt__,
{'config.option': MagicMock(return_value={})}):
self.assertDictEqual(
boto_asg.present(
name,
launch_config_name,
availability_zones,
min_size,
max_size
),
ret
)
def magic_side_effect(value):
if isinstance(value, int):
if value == 1:
return 4
return value
return ''
comt = 'Autoscale group set to be updated.'
ret.update({'comment': comt, 'result': None})
ret.update({'changes': {'new': {'min_size': 4},
'old': {'min_size': 2}}})
utils_ordered_mock = MagicMock(
side_effect=magic_side_effect
)
with patch.dict(boto_asg.__salt__,
{'config.option': MagicMock(return_value={})}):
with patch.dict(boto_asg.__utils__,
{'boto3.ordered': utils_ordered_mock}):
call_ret = boto_asg.present(
name,
launch_config_name,
availability_zones,
min_size,
max_size
)
self.assertDictEqual(call_ret, ret)
with patch.dict(boto_asg.__salt__,
{'config.option': MagicMock(return_value={})}):
with patch.dict(boto_asg.__utils__,
{'boto3.ordered': MagicMock(return_value='')}):
comt = 'Autoscale group present. '
ret.update({'comment': comt, 'result': True})
ret.update({'changes': {}})
self.assertDictEqual(
boto_asg.present(
name,
launch_config_name,
availability_zones,
min_size,
max_size
),
ret
)
# 'absent' function tests: 1
def test_absent(self):
'''
Test to ensure the named autoscale group is deleted.
'''
name = 'myasg'
ret = {'name': name,
'result': None,
'changes': {},
'comment': ''}
mock = MagicMock(side_effect=[True, False])
with patch.dict(boto_asg.__salt__, {'boto_asg.get_config': mock}):
with patch.dict(boto_asg.__opts__, {'test': True}):
comt = ('Autoscale group set to be deleted.')
ret.update({'comment': comt})
self.assertDictEqual(boto_asg.absent(name), ret)
comt = ('Autoscale group does not exist.')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(boto_asg.absent(name), ret)
|
import pytest
from xclingo import XclingoControl, XclingoContext
class TestXclingo:
def assert_test_case(self, datadir, test_case, auto_tracing):
xcontrol = XclingoControl(
n_solutions=0,
n_explanations=0,
auto_trace=auto_tracing,
)
xcontrol.add('base', [], (datadir / f'{test_case}.lp').read_text())
xcontrol.ground()
result = xcontrol._default_output()
expected = (datadir / f'expected_{test_case}.txt').read_text()
assert expected == result
    def test_count_aggregate(self, datadir):
        self.assert_test_case(datadir, 'count_aggregate', 'none')

    def test_ignore_shows(self, datadir):
        self.assert_test_case(datadir, 'ignore_shows', 'all')
|
from selenium import webdriver
url = "http://www.naver.com/"
browser = webdriver.PhantomJS()
browser.implicitly_wait(3)
browser.get(url)
browser.save_screenshot("website.png")
browser.quit()
|
#!/usr/bin/env python2.7
# vim : set fileencoding=utf-8 expandtab noai ts=4 sw=4 filetype=python :
"""
stitch help <cmd>
Shows extensive help of the command <cmd>.
"""
from __future__ import print_function
import stitch.datastore
def options(parser):
"""The query command has no options"""
parser.add_argument('query')
def execute():
    """Run the query against the datastore and print the result"""
    query = stitch.datastore.env.args.get("query")
    print('The query """' + query + '""" returns:')
res = stitch.datastore.query(query)
print(stitch.datastore.yaml.dump(res))
|
#!/usr/bin/env python
# This script downloads the top 10 trending wallpapers from each subreddit in the list below and saves them to a folder.
"""
Copyright (c) 2021 ShankarCodes. All rights reserved.
You may use, distribute and modify this code under the terms of the
BSD 3-Clause "New" or "Revised" License.
You should have received a copy of the BSD-3-Clause License with
this file. If not visit https://opensource.org/licenses/BSD-3-Clause
Homepage: https://github.com/ShankarCodes/scripts
"""
# Make sure to create a .env file which contains
# ID=<your app id>
# SECRET=<your app secret>
# NAME=<your username>
# PASSWORD=<your reddit password>
# USER_AGENT=<your user agent, for example MyWallpaperDownloader>
# List of some subreddits for wallpapers.
# Remove or add your own subreddits here
import time
import grequests
import json
import praw
from dotenv import load_dotenv, find_dotenv
import os
import requests
from mimetypes import MimeTypes
import traceback
NUMBER_TOP = 10
NUMBER_CONCURRENT_CONNECTIONS = 8
TIME_FILTER = 'day'
SAVE_PATH = 'saves'
subreddits = [
'SpacePorn',
'EarthPorn',
'wallpapers',
'BotanicalPorn',
'CityPorn',
'WeatherPorn',
'SkyPorn',
'LakePorn',
'VillagePorn',
'BeachPorn',
'WaterPorn',
'ExposurePorn',
'ImaginaryLandscapes',
'ImaginaryTechnology',
'futureporn',
'lightpainting'
]
# subreddits = [
# 'EarthPorn'
# ]
# Load environment variables from the .env file
load_dotenv(find_dotenv())
# Get credentials
client_id = os.environ.get('ID')
secret = os.environ.get('SECRET')
username = os.environ.get('NAME')
password = os.environ.get('PASSWORD')
user_agent = os.environ.get('USER_AGENT')
assert client_id is not None
assert secret is not None
assert username is not None
assert password is not None
assert user_agent is not None
mime = MimeTypes()
def exception_handler(request, exception):
print('-'*50)
    print('Request Failed!')
print(request)
print(exception)
print('-'*50)
class WallpaperDownloader:
def __init__(self):
self.reddit = praw.Reddit(client_id=client_id,
client_secret=secret,
password=password,
user_agent=user_agent,
username=username)
self.cache = {}
try:
with open(os.path.join(SAVE_PATH, 'cache.json'), 'r') as cache_file:
self.cache = json.load(cache_file)
except Exception as e:
print('Error while reading cache file')
def get_trending_submissions(self, subreddit_name, limit=NUMBER_TOP):
try:
#return [submission for submission in self.reddit.subreddit(subreddit_name).hot(limit=limit) if not submission.is_self]
# Gets the top posts as specified by TIME_FILTER
return [submission for submission in self.reddit.subreddit(subreddit_name).top(limit=limit, time_filter=TIME_FILTER) if not submission.is_self]
except Exception as e:
print(e)
return []
def generate_download_list(self, submissions):
dl_list = []
for submission in submissions:
# Check if submission has not yet been downloaded
if submission.id not in self.cache:
typ = mime.guess_type(submission.url)[0]
if typ is not None:
if typ.startswith('image'):
# Add the extension to the list.
dl_list += [(submission, typ.split('/')[-1])]
else:
print(
f'Unknown type for submission url: {submission.url}')
else:
print(f'Unknown type for submission url: {submission.url}')
else:
# Id is there in cache, check if that file is present or not.
if not os.path.exists(os.path.join(SAVE_PATH, self.cache[submission.id]['filename'])):
print(f'File is not present but cache found for submission {submission.id}, Downloading')
typ = mime.guess_type(submission.url)[0]
if typ is not None:
if typ.startswith('image'):
# Add the extension to the list.
dl_list += [(submission, typ.split('/')[-1])]
else:
print(
f'Unknown type for submission url: {submission.url}')
else:
print(f'Unknown type for submission url: {submission.url}')
else:
print(f'Cache found for submission {submission.id}, Not downloading')
for dl in dl_list:
print(f'To download:{dl[0].url}')
return dl_list
def download(self, url_list):
try:
all_requests = (grequests.get(u[0].url) for u in url_list)
responses = grequests.map(all_requests, size=NUMBER_CONCURRENT_CONNECTIONS,
exception_handler=exception_handler)
for dl_tuple, response in zip(url_list, responses):
if response is None:
continue
submission = dl_tuple[0]
extension = dl_tuple[1]
try:
# Clean file name
fn = ''.join([i for i in submission.title if i.isalnum() or i in '.-_ ()[]'])[:254]
try:
with open(os.path.join(SAVE_PATH, f'{fn}.{extension}'), 'wb') as save_file:
save_file.write(response.content)
except Exception as e:
print('Error while saving the file to the filesystem')
print('Continuing')
continue
if self.cache.get(submission.id) is None:
self.cache[submission.id] = {}
try:
self.cache[submission.id]['url'] = submission.url
self.cache[submission.id]['title'] = submission.title
self.cache[submission.id]['link'] = 'https://reddit.com' + \
submission.permalink
self.cache[submission.id]['filename'] = f'{fn}.{extension}'
self.cache[submission.id]['author'] = submission.author.name
self.cache[submission.id]['author_id'] = submission.author.id
except Exception as e:
print('Error while adding submission to cache')
try:
self.cache[submission.id]['created'] = submission.created_utc
except Exception as e:
print('Cannot get created time')
try:
self.cache[submission.id]['score'] = submission.score
except Exception as e:
print('Cannot get score')
try:
self.cache[submission.id]['upvote_ratio'] = submission.upvote_ratio
except Exception as e:
                        print('Cannot get upvote_ratio')
except Exception as e:
traceback.print_exc()
del self.cache[submission.id]
print('Error saving file')
except Exception as e:
print(e)
print('Error while getting images')
finally:
with open(os.path.join(SAVE_PATH, 'cache.json'), 'w') as cache_file:
json.dump(self.cache, cache_file, indent=4, sort_keys=True)
def download_wallpapers():
walldl = WallpaperDownloader()
print('Downloading')
submissions = []
for subreddit in subreddits:
        print(f'Downloading from subreddit {subreddit}')
submissions += walldl.get_trending_submissions(subreddit)
dl_list = walldl.generate_download_list(submissions)
walldl.download(dl_list)
def main():
download_wallpapers()
if __name__ == '__main__':
main()
|
cid = input('In which city do you live? ')  # .strip()
ci = cid.split()
# First check: does the first word of the name contain SANTO? Second check: do the first five characters spell SANTO?
print(f'Does this city have Santo at the start of its name? \033[1;37m{"SANTO" in ci[0].upper()}\033[m')
print(f'Does this city have Santo at the start of its name? \033[0;36m{"SANTO" == cid[:5].upper()}\033[m')
|
# https://docs.python.org/3/library/functions.html
abs(x)
all(iterable)
any(iterable)
ascii(object)
bin(x)
bool(?x)
breakpoint(*args, **kws)
bytearray(?source, ?encoding, ?errors)
bytes(?source, ?encoding, ?errors)
callable(object)
chr(i)
@classmethod
compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
complex(?real, ?imag)
delattr(object, name)
dict(**kwarg) | dict(mapping|iterable, **kwarg)
dir(?object)
divmod(a, b)
enumerate(iterable, start=0)
eval(expression, ?globals, ?locals)
exec(object, ?globals, ?locals)
filter(function, iterable)
float(?x)
format(value, ?format_spec)
frozenset(iterable)
getattr(object, name, ?default)
globals()
hasattr(object, name)
hash(object)
help(object)
hex(x)
id(object)
input(?prompt)
int(?x, ?base=10)
isinstance(object, classinfo)
issubclass(object, classinfo)
iter(object, ?sentinel)
len(s)
list(iterable)
locals()
map(function, iterable, ...)
max(iterable, *, ?key, ?default) | max(arg1, arg2, *args, ?key)
memoryview(object)
min(iterable, *, ?key, ?default) | min(arg1, arg2, *args, ?key)
next(iterator, ?default)
object()
oct(x)
open(file, mode='r|w|x|a|b|t|+', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None)
mode=
'r' # open for reading (default)
'w' # open for writing, truncating the file first
'x' # open for exclusive creation, failing if the file already exists
'a' # open for writing, appending to the end of the file if it exists
'b' # binary mode
't' # text mode (default)
'+' # open for updating (reading and writing)
encoding= locale.getpreferredencoding(False) # default encoding (platform-dependent): 'cp1252'
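# example (hypothetical file name): read a UTF-8 text file
#   with open('notes.txt', mode='r', encoding='utf-8') as f:
#       text = f.read()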
ord(c)
pow(base, exp, ?mod)
print(*objects, sep=' ', end='\n', file=sys.stdout, flush=False)
property(fget=None, fset=None, fdel=None, doc=None)
range(stop) | range(start, stop, ?step)
repr(object)
reversed(seq)
round(number, ?ndigits)
set(?iterable)
setattr(object, name, value)
slice(stop) | slice(start, stop, ?step)
sorted(iterable, *, key=None, reverse=False)
@staticmethod
str(object='') | str(object=b'', encoding='utf-8', errors='strict')
sum(iterable, /, start=0)
super(?type, ?object_or_type)
tuple(?iterable)
type(object) | type(name, bases, dict, **kwds)
vars(?object)
zip(*iterables)
__import__(name, globals=None, locals=None, fromlist=(), level=0)
|
from pathlib import Path
from setuptools import setup, find_packages
long_description = Path('README.rst').read_text('utf-8')
try:
from sagenet import __author__, __email__
except ImportError: # Deps not yet installed
    __author__ = __maintainer__ = 'Elyas Heidari'
__email__ = 'eheidari@student.ethz.ch'
__version__ = '0.1.2'
setup(name='sagenet',
version = "0.1.2",
description='Spatial reconstruction of dissociated single-cell data',
long_description=long_description,
      long_description_content_type="text/x-rst",
url='https://github.com/MarioniLab/sagenet',
author=__author__,
author_email=__email__,
license='MIT',
platforms=["Linux", "MacOSX"],
packages=find_packages(),
zip_safe=False,
# download_url="https://github.com/MarioniLab/sagenet/archive/refs/tags/SageNet_v0.1.0.1.tar.gz",
project_urls={
"Documentation": "https://sagenet.readthedocs.io/en/latest",
"Source Code": "https://github.com/MarioniLab/sagenet",
},
install_requires=[l.strip() for l in Path("requirements.txt").read_text("utf-8").splitlines()],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Typing :: Typed",
# "Programming Language :: Python :: 3",
# "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Environment :: Console",
"Framework :: Jupyter",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Visualization",
],
doc=[
'sphinx',
'sphinx_rtd_theme',
'sphinx_autodoc_typehints',
'typing_extensions; python_version < "3.8"',
],
keywords=sorted(
[
"single-cell",
"bio-informatics",
"spatial transcriptomics",
"spatial data analysis",
"single-cell data analysis",
]
),
)
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import libsbml, sys, re, numpy
def clean_formula(formula):
for s in ['(',')','*','/','+','-',',']:
formula = formula.replace(s,' %s '%s)
    for i in range(3): formula = formula.replace("  ", " ")  # collapse double spaces
match = re.search(r'\de - ',formula) # e.g. 10e-05 cannot be 10e - 05
if match:
my_digit = match.group(0)[0]
formula = formula.replace(match.group(0),my_digit+'e-')
split_formula = formula.split(' ')
n_split_formula = []
for i in split_formula:
try:
i = float(i) # try to convert '1' to '1.0'
i = str(i)
n_split_formula.append(i)
        except ValueError:
n_split_formula.append(i)
formula = ' '.join(n_split_formula)
return formula
def find_and_replace(pattern, dest_str, repl_str):
re_comp = re.compile(r'%s(?=\W|$)'%(pattern)) # not followed by alphanumeric [a-zA-Z0-9_] or (|) is at end of string ($)
return re_comp.sub(repl_str, dest_str) # substitute pattern with repl_str in dest_str if found
class my_model:
"""class for sbml model import """
def __init__(self, filename):
self._m = self._import_model_from_file(filename) # the sbml model instance
self.species_values, self.species, self.species_names = self._get_species()
self.parameter_values, self.parameters, self.parameter_names = self._get_parameters()
self.rule_list = [rule.getId() for rule in self._m.getListOfRules()]
self.rules = dict([(rule.getId(), clean_formula( rule.getFormula() ) ) for rule in self._m.getListOfRules()])
# if a species is just an assignment/rule, then don't keep it as species in list
if self.rules:
for i in self.rules:
if i in self.species:
self.species.remove(i)
self.event_ass_lists = [event.getListOfEventAssignments() for event in self._m.getListOfEvents()]
self.events = dict([ (event.getId(),event) for event in self._m.getListOfEvents()])
# print self.events
# for event in self.event_ass_lists[0]:
# print event.getVariable()
# print libsbml.formulaToString(event.getMath())
# self.event_triggers = [event.getTrigger().getMath() for event in self._m.getListOfEvents()]
# print libsbml.formulaToString(self.event_triggers[0])
# sys.exit(1)
self.reaction_list = [reaction.getId() for reaction in self._m.getListOfReactions()]
self.reaction_names = dict([(reaction.getId(),reaction.getName()) for reaction in self._m.getListOfReactions()])
self.reactions = dict([(reaction.getId(), clean_formula( reaction.getKineticLaw().getFormula() ) ) for reaction in self._m.getListOfReactions()]) # dict of reaction ids, formulas still with functions in it
self.kinetic_laws = dict([(reaction.getId(),reaction.getKineticLaw()) for reaction in self._m.getListOfReactions()]) # dict of reaction ids, kinetic law objects
for reaction_id in self.reactions:
self.reactions[reaction_id] = self.function_definition_replace(self.reactions[reaction_id],self.kinetic_laws[reaction_id]) # replace functions with function formulas
self.reactions[reaction_id] = self.parameter_replace(reaction_id,self.reactions[reaction_id]) # replace local parameter names in formulas with unique names
if self.rules:
self.reactions[reaction_id] = self.rules_replace(reaction_id,self.reactions[reaction_id]) # replace rules in formulas with rule formulas
self._species2pos = dict( zip(self.species, range(self.species.__len__() ) ) ) # species_1: 0, species_2: 1.
self.N = self._get_stoich_mat() # build stoichiometric matrix
self.diff_equations = self.get_diff_equations()
def rules_replace(self,reaction_id,reaction_formula):
# replace rules in formulas with rule expressions
formula = reaction_formula
for i in range(5): # do this 5 levels deep
for rule in self._m.getListOfRules():
var = rule.getVariable()
formula = find_and_replace(var, formula, '( ' + rule.getFormula() + ' )')
formula = clean_formula(formula)
return formula
def parameter_replace(self,reaction_id,reaction_formula):
# replace parameter names in formulas with unique names
formula = reaction_formula
for i in self.parameters:
try:
rct, p = i.split('__')
if reaction_id == rct:
formula = find_and_replace(p, formula, i)
            except ValueError:
pass
return formula
def get_diff_equations(self):
""" get the differential equations in dict with species names as keys and the diff eqs as strings """
self.diff_equations = dict([(i,'') for i in self.species])
for s in self._m.getListOfSpecies():
# if s.getConstant() or s.getBoundaryCondition():
if s.getConstant():
self.diff_equations[s.getId()] = '0' # constant species will have 0 - no change over time - on the right hand side of the diff_equation
for j,r in enumerate( self._m.getListOfReactions() ):
modes = [('-','Reactants'), ('+','Products')]
for sign,direction in modes:
for sr in getattr(r,'getListOf'+direction)():
s=self._m.getSpecies(sr.getSpecies())
if s.getBoundaryCondition() or s.getConstant(): # right hand side of const species remain 0
continue
self.diff_equations[sr.getSpecies()] = self.diff_equations[sr.getSpecies()] + ' ' + sign + ' ' + str(sr.getStoichiometry()) + ' * (' + self.reactions[r.getId()]+ ' ) '
self.diff_equations[sr.getSpecies()] = clean_formula(self.diff_equations[sr.getSpecies()])
self.diff_equations[sr.getSpecies()] = self.diff_equations[sr.getSpecies()].strip() # get rid of whitespace leading and lagging
for s,eq in self.diff_equations.items(): # for empty species
if eq == '':
self.diff_equations[s] = '0'
return self.diff_equations
def _get_stoich_mat(self):
""" get the stoichiometric matrix (including constant and boundary condition species)
constant and boundary condition species rows are zeros """
N = numpy.zeros( (len(self.species),self._m.getNumReactions()) )
for i,r in enumerate( self._m.getListOfReactions() ):
modes = [(-1,'Reactants'), (+1,'Products')]
for sign,direction in modes:
for sr in getattr(r,'getListOf'+direction)():
s=self._m.getSpecies(sr.getSpecies())
if s.getBoundaryCondition() or s.getConstant(): # keep entry 0 for const./boundery species, although in/outflux might be specified
continue
j=self._species2pos[sr.getSpecies()]
N[j,i] = sign*sr.getStoichiometry()
return N
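    # Illustrative note (not from the original source): for a reaction R1: A -> 2 B,
    # the column of N for R1 holds -1 in the row of A and +2 in the row of B, while
    # rows of constant or boundary-condition species stay 0.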
def function_definition_replace( self,formula,kinetic_law ):
""" Replace the function definitions in the formulas with the actual formulas. Replace also parameters in function definitions with actual parameters. """
for fd in self._m.getListOfFunctionDefinitions():
children = [kinetic_law.getMath().getChild(x) for x in range(kinetic_law.getMath().getNumChildren())]
for child in children:
if fd.getId() == child.getName(): # if the function is in the formula
arg_list = [child.getChild(y).getName() for y in range(child.getNumChildren())] # list of function arguments
var_list = dict( zip ([fd.getArgument(x).getName() for x in range(fd.getNumArguments())], arg_list)) # dict of function variables as keys, function arguments as values
matchstring = "\s*"+fd.getId()+"\s*\("+ ",".join([".*"]*len(var_list)) +"\)\s*"
match = re.search(matchstring,formula)
fd_formula = ' ' + libsbml.formulaToString(fd.getBody()) + ' ' # clean formula a bit
fd_formula = clean_formula(fd_formula) # clean formula a bit
for arg in var_list: # replace variables with arguments in new formulas
fd_formula = find_and_replace(' '+arg, fd_formula, ' '+var_list[arg])
formula = formula.replace(match.group(0), fd_formula) # replace the function with function_definition
return formula
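    # Illustrative sketch (hypothetical SBML identifiers): for a function definition
    # f(x, y) = x * y and a kinetic law formula 'f(k1, S1)', the matched call is
    # replaced by the cleaned body with its arguments substituted, i.e. ' k1 * S1 '.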
def _import_model_from_file(self,filename):
"""read the sbml file (check for errors) and return the model instance"""
reader = libsbml.SBMLReader()
self._document = reader.readSBML(filename)
if self._document.getNumErrors():
self._document.printErrors()
return None
else:
self._m = self._document.getModel()
return self._m
def _get_parameters(self):
""" get dictionary of parameter values and compartmets - """
self.parameter_values = {}
self.parameter_names = {}
self.parameters = []
# get all the global (in model) and local (in reactions) parameters
for p in self._m.getListOfParameters(): # global parameters
self.parameters.append( p.getId() )
self.parameter_names[p.getId()] = p.getName()
self.parameter_values[p.getId()] = p.getValue()
for reaction in self._m.getListOfReactions(): # local parameters (assign new name)
rname = reaction.getId()
# rformula = reaction.getKineticLaw().getFormula()
for p in reaction.getKineticLaw().getListOfParameters():
newname = rname+"__"+p.getId()
self.parameter_names[newname] = p.getName()
self.parameter_values[newname] = p.getValue()
self.parameters.append( newname )
for compartment in self._m.getListOfCompartments(): # compartments
self.parameter_values[ compartment.getId() ] = compartment.getSize()
self.parameter_names[ compartment.getId() ] = compartment.getName()
self.parameters.append( compartment.getId() )
return self.parameter_values, self.parameters, self.parameter_names
def _get_species(self):
"""get all species in the model"""
self.species = []
self.species_values = {}
self.species_names = {}
for sbmlspecies in self._m.getListOfSpecies():
self.species_names[ sbmlspecies.getId() ] = sbmlspecies.getName()
self.species_values[ sbmlspecies.getId() ] = sbmlspecies.getInitialConcentration()
self.species.append( sbmlspecies.getId() )
return self.species_values, self.species, self.species_names
|
# TODO: avoid importing everything from _mnncengine._nn for visible control
from _mnncengine._nn import *
import _mnncengine._expr as _F
import _mnncengine._nn as _nn
# old call: load_module_from_file(file_name, for_training)
# new call: load_module_from_file(file_name, dynamic=False, shape_mutable=True)
# support both call styles via args and kwargs
def load_module_from_file(file_name, *args, **kwargs):
old_call = len(args) > 0 #for_training
m = _F.load_as_dict(file_name)
inputs_outputs = _F.get_inputs_and_outputs(m)
inputs = []
for key in inputs_outputs[0].keys():
inputs.append(inputs_outputs[0][key] if old_call else key)
outputs = []
for key in inputs_outputs[1].keys():
outputs.append(inputs_outputs[1][key] if old_call else key)
if old_call:
for_training = args[0]
module = _nn.load_module(inputs, outputs, for_training)
else:
dynamic = kwargs.get('dynamic', False)
shape_mutable = kwargs.get('shape_mutable', True)
module = _nn.load_module_from_file(inputs, outputs, file_name, dynamic, shape_mutable)
return module
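# Hedged usage sketch ("model.mnn" is a hypothetical file name; assumes the MNN
# Python bindings imported above are installed):
#   old call style:  module = load_module_from_file("model.mnn", True)  # for_training=True
#   new call style:  module = load_module_from_file("model.mnn", dynamic=False, shape_mutable=True)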
class Module(_nn._Module):
def __init__(self):
super(Module, self).__init__()
self._children = {}
self._vars = {}
def forward(self, x):
raise NotImplementedError
def __call__(self, x):
raise NotImplementedError("__call__ not implemented, please use 'forward' method in subclasses")
def __setattr__(self, name, value):
self.__dict__[name] = value
def remove_from(dicts):
if name in dicts:
del dicts[name]
if isinstance(value, (Module, _nn._Module)):
remove_from(self._children)
value.set_name(name)
self._children[name] = value
self._register_submodules([value])
return
if isinstance(value, _F.Var):
value.name = name
if name in self._vars:
self._vars[name].replace(value)
else:
self._vars[name] = value
self._add_parameter(value)
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
from memsource import Memsource
def get_action(module_params):
"""Get action to do based on module.params"""
trimmed_list = {
k: v
for k, v in module_params.items()
if v is not None and k not in get_default_argspec().keys() and k != "state"
}
if module_params.get("state") == "absent":
action = "delete"
elif not module_params.get("uid"):
action = "create"
elif list(trimmed_list.keys()) == ["uid"]:
action = "read"
else:
action = "update"
return action
def get_memsource_client(module_params):
"""Return a memsource client instance"""
if os.environ.get("MEMSOURCE_TOKEN"):
# TO BE IMPLEMENTED in python-memsource
memsource_client = None
elif module_params.get("memsource_token"):
# TO BE IMPLEMENTED in python-memsource
memsource_client = None
elif os.environ.get("MEMSOURCE_USERNAME") and os.environ.get("MEMSOURCE_PASSWORD"):
memsource_client = Memsource(
os.environ.get("MEMSOURCE_USERNAME"), os.environ.get("MEMSOURCE_PASSWORD")
)
elif module_params.get("memsource_username") and module_params.get(
"memsource_password"
):
memsource_client = Memsource(
module_params.get("memsource_username"),
module_params.get("memsource_password"),
)
else:
memsource_client = None
return memsource_client
def get_default_argspec():
"""
Provides default argument spec for the options documented in the memsource doc fragment.
"""
return dict(
memsource_username=dict(type="str"),
memsource_password=dict(type="str", no_log=True),
memsource_token=dict(type="str", no_log=True),
)
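# Hedged usage sketch: the parameter dicts below are made up for illustration only;
# in a real Ansible module they come from module.params.
if __name__ == "__main__":
    print(get_action({"state": "absent", "uid": "abc",
                      "memsource_username": None, "memsource_password": None,
                      "memsource_token": None}))  # -> delete
    print(get_action({"state": "present", "uid": None, "name": "new-user",
                      "memsource_username": None, "memsource_password": None,
                      "memsource_token": None}))  # -> create
    print(get_action({"state": "present", "uid": "abc",
                      "memsource_username": None, "memsource_password": None,
                      "memsource_token": None}))  # -> read
    print(get_action({"state": "present", "uid": "abc", "role": "ADMIN",
                      "memsource_username": None, "memsource_password": None,
                      "memsource_token": None}))  # -> update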
|
from django.test import TestCase
from ted_project.factories import UserFactory, FaceFactory, PictureFactory
class TestFaceModel(TestCase):
def setUp(self):
self.face = FaceFactory(name='test_name')
def test_Face_attributes(self):
self.assertEqual(self.face.name, 'test_name')
class TestPictureModel(TestCase):
def setUp(self):
self.picture = PictureFactory(url='test.url')
def test_picture_attributes(self):
self.assertEqual(self.picture.face.name, 'test_name')
self.assertEqual(self.picture.url, 'test.url')
|
import os.path
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, random_split
from config.ConfigDataset import ConfigDataset
from config.ConfigHyperparams import ConfigHyperparams
from config.ConfigCheckpoints import ConfigCheckpoints
from config.ConfigEEGNet import ConfigEEGNet
from model.eegnet_pt import EEGNet
class EEGNetTrain():
def __init__(self, config_dataset: ConfigDataset,
config_hyperparams: ConfigHyperparams,
config_checkpoints: ConfigCheckpoints,
config_eegnet: ConfigEEGNet) -> None:
# Dataset config
self.config_dataset = config_dataset
# Hyperparams
self.config_hyperparams = config_hyperparams
# Checkpoints
self.checkpoint_epoch = None
self.checkpoint_loss = None
self.config_checkpoints = config_checkpoints
# EEGNet hyperparams
self.config_eegnet = config_eegnet
self.loss = nn.CrossEntropyLoss()
self.optimizer = optim.Adam(self.config_eegnet.get_model().parameters(), lr=self.config_hyperparams.learning_rate)
self.checkpoint_restart()
self.dataset_prepare(config_hyperparams.batch_size, config_hyperparams.num_workers)
# Set training device
if torch.cuda.is_available():
self.config_eegnet.get_model().cuda()
def dataset_prepare(self, batch_size:int = 32, num_workers: int = 2) -> None:
# Data load
dataset = self.config_dataset.make_dataset()
# Split into train and validation sets
        length = len(dataset)
len_train = int(length * 0.8)
len_test = int(length * 0.1)
len_val = length - len_train - len_test
train_data, test_data, val_data = random_split(dataset, [len_train, len_test, len_val])
self.train_loader = DataLoader(dataset=train_data, batch_size=batch_size, num_workers=num_workers)
self.test_loader = DataLoader(dataset=test_data, batch_size=batch_size, num_workers=num_workers)
self.val_loader = DataLoader(dataset=val_data, batch_size=batch_size, num_workers=num_workers)
def fit(self) -> None:
print(self.config_eegnet.get_model())
losses_train = list()
if self.config_hyperparams.overfit_on_batch:
            batch0 = next(iter(self.train_loader))
if self.config_hyperparams.overfit_on_batch:
batches_to_train_on = 1
else:
            batches_to_train_on = len(self.train_loader)
# Restore epoch from checkpoint data
if not self.config_checkpoints.start_fresh:
print(f"Continue from epoch: {self.checkpoint_epoch}. Validation loss: {self.checkpoint_loss:.2f}")
epoch_start = self.checkpoint_epoch + 1
else:
epoch_start = 0
for epoch in range(epoch_start, self.config_hyperparams.epochs_max):
batch_counter = 1
for batch in self.train_loader:
if self.config_hyperparams.overfit_on_batch:
batch = batch0
print(f"\rEpoch: {epoch} training step 1/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
# Manipulate shape of x, y to become suitable for Conv1d()
x, y = batch
x = x.permute(0, 2, 1)
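                # Assumption: the dataset yields (batch, samples, channels); Conv1d expects (batch, channels, samples).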
y = y.max(1)[1] # For CrossEntropyLoss()
if torch.cuda.is_available():
x = x.cuda()
# 1. Forward propagation
print(f"\rEpoch: {epoch} training step 2/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
l = self.config_eegnet.get_model()(x)
# 2. Compute loss
print(f"\rEpoch: {epoch} training step 3/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
if torch.cuda.is_available():
y = y.cuda()
J = self.loss(l, y)
# 3. Zero the gradients
print(f"\rEpoch: {epoch} training step 4/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
self.config_eegnet.get_model().zero_grad()
# 4. Backward propagation
print(f"\rEpoch: {epoch} training step 5/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
J.backward()
# 5. Step in the optimizer
print(f"\rEpoch: {epoch} training step 6/6: Batch: {batch_counter}/{batches_to_train_on}", end="")
self.optimizer.step()
losses_train.append(J.item())
if self.config_hyperparams.overfit_on_batch:
break
batch_counter += 1
loss_validation = self.validate()
print(f"\tTrain loss: {torch.Tensor(losses_train).mean():.2f}", end="\t")
print(f"Validation loss: {loss_validation:.2f}")
# Save model state after an epoch
if self.config_checkpoints.has_to_save_checkpoint(epoch):
self.checkpoint_save(epoch, loss_validation)
return
def validate(self) -> float:
losses_val = list()
for batch in self.val_loader:
x, y = batch
x = x.permute(0, 2, 1)
y = y.max(1)[1]
if torch.cuda.is_available():
x = x.cuda()
y = y.cuda()
# 1. Forward propagation
with torch.no_grad():
l = self.config_eegnet.get_model()(x)
# 2. Compute loss
J = self.loss(l, y)
losses_val.append(J.item())
        return torch.Tensor(losses_val).mean().item()
def checkpoint_save(self, epoch: int, loss_val: float) -> None:
checkpoint_data = {
"epoch" : epoch,
"loss" : loss_val,
"model" : self.config_eegnet.get_model().state_dict(),
"optimizer" : self.optimizer.state_dict()
}
checkpoint_file = f"{self.config_eegnet.get_model().__class__.__name__}-epoch-{epoch}.pt.tar"
torch.save(checkpoint_data, checkpoint_file)
def checkpoint_load(self, checkpoint_filepath: str) -> tuple:
# https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html
checkpoint_data = torch.load(checkpoint_filepath)
self.config_eegnet.get_model().load_state_dict(checkpoint_data["model"])
self.optimizer.load_state_dict(checkpoint_data["optimizer"])
epoch, loss = checkpoint_data["epoch"], checkpoint_data["loss"]
return epoch, loss
def checkpoint_restart(self):
if not self.config_checkpoints.start_fresh:
print("Loading checkpoint...", end="")
# Find last checkpoint file
checkpoint_file = None
for file in os.scandir():
if file.is_file() and file.name.endswith(".pt.tar"):
if checkpoint_file is None:
checkpoint_file = file
elif file.stat().st_mtime > checkpoint_file.stat().st_mtime:
checkpoint_file = file
if checkpoint_file is None:
self.config_checkpoints.start_fresh = True
print("no checkpoints found")
return
self.checkpoint_epoch, self.checkpoint_loss = self.checkpoint_load(checkpoint_file.path)
self.config_eegnet.get_model().train()
print("ok")
# Add commandline arguments
def parser_add_cmdline_arguments():
parser = argparse.ArgumentParser(description="Train EEGNet for the 1s duration EEG sample")
# Datasets
datasets = parser.add_argument_group("Datasets", "Select and manipulate datasets")
datasets.add_argument("dataset", choices=["5f", "cla", "halt", "freeform", "nomt"], default="cla",
help="BCI EEG dataset to work with. More info: https://doi.org/10.6084/m9.figshare.c.3917698.v1\n"
"5f - 5F set. 5 finger gestures\n"
"cla - CLA set. 3 gestures\n"
"halt - HaLT set. 5 gestures\n"
"freeform - FREEFORM set\n"
"nomt - NoMT set")
datasets.add_argument("--no_download", action="store_false",
help="Don't download dataset files. Default: False")
datasets_options = datasets.add_mutually_exclusive_group()
datasets_options.add_argument("--dataset_merge", action="store_false",
help="Merge data from all subjects into a single dataset. Default: True")
datasets_options.add_argument("--dataset_subjects", nargs='+', type=int, metavar="n",
help="Merge data from specific test subjects into a single dataset. "
"Takes zero-based single index or a zero-based list of indices")
# Checkpoints
    checkpoints = parser.add_argument_group("Checkpoints", "Manipulate model checkpoints behaviour")
checkpoints.add_argument("--start_fresh", action="store_true",
help="Ignores checkpoints. Default: False")
checkpoints2 = checkpoints.add_mutually_exclusive_group()
checkpoints2.add_argument("--no_checkpoints", action="store_true",
help="Make no checkpoints. Default: False")
checkpoints2.add_argument("--checkpoint_every_epoch", type=int, metavar="val", default=1,
help="Save model to checkpoint every 'val' epochs. Default: 1")
# Hyperparams
hyperparams = parser.add_argument_group("Hyperparams", "Set neuralnet training hyperparams")
hyperparams.add_argument("--learning_rate", type=float, metavar="val", default=1e-2,
help="Set learning rate. Default: 1e-2", )
hyperparams.add_argument("--epochs_max", type=int, metavar="val", default=10,
help="Set maximum number of training epochs. Default: 10")
hyperparams.add_argument("--batch_size", type=int, metavar="val", default=32,
help="Set training batch size. Default: 32")
hyperparams.add_argument("--num_workers", type=int, metavar="val", default=2,
help="Set number of CPU worker threads to prepare dataset batches. Default: 2")
hyperparams.add_argument("--overfit_on_batch", action="store_true",
help="Test if model overfits on a single batch of training data")
# EEGNet hyperparams
eegnet_hyperparams = parser.add_argument_group("EEGNet hyperparams")
eegnet_hyperparams.add_argument("--eegnet_nb_classes", type=int, metavar="Classes", default=4,
help="Number of classification categories. Default=4")
eegnet_hyperparams.add_argument("--eegnet_kernel_length", type=int, metavar="Krnl_Length", default=63,
help="Length of temporal convolution in first layer. We found "
"that setting this to be half the sampling rate worked "
"well in practice. For the SMR dataset in particular "
"since the data was high-passed at 4Hz we used a kernel "
"length of 31. "
"Must be odd number!",
)
eegnet_hyperparams.add_argument("--eegnet_channels", type=int, metavar="Chnls", default=64,
help="Number of channels in the EEG data. Default: 64")
eegnet_hyperparams.add_argument("--eegnet_samples", type=int, metavar="Freq", default=128,
help="Sample frequency (Hz) in the EEG data. Default: 128Hz", )
eegnet_hyperparams.add_argument("--eegnet_f1", type=int, metavar="F1", default=8,
help="Number of temporal filters. Default: 8")
eegnet_hyperparams.add_argument("--eegnet_d", type=int, metavar="D", default=2,
help="Number of spatial filters to learn within each temporal convolution. Default: 2")
eegnet_hyperparams.add_argument("--eegnet_dropout_rate", type=float, metavar="dr", default=0.5,
help="Dropout rate in Block 1")
return parser.parse_args()
'''
# Entry point
'''
# Commandline arguments
args = parser_add_cmdline_arguments()
# Dataset config
if args.dataset_merge:
config_dataset = ConfigDataset(dataset_type=args.dataset, download=args.no_download, dataset_subjects=None)
else:
config_dataset = ConfigDataset(dataset_type=args.dataset, download=args.no_download, dataset_subjects=args.dataset_subjects)
config_hyperparams = ConfigHyperparams(epochs_max=args.epochs_max, learning_rate=args.learning_rate,
batch_size=args.batch_size, num_workers=args.num_workers,
overfit_on_batch=args.overfit_on_batch)
config_checkpoints = ConfigCheckpoints(checkpoint_every_epoch=args.checkpoint_every_epoch,
start_fresh=args.start_fresh, no_checkpoints=args.no_checkpoints)
config_eegnet = ConfigEEGNet(nb_classes=args.eegnet_nb_classes, channels=args.eegnet_channels, samples=args.eegnet_samples,
kernel_length=args.eegnet_kernel_length, f1=args.eegnet_f1, d=args.eegnet_d,
dropout_rate=args.eegnet_dropout_rate)
trainer = EEGNetTrain(config_dataset, config_hyperparams, config_checkpoints, config_eegnet)
trainer.fit()
|
# %%
import helper as hp
# Set FIRED base Folder (location where you downloaded the dataset)
hp.FIRED_BASE_FOLDER = "/Volumes/Data/NILM_Datasets/FIRED"
# hp.FIRED_BASE_FOLDER = "~/FIRED"
# load 1Hz power data of the television for complete recording range
television = hp.getPower("television", 1)
print(television)
# %%
# load 2 hours of 50Hz power data of powermeter09 (Fridge) of day 2020.08.03
startTs, stopTs = hp.getRecordingRange("2020.08.03 17:25:00", "2020.08.03 19:25:00")
fridge = hp.getMeterPower("powermeter09", 50, startTs=startTs, stopTs=stopTs)
#Plotting the data is straightforward:
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import matplotlib.dates as mdates
# Generate timestamps
start = fridge["timestamp"]
end = start+(len(fridge["data"])/fridge["samplingrate"])
timestamps = np.linspace(start, end, len(fridge["data"]))
dates = [datetime.fromtimestamp(ts) for ts in timestamps]
# Plot
fig, ax = plt.subplots()
ax.plot(dates, fridge["data"]["p"], label="active power")
ax.plot(dates, fridge["data"]["q"], label="reactive power")
# format plot
ax.set(xlabel='Time of day', ylabel='Power [W/var]', title='Fridge')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.setp(ax.get_xticklabels(), ha="center", rotation=35)
plt.legend()
plt.show()
# %%
smartmeter = hp.getMeterPower(hp.getSmartMeter(), 50, startTs=startTs, stopTs=stopTs)
# Plot
fig, axes = plt.subplots(3, sharex=True)
for i,ax in enumerate(axes):
ax.plot(dates, smartmeter[i]["data"]["p"], label="active power")
ax.plot(dates, smartmeter[i]["data"]["q"], label="reactive power")
# format plot
axes[0].set(title='Smart meter')
axes[1].set(ylabel='Power [W/var]')
axes[-1].set(xlabel='Time of day')
axes[-1].xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.setp(axes[-1].get_xticklabels(), ha="center", rotation=35)
plt.legend()
plt.show()
# %%
# Data is now loaded on the fly over rsync
hp.RSYNC_ALLOWED = True
# load two seconds of high freq data powermeter09 (Fridge)
startTs, stopTs = hp.getRecordingRange("2020.08.03 17:34:02", "2020.08.03 17:34:04")
fridge = hp.getMeterVI("powermeter09", startTs=startTs, stopTs=stopTs)
# Generate timestamps
start = fridge["timestamp"]
end = start+(len(fridge["data"])/fridge["samplingrate"])
timestamps = np.linspace(start, end, len(fridge["data"]))
dates = [datetime.fromtimestamp(ts) for ts in timestamps]
# Plot
fig, ax = plt.subplots()
ax.plot(dates, fridge["data"]["i"], label="current")
# format plot
ax.set(xlabel='Time of day', ylabel='Current [mA]', title='Fridge')
plt.setp(ax.get_xticklabels(), ha="center", rotation=35)
plt.show()
|
def calc_bmi(x, y):
    ans = y / (x * x)
    return ans

x = 2
y = 80
ans = calc_bmi(x, y)
if ans < 18:
    print("Underweight")
elif 18 <= ans < 25:
    print("Normal")
elif ans >= 25:
    print("Overweight")
|
from typing import Union, Set
from src.api import APITask, APISection
from src.core.errors import TaskError
from src.core.models import Task, Section, Project
from src.utils import Colors
class TaskManager:
def __init__(self, token: str, project: Project):
self.project = project
self.api_section = APISection(token)
self.api = APITask(token)
def create(self, task_name: str, task_description: str, section_name: str):
if len(task_name) < 3:
raise TaskError("Task name must be at least three characters")
section: Section = self.api_section.fetch_by_name(section_name, self.project.id)
if not section:
print(Colors.wrap(Colors.RED, "Section not found"))
exit(145)
task: Task = Task(
name=task_name, description=task_description, project_id=self.project.id
)
task.section_id = section.id
self.api.create(task)
print(Colors.wrap(Colors.GREEN, "Task added successfully"))
def update(self, task: Task):
        print(task)
name: str = str(
input(Colors.wrap(Colors.YELLOW, "[?] Type name (Enter to skip):"))
)
if name and len(name):
task.name = name
description: str = str(
input(Colors.wrap(Colors.YELLOW, "[?] Type description (Enter to skip): "))
)
if description and len(description):
task.description = description
self.api.update(task)
print(Colors.wrap(Colors.GREEN, "Task updated successfully"))
def move(self, task_id: int, section_id: int):
task: Task = self.api.move(task_id, section_id)
print(task)
print(Colors.wrap(Colors.GREEN, "Task moved successfully"))
def complete(self, task_name: str):
task: Task = self.fetch(task_name)
if task:
updated_task: Task = self.api.complete(task.id)
print(updated_task)
print(Colors.wrap(Colors.GREEN, "Task completed successfully"))
else:
print(Colors.wrap(Colors.RED, "Task not found"))
def fetch(self, name_or_id: Union[int, str]) -> Union[Task, None]:
try:
task_id: int = int(name_or_id)
task: Task = self.api.fetch_by_id(task_id)
return task
except ValueError:
task_name: str = str(name_or_id)
task: Task = self.api.fetch_by_name(task_name, self.project.id)
return task
def list_all(self, section_name: str, task_status: str, listing_format="long"):
tasks = self.__get_tasks_by_section(section_name, task_status)
if tasks and len(tasks):
for task in tasks:
if listing_format == "short":
task.show_summary()
else:
print(task)
print(
Colors.wrap(Colors.CYAN, "Total number of tasks {}".format(len(tasks)))
)
else:
print(Colors.wrap(Colors.RED, "No task is found in this project"))
def __get_tasks_by_section(self, section_name: str, task_status: str):
"""Get all tasks based on section name"""
if section_name != "all":
section: Section = self.api_section.fetch_by_name(
section_name, self.project.id
)
if not section:
print(Colors.wrap(Colors.RED, "Section not found"))
exit(48)
return self.api.fetch_all_by_section(section.id, task_status)
else:
return self.api.fetch_all(self.project.id, task_status)
|
import pathlib
import yaml
import os
import logging
logger = logging.getLogger(__name__)
with open("settings.yaml", "r", encoding="utf-8") as f:
settings = yaml.safe_load(f)
# check for required directories
required_directories = []
required_directories.append(settings["OUTPUT_DIR_MP3_FILES"])
for each_dir in required_directories:
if not os.path.isdir(pathlib.Path(each_dir)):
os.makedirs(pathlib.Path(each_dir))
logger.warning(f"{each_dir} had to be created")
|
# Imports
from . import *
from ..utils import misc
from ..utils import pubtools
from ..parsing.grammars import frontiers
class FrontiersParser(Parser):
'''Parser designed to interact with Frontiers
'''
def parse_document(self, document):
'''Extract and parse references from a document.
Args:
document (str) : raw text of document
Returns:
list of parsed references
'''
# Extract raw reference strings
raw_refs = self.extract_refs(document)
# Parse reference strings
parsed_refs = []
for raw_ref in raw_refs:
parsed_ref = self.parse_ref(raw_ref)
ref_info = {
'raw' : repr(raw_ref),
'ref' : parsed_ref,
}
parsed_refs.append(ref_info)
# Return parsed references
return parsed_refs
def extract_refs(self, html):
'''Extract references from document.
Args:
html (str) : Document full text
Return:
refs : List of references
'''
# Parse HTML
html_parse = BS(html)
# Get references
refs = html_parse.findAll(
'div',
{'class' : re.compile('references', re.I)}
)
# Return references
return refs
def parse_ref(self, ref):
'''Parse a raw reference.
'''
# Get text from BeautifulSoup object
ref_txt = ''.join(ref.findAll(text=True))
# Extract reference
parsed_ref = frontiers.scan(frontiers.reference, ref_txt)
# Get first match, if any
if parsed_ref:
parsed_ref = parsed_ref[0]
# Get DOI
doi_link = ref.find(
'a',
            href=re.compile(r'dx\.doi\.org')
)
if doi_link:
try:
doi_href = doi_link['href']
                doi_short = re.sub(r'(?:http://)?dx\.doi\.org/', '', doi_href)
parsed_ref['doi'] = doi_short
except:
pass
# Get PubMed ID
pmid_text = ref.find(
text=re.compile('pubmed abstract', re.I)
)
if pmid_text:
try:
pmid_link = pmid_text.findParent('a')
pmid_href = pmid_link['href']
                pmid_match = re.search(r'termtosearch=(\d+)', pmid_href, re.I)
pmid = pmid_match.groups()[0]
parsed_ref['pmid'] = pmid
except:
pass
# Return reference
return parsed_ref
|
# Splitting a number
'''Write a program that reads any real number from the
keyboard and shows its integer portion on screen.
E.g.: the number 6.127 has the integer part 6'''
from math import trunc
num = float(input('Enter any number: '))
print('The value entered was \033[7;40m''{}\033[m and its integer portion is \033[1;36m''{}'.format(num, trunc(num)))
|
"""
%%
%% Construction du point P(t) d'une courbe de Bézier pour
%% la valeur t du paramètre, par l'algorithme de Casteljau.
%%
%% Données : XP, YP, ZP coordonnées des points de contrôle
%% t valeur du paramètre
%%
%% Résultats : x,y,z coordonnées du point P(t) dans R^3
%%
"""
import numpy as np
def cast3d(t,XP,YP,ZP):
m = (np.shape(XP)[0]) - 1
xx = XP.copy()
yy = YP.copy()
zz = ZP.copy()
for kk in range(0,m):
xxx = xx.copy()
yyy = yy.copy()
zzz = zz.copy()
for k in range(kk,m):
xx[k+1] = (1-t)*xxx[k]+t*xxx[k+1]
yy[k+1] = (1-t)*yyy[k]+t*yyy[k+1]
zz[k+1] = (1-t)*zzz[k]+t*zzz[k+1]
x=xx[m]
y=yy[m]
z=zz[m]
return(x,y,z)
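# Minimal usage sketch with made-up control points: evaluate a cubic Bézier
# curve at t = 0.5 using the de Casteljau construction above.
if __name__ == "__main__":
    XP = np.array([0.0, 1.0, 2.0, 3.0])
    YP = np.array([0.0, 2.0, 2.0, 0.0])
    ZP = np.zeros(4)
    print(cast3d(0.5, XP, YP, ZP))  # expected: (1.5, 1.5, 0.0)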
|
"""Manual test all examples."""
# Import local modules
from photoshop.api import Application
from pathlib import Path
root = Path(__file__).parent.parent.parent.joinpath("examples")
for script_file in root.glob("*.py"):
try:
exec(script_file.read_text())
except Exception as err:
print(f"Test failed: {script_file}", str(err), end="\n")
# Clear up and close all documents.
app = Application()
while app.documents.length:
app.activeDocument.close()
|
print("Hallo Python-Welt")
# Kommentarzeile
'''
Kommentarblock
'''
'''
Datentypen (Datenarten) in Python:
Bezeichnung | Fachbegriff | Abkürzung | Beispiele
============================================================================================================
Ganzzahl | Integer | int | 23, 42, -10, 0
------------------------------------------------------------------------------------------------------------
Fließkommazahlen | Float | float | 3.14, -1.6, 0.0, 23.0, 1.666
------------------------------------------------------------------------------------------------------------
Zeichenketten | String | str | "Hallo", "23", "0.0", "Du bist ein guter Mensch."
'''
message = "Guten Tag, wie geht es Dir?"
print(message)
message = "Super, dass Du die Motivation hast, Python zu lernen."
print(message)
print(type(message)) # gibt den Datentyp von message aus - str (String)
x = 23 # Variablenname: x Variablenwert: 23 Variablentyp: Int (Integer)
y = 42 # Variablenname: y Variablenwert: 42 Variablentyp: Int (Integer)
# Variablenname immer mit einem Kleinbuchstaben beginnen.
sum = x + y
print(sum)
print(x + y)
print(sum + sum)
x = 0
y = 13
sum = x + y
print(sum)
firstName = "ada"
lastName = "lovelace"
fullName = firstName + " " + lastName # hier werden drei Strings aneinander gehängt
# Das + Zeichen ist hier keine Addition. Sondern die Strings werden
# aneinander gehängt.
print(fullName)
print("Ada Lovelace schrieb in den 40er-Jahren des 19. Jahrhunderts das erste Computerprogramm der Welt.")
print("Gemessen an dem Stand der Forschung der damaligen Zeit ist ihr Werk visionär.")
print(fullName.upper()) # fullName in Grossbuchstaben ausgeben
print(fullName.lower()) # fullName in Kleinbuchstaben ausgeben
x = 3.14 # variable name: x    value: 3.14    type: float
print(type(x))
print(x)
# Why is this an error:
# print("Hello " + 23)
# Why is this not an error:
print("Hello " + str(23))
# Help
print(type(23))
print(type(str(23)))
# addition
x = 1 + 2
# subtraction
x = 10 - 5
# multiplication
x = 4 * 4
# exponentiation
x = 3 ** 3
print(x)
# division
x = 20 / 10
x = 10.0 / 3.0
print(x) # 3.3333333
x = 10 / 3
print(x) # 3.3333333  Python converts the whole numbers (int) into floating-point numbers (float)
# The technical term for type conversion is "casting"
# What happens if I convert (cast) a float such as 3.9 into an int?
print("Attention:")
print(int(3.9))
print("Here the float 3.9 becomes the whole number 3.")
print("The part after the decimal point is simply cut off - it is not rounded!!!")
print(2 + 3 * 4) # multiplication before addition (operator precedence)
print((2 + 3) * 4) # parentheses first
# Special characters
# Tab \t   Newline \n
print("\tHello\n\n")
message = "\n\n\tLine 1\nLine 2\n\tLine 3\nLine 4"
print(message)
print("\n\n\t" + "Line 1" + "\n" + "Line 2" + "\n\t" + "Line 3" + "\n" + "Line 4")
message = "Line 1\n\tLine 2\n\t\tLine 3\n\t\t\tLine 4\n\t\t\t\tLine 5"
print(message)
|
from .base import CustomTestClass
import json
class TestContactUs(CustomTestClass):
def setUp(self) -> None:
self.base_data = {
"name": "Test",
"email": "testuser@example.com",
"query": "This is a test query blah blah blah blah blah blah blah blah blah blah blah blah blah",
}
def test_contact_us(self):
response = self.client.post(
self.base_url + "/api/contact-us",
data=json.dumps(self.base_data),
)
self.assertEqual(response.status_code, 201)
def test_invalid_data(self):
data = self.base_data.copy()
data.update({"extra_field": True})
response = self.client.post(self.base_url + "/api/contact-us", data=data)
self.assertEqual(response.status_code, 400)
data = self.base_data.copy()
data["email"] = "invalidaddr@123"
response = self.client.post(self.base_url + "/api/contact-us", data=data)
self.assertEqual(response.status_code, 400)
|
from defusedxml.lxml import fromstring
from lxml import etree
from six.moves import range
from six.moves.urllib.parse import urlparse
from zeep import ns
from zeep.exceptions import XMLSyntaxError
from zeep.parser import absolute_location
class NamePrefixGenerator(object):
def __init__(self, prefix='_value_'):
self._num = 1
self._prefix = prefix
def get_name(self):
retval = '%s%d' % (self._prefix, self._num)
self._num += 1
return retval
class UniqueNameGenerator(object):
def __init__(self):
self._unique_count = {}
def create_name(self, name):
if name in self._unique_count:
self._unique_count[name] += 1
return '%s__%d' % (name, self._unique_count[name])
else:
self._unique_count[name] = 0
return name
class ImportResolver(etree.Resolver):
"""Custom lxml resolve to use the transport object"""
def __init__(self, transport):
self.transport = transport
def resolve(self, url, pubid, context):
if urlparse(url).scheme in ('http', 'https'):
content = self.transport.load(url)
return self.resolve_string(content, context)
def parse_xml(content, transport, base_url=None):
parser = etree.XMLParser(remove_comments=True, resolve_entities=False)
parser.resolvers.add(ImportResolver(transport))
try:
return fromstring(content, parser=parser, base_url=base_url)
except etree.XMLSyntaxError as exc:
raise XMLSyntaxError("Invalid XML content received (%s)" % exc.message)
def load_external(url, transport, base_url=None):
if base_url:
url = absolute_location(url, base_url)
response = transport.load(url)
return parse_xml(response, transport, base_url)
def max_occurs_iter(max_occurs, items=None):
assert max_occurs is not None
generator = range(0, max_occurs if max_occurs != 'unbounded' else 2**31-1)
if items is not None:
for i, sub_kwargs in zip(generator, items):
yield sub_kwargs
else:
for i in generator:
yield i
def create_prefixed_name(qname, schema):
if not qname:
return
if schema and qname.namespace:
prefix = schema.get_shorthand_for_ns(qname.namespace)
if prefix:
return '%s:%s' % (prefix, qname.localname)
elif qname.namespace in ns.NAMESPACE_TO_PREFIX:
prefix = ns.NAMESPACE_TO_PREFIX[qname.namespace]
return '%s:%s' % (prefix, qname.localname)
if qname.namespace:
return qname.text
return qname.localname
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import path
from yaml import safe_load
from ao.model.render import Render
from unittest import TestCase, expectedFailure
from pytest import mark
class RenderTest(TestCase):
@staticmethod
def parse_yaml(filename):
filepath = path.join(path.dirname(__file__), 'fixtures/{}.yaml'.format(filename))
with open(filepath, 'r') as stream:
return safe_load(stream.read())
def test__01__render__pass(self):
# prepare
directory = path.join(path.dirname(__file__),"../../data/templates")
data = RenderTest.parse_yaml("render")
# run - test should fail if any exception occurs
try:
render = Render(directory=directory)
dirpath = render.getDirectory()
version = render.getVersion()
templates = render.getTemplates()
result = render.render( data=data, template_name="action")
except Exception as exc:
self.fail("Failed with {}".format(str(exc)))
# check
self.assertEqual(dirpath, directory)
self.assertIsNotNone(version)
self.assertIsNotNone(templates)
self.assertIsNotNone(result)
|
# /*
# * Copyright (C) 2020 ACTUAL Systems, Inc.
# *
# * http://www.actualhq.com
# *
# * In collaboration with UNICEF/Giga: https://gigaconnect.org
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
def pause():
programPause = input("Press the <ENTER> key to continue...")
import argparse #For input arguments
import sys
import io
sys.path.insert(0, "./")
import numpy as np
import pandas as pd
from computeBandwidth import computeBandwidth
from findNumberOfNeighbors import findNumberOfNeighbors
from computeSchoolCensus import computeSchoolCensus
from computeCosts import computeCosts
from consolidateSchools import consolidateSchools
import time
#Analysis Config
sensitivity = 0
clipping = 0
#Set up inputs
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--Country", help="Country for Analysis", action="store")
args = parser.parse_args()
#Import School DataFrame
#country = 'Kenya'
#country = 'Sierra Leone'
pjDefault = 1
if args.Country == 'Honduras':
schoolDataPath = 'Honduras Data/HN_School_DN_2G_3G_4G.xlsx'
popDataPath = 'Honduras Data/hnd_ppp_2020_UNadj.tif'
import projectInputsHonduras as pjIn
pjDefault = 0
if args.Country == 'Rwanda':
schoolDataPath = 'Rwanda Data/RW_connectivity_GIGA_GSMA_DistanceNodes.xlsx'
popDataPath = 'Rwanda Data/rwa_ppp_2020_UNadj.tif'
import projectInputsRwanda as pjIn
pjDefault = 0
if args.Country == 'Kenya':
schoolDataPath = 'Kenya Data/Primary Schools list GPS Coordinates 2020_Tusome Data_14Feb2020_ITU.xlsx'
popDataPath = 'Kenya Data/ken_ppp_2020_UNadj.tif'
if args.Country == 'Sierra Leone':
schoolDataPath = 'Sierra Leone Data/sl_school_connectivity.xlsx'
popDataPath = 'Sierra Leone Data/sle_ppp_2020_UNadj.tif'
print("Country:", args.Country)
print("Loading from file", schoolDataPath)
schoolData = pd.read_excel(schoolDataPath)
#Add columns to DF for computed values
schoolData['Number of Pupils'] = ''
schoolData['Number of Teachers'] = ''
schoolData['Number of Classrooms'] = ''
schoolData['Schools Within 10 km'] = ''
schoolData['Schools Within 1 km'] = ''
schoolData['Distance to Nearest School'] = ''
schoolData['Population Within 1 km'] = ''
schoolData['Population Within 10 km'] = ''
schoolData['Local Population'] = ''
schoolData['Local Households'] = ''
schoolData['Bandwidth'] = ''
schoolData['Overnight Comms Cost'] = ''
schoolData['Annual Comms Cost'] = ''
schoolData['Overnight Power Cost'] = ''
schoolData['Annual Power Cost'] = ''
schoolData['Overnight Cost'] = ''
schoolData['Annual Cost'] = ''
schoolData['Tech'] = ''
#Import project global data
print("Importing Project Config")
if (pjDefault == 1):
import projectInputs as pjIn
import techInputs as techIn
dictionaries = ["configuration","usage", "EMIS"]
techTypes = ['fiber','cell2G','cell3G','cell4G','WISP','satellite']
#=======Drop schools that are within range of each other (assume same school building)
radiusSameSchool = 10 #m
schoolData = consolidateSchools(schoolData,radiusSameSchool,1)
#=======Compute population of each school
#Compute number of schools within a given radius of each school
print("Computing populations for each school")
radius = 10 #km
verbose_schooldata = 0
nearestSchoolData = findNumberOfNeighbors(schoolData,radius,verbose_schooldata)
schoolData['Schools Within 10 km'] = nearestSchoolData['numPoints']
schoolData['Distance to Nearest School'] = nearestSchoolData['nearestNeighbor']
#Compute population around school
verbose_census = 0
#Figure students in each school
studentPopRatio = pjIn.demo['schoolAgeFraction'] * pjIn.demo['schoolEnrollmentFraction']
if 'num_students' in schoolData.columns:
schoolData['Number of Pupils'] = schoolData['num_students']
else:
schoolData['Population Within 10 km'] = computeSchoolCensus(schoolData,radius,popDataPath,verbose_census)
schoolData['Number of Pupils'] = np.ceil(studentPopRatio * schoolData['Population Within 10 km'] /(schoolData['Schools Within 10 km']+1))
schoolData['Number of Teachers'] = np.ceil(schoolData['Number of Pupils']/pjIn.demo['studentTeacherRatio'])
#Edge Cases
schoolData.loc[schoolData['Number of Teachers'] < 1, 'Number of Teachers'] = 1 #Should be at least 1 teacher....
schoolData.loc[schoolData['Number of Pupils'] == 0, 'Number of Teachers'] = 0 #Unless there are no students
schoolData['Number of Classrooms'] = np.ceil(schoolData['Number of Teachers'] * pjIn.demo['teacherClassroomRatio'])
print(schoolData['Number of Pupils'].describe())
print(schoolData['Number of Teachers'].describe())
#Compute local population (within 1 km) who will come to the school for internet
radiusLocalInternet = 1 #km
verbose_localpop = 0
nearestSchoolInternetData = findNumberOfNeighbors(schoolData,radiusLocalInternet,verbose_localpop)
schoolData['Schools Within 1 km'] = np.maximum(1,nearestSchoolInternetData['numPoints'])
#Divide population among local schools to not overcount bandwidth needs
schoolData['Population Within 1 km'] = computeSchoolCensus(schoolData,radiusLocalInternet,popDataPath,verbose_localpop)
schoolData['Local Population'] = schoolData['Population Within 1 km']/schoolData['Schools Within 1 km']
schoolData['Local Households'] = np.floor(schoolData['Local Population']/pjIn.demo['peoplePerHousehold'])
print(schoolData['Local Population'].describe())
#=======Compute Bandwidth
print("Computing Bandwidth")
for schoolIndex in schoolData.index:
#for schoolIndex in range(10):
print("Computing BW for:",schoolIndex)
verbose = 0
finalTechSelection = 0
result = []
    specificSchoolData = schoolData.loc[schoolIndex]
#Compute bandwidth
BWNom = computeBandwidth(specificSchoolData,pjIn,verbose)
    schoolData.loc[schoolIndex, 'Bandwidth'] = BWNom
#=======Figure out correct tech option -- to revise later
schoolData.loc[(schoolData['Bandwidth'] < techIn.cell4G['speed']) &
               (schoolData['Tech'] == '') &
               (schoolData['Type of Cell Coverage'] == '4G'), 'Tech'] = 'cell4G'
schoolData.loc[(schoolData['Distance to Nearest Fiber'] < 10) &
               (schoolData['Tech'] == ''), 'Tech'] = 'fiber'
schoolData.loc[(schoolData['Distance to Nearest Fiber'] >= 10) &
               (schoolData['Distance to Nearest Fiber'] < 20), 'Tech'] = 'WISP'
schoolData.loc[(schoolData['Bandwidth'] < techIn.cell2G['speed']) &
               (schoolData['Tech'] == '') &
               (schoolData['Type of Cell Coverage'] == '2G'), 'Tech'] = 'cell2G'
schoolData.loc[(schoolData['Bandwidth'] < techIn.cell3G['speed']) &
               (schoolData['Tech'] == '') &
               (schoolData['Type of Cell Coverage'] == '3G'), 'Tech'] = 'cell3G'
schoolData.loc[schoolData['Tech'] == '', 'Tech'] = 'satellite'
print(schoolData['Tech'].describe())
#=======Compute Costs
verbose_costs = 1
schoolData['Overnight Comms Cost'], schoolData['Annual Comms Cost'], schoolData['Overnight Power Cost'], schoolData['Annual Power Cost'] = computeCosts(schoolData,pjIn,techIn,techTypes,verbose_costs)
schoolData['Overnight Cost'] = schoolData['Overnight Comms Cost'] + schoolData['Overnight Power Cost']
schoolData['Annual Cost'] = schoolData['Annual Comms Cost'] + schoolData['Annual Power Cost']
#print(schoolData['Overnight Cost'])
schoolData.to_csv("school_output.csv")
# if sensitivity == 1:
# verbose = 0
#
# for dicts in dictionaries:
# for key, value in getattr(pjIn,dicts).items():
# if isinstance(value, list):
#
# ##########
# #Run for nominal
# BWNom = computePerformance(specificSchoolData,pjIn,verbose)
#
#
# ##########
# #Run for max input value
# holdvalue = value[0]
# value[0] = holdvalue * (1 + value[2])
# BWMaxInput = computePerformance(specificSchoolData,pjIn,verbose)
#
# ##########
# #Run for min input value
# value[0] = holdvalue * (1 - value[1])
#
# #Inputs should not be negative - clip and store
# if value[0] < 0:
# value[0] = 0
#
# BWMinInput = computePerformance(specificSchoolData,pjIn,verbose)
#
# #Return value from placeholder
# value[0] = holdvalue
#
#
#
# ##########Computation. Handle nan (ex. if project returns nothing)
#
# if np.isnan(BWMinInput):
# BWMax = BWMaxInput
# BWMin = -1
# else:
# BWMax = np.maximum(BWMaxInput,BWMinInput)
# BWMin = np.minimum(BWMaxInput,BWMinInput)
#
#
# result = result + [key,BWNom,BWMin,BWMax,(BWMax-BWMin)]
# #resultDF.append(result)
# result = np.reshape(result,(-1,5))
#
#
# resultDF = pd.DataFrame(result, columns=['Variable','BWNom','BWMin','BWMax','BWRange'])
# resultDF['BWNom'] = pd.to_numeric(resultDF['BWNom'], errors='ignore')
# resultDF['BWMin'] = pd.to_numeric(resultDF['BWMin'], errors='ignore')
# resultDF['BWMax'] = pd.to_numeric(resultDF['BWMax'], errors='ignore')
# resultDF['BWRange'] = pd.to_numeric(resultDF['BWRange'], errors='ignore')
# pd.options.display.float_format = '{:.2f}'.format
#
#
# resultDF = resultDF.sort_values(by=['BWRange'],ascending=False)
# #print(resultDF)
#Plot
#
# Y = np.arange(len(resultDF.index))
# range = list(resultDF["BWRange"].astype(np.float))
# range = np.array(range)
# start = list(resultDF["BWMin"].astype(np.float))
# start = np.array(start)
#
# plt.barh(Y,range, left=start)
#plt.show()
|
import os
import textwrap
import yaml
from cloudmesh.common.util import readfile
class Converter:
def __init__(self, filename=None, template=None):
        # e.g. data/catalog/azure/bot_services.yaml
if not os.path.exists(filename):
raise ValueError("file can not be found")
self.content = readfile(filename)
self.template_form = None
if template is not None:
self.template_form = readfile(template)
self.data = yaml.safe_load(self.content)
self.data["edit_url"] = "https://github.com/laszewsk/nist/blob/main/catalog/" + \
str(filename).split("catalog/")[1]
day, month, year = str(self.data["modified"]).split("-")
import calendar
self.data["label"] = "wrong"
self.data["title"] = self.data["name"]
self.data["year"] = year
self.data["month"] = calendar.month_abbr[int(month)].lower()
self.data["url"] = self.data["documentation"]
if "http" not in self.data["url"]:
raise ValueError("url not found")
def dedent(self, text):
return textwrap.dedent(text).strip() + "\n"
def template(self):
return self.dedent(self.template_form.format(**self.data))
def bibtex(self):
bibtex_entry = """
@misc{{{id},
title={{{title}}},
name={{{name}}},
author={{{author}}},
            howpublished={{Web Page}},
month = {month},
year = {{{year}}},
url = {{{url}}}
}}
"""
return self.dedent(bibtex_entry.format(**self.data))
def hugo_markdown(self):
for entry in ["tags", "categories"]:
self.data[entry] = "\n".join(["- " + value for value in self.data[entry]])
# description: {description}
# author: {author}
markdown_entry = textwrap.dedent("""
---
date: {modified}
title: {title}
tags:
{tags}
categories:
{categories}
linkTitle: MISSING
draft: False
github_url: {edit_url}
---
{{{{% pageinfo %}}}}
{description}
{{{{% /pageinfo %}}}}
## Description
{description}
## Version
{version}
## Documentation
{documentation}
## SLA
{sla}
## Data
{data}
""")
return self.dedent(markdown_entry.format(**self.data))
def markdown(self):
self.data["tags"] = ", ".join(self.data["tags"])
self.data["categories"] = ", ".join(self.data["categories"])
markdown_entry = """
# {title}
* Author: {author}
* Version: {version}
* Modified: {modified}
* Created: {created}
* <{documentation}>
* Tags: {tags}
* Categories: {categories}
## Description
{description}
## SLA
{sla}
## Data
{data}
"""
return self.dedent(markdown_entry.format(**self.data))
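# Hedged usage sketch: the path below is hypothetical; Converter expects an
# existing catalog YAML file (see the example path in __init__ above) and,
# optionally, a template file.
if __name__ == "__main__":
    converter = Converter(filename="data/catalog/azure/bot_services.yaml")
    print(converter.bibtex())
    print(converter.markdown())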
|
#!/usr/bin/python
import requests
import json
# Import modules for CGI handling
import cgi, cgitb
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
user_title = form.getvalue('search_title')
print "Content-type: text/html\n\n";
# Setting attributes to send to Wikipedia API
baseurl = 'http://en.wikipedia.org/w/api.php'
search_atts = {}
search_atts['action'] = 'query'
search_atts['list'] = 'search'
search_atts['srwhat'] = 'text'
search_atts['format'] = 'json'
search_atts['srsearch'] = user_title
search_resp = requests.get(baseurl, params = search_atts)
search_data = search_resp.json()
title = search_data["query"]["search"][0]["title"]
# Make the title with no space which will be needed for making a url link to send for summary
title_w_no_space = ""
for i in title:
if i==" ":
title_w_no_space = title_w_no_space + "_"
else:
title_w_no_space = title_w_no_space + i
# Getting related topics using the result given by Wikipedia API
topics = []
for key in search_data["query"]["search"]:
topics.append (key["title"])
topics = topics [1:len(topics)]
# Summarizing the content:
# setting attributes for to send to Smmry API
link_for_smmry = 'https://en.wikipedia.org/wiki/' + title_w_no_space
smmry_base_url = 'http://api.smmry.com/'
#smmry_atts = {}
#smmry_atts ['SM_URL'] = 'https://en.wikipedia.org/wiki/Guyana'
#smmry_atts ['SM_API_KEY'] = '6F297A53E3' # represents your registered API key.
# Optional, X represents the webpage to summarize.
#smmry_atts ['SM_LENGTH'] = N # Optional, N represents the number of sentences returned, default is 7
#smmry_atts ['SM_KEYWORD_COUNT'] = N # Optional, N represents how many of the top keywords to return
#smmry_atts ['SM_QUOTE_AVOID'] # Optional, summary will not include quotations
#smmry_atts ['SM_WITH_BREAK'] # Optional, summary will contain string [BREAK] between each sentence
api_key_link = '&SM_API_KEY=6F297A53E3&SM_URL='
api_length = 'SM_LENGTH=7&SM_WITH_BREAK'
#print api_key_link
api_link = smmry_base_url + api_length + api_key_link + link_for_smmry
#smmry_resp = requests.get('http://api.smmry.com/&SM_API_KEY=6F297A53E3&SM_URL=https://en.wikipedia.org/wiki/Guyana')
smmry_resp = requests.get(api_link)
smmry_data = smmry_resp.json()
content= '<p>Try adding another key word.</p><a style="color:white;" id="backbtn" href="#" onclick="myFunction()" >Go back.</a>'
try:
content = smmry_data['sm_api_content']
except:
pass
content_with_non_ascii = ""
for word in content:
if ord(word) < 128:
content_with_non_ascii+=word
else:
content_with_non_ascii+= "?"
if len(content_with_non_ascii) >0:
content = content_with_non_ascii
# replacing "[BREAK]"s with a new line
while "[BREAK]" in content:
length = len (content)
break_position = content.find("[BREAK]")
content = content [0:break_position] + "<br><br>" + content [break_position+7: length]
print '<div id="all-cont-alt"><div class="select-nav"><div id="nav-top-main"><a id="backbtn" href="#" onclick="myFunction()" ><i style=" position: relative;margin-left: 25px;background-color: #00cfb9;padding: 13px;top: 74px;border-radius: 16px;color: #ffffff;text-align: left;" class= "fa fa-chevron-left fa-2x"></i></a><h1>Geddit</h1></div></div>'
print '<div id="loaddddd"></div><div id="contentss">'
print '<h1 id="user-title">'
print user_title
print "</h1>"
print content
print '</div></div>'
print '<h3 class="related">Related Topics</h3>'
print '<div id="rel-holder">'
for key in topics:
if all(ord(c) < 128 for c in key):
print '<h5 class="related-topics" onclick="relatedFunction();">'
print key
print '</h5>'
else:
pass
print '</div>'
|
import requests
import json
import os
from dotenv import load_dotenv
load_dotenv(verbose=True)
class RestClient:
def __init__(self):
self.base_url = os.getenv("BASE_URL")
self.token = None
def auth(self):
if (self.token):
return self.token
url = f"{self.base_url}/oauth/token"
payload = {
'client_id': os.getenv('CLIENT_ID'),
'grant_type': 'password',
'username': os.getenv('USER_NAME'),
'password': os.getenv('PASSWORD')
}
files = []
headers = {}
response = requests.request("POST",
url,
headers=headers,
data=payload,
files=files)
print(response.text)
self.token = json.loads(response.text)['access_token']
return self.token
def get_collection(self, collection):
url = f"{self.base_url}/{collection}"
payload = {}
headers = {'Authorization': f'Bearer {self.auth()}'}
result = []
while True:
response = requests.request("GET",
url,
headers=headers,
data=payload)
print(response.text)
resp_json = json.loads(response.text)
result.extend(resp_json["_items"])
if not 'next' in resp_json['_links']:
break
url = f"{self.base_url}/{resp_json['_links']['next']['href']}"
return result
# print(json.dumps(result, indent=4, sort_keys=True))
def post_collection(self, collection, list):
url = f"{self.base_url}/{collection}"
print(f"Trying to post to {url}")
payload = list
headers = {
'Authorization': f'Bearer {self.auth()}',
'Content-Type': 'application/json'
}
# print(json.dumps(payload, indent=4, sort_keys=True))
response = requests.request("POST",
url,
headers=headers,
data=json.dumps(payload))
print(response.text)
return [obj["_id"] for obj in json.loads(response.text)["_items"]]
def delete_collection(self, collection):
print(f"Deleting {collection}")
for obj in self.get_collection(collection):
url = f"{self.base_url}/{collection}/{obj['_id']}"
headers = {
'Authorization': f'Bearer {self.auth()}',
'If-Match': obj['_etag']
}
response = requests.request("DELETE",
url,
headers=headers,
data={})
if response.text:
print(response.text)
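# Hedged usage sketch: "projects" is a hypothetical collection name; BASE_URL,
# CLIENT_ID, USER_NAME and PASSWORD are read from the environment / .env file
# loaded above.
if __name__ == "__main__":
    client = RestClient()
    items = client.get_collection("projects")
    print(f"Fetched {len(items)} documents")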
|
import os
from django.contrib.gis.db import models
from django.conf import settings
class Layer(models.Model):
name = models.CharField(max_length=50,unique=True)
def __unicode__(self):
return u"%s" % (self.name)
class Feature(models.Model):
fid = models.AutoField(primary_key=True)
layer = models.ForeignKey(Layer)
geom = models.PolygonField(srid=4326)
objects = models.GeoManager()
def __unicode__(self):
return u"Feature %d of layer %s" % (self.fid, self.layer)
class Attribute(models.Model):
key = models.CharField(max_length=50)
value = models.TextField()
feature = models.ForeignKey(Feature)
def __unicode__(self):
return u"%s::%s" % (self.key,self.value)
rastdir = os.path.abspath(os.path.join(settings.MEDIA_ROOT, "xyquery_rasters"))
if not os.path.exists(rastdir):
os.mkdir(rastdir)
class Raster(models.Model):
layer = models.ForeignKey(Layer)
filepath = models.FilePathField(path=rastdir, recursive=True)
def __unicode__(self):
return u"Raster layer %s" % (self.layer)
|
'''
Read and write data to a Lego Mindstorm NXT brick using serial bluetooth
connection. You'll need to modify __init__ for unix style serial port
identification in order to use this on Linux.
Blue enables raw byte transfer
TypeBlue utilizes NXT mailbox number for type identification.
Usage:
1. Enable a bluetooth serial port to accept connection requests from NXT.
2. Find and connect computer from NXT bluetooth menu. Note serial port
number; store in comport_num.
3. From python try this code, note the try finally and make sure the connection
is established so that you are not waiting all the time for timeouts! It is
a real pain getting the comport back from a dropped connection.
import blueNXT
try:
b = blueNXT.TypeBlue(comport_num)
b.put('Hello NXT!')
b.putall(False, True, 1, 'two')
b.get()
finally:
b.close()
4. Write an interface to remote control your robots and share!
'''
__author__ = 'Justin Shaw'
import sys
import serial
import struct
import time
class Blue:
'''
A bluetooth connection to a Lego NXT brick
'''
huh = struct.pack('h', 2432) # don't really know what this is
def __init__(self, comport=9, filename=None, mode='r', timeout=10):
'''
comport - integer com number for serial port
filename and mode are for debug
'''
if filename is None:
self.s = serial.Serial('COM%d' % comport, timeout=timeout)
else:
self.s = open(filename, mode)
def get(self):
'''
Return payload, payload
Get next message from NXT, return un-molested payload i.e. bytes.
Use get_int() for integers and get_bool() for booleans
'''
sz = self.s.read(2)
payload = None
box = None
if len(sz) == 2:
sz = struct.unpack('h', sz)[0]
# print 'sz', sz
if 0 < sz < 1000:
msg = self.s.read(sz)
# print 'msg', msg
dat = msg[:4]
# for c in dat:
# print ord(c)
# print struct.unpack('h', msg[:2])
box = ord(dat[2]) + 1
payload = msg[4:-1]
return payload, box
def put(self, payload, box=1):
'''
Send a raw message to NXT
payload -- bytes to send
box -- 1 to 10, which mail box on NXT to place message in
'''
# sz msg----> 0
# 0123456789 ... n
payload += chr(0)
pl_sz = len(payload)
sz = pl_sz + 4
header = struct.pack('h2sbb', sz, self.huh, box - 1, pl_sz)
out = struct.pack('6s%ds' % pl_sz, header, payload)
# print 'out', out
dat = out[2:6]
# for c in dat:
# print ord(c)
# print
# self.s.write('\x11\x00\x80\t\x00\r<0123456789>\x00')
self.s.write(out)
def __del__(self):
try:
self.close()
except:
pass
def close(self):
self.s.close()
class TypeBlue(Blue):
'''
Use mailbox number for type information:
1 -- string
2 -- int
3 -- bool
else -- string
'''
def get(self):
'''
Get a message off port. Determine type from box number:
1 -- string
2 -- int
3 -- bool
'''
msg, box = Blue.get(self)
if box == 2:
out = struct.unpack('i', msg)[0]
elif box == 3:
out = not not(ord(msg))
else:
out = msg
return out
def put(self, val):
'''
Put a message on port. Use box to indicate type:
1 -- string
2 -- int
3 -- bool
'''
if type(val) == type(''):
msg = val
box = 1
elif type(val) == type(0):
msg = struct.pack('i', val)
box = 2
elif type(val) == type(False):
msg = struct.pack('b', not not val)
box = 3
return Blue.put(self, msg, box)
def putall(self, *vals):
'''
Send several values to NXT
'''
for v in vals:
self.put(v)
def Blue__test__():
'''
    Test that the formats are consistent by reading and writing
to a file. No real bluetooth required.
'''
# read
b = Blue(filename='text.dat')
target = '<0123456789>'
for i in range(10):
msg, box = b.get()
assert msg == target, '%s != %s' % (msg, target)
# write
b = Blue(filename='junk', mode='wb')
b.put(target, 2)
b = Blue(filename='junk')
got, box = b.get()
assert box == 2
assert got == target, '%s != %s' % (got, target)
b = Blue(filename='num.dat')
# type
b = TypeBlue(filename='junk', mode='wb')
b.put(target)
b.put(1)
b.put(False)
b = TypeBlue(filename='junk')
got = b.get()
assert got == target
got = b.get()
assert got == 1
got = b.get()
assert got == False
def tblue():
'''
Real bluetooth test.
'''
try:
        b = TypeBlue(10)  # COM10
for i in range(20):
## only uncomment these if you have the NXT code sending data!
# print b.get()
# print b.get()
# print b.get()
# b.put(42)
# b.put(False)
b.put('HERE % d' % i)
b.put(i)
if i < 10:
b.put(False)
else:
b.put(True)
time.sleep(.25)
finally:
del b
# tblue()
# Blue__test__()
|
import ctypes
so10 = ctypes.CDLL("./so10.so")
def c_array(values):
ArrayType = ctypes.c_double * len(values)
return ArrayType(*values)
so10.average.restype = ctypes.c_double
v1 = []
print(so10.average(c_array(v1), len(v1)))
v2 = [1]
print(so10.average(c_array(v2), len(v2)))
v3 = [1, 2]
print(so10.average(c_array(v3), len(v3)))
v4 = [1, 2, 3, 4]
print(so10.average(c_array(v4), len(v4)))
|
N,i=int(input()),1
while N>1:
if N%2: N=3*N+1
else: N//=2
i+=1
print(i)
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\situations\situation_simple.py
# Compiled at: 2017-05-24 04:11:51
# Size of source mod 2**32: 10990 bytes
from sims4.tuning.tunable import TunableList, TunableTuple
from situations.situation import Situation
from situations.tunable import TunableSituationPhase, TunableSituationCondition
import alarms, clock, interactions.utils.exit_condition_manager, services, sims4.commands, sims4.log
logger = sims4.log.Logger('Situations')
class SituationSimple(Situation):
INSTANCE_TUNABLES = {'_phases':TunableList(tunable=TunableSituationPhase(description='\n Situation reference.\n ')),
'_exit_conditions':TunableList(description='\n A list of condition groups of which if any are satisfied, the group is satisfied.\n ',
tunable=TunableTuple(conditions=TunableList(description='\n A list of conditions that all must be satisfied for the\n group to be considered satisfied.\n ',
tunable=TunableSituationCondition(description='\n A condition for a situation or single phase.\n '))))}
def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self._phase = None
self._phase_index = -1
self._exit_condition_manager = interactions.utils.exit_condition_manager.ConditionalActionManager()
self._phase_exit_condition_manager = interactions.utils.exit_condition_manager.ConditionalActionManager()
self._phase_duration_alarm_handle = None
def _destroy(self):
self._remove_exit_conditions()
self._remove_phase_exit_conditions()
super()._destroy()
def _initialize_situation_jobs(self):
initial_phase = self.get_initial_phase_type()
for job_tuning in initial_phase.jobs_gen():
self._add_job_type(job_tuning[0], job_tuning[1])
def start_situation(self):
super().start_situation()
self._attach_exit_conditions()
self._transition_to_next_phase()
def _load_situation_states_and_phases(self):
super()._load_situation_states_and_phases()
self._attach_exit_conditions()
self._load_phase()
def _save_custom(self, seed):
super()._save_custom(seed)
remaining_time = 0 if self._phase_duration_alarm_handle is None else self._phase_duration_alarm_handle.get_remaining_time().in_minutes()
seed.add_situation_simple_data(self._phase_index, remaining_time)
return seed
@classmethod
def should_load_after_time_jump(cls, seed):
elapsed_time = services.current_zone().time_elapsed_since_last_save().in_minutes()
if elapsed_time > seed.duration_override:
return False
seed.duration_override -= elapsed_time
return True
def on_time_jump(self):
elapsed_time = services.current_zone().time_elapsed_since_last_save()
while True:
if self._phase_duration_alarm_handle is None:
break
phase_duration = self._phase_duration_alarm_handle.get_remaining_time()
if elapsed_time > phase_duration:
elapsed_time -= phase_duration
self._transition_to_next_phase()
else:
phase_duration -= elapsed_time
self._remove_phase_exit_conditions()
self._attach_phase_exit_conditions(duration_override=(phase_duration.in_minutes()))
break
return True
@classmethod
def _verify_tuning_callback(cls):
super()._verify_tuning_callback()
        if len(cls._phases) == 0:
            logger.error('Simple Situation {} has no tuned phases.', cls, owner='sscholl')
        elif cls._phases[-1].get_duration() != 0:
            logger.error('Situation {} last phase does not have a duration of 0.', cls, owner='sscholl')
@classmethod
def get_tuned_jobs(cls):
job_list = []
initial_phase = cls.get_initial_phase_type()
for job in initial_phase.jobs_gen():
job_list.append(job[0])
return job_list
@classmethod
def get_initial_phase_type(cls):
return cls._phases[0]
@classmethod
def get_phase(cls, index):
        if cls._phases is None or index >= len(cls._phases):
            return None
        return cls._phases[index]
def _transition_to_next_phase(self, conditional_action=None):
new_index = self._phase_index + 1
new_phase = self.get_phase(new_index)
logger.debug('Transitioning from phase {} to phase {}', self._phase_index, new_index)
self._remove_phase_exit_conditions()
self._phase_index = new_index
self._phase = new_phase
self._attach_phase_exit_conditions()
for job_type, role_state_type in new_phase.jobs_gen():
self._set_job_role_state(job_type, role_state_type)
client = services.client_manager().get_first_client()
if client:
output = sims4.commands.AutomationOutput(client.id)
if output:
output('SituationPhaseTransition; Phase:{}'.format(new_index))
def _load_phase(self):
seedling = self._seed.situation_simple_seedling
logger.debug('Loading phase {}', seedling.phase_index)
self._phase_index = seedling.phase_index
self._phase = self.get_phase(self._phase_index)
self._attach_phase_exit_conditions(seedling.remaining_phase_time)
def get_phase_state_name_for_gsi(self):
return str(self._phase_index)
def _attach_phase_exit_conditions(self, duration_override=None):
self._phase_exit_condition_manager.attach_conditions(self, self._phase.exit_conditions_gen(), self._transition_to_next_phase)
duration = duration_override if duration_override is not None else self._phase.get_duration()
if duration != 0:
self._phase_duration_alarm_handle = alarms.add_alarm(self, clock.interval_in_sim_minutes(duration), self._transition_to_next_phase)
def _remove_phase_exit_conditions(self):
self._phase_exit_condition_manager.detach_conditions(self)
if self._phase_duration_alarm_handle is not None:
alarms.cancel_alarm(self._phase_duration_alarm_handle)
self._phase_duration_alarm_handle = None
def _attach_exit_conditions(self):
self._remove_exit_conditions()
self._exit_condition_manager.attach_conditions(self, self.exit_conditions_gen(), self._situation_ended_callback)
def _remove_exit_conditions(self):
self._exit_condition_manager.detach_conditions(self)
def exit_conditions_gen(self):
for ec in self._exit_conditions:
yield ec
def _situation_ended_callback(self, conditional_action=None):
logger.debug('Situation exit condition met: {}', self)
self._self_destruct()
|
import os
import numpy as np
from datetime import datetime
from math import *
# import jdutil
import bisect
import pdb
from taskinit import *
try:
from astropy.io import fits as pyfits
except ImportError:
try:
import pyfits
except ImportError:
raise ImportError('Neither astropy nor pyfits exists in this CASA installation')
# from astropy.constants import R_sun, au
def read_horizons(ephemfile=None):
# inputs:
# ephemfile:
# OBSERVER output from JPL Horizons for topocentric coordinates with for example
# target=Sun, observer=VLA=-5
# extra precision, quantities 1,17,20, REFRACTION
# routine goes through file to find $$SOE which is start of ephemeris and ends with $$EOE
# outputs: a Python dictionary containing the following:
# timestr: date and time as a string
# time: modified Julian date
    # ra: right ascension, in rad
# dec: declination, in rad
# rastr: ra in string
# decstr: dec in string
# p0: solar p angle, CCW with respect to the celestial north pole
# delta: distance from the disk center to the observer, in AU
    # delta_dot: time derivative of delta, along the line of sight. Negative means it is moving toward the observer
#
if not ephemfile or ephemfile.isspace():
raise ValueError, 'Please specify input ephem file!'
if not os.path.isfile(ephemfile):
raise ValueError, 'The specified input ephem file does not exist!'
# initialize the return dictionary
ephem0 = dict.fromkeys(['time', 'timestr', 'ra', 'dec', 'rastr', 'decstr', 'delta', 'delta_dot', 'p0'])
f = open(ephemfile, 'rU') # force the universal newline mode
lines = f.readlines()
f.close()
nline = len(lines)
for i in range(nline):
line = lines[i]
if line[0:5] == '$$SOE': # start recording
istart = i + 1
if line[0:5] == '$$EOE': # end recording
iend = i
newlines = lines[istart:iend]
nrec = len(newlines)
ephem_ = []
for j in range(nrec):
line = newlines[j]
dt = datetime.strptime(line[1:18], '%Y-%b-%d %H:%M')
newdtstr = dt.strftime('%Y-%m-%dT%H:%M')
ephem0['timestr'] = newdtstr
mjd = qa.quantity(newdtstr)
# jd=jdutil.datetime_to_jd(dt)
# mjd=jdutil.jd_to_mjd(jd)
ephem0['time'] = mjd
ephem0['rastr'] = line[23:36]
ephem0['decstr'] = line[37:50]
ephem0['ra'] = {'unit': 'rad', 'value': radians(
(long(line[23:25]) + long(line[26:28]) / 60. + float(line[29:36]) / 3600.) * 15.)} # in rad
ephem0['dec'] = {'unit': 'rad', 'value': radians(
long(line[38:40]) + long(line[41:43]) / 60. + float(line[44:50]) / 3600.)} # in rad
ephem0['p0'] = {'unit': 'deg', 'value': float(line[51:59])}
ephem0['delta'] = {'unit': 'au', 'value': float(line[70:86])}
ephem0['delta_dot'] = {'unit': 'km/s', 'value': float(line[88:98])}
if line[37] == '-':
ephem0['dec']['value'] = -ephem0['dec']['value']
ephem_.append(ephem0.copy())
# convert list of dictionary to a dictionary of arrays
times = [ep['time']['value'] for ep in ephem_]
ras = [ep['ra']['value'] for ep in ephem_]
decs = [ep['dec']['value'] for ep in ephem_]
p0s = [ep['p0']['value'] for ep in ephem_]
deltas = [ep['delta']['value'] for ep in ephem_]
ephem = {'times': times, 'ras': ras, 'decs': decs, 'p0s': p0s, 'deltas': deltas}
return ephem
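# The returned dictionary holds parallel lists, one entry per ephemeris line:
# ephem['times'][k] is a modified Julian date in days, ephem['ras'][k] and
# ephem['decs'][k] are in radians, ephem['p0s'][k] is in degrees and
# ephem['deltas'][k] is in AU (units follow the per-record fields parsed above).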
def read_msinfo(msfile=None, msinfofile=None):
# read MS information #
msinfo = dict.fromkeys(['msfile', 'scans', 'fieldids', 'btimes', 'btimestr', 'inttimes', 'ras', 'decs'])
ms.open(msfile)
scans = ms.getscansummary()
scanids = sorted(scans.keys(), key=lambda x: int(x))
nscanid = len(scanids)
btimes = []
btimestr = []
etimes = []
fieldids = []
inttimes = []
dirs = []
ras = []
decs = []
for i in range(nscanid):
btimes.append(scans[scanids[i]]['0']['BeginTime'])
etimes.append(scans[scanids[i]]['0']['EndTime'])
fieldid = scans[scanids[i]]['0']['FieldId']
fieldids.append(fieldid)
dir = ms.getfielddirmeas('PHASE_DIR', fieldid)
dirs.append(dir)
ras.append(dir['m0'])
decs.append(dir['m1'])
inttimes.append(scans[scanids[i]]['0']['IntegrationTime'])
ms.close()
btimestr = [qa.time(qa.quantity(btimes[i], 'd'), form='fits', prec=10)[0] for i in range(nscanid)]
msinfo['msfile'] = msfile
msinfo['scans'] = scans
msinfo['fieldids'] = fieldids
msinfo['btimes'] = btimes
msinfo['btimestr'] = btimestr
msinfo['inttimes'] = inttimes
msinfo['ras'] = ras
msinfo['decs'] = decs
if msinfofile:
np.savez(msinfofile, msfile=msfile, scans=scans, fieldids=fieldids, btimes=btimes, btimestr=btimestr,
inttimes=inttimes, ras=ras, decs=decs)
return msinfo
def ephem_to_helio(msinfo=None, ephem=None, reftime=None, polyfit=None):
## 1. Take a solar ms database, read the scan and field information, find out the pointings (in RA and DEC)
## 2. Compare with the ephemeris of the solar disk center (in RA and DEC)
## 3. Generate VLA pointings in heliocentric coordinates
## inputs:
## msinfo: CASA MS information, output from read_msinfo
## ephem: solar ephem, output from read_horizons
## reftime: list of reference times (e.g., used for imaging)
# CASA standard time format, either a single time (e.g., '2012/03/03/12:00:00'
## or a time range (e.g., '2012/03/03/12:00:00~2012/03/03/13:00:00'. If the latter,
## take the midpoint of the timerange for reference. If no date specified, take
## the date of the first scan
    # polyfit: ONLY works for an MS database with a single, continuously tracked source; not recommended unless the scan length is very long and very high accuracy is needed
## return value:
## helio: a list of VLA pointing information
## reftimestr: reference time, in FITS format string
## reftime: reference time, in mjd format
## ra: actual RA of VLA pointing at the reference time (interpolated)
## dec: actual DEC of VLA pointing at the reference time (interpolated)
## # CASA knows only RA and DEC of the closest field (e.g. in clean) #
    ## ra_fld: right ascension of the CASA reference pointing direction
## dec_fld: declination of the CASA reference pointing direction
## raoff: RA offset of the actual VLA pointing to solar center
## decoff: DEC offset of the actual VLA pointing to solar center
## refx: heliocentric X offset of the actual VLA pointing to solar center
## refy: heliocentric Y offset of the actual VLA pointing to solar center
######## Example #########
# msfile='sun_C_20140910T221952-222952.10s.cal.ms'
# ephemfile='horizons_sun_20140910.radecp'
# ephem=vla_prep.read_horizons(ephemfile=ephemfile)
# msinfo=vla_prep.read_msinfo(msfile=msfile)
# polyfit=0
# reftime = '22:25:20~22:25:40'
if not ephem:
        raise ValueError, 'Please provide the solar ephemeris (output of read_horizons)!'
if not msinfo:
raise ValueError, 'Please provide information of the MS database!'
if isinstance(msinfo, str):
try:
msinfo0 = np.load(msinfo)
except:
raise ValueError, 'The specified input msinfo file does not exist!'
elif isinstance(msinfo, dict):
msinfo0 = msinfo
else:
raise ValueError, 'msinfo should be either a numpy npz or a dictionary'
print 'msinfo is derived from: ', msinfo0['msfile']
scans = msinfo0['scans']
fieldids = msinfo0['fieldids']
btimes = msinfo0['btimes']
inttimes = msinfo0['inttimes']
ras = msinfo0['ras']
decs = msinfo0['decs']
ra_rads = [ra['value'] for ra in ras]
dec_rads = [dec['value'] for dec in decs]
# fit 2nd order polynomial fits to the RAs and DECs #
if polyfit:
cra = np.polyfit(btimes, ra_rads, 2)
cdec = np.polyfit(btimes, dec_rads, 2)
# find out pointing direction according to the input time or timerange #
if not reftime:
raise ValueError, 'Please specify a reference time for pointing/imaging!'
if isinstance(reftime, str):
reftime = [reftime]
    if not isinstance(reftime, list):
        raise ValueError, 'input "reftime" is not a valid list. Abort...'
nreftime = len(reftime)
helio = []
for reftime0 in reftime:
helio0 = dict.fromkeys(
['reftimestr', 'reftime', 'ra', 'dec', 'ra_fld', 'dec_fld', 'raoff', 'decoff', 'refx', 'refy', 'p0'])
helio0['reftimestr'] = reftime0
if '~' in reftime0:
# if reftime0 is specified as a timerange
[tbg0, tend0] = reftime0.split('~')
tbg_d = qa.getvalue(qa.convert(qa.totime(tbg0), 'd'))[0]
tend_d = qa.getvalue(qa.convert(qa.totime(tend0), 'd'))[0]
tdur_s = (tend_d - tbg_d) * 3600. * 24.
# if no date is specified, add up the date of the first scan
if tend_d < 1.:
if tend_d >= tbg_d:
tend_d += int(btimes[0])
else:
tend_d += int(btimes[0]) + 1
if tbg_d < 1.:
tbg_d += int(btimes[0])
tref_d = (tbg_d + tend_d) / 2.
else:
# if reftime0 is specified as a single value
tref_d = qa.getvalue(qa.convert(qa.totime(reftime0), 'd'))
# if no date is specified, add up the date of the first scan
if tref_d < 1.:
tref_d += int(btimes[0])
tbg_d = tref_d
# use the intergration time
ind = bisect.bisect_left(btimes, tref_d)
            tdur_s = inttimes[ind - 1]
helio0['reftime'] = tref_d
helio0['date-obs'] = qa.time(qa.quantity(tbg_d, 'd'), form='fits', prec=10)[0]
helio0['exptime'] = tdur_s
# find out RA and DEC coords according to the reference time
# if polyfit, then use the 2nd order polynomial coeffs
ind = bisect.bisect_left(btimes, tref_d)
if ind > 1:
dt = tref_d - btimes[ind - 1]
if ind < len(btimes):
scanlen = btimes[ind] - btimes[ind - 1]
(ra_b, ra_e) = (ras[ind - 1]['value'], ras[ind]['value'])
(dec_b, dec_e) = (decs[ind - 1]['value'], decs[ind]['value'])
if ind >= len(btimes):
scanlen = btimes[ind - 1] - btimes[ind - 2]
(ra_b, ra_e) = (ras[ind - 2]['value'], ras[ind - 1]['value'])
(dec_b, dec_e) = (decs[ind - 2]['value'], decs[ind - 1]['value'])
if ind == 1: # only one scan exists (e.g., imported from AIPS)
ra_b = ras[ind - 1]['value']
ra_e = ra_b
dec_b = decs[ind - 1]['value']
dec_e = dec_b
            scanlen = 10. # arbitrary placeholder value (only one scan exists)
dt = 0.
if ind < 1:
raise ValueError, 'Reference time does not fall into the scan list!'
if polyfit:
ra = cra[0] * tref_d ** 2. + cra[1] * tref_d + cra[2]
dec = cdec[0] * tref_d ** 2. + cdec[1] * tref_d + cdec[2]
# if not, use linearly interpolated RA and DEC at the beginning of this scan and next scan
else:
ra = ra_b + (ra_e - ra_b) / scanlen * dt
dec = dec_b + (dec_e - dec_b) / scanlen * dt
if ra < 0:
ra += 2. * np.pi
if ra_b < 0:
ra_b += 2. * np.pi
# compare with ephemeris from JPL Horizons
time0s = ephem['times']
ra0s = ephem['ras']
dec0s = ephem['decs']
p0s = ephem['p0s']
delta0s = ephem['deltas']
ind = bisect.bisect_left(time0s, tref_d)
dt0 = time0s[ind] - time0s[ind - 1]
dt_ref = tref_d - time0s[ind - 1]
dra0 = ra0s[ind] - ra0s[ind - 1]
ddec0 = dec0s[ind] - dec0s[ind - 1]
dp0 = p0s[ind] - p0s[ind - 1]
ddelta0 = delta0s[ind] - delta0s[ind - 1]
ra0 = ra0s[ind - 1] + dra0 / dt0 * dt_ref
dec0 = dec0s[ind - 1] + ddec0 / dt0 * dt_ref
p0 = p0s[ind - 1] + dp0 / dt0 * dt_ref
delta0 = delta0s[ind - 1] + ddelta0 / dt0 * dt_ref
if ra0 < 0:
ra0 += 2. * np.pi
# RA and DEC offset in arcseconds
decoff = degrees((dec - dec0)) * 3600.
raoff = degrees((ra - ra0) * cos(dec)) * 3600.
# Convert into heliocentric offsets
prad = -radians(p0)
refx = (-raoff) * cos(prad) - decoff * sin(prad)
refy = (-raoff) * sin(prad) + decoff * cos(prad)
helio0['ra'] = ra # ra of the actual pointing
helio0['dec'] = dec # dec of the actual pointing
helio0['ra_fld'] = ra_b # ra of the field, used as the reference in e.g., clean
        helio0['dec_fld'] = dec_b # dec of the field, used as the reference in e.g., clean
helio0['raoff'] = raoff
helio0['decoff'] = decoff
helio0['refx'] = refx
helio0['refy'] = refy
helio0['p0'] = p0
# helio['r_sun']=np.degrees(R_sun.value/(au.value*delta0))*3600. #in arcsecs
helio.append(helio0)
return helio
def getbeam(imagefile=None, beamfile=None):
if not imagefile:
raise ValueError, 'Please specify input images'
bmaj = []
bmin = []
bpa = []
beamunit = []
bpaunit = []
chans = []
nimg = len(imagefile)
for n in range(nimg):
img = imagefile[n]
if not os.path.exists(img):
raise ValueError, 'The input image does not exist!'
ia.open(img)
        sum = ia.summary()
        ia.close()
bmaj_ = []
bmin_ = []
bpa_ = []
if sum.has_key('perplanebeams'): # beam vary with frequency
nbeams = sum['perplanebeams']['nChannels']
beams = sum['perplanebeams']['beams']
chans_ = [key[1:] for key in beams.keys()]
chans_.sort(key=float)
for chan in chans_:
bmaj0 = beams['*' + chan]['*0']['major']['value']
bmaj_.append(bmaj0)
bmin0 = beams['*' + chan]['*0']['minor']['value']
bmin_.append(bmin0)
bpa0 = beams['*' + chan]['*0']['positionangle']['value']
bpa_.append(bpa0)
beamunit_ = beams['*' + chans_[0]]['*0']['major']['unit']
bpaunit_ = beams['*' + chans_[0]]['*0']['positionangle']['unit']
if sum.has_key('restoringbeam'): # only one beam
bmaj_.append(sum['restoringbeam']['major']['value'])
bmin_.append(sum['restoringbeam']['minor']['value'])
bpa_.append(sum['restoringbeam']['positionangle']['value'])
beamunit_ = sum['restoringbeam']['major']['unit']
bpaunit_ = sum['restoringbeam']['positionangle']['unit']
nbeams = 1
chans_ = [0]
bmaj.append(bmaj_)
bmin.append(bmin_)
bpa.append(bpa_)
beamunit.append(beamunit_)
bpaunit.append(bpaunit_)
chans.append(chans_)
if beamfile: # write beams to ascii file
print 'Writing beam info to ascii file...'
f = open(beamfile, 'w')
f.write('CHANNEL No., BMAJ (' + beamunit[0] + '), BMIN (' + beamunit[0] + '), BPA (' + bpaunit[0] + ')')
f.write("\n")
for n in range(nimg):
f.write('----For image: ' + imagefile[n] + '----')
f.write('\n')
chans_ = chans[n]
for i in range(len(chans_)):
f.write(str(chans_[i]) + ', ' + str(bmaj[n][i]) + ', ' + str(bmin[n][i]) + ', ' + str(bpa[n][i]))
f.write("\n")
f.close()
return bmaj, bmin, bpa, beamunit, bpaunit
def imreg(imagefile=None, fitsfile=None, beamfile=None, helio=None, offsetfile=None, toTb=None, scl100=None,
verbose=False):
if not imagefile:
raise ValueError, 'Please specify input image'
if not helio:
raise ValueError, 'Please specify input coordinate info for image registration. Use ephem_to_helio to derive that info'
if not fitsfile:
fitsfile = [img + '.fits' for img in imagefile]
# if len(imagefile) != len(helio):
# raise ValueError, 'Number of input images does not equal to number of helio coord headers!'
if len(imagefile) != len(fitsfile):
raise ValueError, 'Number of input images does not equal to number of output fits files!'
# get restoring beam info
(bmajs, bmins, bpas, beamunits, bpaunits) = getbeam(imagefile=imagefile, beamfile=beamfile)
nimg = len(imagefile)
if verbose:
print str(nimg) + ' images to process...'
for n in range(nimg):
if verbose:
print 'processing image #' + str(n)
img = imagefile[n]
fitsf = fitsfile[n]
hel = helio[n]
bmaj = bmajs[n]
bmin = bmins[n]
beamunit = beamunits[n]
if not os.path.exists(img):
raise ValueError, 'Please specify input image'
if os.path.exists(fitsf):
raise ValueError, 'Specified fits file already exists!'
else:
p0 = hel['p0']
ia.open(img)
imr = ia.rotate(pa=str(-p0) + 'deg')
imr.tofits(fitsf, history=False)
imr.close()
sum = ia.summary()
ia.close()
# construct the standard fits header
# RA and DEC of the reference pixel crpix1 and crpix2
(imra, imdec) = (sum['refval'][0], sum['refval'][1])
# find out the difference of the image center to the CASA reference center
# RA and DEC difference in arcseconds
ddec = degrees((imdec - hel['dec_fld'])) * 3600.
dra = degrees((imra - hel['ra_fld']) * cos(hel['dec_fld'])) * 3600.
# Convert into image heliocentric offsets
prad = -radians(hel['p0'])
dx = (-dra) * cos(prad) - ddec * sin(prad)
dy = (-dra) * sin(prad) + ddec * cos(prad)
if offsetfile:
try:
offset = np.load(offsetfile)
except:
raise ValueError, 'The specified offsetfile does not exist!'
reftimes_d = offset['reftimes_d']
xoffs = offset['xoffs']
yoffs = offset['yoffs']
timg_d = hel['reftime']
ind = bisect.bisect_left(reftimes_d, timg_d)
xoff = xoffs[ind - 1]
yoff = yoffs[ind - 1]
else:
xoff = hel['refx']
yoff = hel['refy']
if verbose:
print 'offset of image phase center to visibility phase center (arcsec): ', dx, dy
print 'offset of visibility phase center to solar disk center (arcsec): ', xoff, yoff
(crval1, crval2) = (xoff + dx, yoff + dy)
# update the fits header to heliocentric coordinates
hdu = pyfits.open(fitsf, mode='update')
header = hdu[0].header
(cdelt1, cdelt2) = (
-header['cdelt1'] * 3600., header['cdelt2'] * 3600.) # Original CDELT1, 2 are for RA and DEC in degrees
header['cdelt1'] = cdelt1
header['cdelt2'] = cdelt2
header['cunit1'] = 'arcsec'
header['cunit2'] = 'arcsec'
header['crval1'] = crval1
header['crval2'] = crval2
header['ctype1'] = 'HPLN-TAN'
header['ctype2'] = 'HPLT-TAN'
header['date-obs'] = hel['date-obs']
try:
# this works for pyfits version of CASA 4.7.0 but not CASA 4.6.0
header.update('exptime', hel['exptime'])
header.update('p_angle', hel['p0'])
except:
# this works for astropy.io.fits
header.append(('exptime', hel['exptime']))
header.append(('p_angle', hel['p0']))
# header.update('comment', 'Fits header updated to heliocentric coordinates by Bin Chen')
# update intensity units, i.e. to brightness temperature?
if toTb:
                data = hdu[0].data # remember the data order is reversed due to the FITS convention
dim = data.ndim
sz = data.shape
keys = header.keys()
values = header.values()
# which axis is frequency?
faxis = keys[values.index('FREQ')][-1]
faxis_ind = dim - int(faxis)
if header['BUNIT'].lower() == 'jy/beam':
header['BUNIT'] = 'K'
for i in range(sz[faxis_ind]):
nu = header['CRVAL' + faxis] + header['CDELT' + faxis] * (i + 1 - header['CRPIX' + faxis])
if header['CUNIT' + faxis] == 'KHz':
nu *= 1e3
if header['CUNIT' + faxis] == 'MHz':
nu *= 1e6
if header['CUNIT' + faxis] == 'GHz':
nu *= 1e9
if len(bmaj) > 1: # multiple (per-plane) beams
bmajtmp = bmaj[i]
bmintmp = bmin[i]
else: # one single beam
bmajtmp = bmaj[0]
bmintmp = bmin[0]
if beamunit == 'arcsec':
bmaj0 = np.radians(bmajtmp / 3600.)
                        bmin0 = np.radians(bmintmp / 3600.)
if beamunit == 'arcmin':
bmaj0 = np.radians(bmajtmp / 60.)
bmin0 = np.radians(bmintmp / 60.)
if beamunit == 'deg':
bmaj0 = np.radians(bmajtmp)
bmin0 = np.radians(bmintmp)
if beamunit == 'rad':
bmaj0 = bmajtmp
bmin0 = bmintmp
beam_area = bmaj0 * bmin0 * np.pi / (4. * log(2.))
k_b = qa.constants('k')['value']
c_l = qa.constants('c')['value']
factor = 2. * k_b * nu ** 2 / c_l ** 2 # SI unit
jy_to_si = 1e-26
# print nu/1e9, beam_area, factor
factor2 = 1.
if scl100:
factor2 = 100.
if faxis == '3':
data[:, i, :, :] *= jy_to_si / beam_area / factor * factor2
if faxis == '4':
data[i, :, :, :] *= jy_to_si / beam_area / factor * factor2
hdu.flush()
hdu.close()
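# A sketch of the full intended workflow (file names are illustrative, taken
# from the example comments above; to be run inside CASA where ms, ia and qa
# from taskinit are available):
#     ephem = read_horizons(ephemfile='horizons_sun_20140910.radecp')
#     msinfo = read_msinfo(msfile='sun_C_20140910T221952-222952.10s.cal.ms')
#     helio = ephem_to_helio(msinfo=msinfo, ephem=ephem, reftime='22:25:20~22:25:40')
#     imreg(imagefile=['sun_C.image'], fitsfile=['sun_C.helio.fits'], helio=helio, toTb=True)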
|
#!/usr/bin/env python3
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="dev-container",
version="0.2",
author="Marcel Hollerbach",
author_email="mail@bu5hm4n.de",
description="A small bin tool that will spin up a docker for you. The docker will be picked from your CI tools, and can be used instead of the host system",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/marcelhollerbach/dev-container",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
license="Apache 2.0",
scripts=['dev-container'],
install_requires=['pyyaml']
)
|
"""Contains the class object.
Used to create two-dimensional thermal objects, and apply some methods. It does
not include the compute method.
"""
from ... import mats
import os
import copy
import numpy as np
class Object:
"""Object class.
This class creates a two-dimensional thermal object. It includes two
methods to apply and remove fields.
"""
def __init__(self, amb_temperature, material='Cu', dx=0.01, dy=0.01,
dt=0.1, size=(10, 10), file_name=None,
boundaries=(0, 0, 0, 0), Q=[], Q0=[], initial_state=False,
materials_path=False):
"""Thermal object initialization.
`amb_temperature` is the ambient temperature of the whole system.
        `material` is the name of the background material, as listed in the
        materials database. `dx`, `dy` are the space steps along the x- and
        y-axis, respectively. `dt` is the time step. `file_name` is the file
        name where the temperature is saved. `boundaries` is a list of four
        entries that define the boundary condition for temperature (left,
        right, bottom, top); an entry of 0 means an insulating boundary.
        `initial_state` is the initial state of the materials: True if there
        is an applied field, False if the field is absent. `materials_path` is
        the absolute path of the materials database; if False, the standard
        heatrapy database is used. `Q` and `Q0` are lists of heat source
        coefficients (temperature dependent and temperature independent,
        respectively; see `power_add`).
        """
# check the validity of inputs
boundaries = tuple(boundaries)
Q = list(Q)
Q0 = list(Q0)
cond01 = isinstance(amb_temperature, float)
cond01 = cond01 or isinstance(amb_temperature, int)
        cond05 = isinstance(dx, int) or isinstance(dx, float)
        cond05 = cond05 and (isinstance(dy, int) or isinstance(dy, float))
cond06 = isinstance(dt, int) or isinstance(dt, float)
cond07 = isinstance(file_name, str)
cond07 = cond07 or (file_name is None)
cond08 = isinstance(boundaries, tuple)
cond09 = isinstance(Q, list)
cond10 = isinstance(Q0, list)
cond11 = isinstance(initial_state, bool)
condition = cond01 and cond05
condition = condition and cond06 and cond07 and cond08 and cond09
condition = condition and cond10 and cond11
if not condition:
raise ValueError
self.materials = [material]
self.materials_name = [material]
self.boundaries = boundaries
self.amb_temperature = amb_temperature
if materials_path is False:
tadi = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'tadi.txt'
tadd = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'tadd.txt'
cpa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'cpa.txt'
cp0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'cp0.txt'
k0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'k0.txt'
ka = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'ka.txt'
rho0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'rho0.txt'
rhoa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'rhoa.txt'
lheat0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'lheat0.txt'
lheata = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + self.materials[0] + '/' + 'lheata.txt'
self.materials[0] = mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata)
else:
tadi = materials_path + self.materials[0] + '/' + 'tadi.txt'
tadd = materials_path + self.materials[0] + '/' + 'tadd.txt'
cpa = materials_path + self.materials[0] + '/' + 'cpa.txt'
cp0 = materials_path + self.materials[0] + '/' + 'cp0.txt'
k0 = materials_path + self.materials[0] + '/' + 'k0.txt'
ka = materials_path + self.materials[0] + '/' + 'ka.txt'
rho0 = materials_path + self.materials[0] + '/' + 'rho0.txt'
rhoa = materials_path + self.materials[0] + '/' + 'rhoa.txt'
lheat0 = materials_path + self.materials[0] + '/' + 'lheat0.txt'
lheata = materials_path + self.materials[0] + '/' + 'lheata.txt'
self.materials[0] = mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata)
self.materials_index = [None]
self.size = size
self.file_name = file_name
self.dx = dx
self.dy = dy
self.dt = dt
self.temperature = []
self.latent_heat = []
self.lheat = []
self.Cp = []
self.rho = []
self.Q = []
self.Q0 = []
self.k = []
self.materials_index = []
self.state = []
for i in range(self.size[0]):
self.state.append([])
self.materials_index.append([])
self.temperature.append([])
self.latent_heat.append([])
self.lheat.append([])
self.Cp.append([])
self.rho.append([])
self.Q.append([0. for i in range(self.size[1])])
self.Q0.append([0. for i in range(self.size[1])])
self.k.append([])
for j in range(self.size[1]):
self.materials_index[-1].append(0)
self.temperature[-1].append([amb_temperature, amb_temperature])
self.state[-1].append(initial_state)
if initial_state:
value = self.materials[self.materials_index[i][j]]
self.Cp[-1].append(value.cpa(self.amb_temperature))
value = self.materials[self.materials_index[i][j]]
self.rho[-1].append(value.rhoa(self.amb_temperature))
value = self.materials[self.materials_index[i][j]]
self.k[-1].append(value.ka(self.amb_temperature))
self.latent_heat[-1].append(
self.materials[self.materials_index[i][j]].lheata()
)
self.lheat[-1].append([])
value = self.materials[self.materials_index[i][j]]
for lh in value.lheata():
                        if self.temperature[i][j][1] < lh[0] and lh[1] > 0.:
                            self.lheat[-1][-1].append([lh[0], 0.])
                        if self.temperature[i][j][1] > lh[0] and lh[1] > 0.:
                            self.lheat[-1][-1].append([lh[0], lh[1]])
                        if self.temperature[i][j][1] < lh[0] and lh[1] < 0.:
                            self.lheat[-1][-1].append([lh[0], -lh[1]])
                        if self.temperature[i][j][1] > lh[0] and lh[1] < 0.:
                            self.lheat[-1][-1].append([lh[0], 0.])
else:
value = self.materials[self.materials_index[i][j]]
self.Cp[-1].append(value.cp0(self.amb_temperature))
value = self.materials[self.materials_index[i][j]]
self.rho[-1].append(value.rho0(self.amb_temperature))
value = self.materials[self.materials_index[i][j]]
self.k[-1].append(value.k0(self.amb_temperature))
self.latent_heat[-1].append(
self.materials[self.materials_index[i][j]].lheat0()
)
self.lheat[-1].append([])
value = self.materials[self.materials_index[i][j]]
for lh in value.lheat0():
                        if self.temperature[i][j][1] < lh[0] and lh[1] > 0.:
                            self.lheat[-1][-1].append([lh[0], 0.])
                        if self.temperature[i][j][1] > lh[0] and lh[1] > 0.:
                            self.lheat[-1][-1].append([lh[0], lh[1]])
                        if self.temperature[i][j][1] < lh[0] and lh[1] < 0.:
                            self.lheat[-1][-1].append([lh[0], -lh[1]])
                        if self.temperature[i][j][1] > lh[0] and lh[1] < 0.:
                            self.lheat[-1][-1].append([lh[0], 0.])
if Q != []:
self.Q = Q
if Q0 != []:
self.Q0 = Q0
self.time_passed = 0.
self.Q_ref = copy.copy(self.Q)
self.Q0_ref = copy.copy(self.Q0)
if file_name:
line = 'time(s)'
for i in range(size[0]):
for j in range(size[1]):
line = line + ',T[' + str(i) + '][' + str(j) + '] (K)'
line = line + '\n'
f = open(self.file_name, 'a')
f.write(line)
f.close()
def activate(self, initial_point, final_point, shape='square'):
"""Activation of the material.
Activates a given piece of material. If `shape` is `'square'`, then the
`initial_point` is the tuple (x,y) of the bottom left point and the
`final_point` is the tuple (x,y) of the top right point. If the shape
is `'circle'`, the `initial_point` is the tuple (x,y) of the center of
the circle and `final_point` is its radius.
"""
# check the validity of inputs
if isinstance(shape, str):
if shape == 'square':
value = isinstance(initial_point, tuple)
if value and isinstance(final_point, tuple):
condition = len(initial_point) == 2
condition = condition and len(final_point) == 2
else:
condition = False
elif shape == 'circle':
value = isinstance(final_point, int)
value = value or isinstance(final_point, float)
value = value and isinstance(initial_point, tuple)
if value:
condition = len(initial_point) == 2
else:
condition = False
else:
condition = False
else:
condition = False
if not condition:
raise ValueError
if shape == 'square':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
final_point_x = int(final_point[0])
final_point_y = int(final_point[1])
for i in range(initial_point_x, final_point_x):
for j in range(initial_point_y, final_point_y):
if self.state[i][j] is False:
value = self.temperature[i][j][0]
self.temperature[i][j][0] = value + \
self.materials[self.materials_index[i][j]].tadi(
self.temperature[i][j][0])
value = self.materials_index[i][j]
self.rho[i][j] = self.materials[value].rhoa(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[value].cpa(
self.temperature[i][j][0])
self.k[i][j] = self.materials[value].ka(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[value].lheata()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
cond = self.temperature[i][j][0] < lh[0]
if cond and lh[1] > 0.:
self.lheat[i][j].append([lh[0], 0.])
cond = self.temperature[i][j][0] > lh[0]
if cond and lh[1] > 0.:
self.lheat[i][j].append([lh[0], lh[1]])
cond = self.temperature[i][j][0] < lh[0]
if cond and lh[1] < 0.:
self.lheat[i][j].append([lh[0], -lh[1]])
cond = self.temperature[i][j][0] > lh[0]
if cond and lh[1] < 0.:
self.lheat[i][j].append([lh[0], 0.])
self.state[i][j] = True
else:
message = 'point ({:d},{:d})'.format(i, j)
message = message + ' already activated'
print(message)
if shape == 'circle':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
radius = int(final_point)
for i in range(self.size[0]):
for j in range(self.size[1]):
value = (i-initial_point_x)**2+(j-initial_point_y)**2
length = np.sqrt(value)
if length <= radius:
if self.state[i][j] is False:
value = self.temperature[i][j][0]
index_value = self.materials_index[i][j]
self.temperature[i][j][0] = value + \
self.materials[index_value].tadi(
self.temperature[i][j][0])
self.rho[i][j] = self.materials[index_value].rhoa(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index_value].cpa(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index_value].ka(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index_value].lheata()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
value = self.temperature[i][j][0] < lh[0]
if value and lh[1] > 0.:
self.lheat[i][j].append([lh[0], 0.])
value = self.temperature[i][j][0] > lh[0]
if value and lh[1] > 0.:
self.lheat[i][j].append([lh[0], lh[1]])
value = self.temperature[i][j][0] < lh[0]
if value and lh[1] < 0.:
self.lheat[i][j].append([lh[0], -lh[1]])
value = self.temperature[i][j][0] > lh[0]
if value and lh[1] < 0.:
self.lheat[i][j].append([lh[0], 0.])
self.state[i][j] = True
else:
message = 'point ({:d},{:d})'.format(i, j)
message = message + ' already activated'
print(message)
def deactivate(self, initial_point, final_point, shape='square'):
"""Deactivation of the material.
Deactivates a given piece of material. If `shape` is `'square'`, then
the `initial_point` is the tuple (x,y) of the bottom left point and the
`final_point` is the tuple (x,y) of the top right point. If the shape
is `'circle'`, the `initial_point` is the tuple (x,y) of the center of
the circle and `final_point` is its radius.
"""
# check the validity of inputs
if isinstance(shape, str):
if shape == 'square':
value = isinstance(initial_point, tuple)
if value and isinstance(final_point, tuple):
condition = len(initial_point) == 2
condition = condition and len(final_point) == 2
else:
condition = False
elif shape == 'circle':
value = isinstance(final_point, int)
value = value or isinstance(final_point, float)
value = value and isinstance(initial_point, tuple)
if value:
condition = len(initial_point) == 2
else:
condition = False
else:
condition = False
else:
condition = False
if not condition:
raise ValueError
if shape == 'square':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
final_point_x = int(final_point[0])
final_point_y = int(final_point[1])
for i in range(initial_point_x, final_point_x):
for j in range(initial_point_y, final_point_y):
if self.state[i][j] is True:
value = self.temperature[i][j][0]
self.temperature[i][j][0] = value - \
self.materials[self.materials_index[i][j]].tadd(
self.temperature[i][j][0])
value = self.materials_index[i][j]
self.rho[i][j] = self.materials[value].rho0(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[value].cp0(
self.temperature[i][j][0])
self.k[i][j] = self.materials[value].k0(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[value].lheat0()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
cond = self.temperature[i][j][0] < lh[0]
if cond and lh[1] > 0.:
self.lheat[i][j].append([lh[0], 0.])
cond = self.temperature[i][j][0] > lh[0]
if cond and lh[1] > 0.:
self.lheat[i][j].append([lh[0], lh[1]])
cond = self.temperature[i][j][0] < lh[0]
if cond and lh[1] < 0.:
self.lheat[i][j].append([lh[0], -lh[1]])
cond = self.temperature[i][j][0] > lh[0]
if cond and lh[1] < 0.:
self.lheat[i][j].append([lh[0], 0.])
self.state[i][j] = False
else:
message = 'point ({:d},{:d})'.format(i, j)
message = message + ' already deactivated'
print(message)
if shape == 'circle':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
radius = int(final_point)
for i in range(self.size[0]):
for j in range(self.size[1]):
value = (i-initial_point_x)**2+(j-initial_point_y)**2
length = np.sqrt(value)
if length <= radius:
                        if self.state[i][j] is True:
value = self.temperature[i][j][0]
index_value = self.materials_index[i][j]
self.temperature[i][j][0] = value - \
self.materials[index_value].tadd(
self.temperature[i][j][0])
self.rho[i][j] = self.materials[index_value].rho0(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index_value].cp0(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index_value].k0(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index_value].lheat0()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
value = self.temperature[i][j][0] < lh[0]
if value and lh[1] > 0.:
self.lheat[i][j].append([lh[0], 0.])
value = self.temperature[i][j][0] > lh[0]
if value and lh[1] > 0.:
self.lheat[i][j].append([lh[0], lh[1]])
value = self.temperature[i][j][0] < lh[0]
if value and lh[1] < 0.:
self.lheat[i][j].append([lh[0], -lh[1]])
value = self.temperature[i][j][0] > lh[0]
if value and lh[1] < 0.:
self.lheat[i][j].append([lh[0], 0.])
self.state[i][j] = False
else:
message = 'point ({:d},{:d})'.format(i, j)
message = message + ' already deactivated'
print(message)
def square(self, material='Gd', initial_point=(3, 3), length=(3, 3),
state=False, materials_path=False):
"""Material adding with rectangle shape.
Adds a new material with a rectangle shape, where `initial_point` is
the bottom left (x,y) tuple, `length` is the length along the two axis,
state is the initial state of the material and `materials_path` is the
absolute path of the materials database.
"""
# check the validity of inputs
value = isinstance(initial_point, tuple)
if value and isinstance(length, tuple):
cond1 = len(initial_point) == 2
cond1 = cond1 and len(length) == 2
else:
cond1 = False
cond2 = isinstance(material, str)
cond3 = isinstance(state, bool)
cond4 = isinstance(materials_path, str) or materials_path is False
        if not (cond1 and cond2 and cond3 and cond4):
raise ValueError
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
final_point_x = initial_point_x + int(length[0])
final_point_y = initial_point_y + int(length[1])
if material in self.materials_name:
index = self.materials_name.index(material)
else:
index = len(self.materials)
self.materials_name.append(material)
if materials_path is False:
value = self.materials_name[index]
tadi = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'tadi.txt'
tadd = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'tadd.txt'
cpa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'cpa.txt'
cp0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'cp0.txt'
k0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'k0.txt'
ka = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'ka.txt'
rho0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'rho0.txt'
rhoa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'rhoa.txt'
lheat0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'lheat0.txt'
lheata = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'lheata.txt'
self.materials.append(mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata))
else:
value = self.materials_name[index]
tadi = materials_path + value + '/' + 'tadi.txt'
tadd = materials_path + value + '/' + 'tadd.txt'
cpa = materials_path + value + '/' + 'cpa.txt'
cp0 = materials_path + value + '/' + 'cp0.txt'
k0 = materials_path + value + '/' + 'k0.txt'
ka = materials_path + value + '/' + 'ka.txt'
rho0 = materials_path + value + '/' + 'rho0.txt'
rhoa = materials_path + value + '/' + 'rhoa.txt'
lheat0 = materials_path + value + '/' + 'lheat0.txt'
lheata = materials_path + value + '/' + 'lheata.txt'
self.materials.append(mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata))
for i in range(initial_point_x, final_point_x):
for j in range(initial_point_y, final_point_y):
if state is False:
self.state[i][j] = False
self.materials_index[i][j] = index
self.rho[i][j] = self.materials[index].rho0(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index].cp0(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index].k0(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index].lheat0()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
if self.temperature[i][j][0] < lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], 0.])
if self.temperature[i][j][0] > lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], lh[1]])
if self.temperature[i][j][0] < lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], -lh[1]])
if self.temperature[i][j][0] > lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], 0.])
else:
self.state[i][j] = True
self.materials_index[i][j] = index
self.rho[i][j] = self.materials[index].rhoa(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index].cpa(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index].ka(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index].lheata()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
if self.temperature[i][j][0] < lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], 0.])
if self.temperature[i][j][0] > lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], lh[1]])
if self.temperature[i][j][0] < lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], -lh[1]])
if self.temperature[i][j][0] > lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], 0.])
def circle(self, material='Gd', initial_point=(3, 3), radius=3,
state=False, materials_path=False):
"""Material adding with circle shape.
Adds a new material with a circle shape, where `initial_point` is the
(x,y) tuple of the center of the circle, `radius` is the radius of the
circle, state is the initial state of the material and `materials_path`
is the absolute path of the materials database.
"""
# check the validity of inputs
if isinstance(initial_point, tuple):
cond1 = len(initial_point) == 2
else:
cond1 = False
cond2 = isinstance(radius, int) or isinstance(radius, float)
cond3 = isinstance(material, str)
cond4 = isinstance(state, bool)
cond5 = isinstance(materials_path, str) or materials_path is False
        if not (cond1 and cond2 and cond3 and cond4 and cond5):
raise ValueError
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
if material in self.materials_name:
index = self.materials_name.index(material)
else:
index = len(self.materials)
self.materials_name.append(material)
if materials_path is False:
value = self.materials_name[index]
tadi = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'tadi.txt'
tadd = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'tadd.txt'
cpa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'cpa.txt'
cp0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'cp0.txt'
k0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'k0.txt'
ka = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'ka.txt'
rho0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'rho0.txt'
rhoa = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'rhoa.txt'
lheat0 = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'lheat0.txt'
lheata = os.path.dirname(os.path.realpath(__file__)) + \
'/../../database/' + value + '/' + 'lheata.txt'
self.materials.append(mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata))
else:
value = self.materials_name[index]
tadi = materials_path + value + '/' + 'tadi.txt'
tadd = materials_path + value + '/' + 'tadd.txt'
cpa = materials_path + value + '/' + 'cpa.txt'
cp0 = materials_path + value + '/' + 'cp0.txt'
k0 = materials_path + value + '/' + 'k0.txt'
ka = materials_path + value + '/' + 'ka.txt'
rho0 = materials_path + value + '/' + 'rho0.txt'
rhoa = materials_path + value + '/' + 'rhoa.txt'
lheat0 = materials_path + value + '/' + 'lheat0.txt'
lheata = materials_path + value + '/' + 'lheata.txt'
self.materials.append(mats.CalMatPro(
tadi, tadd, cpa, cp0, k0, ka, rho0, rhoa, lheat0, lheata))
for i in range(self.size[0]):
for j in range(self.size[1]):
length = np.sqrt((i-initial_point_x)**2+(j-initial_point_y)**2)
if length <= radius:
if state is False:
self.state[i][j] = False
self.materials_index[i][j] = index
self.rho[i][j] = self.materials[index].rho0(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index].cp0(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index].k0(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index].lheat0()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
if self.temperature[i][j][0] < lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], 0.])
if self.temperature[i][j][0] > lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], lh[1]])
if self.temperature[i][j][0] < lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], -lh[1]])
if self.temperature[i][j][0] > lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], 0.])
else:
self.state[i][j] = True
self.materials_index[i][j] = index
self.rho[i][j] = self.materials[index].rhoa(
self.temperature[i][j][0])
self.Cp[i][j] = self.materials[index].cpa(
self.temperature[i][j][0])
self.k[i][j] = self.materials[index].ka(
self.temperature[i][j][0])
self.lheat[i][j] = []
valh = self.materials[index].lheata()
self.latent_heat[i][j] = valh
for lh in self.latent_heat[i][j]:
if self.temperature[i][j][0] < lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], 0.])
if self.temperature[i][j][0] > lh[0] and lh[1] > 0:
self.lheat[i][j].append([lh[0], lh[1]])
if self.temperature[i][j][0] < lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], -lh[1]])
if self.temperature[i][j][0] > lh[0] and lh[1] < 0:
self.lheat[i][j].append([lh[0], 0.])
def power_add(self, initial_point, final_point, power, shape='square',
power_type='Q'):
"""Power adding.
Adds a power matrix to the thermal object. If `shape` is `'square'`,
then the `initial_point` is the tuple (x,y) of the bottom left point
and the `final_point` is the tuple (x,y) of the top right point. If the
`shape` is `'circle'`, the `initial_point` is the tuple (x,y) of the
center of the circle and `final_point` is its radius. `power` is the
value of the power to add, and `power_type` is the type of power to be
introduced, which has the value `'Q'` if it is temperature dependent
and `'Q0'` if it is temperature independent.
"""
# check the validity of inputs
if isinstance(shape, str):
if shape == 'square':
value = isinstance(initial_point, tuple)
if value and isinstance(final_point, tuple):
cond1 = len(initial_point) == 2
cond1 = cond1 and len(final_point) == 2
else:
cond1 = False
elif shape == 'circle':
value = isinstance(final_point, int)
value = value or isinstance(final_point, float)
value = value and isinstance(initial_point, tuple)
if value:
cond1 = len(initial_point) == 2
else:
cond1 = False
else:
cond1 = False
else:
cond1 = False
cond2 = isinstance(power, int) or isinstance(power, float)
if isinstance(power_type, str):
if power_type == 'Q' or power_type == 'Q0':
cond3 = True
else:
cond3 = False
else:
cond3 = False
        if not (cond1 and cond2 and cond3):
raise ValueError
if power_type == 'Q':
if shape == 'square':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
final_point_x = int(final_point[0])
final_point_y = int(final_point[1])
for i in range(initial_point_x, final_point_x):
for j in range(initial_point_y, final_point_y):
self.Q[i][j] = power
if shape == 'circle':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
radius = int(final_point)
for i in range(self.size[0]):
for j in range(self.size[1]):
value = (i-initial_point_x)**2+(j-initial_point_y)**2
length = np.sqrt(value)
if length <= radius:
self.Q[i][j] = power
if power_type == 'Q0':
if shape == 'square':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
final_point_x = int(final_point[0])
final_point_y = int(final_point[1])
for i in range(initial_point_x, final_point_x):
for j in range(initial_point_y, final_point_y):
self.Q0[i][j] = power
if shape == 'circle':
initial_point_x = int(initial_point[0])
initial_point_y = int(initial_point[1])
radius = int(final_point)
for i in range(self.size[0]):
for j in range(self.size[1]):
value = (i-initial_point_x)**2+(j-initial_point_y)**2
length = np.sqrt(value)
if length <= radius:
self.Q0[i][j] = power
def power_reset(self, power_type='Q'):
"""Power reset.
Resets the power matrix with `power_type` `'Q'` or `'Q0'`, which
corresponds to the power temperature dependent and temperature
independent, respectively.
"""
# check the validity of inputs
if isinstance(power_type, str):
if power_type == 'Q' or power_type == 'Q0':
condition = True
else:
condition = False
else:
condition = False
if not condition:
raise ValueError
        if power_type == 'Q':
            self.Q = [[0. for _ in range(self.size[1])] for _ in range(self.size[0])]
        else:
            self.Q0 = [[0. for _ in range(self.size[1])] for _ in range(self.size[0])]
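# A minimal usage sketch (illustrative values; assumes the bundled materials
# database provides the default 'Cu' and 'Gd' entries used above):
#     obj = Object(293., material='Cu', dx=0.01, dy=0.01, dt=0.1, size=(20, 20))
#     obj.square(material='Gd', initial_point=(5, 5), length=(10, 10))
#     obj.activate((5, 5), (15, 15))                  # apply the field on the Gd block
#     obj.power_add((0, 0), (20, 5), power=1e4, power_type='Q0')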
|
import re
from docxtpl import DocxTemplate, RichText
from docx.shared import Pt
####test Result including checkpoint and HLA
def checkPoint(TMBFileName, NeoStatisFileName, MMRFileName, POLFileName):
with open(TMBFileName,'r',encoding='utf-8') as snv:
line = snv.readlines()[1]
line = line.strip('\n').split('\t')
nonsys_num = int(line[1])
sys_num = int(line[2])
if line[3] == '-':
kaks = 'N/A'
else:
kaks = round(float(line[3]), 2)
tmb_result = round(float(line[6]),2)
with open(NeoStatisFileName,'r',encoding='utf-8') as neo:
line = neo.readline()
line = line.strip('\n').split('\t')
neo_num = int(line[1])
with open(MMRFileName,'r',encoding='utf-8') as mmr:
mmr_Benign = 1
for line in mmr:
line = line.strip('\n').split('\t')
if line[4] != "良性":
mmr_Benign = 0
break
with open(POLFileName, 'r', encoding='utf-8') as pol:
pol_Benign = 1
for line in pol:
line = line.strip('\n').split('\t')
if line[4] != "良性":
pol_Benign = 0
break
######################TMB
if tmb_result >= 9 :
tmb_mean = '高'
elif tmb_result < 3:
tmb_mean = '低'
else:
tmb_mean = '中'
#
if nonsys_num >= 248:
nonsys_mean = '高'
tmb_des = "本检测得到的TMB结果高于平均值"
tmb_status = "TMB_H(>248个非同义突变位点)"
elif nonsys_num < 143:
nonsys_mean = '低'
tmb_des = "本检测得到的TMB结果低于平均值"
tmb_status = "TMB_L(0-143个非同义突变位点)"
else:
nonsys_mean = '中'
tmb_des = "本检测得到的TMB结果位于中位数附近"
tmb_status = "TMB_M(143-248个非同义突变位点)"
#
if sys_num >= 100:
sys_mean = '高'
elif sys_num < 57:
sys_mean = '低'
else:
sys_mean = '中'
#
if kaks == 'N/A':
kaks_mean = 'N/A'
kaks_des = ''
elif kaks >= 3:
kaks_mean = '较高'
kaks_des = '*非同义突变/同义突变比值(A/S ratio)是一种衡量肿瘤进化保守性的指标,这一指标越大代表肿瘤的增殖能力越高,本次检测从样本中测量到的A/S ratio大于3.0,反映样本中的肿瘤细胞的增殖能力处于较高状态;'
elif kaks < 2.5:
kaks_mean = '较低'
kaks_des='*非同义突变/同义突变比值(A/S ratio)是一种衡量肿瘤进化保守性的指标,这一指标越大代表肿瘤的增殖能力越高,本次检测从样本中测量到的A/S ratio小于2.5,反映样本中的肿瘤细胞的增殖能力处于较低状态;'
else:
kaks_mean = '中性'
kaks_des='*非同义突变/同义突变比值(A/S ratio)是一种衡量肿瘤进化保守性的指标,这一指标越大代表肿瘤的增殖能力越高,本次检测从样本中测量到的A/S ratio大于2.5小于3.0,反映样本中的肿瘤细胞的增殖能力处于中性状态;'
#
if mmr_Benign == 1:
mmr_result = "未发现致病/可能致病突变"
mmr_mean = "正常"
else:
mmr_result = "发现致病/可能致病突变"
mmr_mean = "异常"
#
if pol_Benign == 1:
pol_result = "未发现致病/可能致病突变"
pol_mean = "正常"
else:
pol_result = "发现致病/可能致病突变"
pol_mean = "异常"
content = {
'tmb_result': str(tmb_result)+'/Mb',
'tmb_mean':tmb_mean,
'nonsys_num':str(nonsys_num)+'个',
'nonsys_mean':nonsys_mean,
'sys_num':str(sys_num)+'个',
'sys_mean':sys_mean,
'kaks_value':kaks,
'kaks_mean':kaks_mean,
'mmr_result':mmr_result,
'mmr_mean':mmr_mean,
'pol_result':pol_result,
'pol_mean':pol_mean,
'neo_num':str(neo_num)+'个',
'tmb_des':tmb_des,
'tmb_status':tmb_status
}
#print (content)
return content
def separateHLA(HLA):
HLA_A = []
HLA_B = []
HLA_C = []
for item in HLA:
item = item[4:] ###delete HLA-
if re.match('^A', item) is not None:
HLA_A.append(item)
if re.match('^B', item) is not None:
HLA_B.append(item)
if re.match('^C', item) is not None:
HLA_C.append(item)
content ={
'cols':['\n'.join(HLA_A),'\n'.join(HLA_B),'\n'.join(HLA_C)],
'value':{'A':HLA_A,'B':HLA_B,'C':HLA_C}
}
return content
def getHeterozy(HLA):
if len(HLA) == 1:
        return 'isozy' # homozygous
    elif len(HLA) == 0:
        return 'NA' # no allele detected
    else:
        return 'heterozy' # heterozygous
def getMean(normal,tumor):
mean = RichText('注:人群的HLA-I分型存在广泛的多态性,本报告所列举的HLA-I分型结果由基因测序获得')
B62 = ['B*15:02', 'B*15:12', 'B*15:13', 'B*46:01', 'B*52:01']
B44 = ['B*18:01', 'B*37:01', 'B*40:01', 'B*40:02', 'B*40:06', 'B*44:02', 'B*44:03', 'B*45:01']
PD1 = ['B*15:01'] + B62 + B44
####
if getHeterozy(normal['A']) =='heterozy' and getHeterozy(normal['B']) =='heterozy' and getHeterozy(normal['C']) =='heterozy':
mean.add('\n*本次检测从正常体细胞检测到所有的等位基因都处于杂合状态,最新研究显示相较于至少一个位置的等位基因为纯合状态的患者,全部为杂合状态的患者在接受anti-PD-1药物治疗时生存期中位数更长(Chowell ')
mean.add('et al., Science)',italic=True)
if getHeterozy(tumor['A']) =='heterozy' and getHeterozy(tumor['B'])=='heterozy' and getHeterozy(tumor['C'])=='heterozy' :
mean.add('\n*40%的非小细胞肺癌患者的肿瘤样本中检测到HLA-I的杂合性缺失,这与亚克隆的新抗原负载高相关(McGranahan ')
mean.add('et al., Cell',italic=True)
mean.add('),而本次检测未发现肿瘤样本和正常体细胞中存在HLA-I的分型差异')
if getHeterozy(normal['A']) =='isozy' or getHeterozy(normal['B'])=='isozy' or getHeterozy(normal['C'])=='isozy' :
mean.add('\n*本次检测从正常细胞中检测到存在至少一个等位基因为纯合状态,根据最新研究显示相较于所有等位基因都处于杂合状态的患者,至少一个等位基因为纯合状态的患者在接受anti-PD-1药物治疗时生存期中位数较短(Chowell ')
mean.add('et al., Science)',italic=True)
###
if len(set(normal['B']) & set(B62))>0 and len(set(tumor['B']) & set(B62))>0:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的等位基因的患者接受anti-PD-1治疗的预后较差(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常细胞和肿瘤组织中都检测到HLA-B62 supertype中的等位基因')
elif len(set(normal['B']) & set(B62))==0 and len(set(tumor['B']) & set(B62))>0:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的等位基因的患者接受anti-PD-1治疗的预后较差(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从肿瘤组织中检测到HLA-B62 supertype中的等位基因')
elif len(set(normal['B']) & set(B62))>0 and len(set(tumor['B']) & set(B62))==0:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的等位基因的患者接受anti-PD-1治疗的预后较差(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常细胞中检测到HLA-B62 supertype中的等位基因')
###
if 'B*15:01' in normal['B'] and 'B*15:01' in tumor['B']:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的B*15:01等位基因的患者接受anti-PD-1治疗的预后较差,这可能是因为B*15:01 的分子结构会影响T细胞对肿瘤细胞的识别能力(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常组织和肿瘤组织中都检测到B*15:01')
elif not 'B*15:01' in normal['B'] and 'B*15:01' in tumor['B']:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的B*15:01等位基因的患者接受anti-PD-1治疗的预后较差,这可能是因为B*15:01 的分子结构会影响T细胞对肿瘤细胞的识别能力(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从肿瘤组织中检测到B*15:01')
elif 'B*15:01' in normal['B'] and not 'B*15:01' in tumor['B']:
mean.add('\n*最新研究显示携带有HLA-B62 supertype的B*15:01等位基因的患者接受anti-PD-1治疗的预后较差,这可能是因为B*15:01 的分子结构会影响T细胞对肿瘤细胞的识别能力(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常细胞中检测到B*15:01')
###
if len(set(normal['B']) & set(B44))>0 and len(set(tumor['B']) & set(B44))>0:
mean.add('\n*最新研究显示携带有HLA-B44 supertype的等位基因的患者接受anti-PD-1治疗的生存期中位数更长(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常细胞和肿瘤组织中都检测到HLA-B44 supertype中的等位基因')
elif len(set(normal['B']) & set(B44))==0 and len(set(tumor['B']) & set(B44))>0:
mean.add('\n*最新研究显示携带有HLA-B44 supertype的等位基因的患者接受anti-PD-1治疗的生存期中位数更长(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从肿瘤组织中检测到HLA-B44 supertype中的等位基因')
    elif len(set(normal['B']) & set(B44))>0 and len(set(tumor['B']) & set(B44))==0:
mean.add('\n*最新研究显示携带有HLA-B44 supertype的等位基因的患者接受anti-PD-1治疗的生存期中位数更长(Chowell ')
mean.add('et al., Science',italic=True)
mean.add('),本次检测从正常细胞中检测到HLA-B44 supertype中的等位基因')
#####
if len(set(normal['B']) & set(PD1))==0 and len(set(tumor['B']) & set(PD1))==0:
mean.add('\n*本次检测未发现任何已知的影响anti-PD-1治疗预后的HLA等位基因')
###
#mean='\n'.join(mean)
return mean
def HLA(HLAFileName):
with open(HLAFileName,'r',encoding='utf-8') as hla:
line = hla.readlines()
normal = line[0].strip('\n').split(',')
tumor = line[1].strip('\n').split(',')
normal = separateHLA(normal)
tumor = separateHLA(tumor)
mean = getMean(normal['value'],tumor['value'])
content={
'hla_contents':[
{'label':'正常细胞中的HLA-I分型:','cols':normal['cols']},
{'label':'肿瘤组织中的HLA-I分型:','cols':tumor['cols']}
],
'hla_mean':mean
}
return content
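# Sketch of how these context dictionaries are presumably combined and rendered
# with docxtpl (template and input file names below are hypothetical):
#     context = {}
#     context.update(checkPoint('TMB.txt', 'NeoStatis.txt', 'MMR.txt', 'POL.txt'))
#     context.update(HLA('HLA.txt'))
#     doc = DocxTemplate('report_template.docx')
#     doc.render(context)
#     doc.save('report.docx')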
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for endpoints.py."""
from tests.common.lib import endpointstest
import webtest
from protorpc import message_types
from titan.common.lib.google.apputils import basetest
from titan import endpoints
class EndpointsTest(endpointstest.EndpointsTestCase):
def CreateWsgiApplication(self):
"""Returns the wsgi application for the service endpoint testing."""
return endpoints.EndpointsApplication([GoodService], restricted=False)
def testEndpointsApplication(self):
# You do not want to copy this test; it is specifically for the internal
# behavior of the endpoints application.
#
# Verify that the object correctly delegates all behavior except middleware.
app = webtest.TestApp(self._endpoints_application)
internal_discovery_url = '/_ah/spi/BackendService.getApiConfigs'
response = app.post(
internal_discovery_url, '{}', content_type='application/json',
headers={'X-Appengine-Peer': 'apiserving'})
self.assertEqual(200, response.status_int)
# Verify error behavior copied from apiserving.api_server.
self.assertRaises(
TypeError, endpoints.EndpointsApplication, protocols='foo')
def testServices(self):
# Verify that the service method returns the correct message type
# (defaults to VoidMessage)
service = self.GetServiceStub(GoodService)
self.assertEquals(message_types.VoidMessage(), service.foo())
# This also happens to verify that the decorator delegation works.
@endpoints.api(name='titan', version='v1')
class GoodService(endpoints.Service):
@endpoints.method(name='foo.bar', path='foo/asdf')
def foo(self, unused_request):
return message_types.VoidMessage()
if __name__ == '__main__':
basetest.main()
|
from pathlib import Path
from . import cdfread, cdfwrite
from .cdf_factory import CDF
from .epochs import CDFepoch as cdfepoch # noqa: F401
try:
# This is an optional dependency for astropy time conversions
from .epochs_astropy import CDFAstropy as cdfastropy
except ImportError:
pass
try:
# Another optional dependency for XArray <-> cdf conversions
from .xarray_to_cdf import xarray_to_cdf
from .cdf_to_xarray import cdf_to_xarray
except ImportError:
pass
__all__ = ['CDF', 'xarray_to_cdf', 'cdf_to_xarray']
|
#coding: utf-8
#------------------------------------------------------------
# A program that accepts anything typed in and reports its
# primitive type along with everything it can tell about the received value.
#------------------------------------------------------------
# Dissecting a variable - Exercise #004
#------------------------------------------------------------
from time import sleep
e = input('Digite algo: ')
print(f'O tipo primitivo desse valor é {type(e)}')
sleep(1)
print('''\nSe você digita um número, o tipo primitivo também retorna str?
Você pode verificar se o dado é um número e outras coisas mais.\n''')
sleep(4)
print('Só tem espaços? {}'.format(e.isspace()))
print('É um número? {}'.format(e.isnumeric()))
print('É alfabético ? {}'.format(e.isalpha()))
print('É alfanumérico? {}'.format(e.isalnum()))
print('Está em minúsculo? {}'.format(e.islower()))
print('Está em maiúsculo? {}'.format(e.isupper()))
|
# coding: utf8
# try something like
import os
raise RuntimeError("remove this line to use!")
# connect to pycon-tech (django) database
pg=DAL('postgres://postgres:saraza@localhost/pycon2010')
# configure path to common/talkdata
PATH = "C:\\PuTTY\\2010\\"
def index(): return dict(message="hello from pycontech.py")
def navbar():
# import django navbar
rows = pg.executesql("SELECT id, name, url, parent_id FROM navbar_navbarentry ORDER BY parent_id, \"order\"")
db(db.navbar.id>0).delete()
for row in rows:
db.navbar.insert(
id=row[0],
title=row[1],
url=row[2],
parent_id=row[3],
)
response.view = "generic.html"
return dict(rows=rows)
def flatpages():
# import django flatpages
rows = pg.executesql("SELECT id, url, title, content FROM django_flatpage ORDER BY id")
db(db.plugin_flatpage.id>0).delete()
for row in rows:
parts = row[1].split("/")
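        # Assumption about the source URLs: django flatpage paths look like
        # /<prefix>/<controller>/<function>/[<arg>/], so parts[1] (the prefix) is
        # dropped, a missing function falls back to 'index', and a missing arg to ''.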
controller = parts[2]
function = parts[3] or 'index'
arg = parts[4:] and parts[4] or ''
db.plugin_flatpage.insert(
id=row[0],
controller=controller,
function=function,
arg=arg,
title=row[2],
body=row[3],
lang="es",
format='HTML',
)
response.view = "generic.html"
return dict(rows=rows)
def users():
# import django auth_users, gmaps_address and usermgr_profile into auth_user
sql = """
select a.id, a.first_name, a.last_name, a.email, a.date_joined,
p.url, p.affiliation, p.bio, p.location, p.email_ok, p.listing_ok, p.sponsor_ok,
g.latitude, g.longitude
from auth_user a
left join usermgr_userprofile p on p.user_id=a.id
left join gmaps_gaddress g on g.name=a.username
order by a.id
"""
rows = pg.executesql(sql)
# remove old users
db(db.auth_user.id>0).delete()
for row in rows:
db.auth_user.insert(
id=row[0],
first_name=row[1],
last_name=row[2],
email=row[3],
created_on=row[4],
personal_home_page=row[5],
company_home_page=row[5],
company_name=row[6],
resume=row[7],
address=row[8],
confirmed=row[9],
include_in_delegate_listing=row[10],
sponsors=row[11],
latitude=row[12],
longitude=row[13],
country="Argentina", #!!!!!
)
response.view = "generic.html"
return dict(rows=rows)
def talks():
# import pycontech event/schedule/scheduledevent into activity
# change FROM order to import events with no proposals
# (keynotes, plenary, lightning, etc.)
sql = """
select e.id, COALESCE(p.title, e._title), COALESCE(p.duration, e._duration), COALESCE(p.summary, e._summary), p.description, p.submitter_id, p.level, p.categories,
se.start, se.room_id, e.type
from propmgr_proposal p
left join schedule_event e on p.id = e.proposal_id
left join schedule_scheduledevent se on se.event_id = e.id
"""
rows = pg.executesql(sql)
levels = {'B': "Beginner",'I':"Intermediate", 'A':"Advanced", None: ''}
types = {"E": 'talk', "P": 'plenary', "B": 'break', None: 'talk'}
# remove old talks
db(db.activity.id>0).delete()
for row in rows:
event_id, title, duration, summary, description, submitter_id, level_id, categories, start, room_id, event_type = row
autor = db.auth_user[submitter_id]
authors = autor and "%s %s" % (autor.first_name, autor.last_name) or ''
status = start and 'accepted' or 'pending'
activity_id = db.activity.insert(
authors=authors,
title=title,
duration=duration,
abstract=summary,
description=description,
created_by=submitter_id,
level=levels[level_id],
scheduled_datetime=start,
scheduled_room=room_id,
status=status,
categories=categories and categories.split(", ") or [],
confirmed=True,
type=types[event_type],
)
# insert author(s):
if submitter_id: # todo: pycon-tech sometime doesn't have authors!?
db.author.insert(user_id=submitter_id, activity_id=activity_id)
# activate speaker flag (for speaker page):
q = db.auth_user.id==submitter_id
q &= db.auth_user.speaker==False
r = db(q).update(speaker=True)
response.view = "generic.html"
return dict(rows=rows)
def sponsors():
# import sponsorship_websitelogo into sponsors
sql = """
select s.id, s.name, s.level, s.index, s.logo,
s.url, s.alt
from sponsorship_websitelogo s
where s.visible=True
order by s.level, s.index
"""
rows = pg.executesql(sql)
# change the mapping according your event:
levels = {"8:Organizer": "Organizer",
"7:Thanks": "Agradecimiento Especial",
"3:Gold": "Sponsor Oro",
"4:Silver": "Sponsor Plata",
"5:Bronze": "Sponsor Bronce",
"51:Bronze": "Sponsor Bronce",
"52:Pyme": "Sponsor Pyme",
"9:Colaborator": "Colaborador",}
# remove old sponsors
db(db.sponsor.id>0).delete()
for row in rows:
# Manual uploads (programatically read and store the logo image)
filename = row[4]
stream = open(os.path.join(PATH, filename),'rb')
logo = db.sponsor.logo.store(stream, filename)
# do the work:
db.sponsor.insert(
id=row[0],
name=row[1],
level=levels[row[2]],
number=row[3],
logo=logo,
url=row[5],
alt=row[6],
)
response.view = "generic.html"
return dict(rows=rows)
|
from lettuce import *
from selenium import webdriver
@before.all
def setup_browser():
print("starting browser")
world.browser = webdriver.Firefox()
world.browser.implicitly_wait(3)
@after.all
def teardown_browser_and_server(total):
print("terminating browser")
world.browser.quit()
|
#!/usr/bin/env python
__author__ = 'Florian Hase'
#========================================================================
import os
import copy
import time
import uuid
import pickle
import subprocess
import numpy as np
import multiprocessing
from gryffin.utilities import Logger
from gryffin.utilities.decorators import thread
class DescriptorGenerator(Logger):
eta = 1e-3
max_iter = 10**3
def __init__(self, config):
self.config = config
self.is_generating = False
self.exec_name = '%s/descriptor_generator/generation_process.py' % self.config.get('home')
# define registers
self.auto_gen_descs = {}
self.comp_corr_coeffs = {}
self.gen_descs_cov = {}
self.min_corrs = {}
self.reduced_gen_descs = {}
self.weights = {}
self.sufficient_indices = {}
if self.config.get('num_cpus') == 'all':
self.num_cpus = multiprocessing.cpu_count()
else:
self.num_cpus = int(self.config.get('num_cpus'))
@thread
def single_generate(self, descs, objs, feature_index, result_dict=None):
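        # Overall flow: pickle the inputs for this feature into the scratch
        # directory, launch generation_process.py on that file in a separate
        # Python process, then poll until the completed_* result pickle exists
        # and has stopped growing before loading it back and removing both files.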
# collect all relevant properties
sim_dict = {}
for prop in dir(self):
if callable(getattr(self, prop)) or prop.startswith(('__', 'W', 'config')):
continue
sim_dict[prop] = getattr(self, prop)
sim_dict['num_samples'] = descs.shape[0]
sim_dict['num_descs'] = descs.shape[1]
sim_dict['descs'] = descs
sim_dict['objs'] = objs
sim_dict['grid_descs'] = self.config.feature_descriptors[feature_index]
identifier = str(uuid.uuid4())[:8]
config_name = '%s/descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
with open(config_name, 'wb') as content:
pickle.dump(sim_dict, content)
subprocess.call('python %s %s' % (self.exec_name, config_name), shell=True)
print('SUBMITTED DESC GENERATION')
results_name = '%s/completed_descriptor_generation_%d_%s.pkl' % (self.config.get('scratch_dir'), feature_index, identifier)
# wait for results to be written
while not os.path.isfile(results_name):
time.sleep(0.05)
current_size = 0
while current_size != os.path.getsize(results_name):
current_size = os.path.getsize(results_name)
time.sleep(0.05)
time.sleep(0.2)
try:
with open(results_name, 'rb') as content:
results = pickle.load(content)
except EOFError:
time.sleep(2)
with open(results_name, 'rb') as content:
results = pickle.load(content)
self.min_corrs[feature_index] = results['min_corrs']
self.auto_gen_descs[feature_index] = results['auto_gen_descs']
self.comp_corr_coeffs[feature_index] = results['comp_corr_coeffs']
self.gen_descs_cov[feature_index] = results['gen_descs_cov']
self.reduced_gen_descs[feature_index] = results['reduced_gen_descs']
self.weights[feature_index] = results['weights']
self.sufficient_indices[feature_index] = results['sufficient_indices']
result_dict[feature_index] = results['reduced_gen_descs']
os.remove(config_name)
os.remove(results_name)
@thread
def generate(self, obs_params, obs_objs):
start = time.time()
self.is_generating = True
result_dict = {}
feature_types = self.config.feature_types
feature_descriptors = self.config.feature_descriptors
for feature_index, feature_options in enumerate(self.config.feature_options):
if feature_types[feature_index] == 'continuous':
self.weights[feature_index] = None
self.reduced_gen_descs[feature_index] = None
result_dict[feature_index] = None
continue
if feature_descriptors[feature_index] is None:
self.weights[feature_index] = None
self.reduced_gen_descs[feature_index] = None
result_dict[feature_index] = None
continue
if feature_descriptors[feature_index].shape[1] == 1:
self.weights[feature_index] = np.array([[1.]])
self.reduced_gen_descs[feature_index] = feature_descriptors[feature_index]
result_dict[feature_index] = feature_descriptors[feature_index]
continue
sampled_params = obs_params[:, feature_index].astype(np.int32)
sampled_descriptors = feature_descriptors[feature_index][sampled_params]
sampled_objs = np.reshape(obs_objs, (len(obs_objs), 1))
self.single_generate(sampled_descriptors, sampled_objs, feature_index, result_dict)
# avoid parallel execution if not desired
if self.num_cpus == 1:
if feature_types[feature_index] == 'continuous':
continue
while not feature_index in result_dict:
time.sleep(0.1)
for feature_index in range(len(self.config.feature_options)):
if feature_types[feature_index] == 'continuous':
continue
while not feature_index in result_dict:
time.sleep(0.1)
gen_feature_descriptors = [result_dict[feature_index] for feature_index in range(len(result_dict.keys()))]
self.gen_feature_descriptors = gen_feature_descriptors
self.is_generating = False
end = time.time()
self.desc_gen_time = end - start
def get_descriptors(self):
while self.is_generating:
time.sleep(0.1)
if hasattr(self, 'gen_feature_descriptors'):
print('[TIME: ', self.desc_gen_time, ' (descriptor generation)')
return self.gen_feature_descriptors
else:
return self.config.feature_descriptors
def get_summary(self):
summary = {}
feature_types = self.config.feature_types
# If we have not generated new descriptors
if not hasattr(self, 'gen_feature_descriptors'):
for feature_index in range(len(self.config.feature_options)):
contribs = {}
if feature_types[feature_index] == 'continuous':
continue
feature_descriptors = self.config.feature_descriptors[feature_index]
if feature_descriptors is None:
continue
for desc_index in range(feature_descriptors.shape[1]):
desc_summary_dict = {}
desc_summary_dict['relevant_given_descriptors'] = np.arange(len(feature_descriptors[:, desc_index]))
desc_summary_dict['given_descriptor_contributions'] = np.ones(len(feature_descriptors[:, desc_index]))
contribs['descriptor_%d' % desc_index] = copy.deepcopy(desc_summary_dict)
summary['feature_%d' % feature_index] = copy.deepcopy(contribs)
return summary
# If we have generated new descriptors
for feature_index in range(len(self.config.feature_options)):
if feature_types[feature_index] == 'continuous':
continue
weights = self.weights[feature_index]
sufficient_indices = self.sufficient_indices[feature_index]
print('sufficient_indices', sufficient_indices)
if weights is None:
continue
if len(sufficient_indices) == 0:
continue
# normalize weights
normed_weights = np.empty(weights.shape)
for index, weight_elements in enumerate(weights):
normed_weights[index] = weight_elements / np.sum(np.abs(weight_elements))
# identify contributing indices
contribs = {}
for new_desc_index in sufficient_indices:
desc_summary_dict = {}
relevant_weights = normed_weights[new_desc_index]
sorting_indices = np.argsort(np.abs(relevant_weights))
cumulative_sum = np.cumsum(np.abs(relevant_weights[sorting_indices]))
include_indices = np.where(cumulative_sum > 0.1)[0]
relevant_given_descriptors = sorting_indices[include_indices]
desc_summary_dict['relevant_given_descriptors'] = relevant_given_descriptors
desc_summary_dict['given_descriptor_contributions'] = weights[new_desc_index]
contribs['descriptor_%d' % new_desc_index] = copy.deepcopy(desc_summary_dict)
summary['feature_%d' % feature_index] = copy.deepcopy(contribs)
return summary
|
"""This module contains the general information for FabricFcMonSrcEpOperation ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class FabricFcMonSrcEpOperationConsts():
ADMIN_STATE_DISABLED = "disabled"
ADMIN_STATE_ENABLED = "enabled"
ADMIN_STATE_REMOTE_TRIGGER = "remoteTrigger"
ADMIN_STATE_REMOVE = "remove"
DIRECTION_BOTH = "both"
DIRECTION_RX = "rx"
DIRECTION_TX = "tx"
TRIGGER_STATUS_TRIGGER_ACKED = "trigger-acked"
TRIGGER_STATUS_TRIGGER_FAILED = "trigger-failed"
TRIGGER_STATUS_TRIGGERED = "triggered"
TRIGGER_STATUS_UNKNOWN = "unknown"
class FabricFcMonSrcEpOperation(ManagedObject):
"""This is FabricFcMonSrcEpOperation class."""
consts = FabricFcMonSrcEpOperationConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("FabricFcMonSrcEpOperation", "fabricFcMonSrcEpOperation", "remoper-mon-src-[name]", VersionMeta.Version151a, "InputOutput", 0x1ff, [], ["admin", "ext-lan-config", "ext-lan-policy"], [u'fabricFcEstcEpOperation', u'fabricFcSanEpOperation', u'fabricFcSanPcOperation', u'fabricVsan', u'vnicFc'], [u'faultInst'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["disabled", "enabled", "remoteTrigger", "remove"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"direction": MoPropertyMeta("direction", "direction", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["both", "rx", "tx"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"last_modified": MoPropertyMeta("last_modified", "lastModified", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version151a, MoPropertyMeta.NAMING, 0x10, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"remote_error_code": MoPropertyMeta("remote_error_code", "remoteErrorCode", "uint", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"remote_error_descr": MoPropertyMeta("remote_error_descr", "remoteErrorDescr", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
"session": MoPropertyMeta("session", "session", "uint", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], ["1-255"]),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"transport": MoPropertyMeta("transport", "transport", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|ether|dce|fc),){0,4}(defaultValue|unknown|ether|dce|fc){0,1}""", [], []),
"trigger_status": MoPropertyMeta("trigger_status", "triggerStatus", "string", VersionMeta.Version151a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["trigger-acked", "trigger-failed", "triggered", "unknown"], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version151a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((defaultValue|unknown|lan|san|ipc),){0,4}(defaultValue|unknown|lan|san|ipc){0,1}""", [], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"direction": "direction",
"dn": "dn",
"lastModified": "last_modified",
"name": "name",
"remoteErrorCode": "remote_error_code",
"remoteErrorDescr": "remote_error_descr",
"rn": "rn",
"session": "session",
"status": "status",
"transport": "transport",
"triggerStatus": "trigger_status",
"type": "type",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.admin_state = None
self.child_action = None
self.direction = None
self.last_modified = None
self.remote_error_code = None
self.remote_error_descr = None
self.session = None
self.status = None
self.transport = None
self.trigger_status = None
self.type = None
ManagedObject.__init__(self, "FabricFcMonSrcEpOperation", parent_mo_or_dn, **kwargs)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-27 01:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0003_address_label'),
]
operations = [
migrations.AlterField(
model_name='taskmonitor',
name='task_state',
field=models.CharField(choices=[('started', 'started'), ('failed', 'failed'), ('succeeded', 'succeeded')], max_length=200, null=True),
),
]
|
from q35_test import (
Q35Project_2_6_0
)
proj = Q35Project_2_6_0()
|
from os import listdir
from os.path import isfile, join
import json
from PIL import Image
import os
memes = []
invalid_images = []
mypath = "memes/3"
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for file in onlyfiles:
try:
temp = {}
with Image.open('./'+mypath+'/'+file) as img:
width, height = img.size
temp['id'] = file.split('.')[0]
temp['path'] = mypath+'/'+file
temp['width'] = width
temp['height'] = height
memes.append(temp)
except:
print(file, 'is not a valid image !')
invalid_images.append('./'+mypath+'/'+file)
# delete file
for invalid in invalid_images:
os.remove(invalid)
print(invalid ," removed")
f = open('3.json', 'w')
f.write(json.dumps(memes))
f.close()
|
from typing import List
class Solution:
def singleNumber(self, nums: List[int]) -> int:
ones , twos, threes = 0 , 0, 0
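        # A reading of the standard bit trick (comment added for clarity): after
        # each iteration, a bit is set in `ones` if it has appeared 1 time mod 3
        # and in `twos` if it has appeared 2 times mod 3; bits reaching a third
        # appearance show up in `threes` and are cleared from both counters.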
for n in nums:
twos |= (ones & n)
ones ^= n
threes = ones & twos
ones &= ~threes # remove the threes from the ones
twos &= ~threes # remove the threes from the twos
print(f'{ones} , {twos}, {threes}')
return ones
s = Solution()
ip = [1,1,1,2]
ans = s.singleNumber(ip)
print(ans)
|
"""
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
Tools for creation or conversion of lists from/to desired classification
(the default is ISO 3166-1)
© econcz, 2021
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
This project was inspired
by **[pycountry](https://pypi.org/project/pycountry/)** and
**[pycountry-convert](https://pypi.org/project/pycountry-convert/)**
modules and is a port of the **Stata package**
**[pyconvertu](https://ideas.repec.org/c/boc/bocode/s458892.html)**
*(Stata module to convert a string variable into a classification
from the default or user-provided JSON file with the help of Python 3)*
written in Python 3 and ADO. The tools can, for example,
be used together with **[pandas](https://pypi.org/project/pandas/)**
to process **pandas.DataFrame()**, `data`, `index`, and / or `columns`.
Parameters:
–––––––––––
`source_file` : *raw str* or *unicode*, optional
Relative or absolute path to the user-defined
JSON file.
`from_list` : *sequence* of *iterable*
Input data.
`to_classification` : *str* or *unicode*
'name_en' (English name), 'name_fr' (French name),
'iso3' (ISO 3166-1 alpha-3),
'iso2' (ISO 3166-1 alpha-2), or
'isoN' (ISO 3166-1 numeric).
`from_classification` : *str* or *unicode*
'name_en' (English name), 'name_fr' (French name),
'iso3' (ISO 3166-1 alpha-3),
'iso2' (ISO 3166-1 alpha-2), or
'isoN' (ISO 3166-1 numeric).
`source_file` (if defined) replaces the default classification (ISO 3166-1).
The file must contain a list of dictionaries where `regex`
is a compulsory key in each one. The default JSON file was prepared
with the help of **[json](https://docs.python.org/3/library/json.html)**
module:
```
[
{
"regex": "^(.*afgh.*|\\s*AFG\\s*|\\s*AF\\s*|\\s*4\\s*)$",
"name_en": "Afghanistan", # classification A
"name_fr": "Afghanistan (l')", # classification B
"iso3": "AFG", # ...
"iso2": "AF",
"isoN": "4"
},
...
{
"metadata": {
"name_en": "English short name",
"name_fr": "French short name",
"iso3": "alpha-3 code",
"iso2": "alpha-2 code",
"isoN": "numeric"
}
},
{
"sources": [
"[...](ISO 3166 COUNTRY CODES)",
"[...](ALTERNATIVE NAMES)"
]
}
]
```
Returns:
––––––––
`l` : *list*
Processed data.
"""
import json
import os
import re
import sys
# User-defined Functions
def convert(
source_file=r'' + sys.modules['pyconvertu'].__file__.replace(
'__init__.py', 'classification.json'
),
from_list=[], to_classification='',
*args, **kwargs
):
"""
Converts a list of strings (from_list) to classification
(to_classification) based on a JSON file (source_file).
"""
try:
# load classification
with open(os.path.expanduser(source_file)) as f:
classification = list(filter(
lambda d: not d.get('metadata') and not d.get('sources'),
json.load(f)
))
# convert list
return list(map(
lambda s:
(lambda l, s:
l[1].get(to_classification) if len(l) > 1 else l[0]
)(
[s] + list(filter(
lambda d: re.search(
r'' + d.get('regex') + r'', s, flags=re.I|re.M
),
classification
)),
str(s)
),
from_list
))
except:
return {}
def classification(
source_file=r'' + sys.modules['pyconvertu'].__file__.replace(
'__init__.py', 'classification.json'
),
from_classification='',
*args, **kwargs
):
"""
Creates a list of strings from classification
(from_classification) based on a JSON file (source_file).
"""
try:
# load classification
with open(os.path.expanduser(source_file)) as f:
classification = list(filter(
lambda d: not d.get('metadata') and not d.get('sources'),
json.load(f)
))
# create list
l = list(map(
lambda d: d.get(from_classification),
classification
))
l.sort()
return l
except:
return {}
def info(
source_file=r'' + sys.modules['pyconvertu'].__file__.replace(
'__init__.py', 'classification.json'
),
*args, **kwargs
):
"""
Returns a list based on a JSON file (source_file).
"""
try:
# load classification metadata
with open(os.path.expanduser(source_file)) as f:
metadata = list(filter(
lambda d: d.get('metadata') or d.get('sources'),
json.load(f)
))
# create list
return list(map(
lambda d: str(d),
metadata
))
except:
return {}
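# Minimal usage sketch (an addition, not part of the original module). Assumption:
# the file is importable as `pyconvertu` -- the default arguments above are resolved
# through sys.modules['pyconvertu'] at import time, so it cannot run as a standalone
# script. Using the Afghanistan entry shown in the module docstring:
#
#     from pyconvertu import convert, classification, info
#     convert(from_list=['afghanistan', ' AFG ', '4'], to_classification='iso2')
#     # -> ['AF', 'AF', 'AF']; strings with no matching regex pass through unchanged
#     classification(from_classification='iso3')   # sorted list of alpha-3 codes
#     info()                                        # metadata and source entries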
|
num=int(input("Enter number :"))
count=0
for i in range(1,num+1):
if (num%i==0):
count=count+1
if count==2:
print(num,"is a prime number")
else:
print(num,"is not a prime number")
# n=int(input("Enter number :"))
# count=0
# i=1
# while(i<=n):
# if n%i==0:
# count=count+1
# i=i+1
# if count==2:
# print("prime number")
# else:
# print("not prime number")
|
# Ivan Carvalho
# Solution to https://www.urionlinejudge.com.br/judge/problems/view/1553
# encoding : utf-8
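# frequente(conjunto, numero): counts how many distinct values in `conjunto`
# occur at least `numero` times.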
def frequente(conjunto,numero):
return len([j for j in [conjunto.count(i) for i in set(conjunto)] if j>= numero])
while True:
a,b = [int(i) for i in raw_input().split(" ")]
if a != 0 and b != 0 :
print frequente([int(j) for j in raw_input().split(" ")],b)
else:
break
|
# -*- coding: utf-8 -*-
import yaml
import sys
import numpy as np
import pandas as pd
from time import time
from sklearn.pipeline import make_pipeline
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, LeaveOneLabelOut
from collections import OrderedDict
from multiprocessing import Pool
from functools import partial
# local import
from classif import updateMeta, baggingIterator
def from_yaml_to_func(method,params):
prm = dict()
if params!=None:
for key,val in params.items():
prm[key] = eval(str(val))
return eval(method)(**prm)
def makeCV(kfolds,X,Labels,User,Meta,clf,opts):
users = np.unique(User)
toPredData=[]
Gauc = []
for train_users,test_users in kfolds[1]:
allProb = 0
test_index = np.array([True if u in set(users[test_users]) else False for u in User])
if 'bagging' in opts:
bagging = baggingIterator(opts,[users[i] for i in train_users])
else:
bagging = [[-1]]
for bag in bagging:
bagUsers = np.array([True if u in set(bag) else False for u in User])
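            # Train on every non-test user except those held out by the current bag
            # (assuming baggingIterator only draws bags from the training users,
            # the XOR of "not in test" and "in bag" reduces to exactly that).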
            train_index = np.logical_xor(np.logical_not(test_index), bagUsers)
try:
# train
updateMeta(clf,Meta[train_index])
clf.fit(X[train_index,:,:],Labels[train_index])
# predict
prob = []
for ut in np.unique(users[test_users]):
updateMeta(clf,Meta[User==ut,...])
prob.extend(clf.predict(X[User==ut,...]))
prob = np.array(prob)
allProb += prob/len(bagging)
except:
print(kfolds[0])
print([users[i] for i in train_users])
print(bag)
continue
# save & return
predictions = OrderedDict()
predictions['user']=User[test_index]
predictions['label'] = Labels[test_index]
predictions['prediction']=allProb
if 'leak' in opts:
predictions['prediction'] += opts['leak']['coeff']*(1-Meta[test_index,-1])
predictions = pd.DataFrame(predictions)
Gauc.append(roc_auc_score(predictions.label,predictions.prediction))
toPredData.append(predictions)
predData = pd.concat(toPredData)
Sauc = [roc_auc_score(predData.loc[predData.user==i].label,predData.loc[predData.user==i].prediction) for i in np.unique(predData.user)]
print('Rep %d: gAUC (mean of folds) %0.5f, sAUC %0.5f (%0.5f)' % (kfolds[0],np.mean(Gauc),np.mean(Sauc),np.std(Sauc)))
return [Gauc,Sauc]
# load parameters file
yml = yaml.load(open(sys.argv[1]))
# imports
for pkg, functions in yml['imports'].items():
stri = 'from ' + pkg + ' import ' + ','.join(functions)
exec(stri)
# parse pipe function from parameters
pipe = []
for item in yml['pipeline']:
for method,params in item.items():
pipe.append(from_yaml_to_func(method,params))
# create pipeline
clf = make_pipeline(*pipe)
opts=yml['MetaPipeline']
if opts is None:
opts = {}
# load files
X = np.load('./preproc/epochs.npy')
Labels,User = np.load('./preproc/infos.npy')
Meta = np.load('./preproc/meta_leak.npy') if 'leak' in opts else np.load('./preproc/meta.npy')
users=np.unique(User)
# parallel CV
np.random.seed(5)
folds = yml['CrossVal']['folds']
repetitions = yml['CrossVal']['repetitions']
cores = yml['CrossVal']['cores']
kfolds = [[i,KFold(len(users),folds,shuffle=True)] for i in range(repetitions)]
np.random.seed(432432)
t = time()
pMakeCV = partial(makeCV,X=X,Labels=Labels,User=User,Meta=Meta,clf=clf,opts=opts) # pool function is able to process only 1 argument, so the rest has to be set fixed
pool = Pool(processes = cores) # define number of cores
results = pool.map(pMakeCV,kfolds,chunksize=1) # apply parallel processing
pool.close() # close parallel processes after execution (frees memory)
print("Done in " + str(time()-t) + " seconds")
# calculating performance
gAUC = np.concatenate([i[0] for i in results]) # mean of folds
sAUC = [np.mean(i[1]) for i in results]
indAUC = np.array([i[1] for i in results])
indAUC = np.mean(indAUC,axis=0)
print('Global AUC : %.5f (%.5f)' % (np.mean(gAUC),np.std(gAUC)))
print('Subject AUC : %.5f (%.5f)' % (np.mean(sAUC),np.std(sAUC)))
# writing it down
import os
comment = yml['CrossVal']['comments']
path = yml['CrossVal']['path']
pipelineSteps = [str(clf.steps[i][1]).replace('\n','').replace(' ','') for i in range(len(clf.steps))]
if not os.path.isfile(path):
fd = open(path,'w')
fd.write('comment;folds;reps;gAUC mean;gAUC std;sAUC mean;sAUC std;user'+ ";user".join(map(str,list(map(int,users)))) + ';leak;bagging;pipeline\n')
fd.close()
fd = open(path,'a')
leakStr = 'on' if 'leak' in opts else 'off'
bagStr = '-'.join([str(opts['bagging']['bag_size']),str(opts['bagging']['models'])]) if 'bagging' in opts else 'off'
toWrite = [comment] + list(map(str,[folds,repetitions,np.mean(gAUC),np.std(gAUC),np.mean(sAUC),np.std(sAUC)])) + [str(i) for i in indAUC] + [leakStr,bagStr] + pipelineSteps
fd.write(';'.join(toWrite) + '\n')
fd.close()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import functools
import tensorflow as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import Distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import special_math
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import tensor_util
__all__ = [
"SkewGeneralizedNormal",
]
class SkewGeneralizedNormal(Distribution):
'''
The skew-generalized normal distribution.
Also known as the generalized Gaussian distribution of the second type.
This implementation is based on the distribution
described as the generalized normal version 2
defined in the Wikipedia article:
https://en.wikipedia.org/wiki/Generalized_normal_distribution
accessed January 2019.
Quantile, survival, log_survival, and all other essential functions
were derived and defined by
Daniel Luria, legally Daniel Maryanovsky, of vAIral, Kabbalah AI,
and formerly of Lofty AI, MariaDB, and Locbit Inc.
The distribution returns NaN when evaluating
probability of points outside its support
'''
def __init__(self,
loc,
scale,
peak,
validate_args=False,
allow_nan_stats=True,
name="SkewGeneralizedNormal"):
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, peak],
dtype_hint=tf.float32)
loc = tf.convert_to_tensor(loc, name="loc", dtype=dtype)
scale = tf.convert_to_tensor(scale, name="scale", dtype=dtype)
peak = tf.convert_to_tensor(peak, name="peak", dtype=dtype)
with tf.control_dependencies([tf.assert_positive(scale)] if
validate_args else []):
self._loc = tf.identity(loc)
self._scale = tf.identity(scale)
self._peak = tf.identity(peak)
tf.debugging.assert_same_float_dtype(
[self._loc, self._scale, self._peak]
)
super().__init__(
dtype=dtype,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale, self._peak],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale", "peak"),
([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 3)))
@classmethod
def _params_event_ndims(cls):
return dict(loc=0, scale=0, peak=0)
@property
def loc(self):
"""Distribution parameter for the mean."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
@property
def peak(self):
"""Distribution parameter related to mode and skew."""
return self._peak
def _batch_shape_tensor(self, loc=None, scale=None, peak=None):
return functools.reduce(prefer_static.broadcast_shape, (
prefer_static.shape(self.loc if loc is None else loc),
prefer_static.shape(self.scale if scale is None else scale),
prefer_static.shape(self.peak if peak is None else peak)))
def _batch_shape(self):
return functools.reduce(tf.broadcast_static_shape, (
self.loc.shape, self.scale.shape, self.peak.shape))
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
#-
def _sample_n(self, n, seed=None):
shape = tf.concat([[n], self.batch_shape_tensor()], 0)
sampled = tf.random.uniform(
shape=shape, minval=0., maxval=1., dtype=self.loc.dtype, seed=seed)
return self._quantile(sampled)
def _n_log_prob(self, x):
return self._n_log_unnormalized_prob(x) - self._n_log_normalization()
def _log_prob(self, x):
log_smpe = tf.math.log(self.scale - (self.peak * (x - self.loc)))
return self._n_log_prob(self._y(x)) - log_smpe
def _prob(self, x):
prob = tf.exp(self._log_prob(x))
return tf.where(tf.math.is_nan(prob), tf.zeros_like(prob), prob)
def _log_cdf(self, x):
return special_math.log_ndtr(self._y(x))
def _y(self, x):
inv_peak = (-1./self.peak)
inv_offset = 1. - self.peak * (x - self.loc) / self.scale
return inv_peak * tf.math.log(inv_offset)
def _cdf(self, x):
return special_math.ndtr(self._y(x))
def _log_survival_function(self, x):
return special_math.log_ndtr(-self._y(x))
def _survival_function(self, x):
return special_math.ndtr(-self._y(x))
def _n_log_unnormalized_prob(self, x):
return -0.5 * tf.square(x)
#
def _n_log_normalization(self):
return 0.5 * math.log(2. * math.pi) + 1.
def _mean(self):
broadcast_ones = tf.ones_like(self.scale)
esp = (tf.exp(tf.square(self.peak) / 2.) - 1.)
mean = self.loc - (self.scale*esp/self.peak)
return mean * broadcast_ones
def _quantile(self, p):
quantile_z = (1. - tf.exp(-self.peak * tf.math.ndtri(p)))/self.peak
return self._inv_z(quantile_z)
def _stddev(self):
broadcast_ones = tf.ones_like(self.loc)
root_sq_offset = tf.sqrt(tf.exp(tf.square(self.peak)) - 1.)
exp_square_peak = tf.exp(tf.square(self.peak)/2)
scale_q = self.scale/tf.abs(self.peak)
return scale_q * exp_square_peak * root_sq_offset * broadcast_ones
def _variance(self):
return tf.square(self._stddev())
def _mode(self):
broad_ones = tf.ones_like(self.scale)
unit_mode = ((1. - tf.exp(-tf.square(self.peak)))*self.scale)/self.peak
return (unit_mode + self.loc) * broad_ones
def _z(self, x):
"""Standardize input `x` to a unit normal."""
with tf.name_scope("standardize"):
return (x - self.loc) / self.scale
def _inv_z(self, z):
"""Reconstruct input `x` from a its normalized version."""
with tf.name_scope("reconstruct"):
return z * self.scale + self.loc
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
assertions = []
if is_init:
# _batch_shape() will raise error if it can statically prove that `loc`,
# `scale`, and `peak` have incompatible shapes.
# taken from generalized_normal
try:
self._batch_shape()
except ValueError:
raise ValueError(
'Arguments `loc`, `scale` and `peak` must have compatible shapes; '
'loc.shape={}, scale.shape={}, peak.shape={}.'.format(
self.loc.shape, self.scale.shape, self.peak.shape))
if not self.validate_args:
assert not assertions # Should never happen.
return []
if is_init != tensor_util.is_ref(self.scale):
assertions.append(assert_util.assert_positive(
self.scale, message='Argument `scale` must be positive.'))
if is_init != tensor_util.is_ref(self.peak):
assertions.append(assert_util.assert_positive(
          self.peak, message='Argument `peak` must be positive.'))
return assertions
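# A minimal usage sketch (an addition, not part of the original module). It assumes
# a TensorFlow 2.x / tensorflow_probability version contemporary with this code,
# i.e. one that still accepts the deprecated `graph_parents` argument used above.
if __name__ == "__main__":
    dist = SkewGeneralizedNormal(loc=0., scale=1., peak=0.5)
    samples = dist.sample(1000, seed=42)  # drawn via the quantile transform in _sample_n
    print("mean:", dist.mean().numpy(), "stddev:", dist.stddev().numpy())
    print("p(0):", dist.prob(0.).numpy(), "median:", dist.quantile(0.5).numpy())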
|
#!/usr/bin/env python
import numpy as np
import h5py
import matplotlib.pyplot as plt
from matplotlib import gridspec
from os import path
import parameters as param
import argparse
# Configure Command Line interface
controller = dict(tf="target following controller", oa="obstacle avoidance controller")
parser = argparse.ArgumentParser(description='Plot the final weights and show it in a Window')
parser.add_argument('controller', choices=controller, default='oa', help="tf - target following, oa - obstacle avoidance")
parser.add_argument('-n', '--noShow', help='Do not show the resulting Plot in a window', action="store_true")
parser.add_argument('dir', help='Base directory of the experiment eg. ./data/session_xyz', default=param.default_dir)
args = parser.parse_args()
print("Using", controller[args.controller])
is_oa = args.controller == 'oa'
if is_oa:
h5f = h5py.File(path.join(args.dir, param.training_file_oa), 'r')
w_tf = np.array(h5f['w_tf'], dtype=float)
w_l = w_tf[:, 0]
w_r = w_tf[:, 1]
w_i = range(0, w_l.shape[0])
w_p = np.array(h5f['w_oa'], dtype=float)
else:
h5f = h5py.File(path.join(args.dir, param.training_file_tf), 'r')
w_tf = np.array(h5f['w_tf'], dtype=float)
w_l = w_tf[:, 0]
w_r = w_tf[:, 1]
w_i = range(0, w_l.shape[0])
episode_steps = np.array(h5f["episode_steps"], dtype=float)
episode_completed = np.array(h5f['episode_completed'], dtype=bool)
rewards = np.array(h5f['reward'], dtype=float)
angle_to_target = np.array(h5f['angle_to_target'], dtype=float)
xlim = w_r.shape[0]
fig = plt.figure()
gs = gridspec.GridSpec(1, 1)
ax1 = plt.subplot(gs[0])
values_x = np.array(range(episode_steps.size))
success_y = episode_steps[episode_completed]
success_x = values_x[episode_completed]
failures_y = episode_steps[~episode_completed]
failures_x = values_x[~episode_completed]
ax1.scatter(success_x, success_y, marker='^', color='g')
ax1.scatter(failures_x, failures_y, marker='x', color='r')
ax1.set_ylabel("Duration")
ax1.set_xlabel("Episode")
fig.tight_layout()
if is_oa:
plt.savefig(path.join(args.dir, "success_oa.png"))
else:
plt.savefig(path.join(args.dir, "success_tf.png"))
if not args.noShow:
plt.show()
|
# bfs_kbacon.py
"""Volume 2A: Breadth-First Search (Kevin Bacon).
<Name>
<Class>
<Date>
"""
# Problems 1-4: Implement the following class
class Graph(object):
"""A graph object, stored as an adjacency dictionary. Each node in the
graph is a key in the dictionary. The value of each key is a list of the
corresponding node's neighbors.
Attributes:
dictionary: the adjacency list of the graph.
"""
def __init__(self, adjacency):
"""Store the adjacency dictionary as a class attribute."""
self.dictionary = adjacency
# Problem 1
def __str__(self):
"""String representation: a sorted view of the adjacency dictionary.
Example:
>>> test = {'A':['B'], 'B':['A', 'C',], 'C':['B']}
>>> print(Graph(test))
A: B
B: A; C
C: B
"""
raise NotImplementedError("Problem 1 Incomplete")
# Problem 2
def traverse(self, start):
"""Begin at 'start' and perform a breadth-first search until all
nodes in the graph have been visited. Return a list of values,
in the order that they were visited.
Inputs:
start: the node to start the search at.
Returns:
the list of visited nodes (in order of visitation).
Raises:
ValueError: if 'start' is not in the adjacency dictionary.
Example:
>>> test = {'A':['B'], 'B':['A', 'C',], 'C':['B']}
>>> Graph(test).traverse('B')
['B', 'A', 'C']
"""
raise NotImplementedError("Problem 2 Incomplete")
# Problem 3 (Optional)
def DFS(self, start):
"""Begin at 'start' and perform a depth-first search until all
nodes in the graph have been visited. Return a list of values,
in the order that they were visited. If 'start' is not in the
adjacency dictionary, raise a ValueError.
Inputs:
start: the node to start the search at.
Returns:
the list of visited nodes (in order of visitation)
"""
raise NotImplementedError("Problem 3 Incomplete")
# Problem 4
def shortest_path(self, start, target):
"""Begin at the node containing 'start' and perform a breadth-first
search until the node containing 'target' is found. Return a list
        containing the shortest path from 'start' to 'target'. If either of
the inputs are not in the adjacency graph, raise a ValueError.
Inputs:
start: the node to start the search at.
target: the node to search for.
Returns:
A list of nodes along the shortest path from start to target,
including the endpoints.
Example:
>>> test = {'A':['B', 'F'], 'B':['A', 'C'], 'C':['B', 'D'],
... 'D':['C', 'E'], 'E':['D', 'F'], 'F':['A', 'E', 'G'],
... 'G':['A', 'F']}
>>> Graph(test).shortest_path('A', 'G')
['A', 'F', 'G']
"""
raise NotImplementedError("Problem 4 Incomplete")
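# A reference sketch (not a required part of the assignment): one way the
# breadth-first traversal described in Graph.traverse's docstring can be written
# with collections.deque. The helper name below is illustrative only.
def _bfs_sketch(adjacency, start):
    """Return the nodes of `adjacency` in BFS order starting from `start`."""
    from collections import deque
    if start not in adjacency:
        raise ValueError("{} is not in the adjacency dictionary".format(start))
    visited, marked, queue = [], {start}, deque([start])
    while queue:
        node = queue.popleft()
        visited.append(node)
        for neighbor in adjacency[node]:
            if neighbor not in marked:
                marked.add(neighbor)
                queue.append(neighbor)
    return visited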
# Problem 5: Write the following function
def convert_to_networkx(dictionary):
"""Convert 'dictionary' to a networkX object and return it."""
raise NotImplementedError("Problem 5 Incomplete")
# Helper function for problem 6
def parse(filename="movieData.txt"):
"""Generate an adjacency dictionary where each key is
a movie and each value is a list of actors in the movie.
"""
# open the file, read it in, and split the text by '\n'
with open(filename, 'r') as movieFile:
moviesList = movieFile.read().split('\n')
graph = dict()
# for each movie in the file,
for movie in moviesList:
# get movie name and list of actors
names = movie.split('/')
title = names[0]
graph[title] = []
# add the actors to the dictionary
for actor in names[1:]:
graph[title].append(actor)
return graph
# Problems 6-8: Implement the following class
class BaconSolver(object):
"""Class for solving the Kevin Bacon problem."""
# Problem 6
def __init__(self, filename="movieData.txt"):
"""Initialize the networkX graph and with data from the specified
file. Store the graph as a class attribute. Also store the collection
of actors in the file as an attribute.
"""
raise NotImplementedError("Problem 6 Incomplete")
# Problem 6
def path_to_bacon(self, start, target="Bacon, Kevin"):
"""Find the shortest path from 'start' to 'target'."""
raise NotImplementedError("Problem 6 Incomplete")
# Problem 7
def bacon_number(self, start, target="Bacon, Kevin"):
"""Return the Bacon number of 'start'."""
raise NotImplementedError("Problem 7 Incomplete")
# Problem 7
def average_bacon(self, target="Bacon, Kevin"):
"""Calculate the average Bacon number in the data set.
Note that actors are not guaranteed to be connected to the target.
Inputs:
target (str): the node to search the graph for
"""
raise NotImplementedError("Problem 7 Incomplete")
# =========================== END OF FILE =============================== #
|
# Function bribe:
# takes a queue and a ID k
# and moves k to the front of the queue if found
# else, do nothing
def bribe(queue, k):
for entry in queue:
if k == entry[1]:
queue.remove(entry)
queue.insert(0,entry)
return None
N = int(input("Enter N: "))
tuplist = []
print("Enter ID: ")
for i in range(1,N+1):
inp = input()
entry = (i,inp)
tuplist.append(entry)
for x in tuplist:
print(x)
k = input("Enter k: ")
bribe(tuplist, k)
for x in tuplist:
print(x)
|
from __future__ import print_function, absolute_import
from pygments.lexer import RegexLexer
from pygments.lexer import words
from pygments.token import Literal, Text
from .completer import KubectlCompleter
class KubectlLexer(RegexLexer):
"""Provides highlighting for commands, subcommands, arguments, and options.
"""
completer = KubectlCompleter()
tokens = {
'root': [
(words(
tuple(['kubectl', 'clear', 'exit']),
prefix=r'\b',
suffix=r'\b'),
Literal.String),
# (words(
# tuple(completer.all_commands),
# prefix=r'\b',
# suffix=r'\b'),
# Name.Class),
# (words(
# tuple(completer.all_args),
# prefix=r'\b',
# suffix=r'\b'),
# Name.Class),
# (words(
# tuple(completer.all_opts),
# prefix=r'',
# suffix=r'\b'),
# Keyword),
# (words(
# tuple(completer.global_opts),
# prefix=r'',
# suffix=r'\b'),
# Keyword),
# Everything else
(r'.*\n', Text),
]
}
|
# Generated by Django 3.2.11 on 2022-01-09 02:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='class',
name='cCode',
),
migrations.AlterField(
model_name='class',
name='id',
field=models.CharField(max_length=4, primary_key=True, serialize=False, verbose_name='課程代碼'),
),
]
|
from flask import Flask, jsonify, render_template
import test, stats, os
app = Flask(__name__)
cache = {}
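# Simple in-process cache: subreddit name -> parsed seasons/episodes, filled
# lazily by _getEpisodes() and kept for the lifetime of the worker process.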
@app.route('/')
def hello_world():
return 'Hello World!'
@app.route('/r/<string:subreddit>')
def episodes(subreddit):
seasonsAndEpisodes = _getEpisodes(subreddit)
return render_template('index.html', result=seasonsAndEpisodes, subreddit=subreddit)
@app.route('/api/r/<string:subreddit>', methods=['GET'])
def get_episodes(subreddit):
seasonsAndEpisodes = _getEpisodes(subreddit)
seasons = [season.serialize() for season in seasonsAndEpisodes]
result = {"seasons": seasons, "subreddit": subreddit}
return jsonify(result)
def _getEpisodes(subreddit):
if subreddit in cache:
return cache[subreddit]
episodes = test.getValidData(subreddit)
seasonsAndEpisodes = stats.extractSeasonsAndEpisodes(episodes)
cache[subreddit] = seasonsAndEpisodes
return seasonsAndEpisodes
if __name__ == '__main__':
port = int(os.environ.get('PORT', 33507))
app.run(debug=True, host='0.0.0.0', port=port)
|
import pathlib
from setuptools import setup, find_packages
import crypto_factory as cf
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README.md file
README = (HERE / "README.md").read_text()
# Requirements list
REQUIRED_PKGS = (HERE / "requirements.txt").read_text().split()
# This call to setup() does all the work
setup(
name=cf.__title__,
version=cf.__version__,
author=cf.__author__,
author_email=cf.__email__,
description=cf.__summary__,
long_description=README,
long_description_content_type="text/markdown",
# long_description_content_type="text/x-rst",
url=cf.__uri__,
license=cf.__license__,
packages=find_packages(exclude=("tests", "private", )),
include_package_data=True,
platforms='any',
# zip_safe=False,
python_requires='>=3.4',
install_requires=REQUIRED_PKGS,
test_suite="tests",
keywords='cryptography factory-design',
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Security :: Cryptography",
"Topic :: Software Development :: Libraries :: Python Modules",
],
project_urls={
'Documentation': 'https://' + cf.__title__.lower() + '.readthedocs.io/en/latest/',
'Releases': 'https://pypi.org/project/' + cf.__title__,
'Source': cf.__uri__,
'Tracker': cf.__uri__ + '/issues',
},
)
|
def prvo_tisoc_mestno_fibonacijevo():
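    # Returns the index (1-based, with F(1) = F(2) = 1) of the first Fibonacci
    # number that has at least 1000 decimal digits.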
a = 1
b = 1
index = 2
while len(str(b)) < 1000:
c = a
a = b
b = b + c
index += 1
return index
print(prvo_tisoc_mestno_fibonacijevo())
# expected result: 4782
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangobb_forum', '0010_remove_topic_views'),
]
operations = [
migrations.AlterModelTable(
name='attachment',
table='slimbb_attachment',
),
migrations.AlterModelTable(
name='ban',
table='slimbb_ban',
),
migrations.AlterModelTable(
name='category',
table='slimbb_category',
),
migrations.AlterModelTable(
name='forum',
table='slimbb_forum',
),
migrations.AlterModelTable(
name='post',
table='slimbb_post',
),
migrations.AlterModelTable(
name='posttracking',
table='slimbb_posttracking',
),
migrations.AlterModelTable(
name='profile',
table='slimbb_profile',
),
migrations.AlterModelTable(
name='report',
table='slimbb_report',
),
migrations.AlterModelTable(
name='topic',
table='slimbb_topic',
),
]
|
# -*- coding: utf-8 -*-
import argparse
import common
import logging
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser(add_help=True)
subparsers = parser.add_subparsers(dest="func")
grs_subparser = subparsers.add_parser("generate_report_from_shelve")
grs_subparser.add_argument('--shelve', action='store', dest="shelve")
imp_subparser = subparsers.add_parser("import_hierarchy_into_magna")
imp_subparser.add_argument('--host', action='store', dest="server")
imp_subparser.add_argument('--port', action='store', type=int, dest="port", default=8080)
imp_subparser.add_argument('--module', action='store', dest="module")
imp_subparser.add_argument('--project_id', action='store', type=int, dest="project_id")
args = parser.parse_args()
if args.func == "generate_report_from_shelve":
from ngta.util import generate_report_from_shelve
generate_report_from_shelve(args.shelve)
elif args.func == "import_hierarchy_into_magna":
from magna.client import RestClient
client = RestClient("http://%s:%s/magna/api/rest/" % (args.server, args.port))
client.export_hierarchy_to_magna(args.module, args.project_id)
if __name__ == "__main__":
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)-15s [%(levelname)-8s] - %(message)s'
)
main()
|
import os
import tempfile
import pytest
from docker import errors
from docker.context import ContextAPI
from docker.tls import TLSConfig
from .base import BaseAPIIntegrationTest
class ContextLifecycleTest(BaseAPIIntegrationTest):
def test_lifecycle(self):
assert ContextAPI.get_context().Name == "default"
assert not ContextAPI.get_context("test")
assert ContextAPI.get_current_context().Name == "default"
dirpath = tempfile.mkdtemp()
ca = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "ca.pem"), mode="r")
cert = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "cert.pem"), mode="r")
key = tempfile.NamedTemporaryFile(
prefix=os.path.join(dirpath, "key.pem"), mode="r")
        # create context 'test'
docker_tls = TLSConfig(
client_cert=(cert.name, key.name),
ca_cert=ca.name)
ContextAPI.create_context(
"test", tls_cfg=docker_tls)
# check for a context 'test' in the context store
assert any([ctx.Name == "test" for ctx in ContextAPI.contexts()])
# retrieve a context object for 'test'
assert ContextAPI.get_context("test")
# remove context
ContextAPI.remove_context("test")
with pytest.raises(errors.ContextNotFound):
ContextAPI.inspect_context("test")
# check there is no 'test' context in store
assert not ContextAPI.get_context("test")
ca.close()
key.close()
cert.close()
def test_context_remove(self):
ContextAPI.create_context("test")
assert ContextAPI.inspect_context("test")["Name"] == "test"
ContextAPI.remove_context("test")
with pytest.raises(errors.ContextNotFound):
ContextAPI.inspect_context("test")
|
# Generated by Django 3.1.6 on 2021-04-08 16:15
import bookwyrm.models.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("bookwyrm", "0067_denullify_list_item_order"),
]
operations = [
migrations.AlterField(
model_name="listitem",
name="order",
field=bookwyrm.models.fields.IntegerField(),
),
migrations.AlterUniqueTogether(
name="listitem",
unique_together={("order", "book_list"), ("book", "book_list")},
),
]
|
# -*- coding: utf-8 -*-
"""
xueqiu.sheet
~~~~~~~~~~~~
This module contains stock sheet languages.
:copyright: (c) 2019 by 1dot75cm.
:license: MIT, see LICENSE for more details.
"""
income_lang_us = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
report_annual='报告年度',
report_type_code='报告代码',
revenue='营业收入',
othr_revenues='其他收入',
total_revenue='营业总收入',
sales_cost='营业成本',
gross_profit='营业毛利润',
marketing_selling_etc='市场销售管理费用',
rad_expenses='研发费用',
net_interest_expense='净利息费用',
interest_income='利息收入',
interest_expense='利息支出',
total_operate_expenses_si='营业支出特殊科目总计',
total_operate_expenses='营业支出总计',
operating_income='营业利润',
share_of_earnings_of_affiliate='子公司股权权益收入',
income_from_co_before_tax_si='税前营业收入特殊科目',
income_from_co_before_it='税前利润',
income_tax='所得税',
income_from_co='税后利润',
net_income='净利润',
preferred_dividend='优先股股息',
net_income_atms_interest='归属于少数股东的净利润',
net_income_atcss='归属于母公司股东的净利润',
total_compre_income='综合收益总额',
total_compre_income_atms='归属于少数股东的综合收益',
total_compre_income_atcss='归属于母公司股东的综合收益',
total_basic_earning_common_ps='基本每股收益',
total_dlt_earnings_common_ps='稀释每股收益')
income_lang_hk = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
month_num='月数',
tto='总营业额',
slgcost='销售成本',
gp='毛利',
otiog='其他收入及收益',
sr_ta='营业收入合计',
slgdstexp='销售及分销开支',
admexp='行政开支',
otopeexp='其他经营开支',
topeexp='经营开支总额',
rshdevexp='研发开支费用',
tipmcgpvs='减值拨备',
depaz='折旧摊销',
opeploinclfincost='扣除融资成本前的经营利润',
fcgcost='融资成本',
opeplo='经营利润',
nosplitems='非经营特殊项目',
jtctletiascom='联营合营公司',
plobtx='税前利润',
tx='所得税',
txexcliotx='所得税外税金',
npdsubu='非持续经营业务利润',
plocyr='税后利润',
amteqyhdcom='公司股东权益',
amtmiint='少数股东权益',
ploashh='股东应得权益',
otcphio='其他全面收入',
tcphio='全面收入总额',
cmnshdiv='普通股股息',
beps_aju='基本每股收益',
deps_aju='摊薄每股收益',
divdbups_ajupd='每股股息')
income_lang_cn = dict(
report_date='报告日期',
report_name='报告名称',
total_revenue='营业总收入',
revenue='营业收入',
income_from_chg_in_fv='公允价值变动收益',
invest_income='投资收益',
invest_incomes_from_rr='联营合营企业',
exchg_gain='汇兑收益',
operating_costs='营业总成本',
operating_cost='营业成本',
operating_taxes_and_surcharge='营业税附加税',
sales_fee='销售费用',
manage_fee='管理费用',
financing_expenses='财务费用',
asset_impairment_loss='资产减值损失',
op='营业利润',
non_operating_income='营业外收入',
non_operating_payout='营业外支出',
profit_total_amt='利润总额',
income_tax_expenses='所得税费',
net_profit='净利润',
net_profit_atsopc='归属母公司所有者的净利润',
minority_gal='少数股东损益',
net_profit_after_nrgal_atsolc='扣非净利润',
basic_eps='基本每股收益',
dlt_earnings_per_share='稀释每股收益',
othr_compre_income='其他综合收益',
othr_compre_income_atms='少数股东的其他综合收益',
othr_compre_income_atoopc='归属于母公司所有者的其他综合收益',
total_compre_income='综合收益总额',
total_compre_income_atsopc='归属于母公司股东的综合收益总额',
total_compre_income_atms='少数股东的综合收益总额')
balance_lang_us = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
report_annual='报告年度',
report_type_code='报告代码',
cce='现金和现金等价物',
st_invest='短期投资',
total_cash='总现金',
net_receivables='应收账款',
inventory='库存',
dt_assets_current_assets='流动资产递延所得税',
prepaid_expense='预付款',
current_assets_special_subject='流动资产特殊项目',
total_current_assets='流动资产合计',
gross_property_plant_and_equip='固定资产总额',
accum_depreciation='固定资产折旧',
net_property_plant_and_equip='固定资产净额',
equity_and_othr_invest='股权投资和长期投资',
goodwill='商誉',
net_intangible_assets='无形资产净额',
accum_amortization='累计摊销',
dt_assets_noncurrent_assets='非流动资产递延所得税',
nca_si='非流动资产特殊项目',
total_noncurrent_assets='非流动资产合计',
total_assets_special_subject='资产特殊科目',
total_assets='总资产',
st_debt='短期借款',
accounts_payable='应付账款',
income_tax_payable='应缴所得税',
accrued_liab='应计负债',
deferred_revenue_current_liab='流动负债递延收入',
current_liab_si='流动负债特殊科目',
total_current_liab='流动负债合计',
lt_debt='长期借款',
deferred_tax_liab='递延所得税负债',
dr_noncurrent_liab='非流动负债递延收入',
noncurrent_liab_si='非流动负债特殊科目',
total_noncurrent_liab='非流动负债合计',
total_liab_si='负债合计特殊科目',
total_liab='负债合计',
asset_liab_ratio='资产负债率',
preferred_stock='优先股',
common_stock='普通股',
add_paid_in_capital='额外实收资本',
retained_earning='未分配利润',
treasury_stock='库存股',
accum_othr_compre_income='累计损益',
total_holders_equity_si='归属于母公司股东权益特殊项目',
total_holders_equity='归属于母公司股东权益合计',
minority_interest='少数股东权益',
total_equity_special_subject='权益特殊项目',
total_equity='股东权益合计')
balance_lang_hk = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
month_num='月数',
cceq='现金和现金等价物',
fina='金融资产',
trrb='应收账款',
iv='存货',
otca='其他流动资产',
ca='流动资产合计',
fxda='固定资产',
inv='投资',
iga='无形资产',
otnca='其他非流动资产',
otnc='',
tnca='非流动资产合计',
ta='总资产',
trpy='应付账款',
stdt='短期借款',
otstdt='其他短期负债',
clia='流动负债合计',
ncalia='净流动负债',
diftatclia='总资产减流动负债',
ltdt='长期借款',
otltlia='其他长期负债',
tnclia='非流动负债合计',
tlia='总负债',
nalia='净资产',
ta_tlia='负债率',
numtsh='总股本',
shpm='股份溢价',
rpaculo='保留利润',
caprx='资本储备',
otrx='其他储备',
trx='总储备',
shhfd='股东权益',
miint='少数股东权益',
teqy='总权益')
balance_lang_cn = dict(
report_date='报告日期',
report_name='报告名称',
currency_funds='货币资金',
ar_and_br='应收票据及应收账款',
tradable_fnncl_assets='交易性金融资产',
bills_receivable='应收票据',
account_receivable='应收账款',
pre_payment='预付款项',
contractual_assets='合同资产',
interest_receivable='应收利息',
dividend_receivable='应收股利',
othr_receivables='其他应收款',
inventory='存货',
nca_due_within_one_year='1年内到期的非流动资产',
current_assets_si='流动资产特殊项目',
othr_current_assets='其他流动资产',
total_current_assets='流动资产合计',
salable_financial_assets='可供出售金融资产',
saleable_finacial_assets='saleable_finacial_assets',
held_to_maturity_invest='持有到期投资',
lt_receivable='长期应收款',
lt_equity_invest='长期股权投资',
invest_property='投资性房地产',
fixed_asset='固定资产',
construction_in_process='在建工程',
intangible_assets='无形资产',
dev_expenditure='开发支出',
goodwill='商誉',
lt_deferred_expense='长期待摊费用',
dt_assets='递延所得税资产',
noncurrent_assets_si='非流动资产特殊项目',
othr_noncurrent_assets='其他非流动资产',
total_noncurrent_assets='非流动资产合计',
total_assets='资产合计',
st_loan='短期借款',
tradable_fnncl_liab='交易性金融负债',
bp_and_ap='应付票据及应付账款',
accounts_payable='应付账款',
bill_payable='应付票据',
pre_receivable='预收款项',
contract_liabilities='合同负债',
payroll_payable='应付职工薪酬',
tax_payable='应缴税费',
interest_payable='应付利息',
dividend_payable='应付股利',
othr_payables='其他应付款',
noncurrent_liab_due_in1y='1年内到期的非流动负债',
current_liab_si='流动负债特殊项目',
othr_current_liab='其他流动负债',
total_current_liab='流动负债合计',
lt_loan='长期借款',
bond_payable='应付债券',
lt_payable='长期应付款',
special_payable='专项应付款',
estimated_liab='预计负债',
dt_liab='递延所得税负债',
noncurrent_liab_si='非流动负债特殊项目',
othr_non_current_liab='其他非流动负债',
total_noncurrent_liab='非流动负债合计',
total_liab='负债合计',
asset_liab_ratio='负债率',
shares='股本',
capital_reserve='资本公积',
treasury_stock='库存股',
earned_surplus='盈余公积',
undstrbtd_profit='未分配利润',
total_quity_atsopc='归属于母公司股东权益合计',
minority_equity='少数股东权益',
total_holders_equity='股东权益合计',
total_liab_and_holders_equity='负债和股东权益总计')
cash_flow_lang_us = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
report_annual='报告年度',
report_type_code='报告代码',
net_cash_provided_by_oa='经营活动产生的现金流量净额',
net_cash_used_in_ia='投资活动产生的现金流量净额',
payment_for_property_and_equip='物业设备资金',
net_cash_used_in_fa='筹资活动产生的现金流量净额',
effect_of_exchange_chg_on_cce='汇率对现金的影响',
cce_at_boy='期初现金余额',
cce_at_eoy='期末现金余额',
increase_in_cce='现金净增加额',
depreciation_and_amortization='折旧与摊销',
operating_asset_and_liab_chg='营运资金变动',
purs_of_invest='投资购买',
common_stock_issue='股票发行',
repur_of_common_stock='股份回购',
dividend_paid='支付股息')
cash_flow_lang_hk = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
month_num='月数',
nocf='经营活动产生的现金流量净额',
depaz='折旧摊销',
intrc='已收利息',
intp='已付利息',
divrc='已收股息',
divp='已派股息',
txprf='退回或已缴税项',
adtfxda='增添固定资产',
icinv='投资增加',
dsfxda='出售固定资产',
dcinv='投资减少',
ncfrldpty_invact='与关联人的现金流量_投资活动',
ninvcf='投资活动产生的现金流量净额',
nicln='新增贷款',
lnrpa='偿还贷款',
fxdiodtinstr='定息或债券融资',
eqyfin='股本融资',
ncfrldpty_finact='与关联人的现金流量_融资活动',
rpafxdiodtinstr='偿还定息或债券融资',
nfcgcf='融资活动产生的现金流量净额',
ncfdchexrateot='汇率影响',
icdccceq='现金净增加额',
cceqbegyr='期初现金余额',
cceqeyr='期末现金余额')
cash_flow_lang_cn = dict(
report_date='报告日期',
report_name='报告名称',
#经营活动产生的现金流量
cash_received_of_sales_service='销售商品提供劳务收到的现金',
refund_of_tax_and_levies='返还税费',
cash_received_of_othr_oa='收到其他与经营活动有关的现金',
sub_total_of_ci_from_oa='经营活动现金流入小计',
goods_buy_and_service_cash_pay='购买商品接受劳务支付的现金',
cash_paid_to_employee_etc='支付职工薪酬',
payments_of_all_taxes='支付各项税费',
othrcash_paid_relating_to_oa='支付其他与经营活动有关的现金',
sub_total_of_cos_from_oa='经营活动现金流出小计',
ncf_from_oa='经营活动现金流量净额',
#投资活动产生的现金流量
cash_received_of_dspsl_invest='收回投资获得的现金',
invest_income_cash_received='投资收益获得的现金',
net_cash_of_disposal_assets='处置固定资产无形资产和其他长期资产收回的现金净额',
net_cash_of_disposal_branch='处置子公司收到的现金净额',
cash_received_of_othr_ia='收到其他与投资活动有关的现金',
sub_total_of_ci_from_ia='投资活动现金流入小计',
cash_paid_for_assets='购买固定资产无形资产和其他长期资产支付的现金',
invest_paid_cash='投资支付的现金',
net_cash_amt_from_branch='取得子公司支付的现金净额',
othrcash_paid_relating_to_ia='支付其他与投资活动有关的现金',
sub_total_of_cos_from_ia='投资活动现金流出小计',
ncf_from_ia='投资活动产生的现金流量净额',
#筹资活动产生的现金流量
cash_received_of_absorb_invest='吸收投资收到的现金',
cash_received_from_investor='子公司吸收少数股东投资收到的现金',
cash_received_of_borrowing='取得借款收到的现金',
cash_received_from_bond_issue='发行债券收到的现金',
cash_received_of_othr_fa='收到其他与筹资活动有关的现金',
sub_total_of_ci_from_fa='筹资活动现金流入小计',
cash_pay_for_debt='偿还债务支付的现金',
cash_paid_of_distribution='分配股利利润利息支付的现金',
branch_paid_to_minority_holder='子公司支付给少数股东的股利',
othrcash_paid_relating_to_fa='支付其他与筹资活动有关的现金',
sub_total_of_cos_from_fa='筹资活动现金流出小计',
ncf_from_fa='筹资活动产生的现金流量净额',
effect_of_exchange_chg_on_cce='汇率变动对现金的影响',
net_increase_in_cce='现金及现金等价物净增加额',
initial_balance_of_cce='期初现金余额',
final_balance_of_cce='期末现金余额')
indicator_lang_us = dict(
sd='开始日期',
ed='结束日期',
report_date='报告日期',
report_name='报告名称',
report_annual='报告年度',
report_type_code='报告代码',
#每股指标
basic_eps='基本每股收益',
eps_dlt='稀释每股收益',
nav_ps='每股净资产',
ncf_from_oa_ps='每股现金流',
capital_reserve='每股公积金',
oips='每股总营收',
revenue_ps='每股营收',
#营运能力
operating_cycle='营业周期',
inventory_turnover_days='存货周转天数',
inventory_turnover='存货周转率',
receivable_turnover_days='应收账款周转天数',
account_receivable_turnover='应收账款周转率',
current_asset_turnover='流动资产周转率',
fixed_asset_turnover_ratio='固定资产周转率',
total_capital_turnover='总资产周转率',
#盈利能力
roe_avg='净资产收益率',
net_interest_of_ta='总资产收益率',
net_sales_rate='销售净利率',
gross_selling_rate='销售毛利率',
sales_cost_rate='销售成本率',
np_to_revenue='净利润/总营收',
income_tax_to_total_profit='所得税/利润总额',
ncf_from_oa_to_revenue='经营活动现金流量净额/营业收入',
#成长能力(相对年初)
nag_respond_boy='每股净资产%',
assets_relative_boy_growth='总资产%',
equity_atsopc_growth_boy='归母股东权益%',
#财务风险
asset_liab_ratio='资产负债率',
current_ratio='流动比率',
quick_ratio='速动比率',
equity_ratio='产权比率',
equity_multiplier='权益乘数',
flow_assets_to_total_assets='流动资产/总资产',
noncurrent_assets_to_ta='非流动资产/总资产',
flow_debt_to_total_debt='流动负债/总负债',
noncurrent_liab_to_total_liab='非流动负债/总负债',
equity_atsopc_to_total_liab='归母股东权益/总负债')
indicator_lang_cn = dict(
report_date='报告日期',
report_name='报告名称',
#关键指标
total_revenue='营业收入',
operating_income_yoy='营收同比增长',
net_profit_atsopc='净利润',
net_profit_atsopc_yoy='净利润同比增长',
net_profit_after_nrgal_atsolc='扣非净利润',
np_atsopc_nrgal_yoy='扣非净利润同比增长',
#每股指标
basic_eps='每股收益',
np_per_share='每股净资产',
capital_reserve='每股资本公积',
undistri_profit_ps='每股未分配利润',
operate_cash_flow_ps='每股经营现金流',
#盈利能力
avg_roe='净资产收益率',
ore_dlt='净资产收益率摊薄',
net_interest_of_total_assets='总资产报酬率',
rop='人力投入回报率',
gross_selling_rate='销售毛利率',
net_selling_rate='销售净利率',
#财务风险
asset_liab_ratio='资产负债率',
current_ratio='流动比率',
quick_ratio='速动比率',
equity_multiplier='权益乘数',
equity_ratio='产权比率',
holder_equity='股东权益比率',
ncf_from_oa_to_total_liab='现金流量比率',
#运营能力
inventory_turnover_days='存货周转天数',
receivable_turnover_days='应收账款周转天数',
accounts_payable_turnover_days='应付账款周转天数',
cash_cycle='现金循环周期',
operating_cycle='营业周期',
total_capital_turnover='总资产周转率',
inventory_turnover='存货周转率',
account_receivable_turnover='应收账款周转率',
accounts_payable_turnover='应付账款周转率',
current_asset_turnover_rate='流动资产周转率',
fixed_asset_turnover_ratio='固定资产周转率')
f10_indicator_bs = dict(
currency='currency',
report_date='report_date',
pb='pb',
operating_income_yoy='revenue_yoy',
net_profit_atsopc_yoy='net_profit_yoy',
total_shares='total_shares',
market_capital='market_capital')
f10_indicator_us = dict(
pe_ttm='pe_ttm',
eps_dlt='eps',
nav_ps='navps',
total_revenue='total_revenue',
net_profit_atsopc='net_profit',
gross_selling_rate='gross_selling_rate',
net_sales_rate='net_sales_rate',
roe_avg='roe_avg',
asset_liab_ratio='asset_liab_ratio')
f10_indicator_hk = dict(
refccomty='',
pe_lyr='pe_lyr',
beps_anrpt='eps',
navps='navps',
tto='total_revenue',
plocyr='net_profit',
float_shares='float_shares',
float_market_capital='float_market_capital')
f10_indicator_cn = dict(
pe_ttm='pe_ttm',
basic_eps='eps',
np_per_share='navps',
total_revenue='total_revenue',
net_profit_atsopc='net_profit',
gross_selling_rate='gross_selling_rate',
net_selling_rate='net_selling_rate',
avg_roe='roe_avg',
asset_liab_ratio='asset_liab_ratio',
dividend='dividend',
dividend_yield='dividend_yield',
float_shares='float_shares',
float_market_capital='float_market_capital')
margin = dict(
tdate='日期',
close='收盘-沪深300',
zdf='涨跌幅',
zdf3='涨跌幅3日',
zdf5='涨跌幅5日',
zdf10='涨跌幅10日',
rzye='融资余额',
rzyezb='融资余额占比',
rzmre='融资买入额',
rzmre3='融资买入额3日',
rzmre5='融资买入额5日',
rzmre10='融资买入额10日',
rzche='融资偿还额',
rzche3='融资偿还额3日',
rzche5='融资偿还额5日',
rzche10='融资偿还额10日',
rzjmre='融资净买入额',
rzjmre3='融资净买入额3日',
rzjmre5='融资净买入额5日',
rzjmre10='融资净买入额10日',
rqye='融券余额',
rqyl='融券余量',
rqmcl='融券卖出量',
rqmcl3='融券卖出量3日',
rqmcl5='融券卖出量5日',
rqmcl10='融券卖出量10日',
rqchl='融券偿还量',
rqchl3='融券偿还量3日',
rqchl5='融券偿还量5日',
rqchl10='融券偿还量10日',
rqjmcl='融券净卖出量',
rqjmcl3='融券净卖出量3日',
rqjmcl5='融券净卖出量5日',
rqjmcl10='融券净卖出量10日',
rzrqye='融资融券余额',
rzrqyecz='融资融券余额差值')
hsgt = dict(
DetailDate='日期',
DRZJLR='当日资金流入',
DRYE='当日余额',
LSZJLR='历史资金累计流入',
DRCJJME='当日成交净买额',
MRCJE='买入成交额',
MCCJE='卖出成交额',
LCG='领涨股',
LCGCode='代码',
LCGZDF='领涨股涨跌幅',
SSEChange='指数',
SSEChangePrecent='涨跌幅')
hsgt_hold = dict(
HDDATE='日期',
SCODE='代码',
SNAME='名称',
CLOSEPRICE='收盘价',
ZDF='涨跌幅',
SHAREHOLDSUM='持股数量',
ShareHoldSumChg='持股数量变化',
SHAREHOLDPRICE='持股市值',
SHARESRATE='持股占A股比例',
SHAREHOLDPRICEONE='持股市值变化1日',
SHAREHOLDPRICEFIVE='持股市值变化5日',
SHAREHOLDPRICETEN='持股市值变化10日')
income_lang = dict(
cn=income_lang_cn,
hk=income_lang_hk,
us=income_lang_us)
balance_lang = dict(
cn=balance_lang_cn,
hk=balance_lang_hk,
us=balance_lang_us)
cash_flow_lang = dict(
cn=cash_flow_lang_cn,
hk=cash_flow_lang_hk,
us=cash_flow_lang_us)
indicator_lang = dict(
cn=indicator_lang_cn,
hk=None,
us=indicator_lang_us)
f10_indicator_ks = dict(
base=f10_indicator_bs,
cn=f10_indicator_cn,
hk=f10_indicator_hk,
us=f10_indicator_us)
spindices = dict(
#规模
cssp50='5457821', #标普中国A50指数RMB
csp100='5457815', #标普中国A100指数RMB
csp200='5457817', #标普中国A200指数RMB
csp300='5457819', #标普中国A300指数RMB
csp500eur='92030324', #标普中国500指数EUR
csp500usd='92030325', #标普中国500指数USD
spncschp='92366266', #标普新中国行业指数(A股上限15%)HKD
spncscup='92366265', #标普新中国行业指数(A股上限15%)USD
spcsxahp='92352459', #标普新中国行业指数(除A股)HKD
spcsxaup='92351698', #标普新中国行业指数(除A股)USD
#跨境
spcqxtd='5475402', #标普大中华bmi指数(除台湾)usd
spcqhktd='2322750', #标普全大中华国内bmi指数usd*
spcqhkd='5475375', #标普中国和香港bmi指数usd
sptcqhkd='2311038', #标普全中国+香港bmi指数usd*
djchos50='1798569', #道琼斯中国离岸50指数
sphcmshp='92030268', #标普香港中国中小盘指数
spcqlhd='5475384', #标普香港上市中概股bmi指数usd
spacncp='92026363', #标普沪港通北向指数*
spcrd30='5458359', #标普中国港股30指数rmb
splc50up='92029778', #标普美国中概股50指数usd
splc50cp='92029775', #标普美国中概股50指数rmb
#沪深港通
spac50cp='92029744', #标普直通中国a50指数
spahkup='92319823', #标普直通香港指数usd*
spahkhp='92319825', #标普直通香港指数hkd*
spahkcp='92319821', #标普直通香港指数rmb*
spacaup='92319465', #标普直通中国a股指数usd*
spacahp='92319479', #标普直通中国a股指数hkd*
spacacp='92319464', #标普直通中国a股指数rmb*
#大盘股
scrtcn='5520649', #标普中国bmi指数usd
spcqxsd='5475393', #标普中国(除ab股)bmi指数usd
spcqbmi='2286509', #标普中国a股bmi指数rmb*
spcqabbmi='2300040', #标普中国a股+b股bmi指数*
sptcqbmi='2288328', #标普全中国国内bmi指数usd*
spccashr='785146', #标普中国a股综合指数
spcaxfup='92353802', #标普中国a股大中盘指数usd
spncshp='92315890', #标普新中国行业指数hkd
spncsup='92279260', #标普新中国行业指数
djchn88='100587981', #道琼斯中国88指数
hksplc25='5475858', #s&p/hkex大盘股指数hkd
#小盘股
hkspgem='5475857', #s&p/hkex创业板指数hkd
spcveup='92332999', #标普中国a股创业指数
cspsc='5458461', #标普中国a股小盘指数
#风格
csp100v='5474417', #标普中国a100价值
csp100g='5474411', #标普中国a100成长
csp100pv='5474415', #标普中国a100纯价值
csp100pg='5474413', #标普中国a100纯成长
csp200v='5474425', #标普中国a200价值
csp200g='5474419', #标普中国a200成长
csp200pv='5474423', #标普中国a200纯价值
csp200pg='5474421', #标普中国a200纯成长
csp300v='5474433', #标普中国a300价值
csp300g='5474427', #标普中国a300成长
csp300pv='5474431', #标普中国a300纯价值
csp300pg='5474429', #标普中国a300纯成长
cspscv='5474441', #标普中国a股小盘价值
cspscg='5474435', #标普中国a股小盘成长
cspscpv='5474439', #标普中国a股小盘纯价值
cspscpg='5474437', #标普中国a股小盘纯成长
#策略
sp5mv='1405830', #标普500最小波动
sp5ceup='92364883', #标普500碳效率
spgrcuu='5475709', #标普500碳效率精选
sp5lvi='5475134', #标普500低波 sp5lveup低波增强
sp5lvhd='1159383', #标普500低波红利
spxcldep='92279581', #标普500低碳低波红利
spxltbup='92033433', #标普500低波目标贝塔Low Vol Target Beta
sp5hbi='5475130', #标普500高贝
spxhbdup='92320842', #标普500高贝高股息
sp500mup='92024474', #标普500动量
spxvmoup='92330652', #标普500动量价值
spxhmvsr='92364852', #标普500动量价值行业轮动
spxevup='92028698', #标普500增强价值
spxqup='91920515', #标普500质量
spxqhdup='92351466', #标普500质量高股息
spxqvmup='92321176', #标普500质量价值动量多因子
spxhdup='92031893', #标普500红利
spdaudp='5458465', #标普500红利贵族
sp4lvi='1159360', #标普400低波
sp4lvhd='92346564', #标普400低波红利 SPMLHUP
sp4hbi='92321164', #标普400高贝
spmmup='92346586', #标普400动量
spmvmoup='92367079', #标普400动量价值
sp4evup='92346588', #标普400增强价值 SPMEVUP
spmqup='92346566', #标普400质量
spdamcup='92025036', #标普400红利贵族
sp6lvi='1159363', #标普600低波
sp6lvhd='92317231', #标普600低波红利 SPC6LHUP
sp6hbi='92321165', #标普600高贝
sp6mup='92347267', #标普600动量
spsvmoup='92367088', #标普600动量价值
sp6evup='92347280', #标普600增强价值
sp6qup='92330656', #标普600质量
sphyda='2325', #标普高收益红利贵族
spgdaup='1692178', #S&P Global Dividend Aristocrats
spgtgdo='5475766', #S&P Global Dividend Opportunities
djhksd='3231529', #道琼斯香港精选红利30
spahlvcp='92321367', #标普港股通低波红利
cspsadrp='5625108', #标普中国a股红利
spcadmcp='92349977', #标普中国a股红利动量*
spcalvcp='92353636', #标普中国a股低波
spcalhcp='92353014', #标普中国a股低波红利
spcaqcp='92353632', #标普中国a股质量
spcqvcp='92340542', #标普中国a股质量价值
spcaevcp='92353637', #标普中国a股增强价值
spacevup='92331490', #标普沪港深中国增强价值usd
spacevhp='92331485', #标普沪港深中国增强价值hkd
spacevcp='92331483', #标普沪港深中国增强价值rmb
#美国
djiew='92321353', #道琼斯工业平均等权重指数
djiyw='92033439', #道琼斯工业平均股息加权指数
djia='1720081', #道琼斯工业平均指数*
djta='100003017', #道琼斯运输平均指数*
djua='100003020', #道琼斯公用事业平均指数*
djca='100003008', #道琼斯综合指数*
sp50='92033453', #标普50*
sp100='2431', #标普100*
sp100ew='2429', #标普100等权重*
spx='340', #标普500
spxhkd='92025788', #标普500hkd
spxcnyp='92346676', #标普500rmb
spxew='370', #标普500等权重*
spxrevw='37003713', #标普500收入加权
sp400='410', #标普中盘400*
sp400ew='388', #标普中盘400等权重
sp400revw='37001111',#标普中盘400收入加权
sp600='2239', #标普小盘600*
sp600ew='2037', #标普小盘600等权重
sp600revw='37002215',#标普小盘600收入加权
sp900='857', #标普900
sp1000='1608', #标普1000*
sp1500='1636', #标普1500*
spcmi='2750', #标普全指除sp500
sptmi='2762', #标普综指=spcmi+sp500
#美国风格
sp500g='2029', #标普500成长
sp500v='2034', #标普500价值
sp500pg='2030', #标普500纯成长
sp500pv='2032', #标普500纯价值
sp400g='1973', #标普400成长
sp400v='1979', #标普400价值
sp400pg='1975', #标普400纯成长
sp400pv='1977', #标普400纯价值
sp600g='2240', #标普600成长
sp600v='2246', #标普600价值
sp600pg='2242', #标普600纯成长
sp600pv='2244', #标普600纯价值
sp900g='1833', #标普900成长
sp900v='1839', #标普900价值
sp900pg='1835', #标普900纯成长
sp900pv='1837', #标普900纯价值
sp1000g='1609', #标普1000成长
sp1000v='1615', #标普1000价值
sp1000pg='1611', #标普1000纯成长
sp1000pv='1613', #标普1000纯价值
sp1500g='1637', #标普1500成长
sp1500v='1643', #标普1500价值
sp1500pg='1639', #标普1500纯成长
sp1500pv='1641', #标普1500纯价值
#美国行业
spxxttsp='92030239', #标普500除通信服务和信息技术
spxxcmup='92354017', #标普500除通讯服务
spxxtsup='92354012', #标普500除信息技术
spxxhcp='92030260', #标普500除医疗保健
spxxcsp='92030248', #标普500除主要消费
spxxcdp='92030257', #标普500除可选消费
spxxtp='92035472', #标普500除烟草Tobacco
spxxfu='92035480', #标普500除金融(new)
spxxfisp='92319291', #标普500除金融
spxxfinp='92030250', #标普500除金融房地产
spxxretp='92319292', #标普500除房地产
spxxindp='92030244', #标普500除工业
spxxmp='92030242', #标普500除材料
spxxegp='92030243', #标普500除能源
spxxutip='92030247', #标普500除公用事业
spx450up='92354118', #标普500通信服务和信息技术
spxtels='339', #标普500通信服务
spxinft='307', #标普500信息技术*
spxhlth='253', #标普500医疗保健
spxcons='213', #标普500主要消费
spxcond='139', #标普500可选消费
spxre='2436', #标普500房地产
spxf='279', #标普500金融
spxindu='81', #标普500工业
spxmatr='41', #标普500材料
spxe='25', #标普500能源
spxutil='356', #标普500公用事业
spsvcp='92351609', #标普500通信服务35/20
spsvitp='92289034', #标普500信息技术35/20
spsvhcp='92289037', #标普500医疗保健35/20
spsvcsp='92289046', #标普500主要消费35/20
spsvcdp='92289028', #标普500可选消费35/20
spsvrep='92289031', #标普500房地产35/20
spsvfp='92289026', #标普500金融35/20
spsvip='92289040', #标普500工业35/20
spsvmp='92289021', #标普500材料35/20
spsvep='92289029', #标普500能源35/20
spsvulp='92320816', #标普500公用事业35/20
spsvutp='92289023', #标普500公用事业&电信35/20
spxtelsew='379', #标普500等权通信服务
spxinftew='378', #标普500等权信息技术
spxhlthew='376', #标普500等权医疗保健*
spxconsew='375', #标普500等权主要消费
spxcondew='374', #标普500等权可选消费
spxreew='92029326', #标普500等权房地产
spxfew='377', #标普500等权金融
spxinduew='373', #标普500等权工业
spxmatrew='372', #标普500等权材料
spxeew='371', #标普500等权能源
spxutilewp='381', #标普500等权公用事业plus
spxutilew='380', #标普500等权公用事业
spsdcsup='92353697', #标普通讯服务25/20
spsdtup='92354040', #标普信息技术25/20
spsdvup='92354034', #标普医疗保健25/20
spsdrup='92354025', #标普主要消费25/20
spsdyup='92354031', #标普可选消费25/20
spsdreup='92354028', #标普房地产25/20
spsdmup='92354043', #标普金融25/20
spsdiup='92354046', #标普工业25/20
spsdbup='92354049', #标普材料25/20
spsdeup='92354052', #标普能源25/20
spsduup='92354037', #标普公用事业25/20
spsucsup='92352997', #标普通讯服务上限20
spsutp='16362', #标普信息技术上限20
spsuhcp='16353', #标普医疗保健上限20
spsucsp='16344', #标普主要消费上限20
spsucdp='16341', #标普可选消费上限20
spsurp='92033394', #标普房地产上限20
spsufp='16350', #标普金融上限20
spsuip='16356', #标普工业上限20
spsump='16359', #标普材料上限20
spsuep='16347', #标普能源上限20
spsuup='16365', #标普公用事业上限20
ixcpr='92351596', #标普通讯服务
ixt='57246', #标普信息技术
ixv='57250', #标普医疗保健
ixr='57244', #标普主要消费
ixy='57252', #标普可选消费
ixre='92030233', #标普房地产
ixm='57242', #标普金融
ixi='57240', #标普工业
ixb='57236', #标普材料
ixe='57238', #标普能源
ixu='57248', #标普公用事业
spsite='2361', #标普电信
spsiad='2327', #标普航空航天国防
djintcup='92361671', #道琼斯国际互联网
spsiin='92247637', #标普互联网
spsiss='57168', #标普软件与服务
spsisc='2359', #标普半导体
spsich='2333', #标普计算机硬件
spsibi='2331', #标普生物科技
spsiph='2355', #标普制药
spsihe='2339', #标普医疗保健设备
spsihp='2343', #标普医疗保健服务
spsifbup='2337', #标普食品饮料
spsire='2357', #标普零售业
spsiins='57092', #标普保险
spsibk='57088', #标普银行
spsirbk='57096', #标普区域银行
spsicm='57090', #标普资本市场
spsiho='2341', #标普建筑商
spsimm='2347', #标普金属采矿
spsitn='2363', #标普交通运输
spsiop='2351', #标普石油和天然气勘探和生产
spsios='2353', #标普石油和天然气设备和服务
)
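# Illustrative helper (added sketch, not part of the original mapping data): look up the
# display label of a report field for a given market, falling back to the raw field name
# when no mapping exists (e.g. indicator_lang['hk'] is None).
def field_label(field, market='cn', table=None):
    table = income_lang if table is None else table
    lang = table.get(market) or {}
    return lang.get(field, field)
# e.g. field_label('net_income', market='us') -> '净利润'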
|
#! /usr/bin/python3
import numpy
import scipy
import matplotlib.pyplot as plt
import random
import datetime
import os
import sys
import time
import math
import argparse
import FastPID
import ArduinoPID
import AutoPID
import refpid
import process
def randomtest(seed, steps, turns, pid, name, bits, sign) :
random.seed(a=seed)
results = numpy.array([])
results.resize((turns,))
outdir = 'randomtest-seed-{}'.format(seed)
for test_num in range (turns) :
kp = round(random.uniform(0, 255), 3)
ki = round(random.uniform(0, kp), 3)
kd = round(random.uniform(0, ki), 3)
pid.configure(kp, ki, kd, bits, sign)
reference = refpid.refpid(kp, ki, kd, bits, sign)
ref = process.Process(reference, steps, turns)
dut = process.Process(pid, steps, turns)
ref.run()
dut.run()
# Check for fit
errf = numpy.square(numpy.subtract(ref.output, dut.output))
err = numpy.cumsum(errf) / numpy.arange(1, ref.output.size+1, dtype=float)
chi2 = numpy.sum(errf) / ref.output.size
results[test_num,] = chi2
if chi2 > 1000 :
if not os.path.isdir(outdir) :
os.mkdir(outdir)
outfile = os.path.join(outdir, "{}-p{}-i{}-d{}.png".format(name, kp, ki, kd))
setline = plt.plot(ref.setpoint, '', label='Setpoint')
refline = plt.plot(ref.output, '', label='Reference')
outline = plt.plot(dut.output, '', label='Output/Feedback')
plt.legend(['Setpoint', 'Reference', 'Out/Feedback'])
plt.xlabel('Time (Seconds)')
plt.ylabel('Codes')
plt.title('{} vs. Reference (p={} i={} d={})'.format(name, kp, ki, kd))
plt.savefig(outfile)
plt.close()
best = numpy.amin(results)
worst = numpy.amax(results)
med = numpy.median(results)
print ("Best: {} Worst: {} Median: {}".format(best,worst,med))
    plt.hist(results)
    # make sure the output directory exists even if no outlier plots were written above
    if not os.path.isdir(outdir) :
        os.mkdir(outdir)
    outfile = os.path.join(outdir, "{}-histogram.png".format(name))
    plt.savefig(outfile)
plt.show()
def main() :
parser = argparse.ArgumentParser(description="Run PID tests")
parser.add_argument('test', help='The test to execute.', choices=['reference', 'random', 'load'])
parser.add_argument('-p', help='Kp', type=float, default=1)
parser.add_argument('-i', help='Ki', type=float, default=0)
parser.add_argument('-d', help='Kd', type=float, default=0)
parser.add_argument('-n', help='Number of steps to simulate.', type=int, default=100)
parser.add_argument('-t', help='Number of random turns to test.', type=int, default=100)
parser.add_argument('--obits', help='Number of output bits.', type=int, default=16)
parser.add_argument('--osign', help='Signedness of the output.', type=int, default=0)
parser.add_argument('--pid', help='PID implementation to use.', choices=['FastPID', 'ArduinoPID', 'AutoPID'], default='FastPID')
    parser.add_argument('--seed', help='Random seed to use.', type=int, default=int(time.time()))
args = parser.parse_args()
if args.pid == 'FastPID' :
pid = FastPID
elif args.pid == 'ArduinoPID' :
pid = ArduinoPID
else:
pid = AutoPID
if not pid.configure(args.p, args.i, args.d, args.obits, args.osign) :
print ('Error configuring the PID.')
exit(-1)
if args.test == 'reference' :
# Test the PID against the reference implementation.
reference = refpid.refpid(args.p, args.i, args.d, args.obits, bool(args.osign))
ref = process.Process(reference, 100, args.n)
dut = process.Process(pid, 100, args.n)
ref.run()
dut.run()
setline = plt.plot(ref.setpoint, '', label='Setpoint')
refline = plt.plot(ref.output, '--', label='Reference')
outline = plt.plot(dut.output, '', label='Output/Feedback')
plt.legend(['Setpoint', 'Reference', 'Out/Feedback'])
plt.xlabel('Time (Seconds)')
plt.ylabel('Codes')
plt.title('{} vs. Reference (p={} i={} d={})'.format(args.pid, args.p, args.i, args.d))
plt.show()
if args.test == 'random' :
# Test random parameters vs. the reference implementation. Look for outliers.
randomtest(args.seed, args.n, args.t, pid, args.pid, args.obits, bool(args.osign))
if args.test == 'load' :
factory_f = process.DifferentialFactory(lambda x : math.log(x *.1) * 0.1 )
dut = process.Process(pid, 100, args.n)
x = numpy.arange(0, args.n)
dut.run()
fig, ax1 = plt.subplots()
ax1.set_xlabel('Step')
ax1.set_ylabel('Setpoint (green), Feedback (red)')
ax1.tick_params('y', color='r')
ax1.plot(x, dut.setpoint, 'g--', dut.feedback, 'r')
ax3 = ax1.twinx()
ax3.set_ylabel('Output (blue)')
ax3.plot(x, dut.output)
#fig.tight_layout()
plt.show()
pass
if __name__ == '__main__' :
main()
|
#!/usr/bin/python
import sys, re
basewords = [] #default list
answers = [] #list of possible codes
# print usage help
def printHelp(case):
    if case == 1:
        print "Use from terminal/cmd with one parameter: a file containing the candidate words, one per line."
    if case == 2:
        print "Input format: \"yourword\" \"likeness\" (without quotes)"
#narrow the answers list
def narrowList():
global answers
global basewords
print "You can choose from these words: " + str(basewords) # all remained words
print ""
print "The answer is in these words: " + str(answers) # possible asnwers
print ""
result = raw_input("Result(word likeness): ") #prompt to input the last tries word and its likeness
# check the input
if not inputCheck(result):
printHelp(2)
return False
# set the word and the likeness value from the user input
if " " in result:
temp_list = result.split(" ")
word = temp_list[0]
likeness = int(temp_list[1])
else:
word = result
likeness = 0
    # remove every appearance of the input word from the answers and basewords lists
    answers = filter(lambda a: a != word, answers)
    basewords = filter(lambda a: a != word, basewords)
    # if the likeness is 0, the word has already been removed from the lists, nothing else to do
    if likeness == 0:
        return
    # if the likeness is not 0, let's continue
    for answer in answers:
        like = 0 # number of letters matching in the same position
        for i in range(0, min(len(answer), len(word))):
            if(answer[i] == word[i]):
                like += 1
        # keep the candidate only if its match count is at least the reported likeness,
        # otherwise drop it from the answers list
        if like < likeness:
            answers = filter(lambda a: a != answer, answers)
# validate the user input before using it
def inputCheck(inp):
global basewords
if " " in inp:
l = inp.split(" ")
if l[0] not in basewords:
return False
r = re.match("[a-zA-Z]* [0-9]*", inp)
if not r:
return False
r = re.match("[a-zA-Z]*", inp)
if not r:
return False
return True
def main(file):
global basewords
global answers
# open the word file for read
try:
f = open(file, "r")
except:
print "Cannot open %s." % file
sys.exit()
# fill up basewords list without the new line character
for line in f:
basewords.append(line.replace("\n", ""))
    answers = list(basewords) # initially, every word is a possible answer
printHelp(2)
while True:
        narrowList() # start narrowing down the answer list
# if only 1 answer left no more trying
if len(answers) == 1:
print "Congratulation! The solution is: %s" % (answers[0])
break
print "The possible answers are: " + str(answers)
sys.exit()
if __name__ == "__main__":
if len(sys.argv) != 2:
printHelp(1)
sys.exit()
main(sys.argv[1])
|
import heapq
import collections
class Node(object):
    def __init__(self, freq, word):
        self.freq = freq
        self.word = word
    # Custom less-than comparator:
    # if the frequencies differ, compare them normally;
    # if the frequencies are equal, the lexicographically smaller word should rank first,
    # hence the reversed word comparison (this class is used in a min-heap of size k)
    def __lt__(self, other):
        if self.freq != other.freq:
            return self.freq < other.freq
        return self.word > other.word
class test(object):
    def __init__(self, freq, word):
        self.freq = freq
        self.word = word
def __lt__(self, other):
if self.freq != other.freq:
return self.freq < other.freq
return self.word > other.word
class Solution:
"""
@param words: an array of string
@param k: An integer
@return: an array of string
"""
def topKFrequentWords(self, words, k):
# write your code here
if not words or k <= 0:
return []
        counts = collections.defaultdict(int)  # https://www.jianshu.com/p/bbd258f99fd3
        ans = []
        for word in words:
            counts[word] += 1
        heap = []
        for word, freq in counts.items():
heapq.heappush(heap, Node(freq, word))
if len(heap) > k:
heapq.heappop(heap)
while heap:
ans.append(heapq.heappop(heap).word)
ans.reverse()
return ans
map = {}
map['1'] = 2
map['1'] += 1
print(map.get("1"))
print(map)
#
# map = {4: 2, 2: 3}
#
# map = sorted(map.items(), key=lambda kv: (kv[0], kv[1]))
# print()
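# Illustrative usage (added sketch, not part of the original solution):
if __name__ == '__main__':
    # "yes" appears 3 times, "code" twice, "lint" once -> the top 2 are ["yes", "code"]
    print(Solution().topKFrequentWords(["yes", "lint", "code", "yes", "code", "yes"], 2))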
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# to use for example with ipython -pylab
# run /home/pierre/Python/Production/Energy_budget/all_terms_loop_days_review.py
# run /scratch/augier/Python/Production/Energy_budget/all_terms_loop_days_review.py
# compute some terms of the spectral energy budget.
# Memory is allocated only as needed.
# import basic modules...
import os, sys, resource
import numpy as np
# import homemade module to load data and perform spharm transform
import treat_simul_shtns22 as treat_simul
# import function time in module time for timing functions
from time import time
# import module for saving...
####import pickle
import cPickle as pickle
import matplotlib.pyplot as plt
def cumsum_inv(a):
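    """Cumulative sum computed from the end of the array (reverse cumulative sum)."""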
return a[::-1].cumsum()[::-1]
SAVE = 1
season = 'summer'
#season = 'winter'
name_simul = 'AFES_T639'
#name_simul = 'AFES_T1279'
name_simul = 'ECMWF_T1279'
#name_simul = 'ECMWF_T159'
Osim = treat_simul.treat_simul(name_simul=name_simul)
####print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
plevs = Osim.plevs
nz = len(plevs)
p00 = 1000 # hPa
A = (p00/plevs)**Osim.khi
cosLATS = Osim.cosLATS
sinLATS = np.sin(Osim.LATS*np.pi/180)
fCor0 = 2*Osim.Omega
f_LATS = fCor0*sinLATS
beta3D_small, nb_lev_beta = Osim.load_beta_ps_mean()
beta3D = np.ones([nz, Osim.nlat, Osim.nlon], dtype=np.float32)
beta3D[0:nb_lev_beta] = beta3D_small
Osim.load_meanT_coefAPE(season=season)
Coef_APE_Theta = Osim.Coef_APE_Theta
zeros_lm = Osim.init_array_SH(0.)
# For these files, we use the notations of the resubmitted version of the paper
# A new formulation of the spectral energy budget of the atmosphere...
# the tendencies
P_TKhOp_l = np.zeros([nz,Osim.lmax+1])
P_TKvOp_l = np.zeros([nz,Osim.lmax+1])
P_TKrot_l = np.zeros([nz,Osim.lmax+1])
P_TAhOp_l = np.zeros([nz,Osim.lmax+1])
P_TAvOp_l = np.zeros([nz,Osim.lmax+1])
P_Lcori_l = np.zeros([nz,Osim.lmax+1])
P_Lcalt_l = np.zeros([nz,Osim.lmax+1])
P_Conv__l = np.zeros([nz,Osim.lmax+1])
P_Conv2_l = np.zeros([nz,Osim.lmax+1])
P_DKh___l = np.zeros([nz,Osim.lmax+1])
# the vertical fluxes
F_Kpres_l = np.zeros([nz,Osim.lmax+1])
F_Kturb_l = np.zeros([nz,Osim.lmax+1])
F_Aturb_l = np.zeros([nz,Osim.lmax+1])
list_ens_tendencies = []
for ip in range(nz):
ens_tendencies = treat_simul.ensemble_variables('ensemble tendency terms')
ens_tendencies.ddata['P_TKhOp_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_TKvOp_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_TKrot_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_TAhOp_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_TAvOp_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_Lcori_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_Lcalt_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_Conv__l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_Conv2_l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_DKh___l'] = np.zeros([Osim.lmax+1])
ens_tendencies.ddata['P_Conv_'] = 0.
ens_tendencies.ddata['P_DKh__'] = 0.
list_ens_tendencies.append(ens_tendencies)
list_ens_vert_fluxes = []
for ip in range(nz):
ens_vert_fluxes = treat_simul.ensemble_variables('ensemble vertical fluxes terms')
ens_vert_fluxes.ddata['F_Kpres_l'] = np.zeros([Osim.lmax+1])
ens_vert_fluxes.ddata['F_Kturb_l'] = np.zeros([Osim.lmax+1])
ens_vert_fluxes.ddata['F_Aturb_l'] = np.zeros([Osim.lmax+1])
list_ens_vert_fluxes.append(ens_vert_fluxes)
def hour_max(day):
if day==11:
h = 8
else:
h = 24
return h
delta_hour = 2
if name_simul[:4]=='AFES':
if Osim.computer=='pierre-KTH':
days = [5,11,15]
elif Osim.computer=='KTH':
days = [4,5,8,11,12,13,14,15]
elif name_simul=='ECMWF_T1279':
days = [1, 5, 10, 15, 20, 25]
elif name_simul=='ECMWF_T159':
days = [5]
#delta_hour = 24
#days = [4,5,8,11]
#days = [4]
def hours_day_namesimul(day, name_simul):
if name_simul[:4]=='AFES':
hours = np.arange(1, hour_max(day)+1, delta_hour)
elif name_simul[:5]=='ECMWF':
hours = [12]
return hours
nbtot = 0
for day in days:
hours = hours_day_namesimul(day, name_simul)
nbtot += len(hours)
nb_to_do = nbtot+0 # "deepcopy"...
nb_instants_computed = 0
for day in days:
for ip in range(nz):
list_ens_tendencies[ip].reset_to_zeros()
list_ens_vert_fluxes[ip].reset_to_zeros()
hours = hours_day_namesimul(day, name_simul)
nb_instants_computed_day = 0
for hour in hours:
t1_1time = time()
# For each time, we compute (in this order) the values of:
# F_Kpres_l[ip, il], P_Conv__l[ip, il]
# F_Aturb_l[ip, il], P_TAvOp_l[ip, il], P_Conv2_l[ip, il]
# P_TAhOp_l[ip, il]
# P_TKhOp_l[ip, il], P_TKrot_l[ip, il], P_DKh___l[ip, il]
# P_TKvOp_l[ip, il], F_Kturb_l[ip, il]
# P_Lcori_l[ip, il]
# We first list the things that we have to do:
# load omega3D, Phi3D, TT3D
# compute omegab3D_lm, Phib3D_lm, TTb3D_lm
# del(Phib3D, oob3D)
# compute F_Kpres_l[ip, il],
# P_Conv__l[ip, il]
# del(Phib3D_lm), del(Tb3D_lm)
# compute Thetabp3D from T3D, then Thetabp3D_lm
# del(T3D)
# compute dp_Thetabp3D
# compute F_Aturb_l[ip, il],
# P_TAvOp_l[ip, il]
# del(dp_Thetabp3D)
# load uu3D, vv3D
# compute d3D
# compute P_TAhOp_l[ip, il]
# del(Thetabp3D, Thetabp3D_lm, d3D)
# compute uub3D, vvb3D
# del(uu3D, vv3D)
# compute divhuub3D_lm, rothuub3D_lm
# compute P_TKhOp_l[ip, il],
# P_TKrot_l[ip, il],
# P_DKh___l[ip, il]
# compute dp_uub3D
# P_TKvOp_l[ip, il], F_Kturb_l[ip, il]
# del(omega3D, dp_uub3D)
# compute P_Lcori_l[ip, il]
# P_Lcalt_l[ip, il]
# del(uub3D, vvb3D, divhuub3D_lm, rothuub3D_lm)
# Then, the computations:
# load oo3D, Phi3D, T3D
print 'load oo3D, Phi3D, T3D'
t1 = time()
        oo3D = Osim.load_var3D(name_var='o', day=day, hour=hour, season=season) # in Pa/s
Phi3D = Osim.load_var3D(name_var='Phi', day=day, hour=hour, season=season)
TT3D = Osim.load_var3D(name_var='T', day=day, hour=hour, season=season)
t2 = time()
print '(loaded in {0:3.2f} s)'.format(t2-t1)
Phib3D = Phi3D
Phib3D[0:nb_lev_beta] = Phi3D[0:nb_lev_beta] * beta3D_small
del(Phi3D)
TTb3D = np.empty(TT3D.shape)
TTb3D[nb_lev_beta:nz] = TT3D[nb_lev_beta:nz]
TTb3D[0:nb_lev_beta] = TT3D[0:nb_lev_beta] * beta3D_small
print '1 SH3D transform, compute oob3D_lm',
sys.stdout.flush()
t1 = time()
oob3D_lm = np.empty([nz, Osim.nlm], dtype=complex)
for ip in range(nz):
oob3D_lm[ip] = Osim.SH_from_spat(oo3D[ip]*beta3D[ip])
# we use an analytical result for levels above the surface...
if ip>=nb_lev_beta:
oob3D_lm[ip, 0] = 0.
oo3D[ip] = Osim.spat_from_SH(oob3D_lm[ip])
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute Phib3D_lm, TTb3D_lm
        print '2 SH3D transforms, compute Phib3D_lm, TTb3D_lm',
sys.stdout.flush()
t1 = time()
Phib3D_lm = np.zeros([nz, Osim.nlm], dtype=complex)
TTb3D_lm = np.zeros([nz, Osim.nlm], dtype=complex)
for ip in range(nz):
Phib3D_lm[ip] = Osim.SH_from_spat(Phib3D[ip])
TTb3D_lm[ip] = Osim.SH_from_spat(TTb3D[ip])
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# del(Phib3D)
del(Phib3D)
# compute F_Kpres_l[ip, il],
# P_Conv__l[ip, il]
print 'compute F_Kpres_l, P_Conv__l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
# spectrum of vertical pressure flux
F_Kpres_l[ip] = -Osim.cospectrum_from_2fieldsSH(oob3D_lm[ip],
Phib3D_lm[ip])
# spectrum of conversion
P_Conv__l[ip] = -Osim.cospectrum_from_2fieldsSH(oob3D_lm[ip],
TTb3D_lm[ip])*Osim.R/(plevs[ip]*100)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# del(Phib3D_lm), del(TTb3D_lm)
del(Phib3D_lm)
del(TTb3D_lm)
# compute Thetabp3D from T3D, then Thetabp3D_lm from TTb3D
print 'compute Thetabp3D and dp_Thetabp3D',
sys.stdout.flush()
t1 = time()
TTbp3D = np.empty([nz, Osim.nlat, Osim.nlon])
for ip in range(nz):
TTbp3D[ip] = (TT3D[ip]-Osim.mean_field_representative(TT3D[ip], ip))*beta3D[ip]
del(TT3D)
Thetabp3D = np.empty([nz, Osim.nlat, Osim.nlon])
for ip in range(nz):
Thetabp3D[ip] = A[ip]*TTbp3D[ip]
# compute dp_Thetabp3D
## compute vertical derivative of Theta
## one part is done analytically
dp_TTbp3D = Osim.vertical_derivative_f(TTbp3D)
del(TTbp3D)
dp_Thetabp3D = np.empty([nz, Osim.nlat, Osim.nlon])
for ip in range(nz):
dp_Thetabp3D[ip] = -Osim.khi*Thetabp3D[ip]/plevs[ip] + A[ip]*dp_TTbp3D[ip]
del(dp_TTbp3D)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute Thetabp3D_lm
        print '1 SH3D transform, compute Thetabp3D_lm',
sys.stdout.flush()
t1 = time()
Thetabp3D_lm = np.zeros([nz, Osim.nlm], dtype=complex)
for ip in range(nz):
Thetabp3D_lm[ip] = Osim.SH_from_spat(Thetabp3D[ip])
Thetabp3D_lm[ip,0] = 0.
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute F_Aturb_l[ip, il],
# P_TAvOp_l[ip, il]
print 'compute F_Aturb_l, P_TAvOp_l, P_Conv2_l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
dp_Thetabp_lm = Osim.SH_from_spat(dp_Thetabp3D[ip])
ooThetabp_lm = Osim.SH_from_spat(oo3D[ip]*Thetabp3D[ip])
oodp_Thetabp_lm = Osim.SH_from_spat(oo3D[ip]*dp_Thetabp3D[ip])
F_Aturb_l[ip] = -Osim.cospectrum_from_2fieldsSH(
Thetabp3D_lm[ip],
ooThetabp_lm
)*Coef_APE_Theta[ip]/2
P_TAvOp_l[ip] = ( +Osim.cospectrum_from_2fieldsSH(
dp_Thetabp_lm, ooThetabp_lm)
-Osim.cospectrum_from_2fieldsSH(
Thetabp3D_lm[ip], oodp_Thetabp_lm)
)*Coef_APE_Theta[ip]/2/100
TTbp_lm = Thetabp3D_lm[ip]/A[ip]
P_Conv2_l[ip] = -Osim.cospectrum_from_2fieldsSH(oob3D_lm[ip],
TTbp_lm)*Osim.R/(plevs[ip]*100)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
del(dp_Thetabp3D)
# load uu3D, vv3D
print 'load uu3D, vv3D'
t1 = time()
uu3D = Osim.load_var3D(name_var='u', day=day, hour=hour, season=season)
vv3D = Osim.load_var3D(name_var='v', day=day, hour=hour, season=season)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute P_TAhOp_l[ip, il]
print 'compute P_TAhOp_l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
hdiv_lm, hrot_lm = Osim.hdivrotSH_from_uuvv(uu3D[ip], vv3D[ip])
hdiv = Osim.spat_from_SH(hdiv_lm)
grad_Thetabp_lon, grad_Thetabp_lat = Osim.gradf_from_fSH(Thetabp3D_lm[ip])
temp_AhOp = ( -uu3D[ip]*grad_Thetabp_lon - vv3D[ip]*grad_Thetabp_lat
-hdiv*Thetabp3D[ip]/2 )
temp_AhOp_lm= Osim.SH_from_spat(temp_AhOp)
P_TAhOp_l[ip] = Osim.cospectrum_from_2fieldsSH( Thetabp3D_lm[ip],
temp_AhOp_lm
)*Osim.Coef_APE_Theta[ip]
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# del(Thetabp3D, Thetabp3D_lm)
del(Thetabp3D)
del(Thetabp3D_lm)
# compute uub3D, vvb3D and del(uu3D, vv3D)
uub3D = uu3D
uub3D[0:nb_lev_beta] = uu3D[0:nb_lev_beta] * beta3D_small
del(uu3D)
vvb3D = vv3D
vvb3D[0:nb_lev_beta] = vv3D[0:nb_lev_beta] * beta3D_small
del(vv3D)
# compute divhuub3D_lm, rothuub3D_lm
        print '1 vectorial SH3D transform, compute divhuub3D_lm, rothuub3D_lm',
sys.stdout.flush()
t1 = time()
divhuub3D_lm = np.zeros([nz, Osim.nlm], dtype=complex)
rothuub3D_lm = np.zeros([nz, Osim.nlm], dtype=complex)
for ip in range(nz):
divhuub3D_lm[ip], rothuub3D_lm[ip] = Osim.hdivrotSH_from_uuvv(
uub3D[ip], vvb3D[ip])
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute dp_uub3D and dp_vvb3D
print 'compute dp_uub3D and dp_vvb3D',
sys.stdout.flush()
t1 = time()
dp_uub3D = Osim.vertical_derivative_f(uub3D)
dp_vvb3D = Osim.vertical_derivative_f(vvb3D)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute P_TKvOp_l, F_Kturb_l
print 'compute P_TKvOp_l, F_Kturb_l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
divhdp_uub_lm, rothdp_uub_lm = Osim.hdivrotSH_from_uuvv(
dp_uub3D[ip], dp_vvb3D[ip])
divhoouub_lm, rothoouub_lm = Osim.hdivrotSH_from_uuvv(
oo3D[ip]*uub3D[ip], oo3D[ip]*vvb3D[ip])
divhoodp_uub_lm, rothoodp_uub_lm = Osim.hdivrotSH_from_uuvv(
oo3D[ip]*dp_uub3D[ip], oo3D[ip]*dp_vvb3D[ip])
F_Kturb_l[ip] = -Osim.cospectrum_from_2divrotSH(
divhuub3D_lm[ip], rothuub3D_lm[ip],
divhoouub_lm, rothoouub_lm
)/2
P_TKvOp_l[ip] = ( +Osim.cospectrum_from_2divrotSH(
divhdp_uub_lm, rothdp_uub_lm,
divhoouub_lm, rothoouub_lm)
-Osim.cospectrum_from_2divrotSH(
divhuub3D_lm[ip], rothuub3D_lm[ip],
divhoodp_uub_lm, rothoodp_uub_lm)
)/2/100
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
del(dp_uub3D)
del(dp_vvb3D)
del(oo3D)
# compute P_TKhOp_l[ip, il],
# P_TKrot_l[ip, il],
# P_DKh___l[ip, il]
print 'compute P_TKhOp_l, P_TKrot_l, P_DKh___l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
rothuub = Osim.spat_from_SH(rothuub3D_lm[ip])
divhuub = Osim.spat_from_SH(divhuub3D_lm[ip])
temp_KhOp_lon = -rothuub*vvb3D[ip] + divhuub*uub3D[ip]/2
temp_KhOp_lat = +rothuub*uub3D[ip] + divhuub*vvb3D[ip]/2
divhtemp_KhOp_lm, rothtemp_KhOp_lm = Osim.hdivrotSH_from_uuvv(
temp_KhOp_lon, temp_KhOp_lat)
temp2_AhOp_lm = Osim.SH_from_spat( uub3D[ip]*uub3D[ip]
+vvb3D[ip]*vvb3D[ip])
P_TKhOp_l[ip] = (
-Osim.cospectrum_from_2divrotSH(
divhuub3D_lm[ip], rothuub3D_lm[ip],
divhtemp_KhOp_lm, rothtemp_KhOp_lm)
+Osim.cospectrum_from_2fieldsSH(
divhuub3D_lm[ip],
temp2_AhOp_lm
)/2
)
uub_rot, vvb_rot = Osim.uuvv_from_hdivrotSH(zeros_lm, rothuub3D_lm[ip])
temp_KhOp_lon = -rothuub*vvb_rot
temp_KhOp_lat = +rothuub*uub_rot
divhtemp_KhOp_lm, rothtemp_KhOp_lm = Osim.hdivrotSH_from_uuvv(
temp_KhOp_lon, temp_KhOp_lat)
P_TKrot_l[ip] = -Osim.cospectrum_from_2divrotSH(
zeros_lm, rothuub3D_lm[ip],
zeros_lm, rothtemp_KhOp_lm)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# compute P_Lcori_l[ip, il]
# P_Lcalt_l[ip, il]
print 'compute P_Lcori_l, P_Lcalt_l',
sys.stdout.flush()
t1 = time()
for ip in range(nz):
Fcor_lon = -f_LATS*vvb3D[ip]
Fcor_lat = +f_LATS*uub3D[ip]
divhFcor_lm, rothFcor_lm = Osim.hdivrotSH_from_uuvv(
Fcor_lon, Fcor_lat)
P_Lcori_l[ip] = -Osim.cospectrum_from_2divrotSH(
divhuub3D_lm[ip], rothuub3D_lm[ip],
divhFcor_lm, rothFcor_lm)
psi_lm = -Osim.r_a**2/Osim.l2_idx*rothuub3D_lm[ip]
chi_lm = -Osim.r_a**2/Osim.l2_idx*divhuub3D_lm[ip]
grad_psi_lon, grad_psi_lat = Osim.gradf_from_fSH(psi_lm)
grad_chi_lon, grad_chi_lat = Osim.gradf_from_fSH(chi_lm)
divhuub = Osim.spat_from_SH(divhuub3D_lm[ip])
rothuub = Osim.spat_from_SH(rothuub3D_lm[ip])
temp_rot = sinLATS*divhuub + cosLATS*grad_chi_lat/Osim.r_a
temp_div = sinLATS*rothuub + cosLATS*grad_psi_lat/Osim.r_a
temp_rot_lm = Osim.SH_from_spat(temp_rot)
temp_div_lm = Osim.SH_from_spat(temp_div)
P_Lcalt_l[ip] = fCor0*( Osim.cospectrum_from_2fieldsSH(psi_lm, temp_rot_lm)
- Osim.cospectrum_from_2fieldsSH(chi_lm, temp_div_lm)
)
t2 = time()
print '(done in {0:3.2f} s)'.format(t2-t1)
# del(uub3D, vvb3D, divhuub3D_lm, rothuub3D_lm)
del(uub3D)
del(vvb3D)
del(divhuub3D_lm)
del(rothuub3D_lm)
print 'build the ensemble and add for the time average'
for ip in range(nz):
ens_tendencies = treat_simul.ensemble_variables('ensemble tendency terms')
ens_tendencies.ddata['P_TKhOp_l'] = P_TKhOp_l[ip]
ens_tendencies.ddata['P_TKvOp_l'] = P_TKvOp_l[ip]
ens_tendencies.ddata['P_TKrot_l'] = P_TKrot_l[ip]
ens_tendencies.ddata['P_TAhOp_l'] = P_TAhOp_l[ip]
ens_tendencies.ddata['P_TAvOp_l'] = P_TAvOp_l[ip]
ens_tendencies.ddata['P_Lcori_l'] = P_Lcori_l[ip]
ens_tendencies.ddata['P_Lcalt_l'] = P_Lcalt_l[ip]
ens_tendencies.ddata['P_Conv__l'] = P_Conv__l[ip]
ens_tendencies.ddata['P_Conv2_l'] = P_Conv2_l[ip]
ens_tendencies.ddata['P_DKh___l'] = P_DKh___l[ip]
ens_tendencies.ddata['P_Conv_'] = P_Conv__l[ip].sum()
ens_tendencies.ddata['P_DKh__'] = P_DKh___l[ip].sum()
list_ens_tendencies[ip] += ens_tendencies
ens_vert_fluxes = treat_simul.ensemble_variables('ensemble vertical fluxes terms')
ens_vert_fluxes.ddata['F_Kpres_l'] = F_Kpres_l[ip]
ens_vert_fluxes.ddata['F_Kturb_l'] = F_Kturb_l[ip]
ens_vert_fluxes.ddata['F_Aturb_l'] = F_Aturb_l[ip]
list_ens_vert_fluxes[ip] += ens_vert_fluxes
nb_instants_computed_day += 1
nb_instants_computed += 1
nb_to_do = nb_to_do - 1
t2_1time = time()
print '1 time treated in {0:3.2f} s'.format(t2_1time-t1_1time)
if nb_instants_computed%(nbtot/100.)<1.:
print 'day =', day, 'hour =', hour, ' completed: {0:3.0f}% done'.format(nb_instants_computed/float(nbtot)*100.)
if not nb_to_do==0:
print 'there are still {0} instants to treat'.format(nb_to_do)
                print 'approximate time left: {0:5.0f} s'.format((t2_1time-t1_1time)*nb_to_do)
else:
print 'computation completed...'
for ip in range(nz):
list_ens_tendencies[ip] = list_ens_tendencies[ip]/nb_instants_computed_day
    # now compute the list list_ens_cumul_tend
list_ens_cumul_tend = []
for ip in range(nz):
ens_tendencies = list_ens_tendencies[ip]
ens_vert_fluxes = list_ens_vert_fluxes[ip]
Pi_TKhOp_l = cumsum_inv(ens_tendencies.ddata['P_TKhOp_l'])
Pi_TKvOp_l = cumsum_inv(ens_tendencies.ddata['P_TKvOp_l'])
Pi_TKrot_l = cumsum_inv(ens_tendencies.ddata['P_TKrot_l'])
Pi_TAhOp_l = cumsum_inv(ens_tendencies.ddata['P_TAhOp_l'])
Pi_TAvOp_l = cumsum_inv(ens_tendencies.ddata['P_TAvOp_l'])
Pi_Lcori_l = cumsum_inv(ens_tendencies.ddata['P_Lcori_l'])
Pi_Lcalt_l = cumsum_inv(ens_tendencies.ddata['P_Lcalt_l'])
cumu_Con_l = cumsum_inv(ens_tendencies.ddata['P_Conv__l'])
cumu_Co2_l = cumsum_inv(ens_tendencies.ddata['P_Conv2_l'])
cumu_DKh_l = cumsum_inv(ens_tendencies.ddata['P_DKh___l'])
cumu_FKp_l = cumsum_inv(ens_vert_fluxes.ddata['F_Kpres_l'])
cumu_FKt_l = cumsum_inv(ens_vert_fluxes.ddata['F_Kturb_l'])
cumu_FAt_l = cumsum_inv(ens_vert_fluxes.ddata['F_Aturb_l'])
ens_cumul_tend = treat_simul.ensemble_variables('ensemble flux terms')
ens_cumul_tend.ddata['Pi_TKhOp_l'] = Pi_TKhOp_l
ens_cumul_tend.ddata['Pi_TKvOp_l'] = Pi_TKvOp_l
ens_cumul_tend.ddata['Pi_TKrot_l'] = Pi_TKrot_l
ens_cumul_tend.ddata['Pi_TAhOp_l'] = Pi_TAhOp_l
ens_cumul_tend.ddata['Pi_TAvOp_l'] = Pi_TAvOp_l
ens_cumul_tend.ddata['Pi_Lcori_l'] = Pi_Lcori_l
ens_cumul_tend.ddata['Pi_Lcalt_l'] = Pi_Lcalt_l
ens_cumul_tend.ddata['cumu_Con_l'] = cumu_Con_l
ens_cumul_tend.ddata['cumu_Co2_l'] = cumu_Co2_l
ens_cumul_tend.ddata['cumu_DKh_l'] = cumu_DKh_l
ens_cumul_tend.ddata['cumu_FKp_l'] = cumu_FKp_l
ens_cumul_tend.ddata['cumu_FKt_l'] = cumu_FKt_l
ens_cumul_tend.ddata['cumu_FAt_l'] = cumu_FAt_l
list_ens_cumul_tend.append(ens_cumul_tend)
if SAVE:
name_directory_save = Osim.path_dir+'/Statistics/Dyn_days_review'
if name_simul=='ECMWF_T1279':
name_directory_save = name_directory_save+season
if not os.path.exists(name_directory_save):
os.mkdir(name_directory_save)
name_save = 'T'+str(Osim.lmax)+'_dyn_day'+str(day)+'.pickle'
dico_save = dict([ ['list_ens_tendencies', list_ens_tendencies],
['list_ens_vert_fluxes', list_ens_vert_fluxes],
['list_ens_cumul_tend', list_ens_cumul_tend],
['name_save', name_save]
])
f = open(name_directory_save+'/'+name_save, 'w')
pickle.dump(dico_save, f, pickle.HIGHEST_PROTOCOL)
f.close()
print '\nday = '+str(day)+' is done'
print 'nb_instants_computed_day =', nb_instants_computed_day
print '\n'
|
import matplotlib.pyplot as plt
def draw_graph(final):
if final <= 1:
raise ValueError("specify number bigger than 1")
fibonacci = [1, 1]
for _ in range(final - 2):
fibonacci.append(fibonacci[-1] + fibonacci[-2])
ys = [fibonacci[x + 1] / fibonacci[x] for x in range(len(fibonacci) - 1)]
plt.plot(range(final - 1), ys)
plt.xlabel("No.")
plt.ylabel("Ratio")
plt.title("Ratio between consecutive Fibonacci numbers")
if __name__ == '__main__':
draw_graph(100)
plt.show()
|
class Plugin:
"""
A plugin is an extension to the core behavior of bonobo. If you're writing transformations, you should not need
to use this interface.
    For examples, see bonobo.plugins.console.ConsoleOutputPlugin or bonobo.plugins.jupyter.JupyterOutputPlugin,
    which respectively provide an interactive output on an ANSI console and a rich output in a jupyter notebook. Note
    that you most probably won't instantiate them yourself at runtime, as it's the default behaviour of bonobo to use
    them if you're in a compatible context (i.e. an interactive terminal for the console plugin, or a jupyter notebook
    for the notebook plugin).
Warning: THE PLUGIN API IS PRE-ALPHA AND WILL EVOLVE BEFORE 1.0, DO NOT RELY ON IT BEING STABLE!
"""
def register(self, dispatcher):
"""
:param dispatcher: whistle.EventDispatcher
"""
pass
def unregister(self, dispatcher):
"""
:param dispatcher: whistle.EventDispatcher
"""
pass
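# A minimal sketch of a custom plugin (illustrative only, not shipped with bonobo).
# The event name used below is hypothetical, and the add_listener / remove_listener
# calls assume the usual whistle.EventDispatcher listener interface.
class LoggingPlugin(Plugin):
    def _on_event(self, event):
        print('event received:', event)
    def register(self, dispatcher):
        # assumed whistle API: subscribe the callback for a given event id
        dispatcher.add_listener('my_app.tick', self._on_event)
    def unregister(self, dispatcher):
        dispatcher.remove_listener('my_app.tick', self._on_event)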
|
"""
Collection of Buffer objects with general functionality
"""
import numpy as np
class Buffer(object):
"""
Abstract class for different kinds of data buffers. Minimum API should have a "push" and "clear" method
"""
def push(self, value):
"""
Pushes a new @value to the buffer
Args:
value: Value to push to the buffer
"""
raise NotImplementedError
def clear(self):
raise NotImplementedError
class RingBuffer(Buffer):
"""
Simple RingBuffer object to hold values to average (useful for, e.g.: filtering D component in PID control)
Note that the buffer object is a 2D numpy array, where each row corresponds to
individual entries into the buffer
Args:
dim (int): Size of entries being added. This is, e.g.: the size of a state vector that is to be stored
length (int): Size of the ring buffer
"""
def __init__(self, dim, length):
# Store input args
self.dim = dim
self.length = length
# Variable so that initial average values are accurate
self._size = 0
# Save pointer to end of buffer
self.ptr = self.length - 1
# Construct ring buffer
self.buf = np.zeros((length, dim))
def push(self, value):
"""
Pushes a new value into the buffer
Args:
value (int or float or array): Value(s) to push into the array (taken as a single new element)
"""
# Increment pointer, then add value (also increment size if necessary)
self.ptr = (self.ptr + 1) % self.length
self.buf[self.ptr] = np.array(value)
if self._size < self.length:
self._size += 1
def clear(self):
"""
Clears buffer and reset pointer
"""
self.buf = np.zeros((self.length, self.dim))
self.ptr = self.length - 1
self._size = 0
@property
def current(self):
"""
Gets the most recent value pushed to the buffer
Returns:
float or np.array: Most recent value in buffer
"""
return self.buf[self.ptr]
@property
def average(self):
"""
Gets the average of components in buffer
Returns:
float or np.array: Averaged value of all elements in buffer
"""
return np.mean(self.buf[: self._size], axis=0)
class DeltaBuffer(Buffer):
"""
Simple 2-length buffer object to streamline grabbing delta values between "current" and "last" values
Constructs delta object.
Args:
dim (int): Size of numerical arrays being inputted
init_value (None or Iterable): Initial value to fill "last" value with initially.
If None (default), last array will be filled with zeros
"""
def __init__(self, dim, init_value=None):
# Setup delta object
self.dim = dim
self.last = np.zeros(self.dim) if init_value is None else np.array(init_value)
self.current = np.zeros(self.dim)
def push(self, value):
"""
Pushes a new value into the buffer; current becomes last and @value becomes current
Args:
value (int or float or array): Value(s) to push into the array (taken as a single new element)
"""
self.last = self.current
self.current = np.array(value)
def clear(self):
"""
Clears last and current value
"""
self.last, self.current = np.zeros(self.dim), np.zeros(self.dim)
    def delta(self, abs_value=False):
        """
        Returns the delta between the last value and the current value. If abs_value is set to True, returns
        the absolute difference instead.
        Note: this is a regular method (not a property) so that the documented abs_value argument can actually
        be passed by the caller.
        Args:
            abs_value (bool): Whether to return the absolute value or not
        Returns:
            float or np.array: difference between current and last value
        """
        return self.current - self.last if not abs_value else np.abs(self.current - self.last)
@property
def average(self):
"""
Returns the average between the current and last value
Returns:
float or np.array: Averaged value of all elements in buffer
"""
return (self.current + self.last) / 2.0
class DelayBuffer(RingBuffer):
"""
Modified RingBuffer that returns delayed values when polled
"""
def get_delayed_value(self, delay):
"""
Returns value @delay increments behind most recent value.
Args:
delay (int): How many steps backwards from most recent value to grab value. Note that this should not be
greater than the buffer's length
Returns:
np.array: delayed value
"""
# First make sure that the delay is valid
assert delay < self.length, "Requested delay must be less than buffer's length!"
# Grab delayed value
return self.buf[(self.ptr - delay) % self.length]
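# Minimal usage sketch (added for illustration; not part of the original module).
if __name__ == "__main__":
    # RingBuffer: keeps the last `length` entries and averages over what is stored
    ring = RingBuffer(dim=3, length=4)
    for i in range(6):
        ring.push([i, i, i])
    print(ring.current, ring.average)

    # DeltaBuffer: tracks the last two values pushed; delta() returns current - last
    delta_buf = DeltaBuffer(dim=2)
    delta_buf.push([1.0, 2.0])
    delta_buf.push([2.0, 4.0])
    print(delta_buf.delta(), delta_buf.average)

    # DelayBuffer: returns the value pushed `delay` steps before the most recent one
    delayed = DelayBuffer(dim=1, length=5)
    for i in range(5):
        delayed.push([i])
    print(delayed.get_delayed_value(2))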
|
import datetime
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_USERNAME, CONF_PASSWORD, CONF_NAME, CONF_MONITORED_VARIABLES)
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_METER_ID = 'energy_meter_id'
CONF_GENERATION = 'check_generation'
DEFAULT_NAME = 'Tauron AMIPlus'
MIN_TIME_BETWEEN_UPDATES = datetime.timedelta(seconds=60)
ZONE = 'zone'
CONSUMPTION_DAILY = 'consumption_daily'
CONSUMPTION_MONTHLY = 'consumption_monthly'
CONSUMPTION_YEARLY = 'consumption_yearly'
GENERATION_DAILY = 'generation_daily'
GENERATION_MONTHLY = 'generation_monthly'
GENERATION_YEARLY = 'generation_yearly'
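# For the non-zone sensors each entry is:
# [minimum update interval, unit, JSON key used for the sensor state,
#  (name, JSON key) of the extra consumption/generation attribute].
# The ZONE entry only has an update interval and no unit.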
SENSOR_TYPES = {
ZONE: [timedelta(minutes=1), None],
CONSUMPTION_DAILY: [timedelta(hours=1), 'kWh', 'sum', ('generation', 'OZEValue')],
CONSUMPTION_MONTHLY: [timedelta(hours=1), 'kWh', 'sum', ('generation', 'OZEValue')],
CONSUMPTION_YEARLY: [timedelta(hours=1), 'kWh', 'sum', ('generation', 'OZEValue')],
GENERATION_DAILY: [timedelta(hours=1), 'kWh', 'OZEValue', ('consumption', 'sum')],
GENERATION_MONTHLY: [timedelta(hours=1), 'kWh', 'OZEValue', ('consumption', 'sum')],
GENERATION_YEARLY: [timedelta(hours=1), 'kWh', 'OZEValue', ('consumption', 'sum')]
}
TARIFF_G12 = 'G12'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_METER_ID): cv.string,
vol.Required(CONF_MONITORED_VARIABLES, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_GENERATION, default=False): cv.boolean,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
name = config.get(CONF_NAME)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
meter_id = config.get(CONF_METER_ID)
generation = config.get(CONF_GENERATION)
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
dev.append(TauronAmiplusSensor(name, username, password, meter_id, generation, variable))
add_entities(dev, True)
def calculate_configuration(username, password, meter_id, days_before=2):
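    """Fetch one past day's chart from eLicznik to derive the meter's tariff name,
    its configured hourly power zones and the complementary zones that fill the gaps
    between them. Returns (power_zones, tariff, formatted timestamp of the data used)."""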
payload_login = {
"username": username,
"password": password,
"service": "https://elicznik.tauron-dystrybucja.pl"
}
session = requests.session()
session.request("POST", TauronAmiplusSensor.url_login, data=payload_login,
headers=TauronAmiplusSensor.headers)
session.request("POST", TauronAmiplusSensor.url_login, data=payload_login,
headers=TauronAmiplusSensor.headers)
config_date = (datetime.datetime.now() - datetime.timedelta(days_before))
payload = {
"dane[chartDay]": config_date.strftime('%d.%m.%Y'),
"dane[paramType]": "day",
"dane[smartNr]": meter_id,
"dane[chartType]": 1
}
response = session.request("POST", TauronAmiplusSensor.url_charts,
data={**TauronAmiplusSensor.payload_charts, **payload},
headers=TauronAmiplusSensor.headers)
json_data = response.json()
zones = json_data['dane']['zone']
parsed_zones = []
for zone in zones:
start = datetime.time(hour=int(zone['start'][11:]))
stop = datetime.time(hour=int(zone['stop'][11:]))
parsed_zones.append({'start': start, 'stop': stop})
calculated_zones = []
for i in range(0, len(parsed_zones)):
next_i = (i + 1) % len(parsed_zones)
start = datetime.time(parsed_zones[i]['stop'].hour)
stop = datetime.time(parsed_zones[next_i]['start'].hour)
calculated_zones.append({'start': start, 'stop': stop})
power_zones = {1: parsed_zones, 2: calculated_zones}
tariff = json_data['dane']['chart'][0]['Taryfa']
return power_zones, tariff, config_date.strftime('%d.%m.%Y, %H:%M')
class TauronAmiplusSensor(Entity):
url_login = "https://logowanie.tauron-dystrybucja.pl/login"
url_charts = "https://elicznik.tauron-dystrybucja.pl/index/charts"
headers = {
'cache-control': "no-cache",
}
payload_charts = {
"dane[cache]": 0,
"dane[chartType]": 2
}
def __init__(self, name, username, password, meter_id, generation, sensor_type):
self.client_name = name
self.username = username
self.password = password
self.meter_id = meter_id
self.additional_param_enabled = generation or sensor_type.startswith("generation")
self.sensor_type = sensor_type
self.unit = SENSOR_TYPES[sensor_type][1]
configuration = calculate_configuration(username, password, meter_id)
self.power_zones = configuration[0]
self.mode = configuration[1]
self.power_zones_last_update = configuration[2]
self.power_zones_last_update_tech = datetime.datetime.now() - datetime.timedelta(days=1)
self.data = None
self.params = {}
self._state = None
self.update = Throttle(SENSOR_TYPES[sensor_type][0])(self._update)
if not sensor_type == ZONE:
self.state_param = SENSOR_TYPES[sensor_type][2]
self.additional_param_name = SENSOR_TYPES[sensor_type][3][0]
self.additional_param = SENSOR_TYPES[sensor_type][3][1]
@property
def name(self):
return '{} {}'.format(self.client_name, self.sensor_type)
@property
def state(self):
return self._state
@property
def device_state_attributes(self):
_params = {'tariff': self.mode, 'updated': self.power_zones_last_update, **self.params}
return _params
@property
def unit_of_measurement(self):
return self.unit
@property
def icon(self):
return 'mdi:counter'
def _update(self):
self.update_configuration()
if self.sensor_type == ZONE:
self.update_zone()
elif self.sensor_type.endswith("daily"):
self.update_values_daily()
elif self.sensor_type.endswith("monthly"):
self.update_values_monthly()
elif self.sensor_type.endswith("yearly"):
self.update_values_yearly()
def get_session(self):
payload_login = {
"username": self.username,
"password": self.password,
"service": "https://elicznik.tauron-dystrybucja.pl"
}
session = requests.session()
session.request("POST", TauronAmiplusSensor.url_login, data=payload_login,
headers=TauronAmiplusSensor.headers)
session.request("POST", TauronAmiplusSensor.url_login, data=payload_login,
headers=TauronAmiplusSensor.headers)
return session
def update_configuration(self):
now_datetime = datetime.datetime.now()
if (now_datetime - datetime.timedelta(days=1)) >= self.power_zones_last_update_tech and now_datetime.hour >= 10:
config = calculate_configuration(self.username, self.password, self.meter_id, 1)
self.power_zones = config[0]
self.mode = config[1]
self.power_zones_last_update = config[2]
self.power_zones_last_update_tech = now_datetime
def update_zone(self):
if self.mode == TARIFF_G12:
parsed_zones = self.power_zones[1]
now_time = datetime.datetime.now().time()
if len(list(filter(lambda x: x['start'] <= now_time < x['stop'], parsed_zones))) > 0:
self._state = 1
else:
self._state = 2
self.params = {}
for power_zone in self.power_zones:
pz_name = 'zone{} '.format(power_zone)
pz = str(list(map(lambda x: x['start'].strftime('%H:%M') + ' - ' + x['stop'].strftime('%H:%M'),
self.power_zones[power_zone]))).replace('[', '').replace(']', '').replace("'", '')
self.params[pz_name] = pz
else:
self._state = 1
def update_values_daily(self):
session = self.get_session()
payload = {
"dane[chartDay]": (datetime.datetime.now() - datetime.timedelta(1)).strftime('%d.%m.%Y'),
"dane[paramType]": "day",
"dane[smartNr]": self.meter_id,
"dane[checkOZE]": "on" if self.additional_param_enabled else "off"
}
response = session.request("POST", TauronAmiplusSensor.url_charts,
data={**TauronAmiplusSensor.payload_charts, **payload},
headers=TauronAmiplusSensor.headers)
correct_data = False
if response.status_code == 200 and response.text.startswith('{"name"') and response.json()['isFull']:
correct_data = True
else:
session = self.get_session()
payload = {
"dane[chartDay]": (datetime.datetime.now() - datetime.timedelta(2)).strftime('%d.%m.%Y'),
"dane[paramType]": "day",
"dane[smartNr]": self.meter_id,
"dane[checkOZE]": "on" if self.additional_param_enabled else "off"
}
response = session.request("POST", TauronAmiplusSensor.url_charts,
data={**TauronAmiplusSensor.payload_charts, **payload},
headers=TauronAmiplusSensor.headers)
if response.status_code == 200 and response.text.startswith('{"name"'):
correct_data = True
if correct_data:
json_data = response.json()
self._state = round(float(json_data[self.state_param]), 3)
if self.mode == TARIFF_G12:
values = json_data['dane']['chart']
z1 = list(filter(lambda x: x['Zone'] == '1', values))
z2 = list(filter(lambda x: x['Zone'] == '2', values))
sum_z1 = round(sum(float(val['EC']) for val in z1), 3)
sum_z2 = round(sum(float(val['EC']) for val in z2), 3)
day = values[0]['Date']
self.params = {'zone1': sum_z1, 'zone2': sum_z2, 'day': day}
if self.additional_param_enabled:
self.params = {**self.params, self.additional_param_name: round(float(json_data[self.additional_param]), 3)}
def update_values_monthly(self):
session = self.get_session()
payload = {
"dane[chartMonth]": datetime.datetime.now().month,
"dane[chartYear]": datetime.datetime.now().year,
"dane[paramType]": "month",
"dane[smartNr]": self.meter_id,
"dane[checkOZE]": "on" if self.additional_param_enabled else "off"
}
response = session.request("POST", TauronAmiplusSensor.url_charts,
data={**TauronAmiplusSensor.payload_charts, **payload},
headers=TauronAmiplusSensor.headers)
if response.status_code == 200 and response.text.startswith('{"name"'):
json_data = response.json()
self._state = round(float(json_data[self.state_param]), 3)
self.params = {}
if self.mode == TARIFF_G12:
values = json_data['dane']['chart']
z1 = list(filter(lambda x: 'tariff1' in x, values))
z2 = list(filter(lambda x: 'tariff2' in x, values))
sum_z1 = round(sum(float(val['tariff1']) for val in z1), 3)
sum_z2 = round(sum(float(val['tariff2']) for val in z2), 3)
self.params = {'zone1': sum_z1, 'zone2': sum_z2}
if self.additional_param_enabled:
self.params = {**self.params, self.additional_param_name: round(float(json_data[self.additional_param]), 3)}
def update_values_yearly(self):
session = self.get_session()
payload = {
"dane[chartYear]": datetime.datetime.now().year,
"dane[paramType]": "year",
"dane[smartNr]": self.meter_id,
"dane[chartType]": 2,
"dane[checkOZE]": "on" if self.additional_param_enabled else "off"
}
response = session.request("POST", TauronAmiplusSensor.url_charts,
data={**TauronAmiplusSensor.payload_charts, **payload},
headers=TauronAmiplusSensor.headers)
if response.status_code == 200 and response.text.startswith('{"name"'):
json_data = response.json()
self._state = round(float(json_data[self.state_param]), 3)
self.params = {}
if self.mode == TARIFF_G12:
values = json_data['dane']['chart']
z1 = list(filter(lambda x: 'tariff1' in x, values))
z2 = list(filter(lambda x: 'tariff2' in x, values))
sum_z1 = round(sum(float(val['tariff1']) for val in z1), 3)
sum_z2 = round(sum(float(val['tariff2']) for val in z2), 3)
self.params = {'zone1': sum_z1, 'zone2': sum_z2}
if self.additional_param_enabled:
self.params = {**self.params, self.additional_param_name: round(float(json_data[self.additional_param]), 3)}
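# Illustrative manual use outside of Home Assistant (rough sketch only: real credentials and
# meter id are required, the sensor_type must be a key of SENSOR_TYPES, and the constructor
# immediately queries the Tauron API):
#
#   sensor = TauronAmiplusSensor("Tauron", "user@example.com", "secret", "123456", False, ZONE)
#   sensor._update()
#   print(sensor.state, sensor.device_state_attributes)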
|
# Generated by Django 2.2.6 on 2019-12-27 21:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contracts', '0003_auto_20191226_1856'),
]
operations = [
migrations.CreateModel(
name='UserContractualPartyAssociation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('valid_from', models.DateTimeField(default=django.utils.timezone.now)),
('valid_to', models.DateTimeField(null=True)),
('association_type', models.CharField(choices=[('Administrator', 'Administrator'), ('Collaborator', 'Collaborator')], max_length=32)),
],
),
migrations.AddField(
model_name='contractualparty',
name='created_by',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.DeleteModel(
name='ContractualPartyAssociation',
),
migrations.AddField(
model_name='usercontractualpartyassociation',
name='cp',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contracts.ContractualParty'),
),
migrations.AddField(
model_name='usercontractualpartyassociation',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL),
),
]
|
"""
Copyright (C) 2019 Authors of gHHC
This file is part of "hyperbolic_hierarchical_clustering"
http://github.com/nmonath/hyperbolic_hierarchical_clustering
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import numpy as np
import tensorflow as tf
from ghhc.util.eval_dp import eval_dp
from ghhc.util.io import mkdir_p
from absl import logging
tf.enable_eager_execution()
def squared_norm(x, axis=1, keepdims=True):
"""Squared L2 Norm of x."""
return tf.reduce_sum(tf.pow(x, 2), axis=axis, keepdims=keepdims)
def squared_euclidean_cdist(x, y):
"""Squared euclidean distance
Computed as: ||x||^2 + ||y||^2 - 2 x^T y.
Args:
x: N by D matrix
y: M by D matrix
    :returns matrix (N by M) such that result[i,j] = || x[i,:] - y[j,:] ||^2
"""
norms = squared_norm(x, axis=1, keepdims=True) + tf.transpose(squared_norm(y, axis=1, keepdims=True))
dot = 2.0*tf.matmul(x, y, transpose_b=True)
return norms - dot
def poincare_cdist(x, y):
"""Poincare distance
Args:
x: N by D matrix
y: M by D matrix
    :returns matrix (N by M) such that result[i,j] = poincare dist(x[i,:], y[j,:])
"""
numerator = squared_euclidean_cdist(x, y)
denom = (1.0 - squared_norm(x)) * (1.0 - tf.transpose(squared_norm(y, axis=1, keepdims=True)))
arccosh_arg = 1.0 + 2.0 * numerator / denom
res = tf.math.acosh(1e-8 + arccosh_arg)
return res
def squared_euclidean_dist(x, y):
"""Squared euclidean distance
Computed as: ||x||^2 + ||y||^2 - 2 x^T y.
Args:
x: N by D matrix
y: N by D matrix
    :returns vector (N by 1) such that the ith element is || x[i,:] - y[i,:] ||^2
"""
norms = squared_norm(x, axis=1, keepdims=True) + squared_norm(y, axis=1, keepdims=True)
dot = 2*tf.reduce_sum(tf.multiply(x, y), axis=1, keepdims=True)
return norms - dot
def poincare_dist(x, y):
"""Poincare distance between x and y.
Args:
x: N by D matrix
y: N by D matrix
:returns vector (N by 1) such that the ith element is poincare dist(x[i,:], y[i,:])
"""
numerator = squared_euclidean_dist(x, y)
denom = (1.0 - squared_norm(x)) * (1.0 - squared_norm(y))
arccosh_arg = 1.0 + 2.0 * numerator / denom
res = tf.math.acosh(arccosh_arg)
return res
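# Added illustrative helper (not used by the library): when x is the origin, the Poincare
# distance reduces to the closed form 2 * atanh(||y||), which gives a quick numerical
# sanity check for poincare_dist on toy inputs.
def _poincare_dist_example():
    x = tf.constant([[0.0, 0.0]])
    y = tf.constant([[0.6, 0.0]])
    d = poincare_dist(x, y)  # ~1.3863 for this pair
    closed_form = 2.0 * tf.math.atanh(tf.constant(0.6))  # also ~1.3863
    return d, closed_form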
def poincare_norm(x, axis=1, keepdims=True):
"""Squared poincare norm of x."""
return 2.0*tf.math.atanh(tf.linalg.norm(x, axis=axis, keepdims=keepdims))
def parent_order_penalty(p, c, marg):
"""Penalty for parents to have smaller norm than children."""
return tf.maximum(0.0, poincare_norm(p) - poincare_norm(c) + marg) + 1.0
def parent_order_penalty_cdist(p, c, marg):
"""Penalty for parents to have smaller norm than children."""
return tf.maximum(0.0, tf.transpose(poincare_norm(p)) - poincare_norm(c) + marg) + 1.0
class gHHCTree(tf.keras.Model):
"""Object for a ghhc tree."""
def __init__(self, init_tree=None, gamma=0.25, config=None, projection=None):
super(gHHCTree, self).__init__()
self.internals = tf.get_variable('internals', initializer=init_tree)
self.max_norm = 0.8
self.internals_so_far = 0
self.gamma = gamma
self.config = config
self.projection = None
self.cached_pairs = None
if projection is not None:
self.projection = projection
else:
self.projection = lambda x: x
def project(self, x_i, x_j, x_k):
return self.projection(x_i), self.projection(x_j), self.projection(x_k)
def clip(self):
tf.assign(self.internals, tf.clip_by_norm(self.internals, self.max_norm, axes=[1]))
def p_par_broadcast(self, x_i):
return self.p_par_to_broadcast(x_i, self.internals)
def p_par_to_broadcast(self, x_i, nodes):
dists = poincare_cdist(x_i, nodes)
res = tf.multiply(dists, parent_order_penalty_cdist(nodes, x_i, self.gamma))
return res
def p_par_to(self, x_i, nodes):
dists = poincare_dist(x_i, nodes)
res = tf.multiply(dists, parent_order_penalty(nodes, x_i, self.gamma))
return res
def p_par_to_batched_np(self, x_i, nodes, batch_size=1000):
dists = np.zeros((x_i.shape[0], nodes.shape[0]), np.float32)
for i in range(0, x_i.shape[0], batch_size):
logging.log_every_n_seconds(logging.INFO,'p_par_to_batched_np processed %s of %s', 5, i, x_i.shape[0])
for j in range(0, nodes.shape[0], batch_size):
dists[i:(i+batch_size), j:(j+batch_size)] = self.p_par_to_broadcast(x_i[i:(i + batch_size), :], nodes[j:(j + batch_size), :]).numpy()
return dists
def compute_loss(self, x_i, x_j, x_k):
x_i, x_j, x_k = self.project(x_i, x_j, x_k)
x_i_dists = self.p_par_to_broadcast(x_i, self.internals)
x_j_dists = self.p_par_to_broadcast(x_j, self.internals)
x_k_dists = self.p_par_to_broadcast(x_k, self.internals)
max_dists_ij = tf.maximum(x_i_dists, x_j_dists)
gumbel_ij_noise = tf.log(-tf.log(tf.random_uniform(tf.shape(max_dists_ij))))
gumbel_ijk_noise = tf.log(-tf.log(tf.random_uniform(tf.shape(max_dists_ij))))
max_dists_ijk = tf.maximum(x_k_dists, max_dists_ij)
lca_ij_softmax = tf.nn.softmax(-max_dists_ij+gumbel_ij_noise, axis=1)
lca_ij_idx = tf.argmin(max_dists_ij, axis=1)
offset = np.zeros_like(max_dists_ij)
offset[np.arange(offset.shape[0]), lca_ij_idx] = 1000
max_dists_ijk += offset
lca_ijk_softmax = tf.nn.softmax(-max_dists_ijk + gumbel_ijk_noise, axis=1)
logits1 = lca_ij_softmax * x_i_dists - lca_ijk_softmax * x_i_dists
logits2 = lca_ij_softmax * x_j_dists - lca_ijk_softmax * x_j_dists
logits3 = lca_ijk_softmax * x_k_dists - lca_ij_softmax * x_k_dists
per_ex_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits1), logits=logits1) \
+ tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits2), logits=logits2) \
+ tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(logits3), logits=logits3)
loss = tf.reduce_sum(per_ex_loss)
return loss
def p_par_assign_to_internal(self, children, parents, proj_child=True):
if proj_child:
children = self.projection(children)
internal_norm = tf.norm(parents, axis=1, keepdims=True)
internal_ordering = tf.argsort(-tf.squeeze(internal_norm)).numpy()
back_to_orig = tf.argsort(internal_ordering)
parents = parents[internal_ordering,:]
children = children[internal_ordering,:]
dists = self.p_par_to_batched_np(children, parents)
dists[np.tril_indices_from(dists)] = np.Inf
np.fill_diagonal(dists, np.Inf)
dists = dists[back_to_orig,:][:,back_to_orig]
assignments = np.argmin(dists, axis=1)
mindists = np.min(dists, axis=1)
assignments[mindists == np.Inf] = -1
return assignments
def p_par_assign_to(self, children, parents, exclude_diag=False, proj_child=True):
if proj_child:
children = self.projection(children)
dists = self.p_par_to_batched_np(children, parents)
children_norm = tf.norm(children, axis=1, keepdims=True)
internal_norm = tf.norm(parents, axis=1, keepdims=True)
eligible = tf.less(-children_norm + tf.transpose(internal_norm), 0).numpy()
dists[eligible == False] = np.Inf
if exclude_diag:
np.fill_diagonal(dists, np.Inf)
assignments = np.argmin(dists, axis=1)
mindists = np.min(dists, axis=1)
assignments[mindists == np.Inf] = -1
return assignments
def write_tsv(self, filename, leaves, pids=None, lbls=None, update_cache=True):
logging.info('Writing tree tsv to %s' % filename)
logging.info('num leaves %s' % leaves.shape[0])
logging.info('pids is None? %s' % (pids is None))
logging.info('lbls is None? %s' % (lbls is None))
internals = self.internals.numpy()
leaf_to_par_assign = self.p_par_assign_to(leaves, internals)
internal_to_par_assign = self.p_par_assign_to_internal(internals, internals, proj_child=False)
self.cached_pairs = np.concatenate([ np.expand_dims(np.arange(internal_to_par_assign.shape[0]),1), np.expand_dims(internal_to_par_assign,1)],axis=1)
self.cached_pairs = self.cached_pairs[self.cached_pairs[:,1]!=-1]
with open(filename + '.internals', 'w') as fouti:
with open(filename + '.leaves', 'w') as foutl:
with open(filename, 'w') as fout:
i = -1
pid = 'int_%s' % i
best_pid = 'best_int_%s' % i
par_id = 'None'
fout.write('%s\t%s\tNone\n' % (pid, par_id))
fout.write('%s\t%s\tNone\n' % (best_pid, pid))
fouti.write('%s\t%s\tNone\n' % (pid, par_id))
fouti.write('%s\t%s\tNone\n' % (best_pid, pid))
for i in range(leaf_to_par_assign.shape[0]):
logging.log_every_n_seconds(logging.INFO,'Wrote %s leaves' % i,5)
pid = 'pt_%s' % i if pids is None else pids[i]
lbl = pid if lbls is None else lbls[i]
par_id = 'best_int_%s' % leaf_to_par_assign[i]
fout.write('%s\t%s\t%s\n' % (pid, par_id, lbl))
foutl.write('%s\t%s\t%s\n' % (pid, par_id, lbl))
for i in range(internal_to_par_assign.shape[0]):
logging.log_every_n_seconds(logging.INFO,'Wrote %s internals' % i,5)
pid = 'int_%s' % i
par_id = 'int_%s' % internal_to_par_assign[i]
best_pid = 'best_int_%s' % i
fout.write('%s\t%s\tNone\n' % (pid, par_id))
fout.write('%s\t%s\tNone\n' % (best_pid, par_id))
fouti.write('%s\t%s\tNone\n' % (pid, par_id))
fouti.write('%s\t%s\tNone\n' % (best_pid, par_id))
def plot_tree(self, leaves, filename):
internals = self.internals.numpy()
leaf_to_par_assign = self.p_par_assign_to(leaves, internals)
internal_to_par_assign = self.p_par_assign_to_internal(internals, internals, proj_child=False)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(10, 10))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.tick_params(axis='x', which='both', bottom='off', top='off',
color='white')
ax.tick_params(axis='y', which='both', left='off', right='off',
color='white')
# plt.scatter(0, 0, label='root', marker='^', zorder=2)
# plt.annotate('root', xy=(0,0), size=3)
for idx in range(internals.shape[0]):
plt.scatter(internals[idx, 0], internals[idx, 1], label='int_%s' % idx, s=100, marker='^', zorder=2)
# plt.annotate('int_%s' % idx, xy=(internals[idx,0], internals[idx,1]), size=5)
for idx in range(internals.shape[0]):
if internal_to_par_assign[idx] != -1:
plt.plot([internals[idx,0], internals[internal_to_par_assign[idx],0]],
[internals[idx,1], internals[internal_to_par_assign[idx],1]], linewidth=2,
c='k', zorder=1)
# else:
# plt.plot([internals[idx, 0], 0],
# [internals[idx, 1], 0], linewidth=1,
# c='k', zorder=1)
for idx in range(leaves.shape[0]):
plt.scatter(leaves[idx, 0], leaves[idx, 1], s=100, label='%s' % idx, marker='o', zorder=2)
# plt.annotate('pt_%s' % idx, xy=(leaves[idx, 0], leaves[idx, 1]), size=5)
for idx in range(leaves.shape[0]):
if leaf_to_par_assign[idx] != -1:
# print('gpid %s lpid %s' % (grinch_par_id, leaf_to_par_assign[idx]))
plt.plot([leaves[idx, 0], internals[leaf_to_par_assign[idx], 0]],
[leaves[idx, 1], internals[leaf_to_par_assign[idx], 1]], linewidth=2,
c='k', zorder=1)
# else:
# plt.plot([leaves[idx, 0], 0],
# [leaves[idx, 1], 0], linewidth=1,
# c='k', zorder=1)
plt.xlim([-1.1, 1.1])
plt.ylim([-1.1, 1.1])
circle = plt.Circle((0, 0), 1, color='r',linewidth=5, fill=False)
ax.add_artist(circle)
plt.axis('off')
plt.savefig(filename)
def structure_loss(self):
res = tf.reduce_sum(self.child_parent_norm_loss(self.cached_pairs))
# logging.log_every_n(logging.INFO,'cp res: %s', 10,res )
return res
def child_parent_norm_loss(self, pairs):
internal_norms = poincare_norm(self.internals)
children = tf.gather(internal_norms, pairs[:,0])
parents = tf.gather(internal_norms, pairs[:,1])
logits1 = tf.nn.relu(parents - children + self.gamma)
min_norm = tf.argmin(internal_norms).numpy()[0]
logging.log_every_n(logging.INFO,'min_norm %s %s',500,min_norm,internal_norms[min_norm])
max_norm = tf.argmax(internal_norms).numpy()[0]
logging.log_every_n(logging.INFO, 'max_norm %s %s', 500, max_norm, internal_norms[max_norm])
return tf.reduce_sum(logits1)
def rsgd_or_sgd(grads_and_vars, rsgd=True):
if rsgd:
res = []
for g,v in grads_and_vars:
scale = ((1.0 - tf.reduce_sum(tf.multiply(v,v),axis=1,keepdims=True)) ** 2) / 4.0
res.append((scale*g, v))
return res
else:
return grads_and_vars
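# Added sketch (assumes the eager mode enabled above): the scale factor in rsgd_or_sgd is the
# inverse conformal factor (1 - ||v||^2)^2 / 4 of the Poincare-ball metric, i.e. Euclidean
# gradients are rescaled into Riemannian gradients before a plain SGD update.
def _rsgd_example():
    v = tf.Variable([[0.3, 0.4]], dtype=tf.float32)
    opt = tf.train.GradientDescentOptimizer(1e-2)
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(poincare_norm(v))
    grads = tape.gradient(loss, [v])
    opt.apply_gradients(rsgd_or_sgd(zip(grads, [v]), rsgd=True))
    return v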
class gHHCInference(object):
def __init__(self, ghhcTree, optimizer, config, dev_set, dev_lbls):
self.ghhcTree = ghhcTree
self.optimizer = optimizer
self.config = config
self.dev_set = dev_set
self.dev_lbls = dev_lbls
self.best_dev_dp_score = 0.0
self.best_dev_iter = 0.0
self.last_dev_dp_score = 0.0
self.last_dev_iter = 0.0
self.checkpoint_prefix = self.config.checkpoint_dir + "/ckpt"
self.ckpt = tf.train.Checkpoint(optimizer=optimizer,
model=ghhcTree,
optimizer_step=tf.train.get_or_create_global_step())
def update(self, c1, c2, par_id, gp_id, steps=100):
for i in range(steps):
with tf.GradientTape() as tape:
loss = self.ghhcTree.pull_close_par_gp(c1, c2, par_id, gp_id)
grads = tape.gradient(loss, self.ghhcTree.trainable_variables)
self.optimizer.apply_gradients(rsgd_or_sgd(zip(grads, self.ghhcTree.trainable_variables)),
global_step=tf.train.get_or_create_global_step())
self.ghhcTree.clip()
def episode_inference(self, x_i, x_j, x_k, dataset, batch_size=1000, examples_so_far=0):
time_so_far = 0.0
loss_so_far = 0.0
struct_loss_so_far = 0.0
for idx in range(0, x_i.shape[0], batch_size):
if self.config.struct_prior is not None and idx+examples_so_far > 0:
if self.ghhcTree.cached_pairs is None:
self.dev_eval(idx + examples_so_far)
if (idx + examples_so_far) % self.config.struct_prior_every == 0:
for idx2 in range(self.config.num_struct_prior_batches):
start_time = time.time()
logging.log_every_n(logging.INFO,
'[STRUCTURE] Processed %s of %s batches || Avg. Loss %s || Avg Time %s' % (idx2, 100, struct_loss_so_far / max(idx2, 1), time_so_far / max(idx2, 1)),100)
with tf.GradientTape() as tape:
sloss = self.ghhcTree.structure_loss()
struct_loss_so_far += sloss.numpy()
grads = tape.gradient(sloss, self.ghhcTree.trainable_variables)
self.optimizer.apply_gradients(rsgd_or_sgd(zip(grads, self.ghhcTree.trainable_variables)),
global_step=tf.train.get_or_create_global_step())
self.ghhcTree.clip()
end_time = time.time()
time_so_far += end_time - start_time
logging.log(logging.INFO, '[STRUCTURE] Processed %s of %s batches || Avg. Loss %s || Avg Time %s' % (self.config.num_struct_prior_batches, 100, struct_loss_so_far / max(self.config.num_struct_prior_batches, 1), time_so_far / max(self.config.num_struct_prior_batches, 1)))
if (idx + examples_so_far) % self.config.dev_every == 0:
self.dev_eval(idx + examples_so_far)
elif (idx + examples_so_far ) % self.config.save_every == 0:
self.ckpt.save(self.checkpoint_prefix)
self.config.last_model = tf.train.latest_checkpoint(self.config.checkpoint_dir)
self.config.save_config(self.config.exp_out_dir, filename='config.json')
start_time = time.time()
if idx % 100 == 0 and idx > 0:
logging.info('Processed %s of %s batches || Avg. Loss %s || Avg Time %s' % (idx, x_i.shape[0], loss_so_far/idx, time_so_far / max(idx,1)))
with tf.GradientTape() as tape:
bx_i = dataset[x_i[idx:(idx + batch_size)], :]
bx_j = dataset[x_j[idx:(idx + batch_size)], :]
bx_k = dataset[x_k[idx:(idx + batch_size)], :]
loss = self.ghhcTree.compute_loss(bx_i, bx_j, bx_k)
loss_so_far += loss.numpy()
grads = tape.gradient(loss, self.ghhcTree.trainable_variables)
self.optimizer.apply_gradients(rsgd_or_sgd(zip(grads, self.ghhcTree.trainable_variables)),
global_step=tf.train.get_or_create_global_step())
self.ghhcTree.clip()
end_time = time.time()
time_so_far += end_time - start_time
logging.info('Processed %s of %s batches || Avg. Loss %s || Avg Time %s' % (x_i.shape[0], x_i.shape[0], loss_so_far / x_i.shape[0], time_so_far / max(x_i.shape[0], 1)))
# save model at the end of training
self.ckpt.save(self.checkpoint_prefix)
self.config.last_model = tf.train.latest_checkpoint(self.config.checkpoint_dir)
# record the last model in the config.
self.config.save_config(self.config.exp_out_dir, filename='config.json')
return x_i.shape[0]
def dev_eval(self, steps):
if self.dev_set is not None:
start_dev = time.time()
mkdir_p(os.path.join(self.config.exp_out_dir, 'dev'))
filename = os.path.join(self.config.exp_out_dir, 'dev', 'dev_tree_%s.tsv' % steps)
self.ghhcTree.write_tsv(filename,self.dev_set,lbls=self.dev_lbls)
dp = eval_dp(filename, os.path.join(self.config.exp_out_dir, 'dev', 'dev_score_%s.tsv' % steps),
self.config.threads, self.config.dev_points_file)
logging.info('DEV EVAL @ %s minibatches || %s DP' % (steps,dp))
end_dev = time.time()
logging.info('Finished Dev Eval in %s seconds' % (end_dev-start_dev))
if self.config.save_dev_pics:
filename = os.path.join(self.config.exp_out_dir, 'dev', 'dev_tree_%s.png' % steps)
self.ghhcTree.plot_tree(self.dev_set, filename)
# record the best dev score to try to understand if we end up doing worse, not used at inference time
# last model is used at inference.
self.best_dev_dp_score = max(self.best_dev_dp_score,dp)
self.best_dev_iter = steps if self.best_dev_dp_score == dp else self.best_dev_iter
self.last_dev_dp_score = dp
self.last_dev_iter = steps
# save every time we run this eval
self.ckpt.save(self.checkpoint_prefix)
self.config.last_model = tf.train.latest_checkpoint(self.config.checkpoint_dir)
if self.best_dev_dp_score == dp:
self.config.best_model = tf.train.latest_checkpoint(self.config.checkpoint_dir)
self.config.save_config(self.config.exp_out_dir, filename='config.json')
return dp
else:
return 0.0
def inference(self, indexes, dataset, batch_size=1000, episode_size=5000):
batches_so_far = 0
curr_idx = 0
episode_size = self.config.episode_size
if self.config.shuffle:
indexes = indexes[np.random.permutation(indexes.shape[0]), :]
for i in range(self.config.num_iterations):
if curr_idx > indexes.shape[0]:
logging.info('Restarting....')
curr_idx = 0
if self.config.shuffle:
indexes = indexes[np.random.permutation(indexes.shape[0]), :]
logging.info('Starting iteration %s of %s' % (i, self.config.num_iterations))
batches_so_far += self.episode_inference(indexes[curr_idx:(curr_idx+episode_size), 0],
indexes[curr_idx:(curr_idx+episode_size), 1],
indexes[curr_idx:(curr_idx+episode_size), 2],
dataset, batch_size, examples_so_far=batches_so_far)
|
#!/usr/bin/python
#Lyapunov: a library for integrating nonlinear dynamical systems
#Copyright (C) 2013-2018 John Wendell Hall
#
#The author may be reached at jackwhall7@gmail.com.
import time
import lyapunov
import numpy
import matplotlib.pyplot as plt
class MassSpringDemo(object):
"""
Mass spring damper system.
k = b = m = 1.0
No disturbances or control.
"""
def __init__(self):
self.state = 0.0, (1.0, 1.0)
self.u = lambda : 0.0
state = lyapunov.state_property(xname="_state")
def __call__(self):
x, v = self._state
return (v, -v - x + self.u())
def print_two_steps(system = MassSpringDemo()):
""" System must make use of lyapunov.State. """
t_in = [0.1, 0.2]
stepper = lyapunov.euler(system, t_in)
print("No Events - mass spring damper system")
print("Step 0:")
print("time", system.state.t, "| state ", system.state.x)
print("slope", system())
    next(stepper)
print("Step 1:")
print("time", system.state.t, "| state ", system.state.x)
print("slope", system())
    next(stepper)
print("Step 2:")
print("time", system.state.t, "| state ", system.state.x)
print("slope", system())
class SubsystemDemo(lyapunov.ParallelSystems):
"""A mass-spring-damper controlled by a PID."""
def __init__(self):
self.plant = MassSpringDemo()
self.control = lyapunov.PID(Ki=1)
self.reference = lyapunov.StepSignal(step_time=4.0)
self.control.y = lambda: self.plant.state[1]
self.control.r = lambda: self.reference.value
self.plant.u = self.control.u
lyapunov.ParallelSystems.__init__(self, [self.reference,
self.control,
self.plant])
def run_subsystem_demo():
system = SubsystemDemo()
record = lyapunov.Recorder(system)
stepper = lyapunov.adams_bashforth3(system, numpy.linspace(0.0, 8.0, 100))
#stepper = lyapunov.cash_karp(system, 8.0)
print("\nMass-Spring-Damper w/PID control")
print("initial state", system.state)
    start = time.perf_counter()
count = 0
for t, events in stepper:
if events:
stepper.step_across()
system.reference.update()
record.log(events)
print("time elapsed", time.clock() - start)
x_out = numpy.array(record.x)
plt.figure()
plt.plot(x_out[:,0], x_out[:,1])
plt.show()
if __name__ == "__main__":
run_subsystem_demo()
|
from fractions import Fraction
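# The input "j X" gives a decimal X whose last j fractional digits repeat forever; with k
# non-repeating fractional digits the exact value is
#     (floor(10**(j+k) * X) - floor(10**k * X)) / (10**(j+k) - 10**k)
# e.g. j=1, X=0.16 -> 0.1666... = (16 - 1) / (100 - 10) = 15/90 = 1/6. The loop below pads X
# with extra copies of the repeating block so that float(X) is accurate enough for the two
# integer parts.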
c = 1
while True:
line = input()
if line.strip() == "-1":
break
j, X = line.split()
if float(X) == 1.0:
print("Case "+str(c)+": 1/1")
c+=1
continue
if float(X) == 0.0:
print("Case "+str(c)+": 0/1")
c+=1
continue
j = int(j)
if j==0:
print("Case "+str(c)+": "+str(Fraction(str(X))))
c+=1
continue
k = len(X[2:])-j
repeating = X[2+k:]
for i in range(11):
X = X + repeating
numerator = int(10**(j+k) * float(X)) - int(10**k * float(X))
denominator = 10**(j+k) - 10**k
f = Fraction(numerator,denominator)
print("Case "+str(c)+": "+str(f.numerator)+"/"+str(f.denominator))
c+=1
|
# author: @netmanchris
# -*- coding: utf-8 -*-
"""
This module contains functions for working with the access point information
capabilities of the HPE IMC WSM Module using the RESTful API
"""
# This section imports required libraries
import json
import requests
from pyhpeimc.auth import HEADERS
def get_ap_info_all(auth, url):
"""
    Function takes no additional input and issues a RESTful call to HP IMC
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries where each element of the list represents a single wireless
access point which has been
discovered in the HPE IMC WSM module
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.wsm.apinfo import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> all_ap_info = get_ap_info_all(auth.creds, auth.url)
>>> assert type(all_ap_info) is list
>>> assert len(all_ap_info[0]) == 19
>>> assert 'acDevId' in all_ap_info[0]
>>> assert 'acIpAddress' in all_ap_info[0]
>>> assert 'acLabel' in all_ap_info[0]
>>> assert 'apAlias' in all_ap_info[0]
>>> assert 'connectType' in all_ap_info[0]
>>> assert 'hardwareVersion' in all_ap_info[0]
>>> assert 'ipAddress' in all_ap_info[0]
>>> assert 'isFit' in all_ap_info[0]
>>> assert 'label' in all_ap_info[0]
>>> assert 'location' in all_ap_info[0]
>>> assert 'macAddress' in all_ap_info[0]
>>> assert 'onlineClientCount' in all_ap_info[0]
>>> assert 'onlineStatus' in all_ap_info[0]
>>> assert 'serialId' in all_ap_info[0]
>>> assert 'softwareVersion' in all_ap_info[0]
>>> assert 'ssids' in all_ap_info[0]
>>> assert 'status' in all_ap_info[0]
>>> assert 'type' in all_ap_info[0]
>>> assert 'sysName' in all_ap_info[0]
"""
f_url = url + "/imcrs/wlan/apInfo/queryApBasicInfo"
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            if len(response.text) > 0:
                return json.loads(response.text)['apBasicInfo']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ap_info_all: An Error has occurred"
def get_ap_info(ipaddress, auth, url):
"""
    Function takes an IP address as input and issues a RESTful call to HP IMC
:param ipaddress: The current IP address of the Access Point at time of query.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: Dictionary object with the details of the target access point
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.wsm.apinfo import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> ap_info = get_ap_info('10.101.0.170',auth.creds, auth.url)
>>> assert type(ap_info) is dict
>>> assert len(ap_info) == 20
>>> assert 'acDevId' in ap_info
>>> assert 'acIpAddress' in ap_info
>>> assert 'acLabel' in ap_info
>>> assert 'apAlias' in ap_info
>>> assert 'connectType' in ap_info
>>> assert 'hardwareVersion' in ap_info
>>> assert 'ipAddress' in ap_info
>>> assert 'isFit' in ap_info
>>> assert 'label' in ap_info
>>> assert 'location' in ap_info
>>> assert 'locationList' in ap_info
>>> assert 'macAddress' in ap_info
>>> assert 'onlineClientCount' in ap_info
>>> assert 'serialId' in ap_info
>>> assert 'softwareVersion' in ap_info
>>> assert 'ssids' in ap_info
>>> assert 'status' in ap_info
>>> assert 'sysName' in ap_info
>>> assert 'type' in ap_info
"""
get_ap_info_url = "/imcrs/wlan/apInfo/queryApBasicInfoByCondition?ipAddress=" + str(ipaddress)
f_url = url + get_ap_info_url
    try:
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        if response.status_code == 200:
            if len(response.text) > 0:
                return json.loads(response.text)['apBasicInfo']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_ap_info: An Error has occurred"
|
# Generated by Django 2.2.7 on 2019-11-13 12:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sitecampus', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='autor',
name='semestre',
field=models.CharField(choices=[('1', '2019/1'), ('2', '2018/2'), ('3', '2018/1'), ('4', '2017/2')], max_length=1),
),
]
|
print('a')
print('\t\tHermit')
print('i know , they are \'great\' ')
|
# Copyright (C) 2019-2021 Ruhr West University of Applied Sciences, Bottrop, Germany
# AND Elektronische Fahrwerksysteme GmbH, Gaimersheim Germany
#
# This Source Code Form is subject to the terms of the Apache License 2.0
# If a copy of the APL2 was not distributed with this
# file, You can obtain one at https://www.apache.org/licenses/LICENSE-2.0.txt.
from typing import Tuple
from collections import OrderedDict
import numpy as np
import torch
import torch.distributions.constraints as constraints
import pyro
import pyro.distributions as dist
from netcal.scaling import AbstractLogisticRegression
class BetaCalibrationDependent(AbstractLogisticRegression):
"""
This calibration method uses a multivariate variant of a Beta distribution to obtain a
calibration mapping by means of the confidence as well as additional features. This method is originally
proposed by [1]_. This calibration scheme
tries to model several dependencies in the variables given by the input ``X``.
It is necessary to provide all data in input parameter ``X`` as an NumPy array of shape ``(n_samples, n_features)``,
whereas the confidence must be the first feature given in the input array. The ground-truth samples ``y``
must be an array of shape ``(n_samples,)`` consisting of binary labels :math:`y \\in \\{0, 1\\}`. Those
labels indicate if the according sample has matched a ground truth box :math:`\\text{m}=1` or is a false
prediction :math:`\\text{m}=0`.
**Mathematical background:** For confidence calibration in classification tasks, a
confidence mapping :math:`g` is applied on top of a miscalibrated scoring classifier :math:`\\hat{p} = h(x)` to
deliver a calibrated confidence score :math:`\\hat{q} = g(h(x))`.
For detection calibration, we can also use the additional box regression output which we denote as
:math:`\\hat{r} \\in [0, 1]^J` with :math:`J` as the number of dimensions used for the box encoding (e.g.
:math:`J=4` for x position, y position, width and height).
Therefore, the calibration map is not only a function of the confidence score, but also of :math:`\\hat{r}`.
To define a general calibration map for binary problems, we use the logistic function and the combined
input :math:`s = (\\hat{p}, \\hat{r})` of size K by
.. math::
g(s) = \\frac{1}{1 + \\exp(-z(s))} ,
According to [1]_, we can interpret the logit :math:`z` as the logarithm of the posterior odds
.. math::
z(s) = \\log \\frac{f(\\text{m}=1 | s)}{f(\\text{m}=0 | s)} \\approx
        \\log \\frac{f(s | \\text{m}=1)}{f(s | \\text{m}=0)} = \\ell r(s)
For a multivariate probability density function :math:`f(s|\\text{m})`, we use a variant of the beta distribution
described in [2]_ and given by
.. math::
f(s|\\text{m}) = \\frac{1}{B(\\alpha_0, ..., \\alpha_K)}
\\frac{\\prod^K_{k=1} \\lambda_k^{\\alpha_k}(s_k^\\ast)^{\\alpha_k - 1}
\\Big(\\frac{s_k^\\ast}{s_k}\\Big)^2}
{\\Big[1 + \\sum^K_{k=1} \\lambda_k
s_k^\\ast\\Big]^{\\sum^K_{k=0} \\alpha_k}
}
    with shape parameters :math:`\\alpha_k, \\beta_k > 0`, :math:`\\forall k \\in \\{0, ..., K \\}`. For ease of
    notation, we denote :math:`\\lambda_k=\\frac{\\beta_k}{\\beta_0}` and :math:`s^\\ast=\\frac{s}{1-s}`.
Inserting this density function into this framework with :math:`\\alpha_k^+`, :math:`\\beta_k^+` and
:math:`\\alpha_k^-`, :math:`\\beta_k^-` as the distribution parameters for :math:`\\text{m}=1` and
:math:`\\text{m}=0`, respectively, we get a likelihood ratio of
.. math::
\\ell r(s) &= \\sum^K_{k=1} \\alpha_k^+ \\log(\\lambda_k^+) - \\alpha_k^- \\log(\\lambda_k^-) \\\\
&+ \\sum^K_{k=1} (\\alpha_k^+ - \\alpha_k^-) \\log(s^\\ast) \\\\
&+ \\sum^K_{k=0} \\alpha_k^- \\log\\Bigg[\\sum^K_{j=1} \\lambda_j^- s^\\ast_j\\Bigg] \\\\
&- \\sum^K_{k=0} \\alpha_k^+ \\log\\Bigg[\\sum^K_{j=1} \\lambda_j^+ s^\\ast_j\\Bigg] \\\\
&+ c ,
    where
    :math:`c=\\log B(\\alpha_0^-, ..., \\alpha_K^-) - \\log B(\\alpha_0^+, ..., \\alpha_K^+)`.
This is optimized by an Adam optimizer with a learning rate of 1e-3 and a batch size of 256 for
1000 iterations (default).
Parameters
----------
method : str, default: "momentum"
Method that is used to obtain a calibration mapping:
- 'mle': Maximum likelihood estimate without uncertainty using a convex optimizer.
- 'momentum': MLE estimate using Momentum optimizer for non-convex optimization.
- 'variational': Variational Inference with uncertainty.
- 'mcmc': Markov-Chain Monte-Carlo sampling with uncertainty.
momentum_epochs : int, optional, default: 1000
Number of epochs used by momentum optimizer.
mcmc_steps : int, optional, default: 20
Number of weight samples obtained by MCMC sampling.
mcmc_chains : int, optional, default: 1
Number of Markov-chains used in parallel for MCMC sampling (this will result
in mcmc_steps * mcmc_chains samples).
mcmc_warmup_steps : int, optional, default: 100
Warmup steps used for MCMC sampling.
vi_epochs : int, optional, default: 1000
Number of epochs used for ELBO optimization.
independent_probabilities : bool, optional, default: False
Boolean for multi class probabilities.
If set to True, the probability estimates for each
class are treated as independent of each other (sigmoid).
use_cuda : str or bool, optional, default: False
Specify if CUDA should be used. If str, you can also specify the device
number like 'cuda:0', etc.
References
----------
.. [1] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:
"Multivariate Confidence Calibration for Object Detection."
The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops.
.. [2] Libby, David L., and Melvin R. Novick:
"Multivariate generalized beta distributions with applications to utility assessment"
Journal of Educational Statistics 7.4, pp. 271-294, 1982
.. [3] Fabian Küppers, Jan Kronenberger, Jonas Schneider and Anselm Haselhoff:
"Bayesian Confidence Calibration for Epistemic Uncertainty Modelling."
2021 IEEE Intelligent Vehicles Symposium (IV), 2021
"""
def __init__(self, *args, method: str = 'momentum', **kwargs):
""" Create an instance of `BetaCalibrationDependent`. Detailed parameter description given in class docs. """
# an instance of this class is definitely of type detection
if 'detection' in kwargs and kwargs['detection'] == False:
print("WARNING: On BetaCalibrationDependent, attribute \'detection\' must be True.")
kwargs['detection'] = True
super().__init__(*args, method=method, **kwargs)
# -------------------------------------------------
@property
def intercept(self) -> float:
""" Getter for intercept of dependent beta calibration. """
if self._sites is None:
raise ValueError("Intercept is None. You have to call the method 'fit' first.")
return self._sites['bias']['values']
@property
def alphas(self) -> Tuple[np.ndarray, np.ndarray]:
""" Getter for alpha values of dependent beta calibration. """
if self._sites is None:
raise ValueError("Weights is None. You have to call the method 'fit' first.")
index_2 = self.num_features + 1
index_3 = index_2 + self.num_features + 1
weights = self._sites['weights']['values']
return weights[:index_2], weights[index_2:index_3]
@property
def betas(self) -> Tuple[np.ndarray, np.ndarray]:
""" Getter for beta values of dependent beta calibration. """
if self._sites is None:
raise ValueError("Weights is None. You have to call the method 'fit' first.")
index_1 = 2 * (self.num_features + 1)
index_2 = index_1 + self.num_features + 1
weights = self._sites['weights']['values']
return weights[index_1:index_2], weights[index_2:]
# -------------------------------------------------
def prepare(self, X: np.ndarray) -> torch.Tensor:
"""
Preprocessing of input data before called at the beginning of the fit-function.
Parameters
----------
X : np.ndarray, shape=(n_samples, [n_classes]) or (n_samples, [n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
On detection, this array must have 2 dimensions with number of additional box features in last dim.
Returns
-------
torch.Tensor
Prepared data vector X as torch tensor.
"""
assert self.detection, "Detection mode must be enabled for dependent beta calibration."
if len(X.shape) == 1:
X = np.reshape(X, (-1, 1))
        # compute s* = s / (1 - s); numerator and denominator are clipped separately for numerical stability
data = np.clip(X, self.epsilon, 1. - self.epsilon) / np.clip((1. - X), self.epsilon, 1. - self.epsilon)
return torch.tensor(data)
def prior(self):
"""
Prior definition of the weights used for log regression. This function has to set the
variables 'self.weight_prior_dist', 'self.weight_mean_init' and 'self.weight_stddev_init'.
"""
# number of weights
num_weights = 4 * (self.num_features + 1)
self._sites = OrderedDict()
# initial values for mean, scale and prior dist
init_mean = torch.ones(num_weights).uniform_(1. + self.epsilon, 2.)
init_scale = torch.ones(num_weights)
prior = dist.Normal(init_mean, 10 * init_scale, validate_args=True)
# we have constraints on the weights to be positive
# this is usually solved by defining constraints on the MLE optimizer
# however, on MCMC and VI this is not possible
# instead, we are using a "shifted" LogNormal to obtain only positive samples
if self.method in ['variational', 'mcmc']:
# for this purpose, we need to transform the prior mean first and set the
# distribution to be a LogNormal
init_mean = torch.log(torch.exp(init_mean) - 1)
prior = dist.LogNormal(init_mean, 10 * init_scale, validate_args=True)
# set properties for "weights": weights must be positive
self._sites['weights'] = {
'values': None,
'constraint': constraints.greater_than(self.epsilon),
'init': {
'mean': init_mean,
'scale': init_scale
},
'prior': prior
}
# set properties for "bias"
self._sites['bias'] = {
'values': None,
'constraint': constraints.real,
'init': {
'mean': torch.ones(1) * self.epsilon,
'scale': torch.ones(1)
},
'prior': dist.Normal(torch.zeros(1), 10 * torch.ones(1), validate_args=True),
}
def model(self, X: torch.Tensor = None, y: torch.Tensor = None) -> torch.Tensor:
"""
Definition of the log regression model.
Parameters
----------
X : torch.Tensor, shape=(n_samples, n_log_regression_features)
Input data that has been prepared by "self.prepare" function call.
y : torch.Tensor, shape=(n_samples, [n_classes])
Torch tensor with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D) (for multiclass MLE only).
Returns
-------
torch.Tensor, shape=(n_samples, [n_classes])
Logit of the log regression model.
"""
# get indices of weights
index_1 = self.num_features+1
index_2 = index_1 + self.num_features+1
index_3 = index_2 + self.num_features+1
# sample from prior - on MLE, this weight will be set as conditional
bias = pyro.sample("bias", self._sites['bias']['prior'])
weights = pyro.sample("weights", self._sites['weights']['prior'])
# on MCMC or VI, use samples obtained by a "shifted" LogNormal
# the "shifted" +1 term guarantees positive samples only
if self.method in ['variational', 'mcmc']:
weights = torch.log(weights + 1)
assert (weights >= 0).all().item() == True, "Negative weights are not allowed."
# on MCMC sampling, extreme values might occur and can cause an 'inf'
# this will result in invalid prob values - catch infs and set to log of max value
weights[torch.isinf(weights)] = torch.log(torch.tensor(torch.finfo(weights.dtype).max))
# the first dimension of the given input data is the "independent" sample dimension
with pyro.plate("data", X.shape[0]):
# clip values to range (0, inf]
alpha_pos = torch.clamp(weights[:index_1], self.epsilon, np.infty)
alpha_neg = torch.clamp(weights[index_1:index_2], self.epsilon, np.infty)
beta_pos = torch.clamp(weights[index_2:index_3], self.epsilon, np.infty)
beta_neg = torch.clamp(weights[index_3:], self.epsilon, np.infty)
# lambdas are ratio between all betas and beta_0
lambda_pos = beta_pos[1:] / beta_pos[0]
lambda_neg = beta_neg[1:] / beta_neg[0]
log_lambdas_upper = alpha_pos[1:] * torch.log(lambda_pos) - alpha_neg[1:] * torch.log(lambda_neg)
# parameter differences
differences_alpha_upper = alpha_pos[1:] - alpha_neg[1:]
log_values_upper = torch.log(X)
# calculate upper part
upper_part = torch.sum(log_lambdas_upper + (differences_alpha_upper * log_values_upper), dim=1)
# start with summation of alphas for lower part of equation
sum_alpha_pos = torch.sum(alpha_pos)
sum_alpha_neg = torch.sum(alpha_neg)
# calculate lower part
log_sum_lower_pos = torch.log(1. + torch.sum(lambda_pos * X, dim=1))
log_sum_lower_neg = torch.log(1. + torch.sum(lambda_neg * X, dim=1))
lower_part = (sum_alpha_neg * log_sum_lower_neg) - (sum_alpha_pos * log_sum_lower_pos)
# combine both parts and bias to logits
logit = torch.squeeze(bias + upper_part + lower_part)
# if MLE, (slow) sampling is not necessary. However, this is needed for 'variational' and 'mcmc'
if self.method in ['variational', 'mcmc']:
probs = torch.sigmoid(logit)
pyro.sample("obs", dist.Bernoulli(probs=probs, validate_args=True), obs=y)
return logit
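# Hedged usage sketch (added for illustration, not part of netcal itself). It assumes the
# scikit-style fit/transform interface of netcal's calibration methods and random toy data:
# column 0 of X is the confidence, the remaining columns are box features in [0, 1].
def _beta_dependent_example(n_samples: int = 512, n_box_features: int = 4) -> np.ndarray:
    rng = np.random.RandomState(0)
    confidence = rng.uniform(0.05, 0.95, size=(n_samples, 1))
    boxes = rng.uniform(0.05, 0.95, size=(n_samples, n_box_features))
    X = np.concatenate([confidence, boxes], axis=1)
    y = (rng.uniform(size=n_samples) < confidence[:, 0]).astype(int)
    calibrator = BetaCalibrationDependent(method='momentum', detection=True)
    calibrator.fit(X, y)
    return calibrator.transform(X)  # calibrated confidences, shape (n_samples,)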
|
from typing import Container
from . import LINE_NUMBER_KEY
from ._base import Filter
class SequenceLengthFilter(Filter):
def __init__(self, min_length: int):
self.min_length = min_length
def test_value(self, inp: dict) -> bool:
return inp['length'] >= self.min_length
class LineNumberFilter(Filter):
def __init__(self, line_numbers: Container[int]):
self.line_numbers = line_numbers
def test_value(self, inp: dict) -> bool:
return inp[LINE_NUMBER_KEY] in self.line_numbers
|
from argparse import Namespace
default_args = Namespace(
alpha=0.99,
arch='nba',
batch_size=256,
clip_loss_delta=1.0,
clip_norm=3.0,
clip_norm_type='global',
continuous_target_update=True,
debugging_folder='logs/',
device='cpu',
double_q=True,
dropout=0.5,
e=0.1,
evaluate=False,
exp_eps_segments='[(0, 1),(10000, 0.5),(15000,0)], 0',
experiment_type='corridor',
game='FrozenLake-v0',
gamma=0.99,
initial_lr=0.001,
initial_random_steps=10000,
layer_norm=False,
lr_annealing_steps=80000000,
lstm_size=50,
max_global_steps=80000000,
mlp_hiddens=[50],
n_emulator_runners=8,
n_emulators_per_emulator_runner=4,
n_steps=5,
optimizer='adam',
prioritized=True,
prioritized_alpha=0.4,
prioritized_beta0=0.6,
prioritized_eps=0.001,
random_start=True,
replay_buffer_size=1000000,
resume=False,
rom_path='./atari_roms',
single_life_episodes=False,
stochastic=True,
target_update_freq=10000,
target_update_tau=0.001,
use_exp_replay=True,
visualize=False,
)
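# custom_args rebuilds the Namespace from the defaults above, overriding only the fields
# listed in the inner Namespace below (here `e` and `alpha`); later keys win in the dict merge.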
custom_args = Namespace(**{**vars(default_args), **vars(Namespace(
e = 0,
alpha = 0.01,
))})
def main():
import skynet.train
skynet.train.main(custom_args)
if __name__ == '__main__':
main()
|
"""This file contains various wrappers and functions that ease the code digestion and programming in general.
:platform: linux
.. moduleauthor:: Ivan Syzonenko <is2k@mtmail.mtsu.edu>
"""
__license__ = "MIT"
__docformat__ = 'reStructuredText'
import os
import multiprocessing as mp
import hashlib
from shutil import copy2 as cp2
# import heapq
import shutil
import pickle
from typing import NoReturn
from gen_mdp import get_mdp
from gmx_wrappers import gmx_grompp, gmx_trjconv, gmx_trjcat, gmx_mdrun, gmx_mdrun_mpi, gmx_mdrun_mpi_with_sched
def get_digest(in_str: str) -> str:
"""Computes digest of the input string.
Args:
:param str in_str: typically list of seeds concatenated with _. like s_0_1_5
Returns:
:return: blake2 hash of the in_str. We use short version,
but you can use full version - slightly slower, but less chances of name collision.
:rtype: str
"""
# return hashlib.md5(in_str.encode()).hexdigest()
# if you have python older than 3.6 - use md5 or update python
return hashlib.blake2s(in_str.encode()).hexdigest()
def create_core_mapping(ncores: int = mp.cpu_count(), nseeds: int = 1) -> list:
"""Tries to map cores evenly among tasks.
Args:
:param int ncores: number of cores available
:param int nseeds: number of seeds used in current run
Returns:
:return: list of tuples, each tuple consist of (cores number, task identifier)
:rtype: list
"""
ncores = ncores if ncores > 0 else 1
nseeds = nseeds if nseeds > 0 else 1
print('I will use {} cores for {} seeds'.format(ncores, nseeds))
even = ncores // nseeds
remainder = ncores % nseeds
sched_arr = list()
if even:
cur_sched = [(even+1, i) if i < remainder else (even, i) for i in range(nseeds)]
sched_arr.append(cur_sched)
else:
seeds_range_iter = iter(range(nseeds))
tot_batches = nseeds//ncores
remainder = nseeds-tot_batches*ncores
tot_batches = tot_batches if not remainder else tot_batches+1 # if we can`t divide tasks evenly, we need one more batch
for i in range(tot_batches):
if i < tot_batches-1:
cur_sched = [(1, 0)]*ncores
else:
cur_sched = [(1, 0) if i < remainder else (0, 0) for i in range(ncores)]
free_cores = ncores - sum(i for i, j in cur_sched)
if free_cores:
cur_sched = [(j[0]+1, 0) if i < free_cores else (j[0], 0) for i, j in enumerate(cur_sched)]
sched_arr.append(cur_sched)
for i, cur_sched in enumerate(sched_arr):
for j, cornum_seed in enumerate(cur_sched):
if cornum_seed[0]:
cur_seed = next(seeds_range_iter)
sched_arr[i][j] = (cornum_seed[0], cur_seed)
print('Seed {} will be run on {} cores.'.format(cur_seed, cornum_seed[0]))
return sched_arr
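# Added worked example: create_core_mapping(ncores=3, nseeds=5) cannot give every seed its own
# core at once, so it returns two batches, roughly
#     [[(1, 0), (1, 1), (1, 2)], [(2, 3), (1, 4), (0, 0)]]
# i.e. seeds 0-2 get one core each in the first batch, then seeds 3 and 4 share the three
# cores in the second batch (a (0, 0) entry is an idle slot).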
def get_previous_runs_info(check_dir: str) -> list:
"""Scans direcotory for prior results and outputs the list of filenames.
Args:
:param str check_dir: directory to scan for prior trajectories
Returns:
:return: list of filenames .xtc or .gro
:rtype: list
"""
# filenames_found = os.walk(check_dir).__next__()[2]
filenames_found = [f.split("/")[-1] for f in os.listdir(check_dir)]
# filenames_found = [f.path.split("/")[-1] for f in os.scandir(check_dir)]
filenames_found_important = [f for f in filenames_found if f.split('.')[1] in ['xtc', 'gro']]
del filenames_found
print('Found files: {} with .gro and .xtc'.format(len(filenames_found_important)))
return filenames_found_important
def check_precomputed_noize(an_file: str):
"""Checks whether file with precomputed ambient noise exists.
Tries to read correct number of metrics, in case of error throws and exception
Otherwise returns dict{metric_name: noise_value}
Args:
:param str an_file: ambient noise filename to check
Returns:
:return: dict{metric_name: noise_value}
:rtype: dict or None
"""
if an_file in os.walk(".").__next__()[2]:
print(an_file, ' was found. Reading... ')
with open(an_file, 'r') as f:
noize_arr = f.readlines()
try:
res_arr = [res.strip().split(' : ') for res in noize_arr]
err_node = dict()
for metr, val in res_arr:
err_node[metr.strip()] = float(val.strip())
except Exception as e:
print(e)
return None
return err_node
return None
def make_a_step(work_dir: str, cur_seed: int, seed_dirs: dict, top_file: str, ndx_file: str, seed_digest_filename: str,
old_name_digest: str, past_dir: str, ncores: int = 1) -> NoReturn:
"""Version for the case when you use one machine, for example, local computer or one remote server.
Generates the actual MD simulation by first - setting the simulation with grommp,
then using several mdruns, and finally conctatenating the result into the one file.
Args:
:param str work_dir: path to the directory where seed dirs reside
:param int cur_seed: current seed value used for MD production
:param dict seed_dirs: dict which contains physical path to
the directory where simulation with particular seed is performed
:param str top_file: .top - topology of the current conformation
:param str ndx_file: .ndx - index of the protein atoms of the current conformation
:param str seed_digest_filename: digest for a current MD simulation, used to store files in the past
:param str old_name_digest: digest for a prior MD simulation
:param str past_dir: path to the directory with prior computations
:param int ncores: number of cores to use for this task
"""
# global extra_past
old_name = os.path.join(past_dir, old_name_digest)
if not os.path.exists(old_name+'.gro'):
# old_name = os.path.join(extra_past, old_name_digest)
# if not os.path.exists(old_name + '.gro'):
raise Exception("make_a_step: did not find {} in {} ".format(old_name_digest, past_dir))
gmx_grompp(work_dir, cur_seed, top_file, old_name)
new_name = os.path.join(past_dir, seed_digest_filename)
gmx_mdrun(work_dir, cur_seed, new_name + '.gro', ncores)
gmx_trjconv(f=os.path.join(seed_dirs[cur_seed], 'md.xtc'), o='{}.xtc'.format(new_name),
n=ndx_file, s=os.path.join(seed_dirs[cur_seed], 'md.tpr'), pbc='mol', b=1)
try:
cp2(os.path.join(seed_dirs[cur_seed], 'md.edr'), '{}.edr'.format(new_name))
except:
print('Error when tried to copy energy file. Maybe you do not produce them ? Then comment this line.')
os.remove(os.path.join(seed_dirs[cur_seed], 'md.xtc'))
def make_a_step2(work_dir: str, cur_seed: int, seed_dirs: dict, top_file: str, ndx_file: str, seed_digest_filename: str,
old_name_digest: str, past_dir: str, hostname: list, ncores: int) -> NoReturn:
"""Version for the case when you use cluster and have hostnames.
Generates the actual MD simulation by first - setting the simulation with grommp,
then using several mdruns, and finally conctatenating the result into the one file.
Args:
:param str work_dir: path to the directory where seed dirs reside
:param int cur_seed: current seed value used for MD production
:param dict seed_dirs: dict which contains physical path to the directory
where simulation with particular seed is performed
:param str top_file: .top - topology of the current conformation
:param str ndx_file: .ndx - index of the protein atoms of the current conformation
:param str seed_digest_filename: digest for a current MD simulation, used to store files in the past
:param str old_name_digest: digest for a prior MD simulation
:param str past_dir: path to the directory with prior computations
:param list hostname: hostname(s) to use for MD simulation
:param int ncores: number of cores to use for this task
"""
# global extra_past
old_name = os.path.join(past_dir, old_name_digest)
if not os.path.exists(old_name + '.gro'):
# old_name = os.path.join(extra_past, old_name_digest)
# if not os.path.exists(old_name + '.gro'):
raise Exception("make_a_step2: did not find {} in {}".format(old_name_digest, past_dir))
gmx_grompp(work_dir, cur_seed, top_file, old_name)
new_name = os.path.join(past_dir, seed_digest_filename)
gmx_mdrun_mpi(work_dir, cur_seed, new_name + '.gro', hostname, ncores)
gmx_trjconv(f=os.path.join(seed_dirs[cur_seed], 'md.xtc'), o='{}.xtc'.format(new_name),
n=ndx_file, s=os.path.join(seed_dirs[cur_seed], 'md.tpr'), pbc='mol', b=1)
try:
cp2(os.path.join(seed_dirs[cur_seed], 'md.edr'), '{}.edr'.format(new_name))
except:
print('Error when tried to copy energy file. Maybe you do not produce them ? Then comment this line.')
os.remove(os.path.join(seed_dirs[cur_seed], 'md.xtc'))
def make_a_step3(work_dir: str, cur_seed: int, seed_dirs: dict, top_file: str, ndx_file: str, seed_digest_filename: str,
old_name_digest: str, past_dir: str, ncores: int, ntomp: int = 1) -> NoReturn:
"""Version for the case when you use scheduler and have many cores, but no hostnames.
Generates the actual MD simulation by first - setting the simulation with grommp,
then using several mdruns, and finally conctatenating the result into the one file.
Args:
:param str work_dir: path to the directory where seed dirs reside
:param int cur_seed: current seed value used for MD production
:param dict seed_dirs: dict which contains physical path to the directory where simulation with particular seed is performed
:param str top_file: .top - topology of the current conformation
:param str ndx_file: .ndx - index of the protein atoms of the current conformation
:param str seed_digest_filename: digest for a current MD simulation, used to store files in the past
:param str old_name_digest: digest for a prior MD simulation
:param str past_dir: path to the directory with prior computations
:param int ncores: number of cores to use for this task
:param int ntomp: number of OMP threads to use during the simulation
"""
# global extra_past
old_name = os.path.join(past_dir, old_name_digest)
if not os.path.exists(old_name + '.gro'):
# old_name = os.path.join(extra_past, old_name_digest)
# if not os.path.exists(old_name + '.gro'):
raise Exception("make_a_step3: did not find {} in {}".format(old_name_digest, past_dir))
gmx_grompp(work_dir, cur_seed, top_file, old_name)
new_name = os.path.join(past_dir, seed_digest_filename)
# gmx_mdrun_mpi(work_dir, cur_seed, new_name + '.gro', hostname, ncores)
gmx_mdrun_mpi_with_sched(work_dir, cur_seed, new_name + '.gro', ncores, ntomp)
gmx_trjconv(f=os.path.join(seed_dirs[cur_seed], 'md.xtc'), o='{}.xtc'.format(new_name),
n=ndx_file, s=os.path.join(seed_dirs[cur_seed], 'md.tpr'), pbc='mol', b=1)
try:
cp2(os.path.join(seed_dirs[cur_seed], 'md.edr'), '{}.edr'.format(new_name))
except:
print('Error when tried to copy energy file. Maybe you do not produce them ? Then comment this line.')
os.remove(os.path.join(seed_dirs[cur_seed], 'md.xtc'))
def get_seed_dirs(work_dir: str, list_with_cur_seeds: list, simulation_temp: int, sd: dict = None) -> dict:
"""Create directories with unique names for simulation with specified seeds and puts .mdp, config files for the MD simulation.
Args:
:param str work_dir: path to work directory, where all seed directories reside
:param list list_with_cur_seeds: list of seed currently used
:param int simulation_temp: simulation temperature used to generate proper .mdp file
        :param dict sd: deprecated (no longer used, kept for some time); previous seed dirs
Returns:
:return: dictionary with seed dir paths
        :rtype: dict
"""
if not sd:
sd = dict()
for seed in list_with_cur_seeds:
seed_dir = os.path.join(work_dir, str(seed))
sd[seed] = seed_dir
if not os.path.exists(seed_dir):
os.makedirs(seed_dir)
with open(os.path.join(sd[seed], 'md.mdp'), 'w') as f:
f.write(get_mdp(seed, simulation_temp))
return sd
def rm_seed_dirs(seed_dirs: dict) -> NoReturn:
"""Removes seed directory and all it's content
Args:
:param dict seed_dirs: dict which contains physical path to the directory where simulation with particular seed is performed
Removes old working directories to save disc space.
"""
for seed_dir in seed_dirs.values():
if os.path.exists(seed_dir):
shutil.rmtree(seed_dir, ignore_errors=True)
def get_new_seeds(old_seeds: list, seed_num: int = 4) -> list:
"""Returns next seed sequence.
Args:
:param list old_seeds: list of previous seeds
:param int seed_num: number of unique seeds in the current run
Returns:
:return: list of new seeds
        :rtype: list
"""
max_seeds = 64000 # change this if you want more exploration
if min(old_seeds) + seed_num > max_seeds:
return None
return [seed + seed_num for seed in old_seeds]
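# Hedged usage sketch (not part of the original workflow): shows how
# get_new_seeds advances a seed set and signals exhaustion with None.
# The seed values below are made up purely for illustration.
def _demo_get_new_seeds():
    assert get_new_seeds([1, 2, 3, 4]) == [5, 6, 7, 8]
    assert get_new_seeds([63999, 64000, 64001, 64002]) is None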
def trjcat_many(hashed_names: list, past_dir: str, out_name: str) -> NoReturn:
"""Concatenates many trajectories into one file.
Args:
:param list hashed_names: .xtc filenames to concatenate
:param str past_dir: path to the directory with prior computations
:param str out_name: single output filename
Returns:
Generates one file with many frames.
"""
wave = 100
tot_chunks = int((len(hashed_names) + 1) / wave)
print('wave={}, tot_chunks={}'.format(wave, tot_chunks))
    gmx_trjcat(f=[os.path.join(past_dir, hashed_name) + '.xtc' for hashed_name in hashed_names[:wave]],
               o='./combined_traj.xtc', n='./prot_dir/prot.ndx', cat=True, vel=False, sort=False, overwrite=True)
    for i in range(wave, len(hashed_names), wave):
        os.rename('./combined_traj.xtc', './combined_traj_prev.xtc')
        gmx_trjcat(f=[" ./combined_traj_prev.xtc "] + [os.path.join(past_dir, hashed_name) + '.xtc' for hashed_name in hashed_names[i:i+wave]],
                   o='./combined_traj.xtc',
                   n='./prot_dir/prot.ndx', cat=True, vel=False, sort=False, overwrite=True)
        if int(i / wave) % 10 == 0:
            print('{}/{} ({:.1f}%)'.format(int(i / wave), tot_chunks, 100 * int(i / wave) / tot_chunks))
    if os.path.exists('./combined_traj_prev.xtc'):
        os.remove('./combined_traj_prev.xtc')
    os.rename('./combined_traj.xtc', out_name)
def general_bak(fname: str, state: tuple) -> NoReturn:
"""Stores variables in the picke with the specific name
Args:
:param str fname: filename for the pickle
:param tuple state: variables to store
Returns:
Generates a file with pickled data.
"""
    if os.path.exists(os.path.join(os.getcwd(), fname)):
        try:
            os.rename(os.path.join(os.getcwd(), fname), os.path.join(os.getcwd(), fname + '_prev'))
        except Exception:
            # If the rename fails because a stale backup is in the way,
            # drop the old backup and retry the rename.
            os.remove(os.path.join(os.getcwd(), fname + '_prev'))
            os.rename(os.path.join(os.getcwd(), fname), os.path.join(os.getcwd(), fname + '_prev'))
with open(fname, 'wb') as f:
pickle.dump(state, f)
def general_rec(fname: str) -> tuple:
"""Reads pickle content from the file.
Args:
:param str fname: pickle filename
Returns:
:return: state from the pickle
:rtype: tuple
"""
with open(fname, 'rb') as f:
state = pickle.load(f)
return state
def main_state_backup(state: tuple) -> NoReturn:
"""Just a wrapper around the general_bak
Args:
:param tuple state: (visited_queue, open_queue, main_dict)
"""
general_bak('small.pickle', state)
def supp_state_backup(state: tuple) -> NoReturn:
"""Just a wrapper around the general_bak
Args:
:param tuple state: (tol_error, seed_list, seed_dirs, seed_change_counter, skipped_counter, cur_metric_name,
cur_metric, counter_since_seed_changed, guiding_metric, greed_mult,
best_so_far_name, best_so_far, greed_count)
"""
general_bak('big.pickle', state)
def main_state_recover() -> tuple:
"""Just a wrapper around the general_rec
Returns:
:return: state from the pickle
"""
return general_rec('small.pickle')
def supp_state_recover() -> tuple:
"""Just a wrapper around the general_rec
Returns:
:return: state from the pickle
"""
return general_rec('big.pickle')
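# Hedged round-trip sketch (illustration only, never called by the pipeline):
# general_bak writes a tuple to a pickle (keeping any previous file as
# '<name>_prev') and general_rec reads it back unchanged. The filename and
# state used here are made up.
def _demo_pickle_roundtrip():
    state = (['node_a', 'node_b'], {'seed': 1})
    general_bak('demo_state.pickle', state)
    assert general_rec('demo_state.pickle') == state
    for leftover in ('demo_state.pickle', 'demo_state.pickle_prev'):
        if os.path.exists(leftover):
            os.remove(leftover)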
|
"""
python3 predict.py <test_file> <output_file>
python3 predict.py "../resource/asnlib/publicdata/test_review.json" "prediction.json"
spark-submit predict.py <test_file> <output_file>
"""
import sys
import time
import json
from pyspark import SparkConf, SparkContext, StorageLevel
import support
import platform
system_type = platform.system()
if system_type == 'Linux':
print(system_type)
# for run on vocareum
import os
os.environ['PYSPARK_PYTHON'] = '/usr/local/bin/python3.6'
os.environ['PYSPARK_DRIVER_PYTHON'] = '/usr/local/bin/python3.6'
train_file = "../resource/asnlib/publicdata/train_review.json"
test_file = sys.argv[1]
model_file = "model.json"
output_file = sys.argv[2]
elif system_type == 'Darwin':
print(system_type)
# run for local macos
test_file = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/test_review.json"
output_file = "../predict/prediction_als.json"
train_file = "file:///Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/train_review.json"
als_model_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/model/als.json"
agm_train_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/model/agm_train.json"
business_avg_file = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/data/business_avg.json"
else:
print('wrong system type.')
sys.exit()
# # for tuning
# N_NEIGHBORS_ITEMBASED = int(sys.argv[1])
# DEFAULT_OUTPUT = None
# WEIGHT = float(sys.argv[2])
def predictICF(corated, target_bid, model):
"""
corated - {bid: star, ...}
"""
    if corated is None:
return None
corated.pop(target_bid, None)
bid_cor = list(corated.keys())
collect = []
for b in bid_cor:
pair = None
if b < target_bid:
pair = (b, target_bid)
else:
pair = (target_bid, b)
# if b == target_bid:
# print('same:', pair)
w = model.get(pair)
        if w is not None:
# pair may not have a value in the model
# when b == target_bid, pair have no value, too
collect.append((pair, w, b))
# else:
# collect.append((pair, 0, b))
# print(collect)
collect.sort(key=lambda x: x[1], reverse=True)
if len(collect) < N_NEIGHBORS_ITEMBASED:
return None
neighbors = collect[:N_NEIGHBORS_ITEMBASED]
sum_w = 0
n = 0
for p, w, b in neighbors:
star = corated[b]
n += star * w
sum_w += w
if sum_w == 0:
return None
else:
        return n / sum_w
# predict_stars = n / sum_w
# origin_n = Decimal(str(predict_stars))
# ans_n = origin_n.quantize(Decimal('0'), rounding=ROUND_HALF_UP)
# return float(ans_n)
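# Hedged sketch of the estimate predictICF implements: a similarity-weighted
# average of the user's ratings on the most similar co-rated items. It is
# standalone and does not depend on the N_NEIGHBORS_ITEMBASED tuning constant
# (which is only set in the commented-out tuning block above); the ratings and
# weights in the example are invented.
def _demo_weighted_average(neighbor_stars_and_weights):
    num = sum(star * w for star, w in neighbor_stars_and_weights)
    den = sum(w for _, w in neighbor_stars_and_weights)
    return None if den == 0 else num / den
# _demo_weighted_average([(4.0, 0.9), (3.0, 0.5), (5.0, 0.2)]) -> 3.8125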
def loadAlsModel(model_file):
with open(model_file, 'r', encoding='utf-8') as fp:
model = json.load(fp)
user_feature = model[0]
product_feature = model[1]
als_model = support.Als()
als_model.setModel(user_feature, product_feature)
return als_model
def outputResultToFile(prediction, output_file):
with open(output_file, 'w', encoding='utf-8') as fp:
for item in prediction:
t = {
'user_id': item[0][0],
'business_id': item[0][1],
'stars': item[1]
}
fp.write(json.dumps(t))
fp.write('\n')
def dealwithNone(v, bid, business_avg):
    if v is not None:
return v
b_avg = business_avg.get(bid, business_avg['UNK'])
return b_avg
def normalX(x):
    if x is None:
        return x
    return min(5.0, max(0.0, x))
def predictAlsOrTrain(uid, bid, als_model, train_data):
    res1 = train_data.get((uid, bid))
    res2 = als_model.predict(uid, bid)
    if res1 is not None and res2 is not None:
        res = res1 * 0.3 + res2 * 0.7
    elif res1 is None and res2 is not None:
        res = res2
    elif res1 is not None and res2 is None:
        res = res1
    else:
        res = None
    # clamp the blended prediction to the valid star range
    res = normalX(res)
    return res
def predict():
conf = SparkConf() \
.setAppName("project") \
.setMaster("local[*]") \
.set("spark.driver.memory","4g")
sc = SparkContext(conf=conf)
sc.setLogLevel("ERROR")
business_avg = support.getAvg(business_avg_file)
agm_train_l = support.readRenameTable(agm_train_file)
train_data = sc.parallelize(agm_train_l).map(lambda x: (tuple(x[0]), [x[1]])) \
.reduceByKey(lambda x, y: x + y) \
.mapValues(lambda vs: support.meanList(vs)) \
.collectAsMap()
# without none
als_model = loadAlsModel(als_model_file)
prediction = sc.textFile(test_file) \
.map(json.loads) \
.map(lambda x: (x['user_id'], x['business_id'])) \
.map(lambda x: (x, predictAlsOrTrain(x[0], x[1], als_model, train_data))) \
.map(lambda x: (x[0], dealwithNone(x[1], x[0][1], business_avg))) \
.collect()
outputResultToFile(prediction, output_file)
if __name__ == "__main__":
t_start = time.time()
predict()
t_end = time.time()
print('time: %fs' % (t_end-t_start))
|
#!/usr/bin/env python3
# coding:utf-8
# Work backwards from the last monkey.
peaches = 4  # after the 5th monkey divides the peaches, at least 4 are left
while peaches < 5000:  # the answer is not unique; for now only search below 5000
    tmp = peaches
    for i in range(5):  # 5 monkeys
        tmp = tmp / 4 * 5 + 1  # step back from the 5th monkey to the 1st
    if tmp % 1 == 0:
        print("Sum = %d" % tmp)
        break
    else:
        peaches += 4  # after the 5 monkeys have divided, the remainder must be a multiple of 4
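# Hedged forward check (added for illustration): starting from the total found
# above, each monkey discards 1 peach and takes 1/5 of the rest, so the count
# must stay integral at every step. _check_monkeys(3121) returns True, and 3121
# is the smallest total the backward search prints.
def _check_monkeys(total, monkeys=5):
    for _ in range(monkeys):
        if (total - 1) % 5 != 0:
            return False
        total = (total - 1) // 5 * 4
    return True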
|
import bakefont3 as bf3
import bakefont3.encode
import unicodedata
from PIL import Image
import numpy as np
class Saveable:
def __init__(self, data):
self.data = data
def bytes(self):
return self.data
def save(self, filename):
with open(filename, 'wb') as fp:
fp.write(self.data)
class _default_cb:
def __init__(self):
pass
def stage(self, msg):
pass
def step(self, current, total):
pass
def info(self, msg):
pass
class pack:
def getFontID(self, name):
for index, font in enumerate(self.fonts):
if name == font[0]: return index
raise ValueError
def getModeID(self, fontmode):
name, size, antialias = fontmode
if isinstance(name, str): name = self.getFontID(name)
for index, mode in enumerate(self.modes):
if (name, size, antialias) == mode: return index
raise ValueError
    def __init__(self, fonts, tasks, sizes, cb=_default_cb()):
        """
        :param fonts: a mapping font name => font face
        :param tasks: a list of (mode, charset name, charset) tuples, where
                      mode is a tuple (font name, size, antialias?)
        :param sizes: a (possibly infinite) sequence of sizes to try
        :param cb: a callback object with methods `stage(self, msg)`,
                   `step(self, current, total)` and `info(self, msg)`.
        """
        self.data = None
        self.image = None
        self.size = (0, 0, 0)
# capture args just once if they're generated
fonts = dict(fonts)
tasks = list(tasks)
# ---------------------------------------------------------------------
cb.stage("Processing Parameters")
# ---------------------------------------------------------------------
seen = set()
for name in fonts:
assert isinstance(name, str)
# implements freetype-py interface
assert hasattr(fonts[name], "family_name")
assert fonts[name].is_scalable
if fonts[name] in seen:
print("warning: two different font names share the same font face object")
else:
seen.add(fonts[name])
del seen
for fontmode, setname, charset in tasks:
# don't clobber charset in case its a generator
assert isinstance(fontmode, tuple)
assert isinstance(setname, str)
name, size, antialias = fontmode
assert isinstance(name, str)
assert 1 < size < 255
assert name in fonts, "font mode references a missing font name"
# convert parameters for use in lookup tables
# construct a mapping font ID => (font name, font face)
fontlist = []
for name in fonts:
fontlist.append((name, fonts[name]))
self.fonts = fontlist
# construct a mapping fontmode ID => (font ID, size, antialias?)
modelist = []
for mode, _, _ in tasks:
name, size, antialias = mode
fontID = self.getFontID(name)
mode = (fontID, size, antialias)
if mode not in modelist:
modelist.append(mode)
modelist = sorted(modelist) # by ascending fontID, size
self.modes = modelist
# construct a concrete list of tasks
# (modeID, name, set(characters))
_tasks = []
seen = set()
for mode, name, charset in tasks:
modeID = self.getModeID(mode)
pair = (modeID, name)
task = (modeID, name, set(charset))
_tasks.append(task)
if pair in seen:
raise KeyError("task contains a duplicate (mode, charname) pair (%d, %s)" % (modeID, name))
else:
seen.add((modeID, name))
tasks = _tasks
# construct a mapping fontmode ID => superset of characters
# used by *all* tasks sharing that fontmode
modeChars = {}
# and a table (fontmode ID, charsetname, charset)
modeTable = []
for modeID, name, charset in tasks:
assert (modeID, name, charset) not in modeTable
modeTable.append((modeID, name, charset))
if not modeChars.get(modeID):
modeChars[modeID] = set()
modeChars[modeID] = modeChars[modeID].union(charset)
self.modeTable = modeTable
# for each modeChars charset, create a mapping codepoint => rendered glyph
# ---------------------------------------------------------------------
cb.stage("Rendering Glyphs")
# ---------------------------------------------------------------------
count = 0
numglyphs = 0
for modeID, charset in modeChars.items():
numglyphs += len(charset)
modeGlyphs = dict()
for modeID, charset in modeChars.items():
glyphset = dict()
fontID, size, antialias = self.modes[modeID]
fontname, face = self.fonts[fontID]
size_fp = int(size * 64.0) # convert to fixed point 26.6 format
dpi = 72 # typographic DPI where 1pt = 1px
face.set_char_size(size_fp, 0, dpi, 0)
for char in charset:
cb.step(count, numglyphs); count += 1
if isinstance(char, str) and len(char) == 1:
codepoint = ord(char)
elif isinstance(char, int) and 0 <= char <= 2**32:
codepoint = char
else:
raise TypeError("Invalid codepoint in charset")
if face.get_char_index(codepoint):
render = bf3.Render(face, codepoint, antialias)
glyphset[codepoint] = bf3.Glyph(codepoint, render)
else:
print("notice: font %s doesn't include codepoint %#x / %s (%s)" %
(repr(fontname), codepoint, repr(chr(codepoint)), unicodedata.name(chr(codepoint), "unknown name")))
modeGlyphs[modeID] = glyphset
self.modeGlyphs = modeGlyphs
# make a list of all glyph objects, for fitting
allGlyphs = []
for _, glyphset in modeGlyphs.items():
for _, glyph in glyphset.items():
allGlyphs.append(glyph)
self.allGlyphs = allGlyphs
# ---------------------------------------------------------------------
cb.stage("Fitting Glyphs")
# ---------------------------------------------------------------------
# sort by height for packing - good heuristic
allGlyphs.sort(key=lambda glyph: glyph.render.height, reverse=True)
# estimate area for best-case smallest area with 100% packing efficiency
minVolume = 0
for glyph in allGlyphs:
minVolume += glyph.render.width * glyph.render.height;
# attempt a fit for each size
count = 0
for size in sizes:
cb.step(0, len(allGlyphs))
width, height, depth = size
volume = width * height * depth
if minVolume > volume:
cb.info("Early discard for size %s" % repr(size))
continue # skip this size
if _fit(size, allGlyphs, cb):
self.size = size
break
else:
cb.info("No fit for size %s" % repr(size))
continue
# ---------------------------------------------------------------------
cb.stage("Composing Texture Atlas")
# ---------------------------------------------------------------------
if self.size[0]:
self.image = _image(self.size, allGlyphs)
# ---------------------------------------------------------------------
cb.stage("Generating binary")
# ---------------------------------------------------------------------
if self.size[0]:
self.data = Saveable(b''.join(bakefont3.encode.all(self, cb)))
# ---------------------------------------------------------------------
cb.stage("Done")
# ---------------------------------------------------------------------
def _fit(size, glyphs, cb):
if not glyphs: return True
width, height, depth = size
cube = bf3.Cube(0, 0, 0, width, height, depth)
spaces = bf3.TernaryTree(cube)
count = 0
num = len(glyphs)
for glyph in glyphs:
cb.step(count, num); count+=1
if glyph.render.width and glyph.render.height:
fit = spaces.fit(glyph.render)
if not fit: return False
glyph.x0 = fit.x0
glyph.y0 = fit.y0
glyph.z0 = fit.z0
glyph.x1 = fit.x0 + glyph.render.width
glyph.y1 = fit.y0 + glyph.render.height
glyph.z1 = fit.z0 + glyph.render.depth
# because we don't want people to think their image is broken,
# make sure the alpha channel has the most information
# by swapping red and alpha
if depth == 4 and glyph.z0 == 0:
glyph.z0 = 3
glyph.z1 = 4
elif depth == 4 and glyph.z0 == 3:
glyph.z0 = 0
glyph.z1 = 1
return True
def _image(size, glyphs):
width, height, depth = size
# create a greyscale image for each channel i.e. z-layer
if depth == 4:
mode = 'RGBA'
channels = [
Image.new("L", (width, height), 0),
Image.new("L", (width, height), 0),
Image.new("L", (width, height), 0),
Image.new("L", (width, height), 0),
]
elif depth == 3:
mode = 'RGB'
channels = [
Image.new("L", (width, height), 0),
Image.new("L", (width, height), 0),
Image.new("L", (width, height), 0),
]
elif depth == 1:
# greyscale
channels = [
Image.new("L", (width, height), 0),
]
else:
raise ValueError("Invalid depth for image (expected 1, 3, 4) got %d" % depth)
for g in glyphs:
if not g.render.image: continue
channels[g.z0].paste(g.render.image, (g.x0, g.y0, g.x1, g.y1))
if depth == 1:
return channels[0]
# merge into a RGB or RGBA image
# convert each channel to a numpy array
img8 = [None] * depth
for i in range(0, depth):
data = channels[i].getdata()
img = np.fromiter(data, np.uint8)
img = np.reshape(img, (channels[i].height, channels[i].width))
img8[i] = img
# merge each channel into a RGBA image
if depth == 4:
image = np.stack((img8[0], img8[1], img8[2], img8[3]), axis=-1)
elif depth == 3:
image = np.stack((img8[0], img8[1], img8[2]), axis=-1)
return Image.fromarray(image, mode=mode)
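# Hedged standalone sketch of the channel-merge step in _image above: a few
# greyscale PIL images are converted to uint8 arrays and stacked along the
# last axis to form an RGBA atlas. Sizes and fill values are arbitrary.
def _demo_merge_channels(width=4, height=3):
    greys = [Image.new("L", (width, height), 64 * i) for i in range(4)]
    arrays = [np.asarray(g, dtype=np.uint8) for g in greys]
    rgba = np.stack(arrays, axis=-1)  # shape (height, width, 4)
    return Image.fromarray(rgba, mode='RGBA')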
|
from nnunet.training.loss_functions.dice_loss import CrossentropyND
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
class nnUNetTrainerCE(nnUNetTrainer):
def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
unpack_data=True, deterministic=True, fp16=False):
super(nnUNetTrainerCE, self).__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
unpack_data, deterministic, fp16)
self.loss = CrossentropyND()
|
from .agents import (DefaultGymAgent, RandomAgent, BalancedRandomAgent, MinimaxAgent, BaseMonteCarloAgent, HumanInputAgent,
DQNAgent2L_56x5, DQNAgent4L_56x5, DQNAgent2L_56x5_2_sep, DQNAgent2L_17x5_2, DQNAgent2L_17x5_2_sep,
DoubleAgent)
|
# Taken from http://www2.census.gov/geo/docs/maps-data/maps/reg_div.txt
census_regions = {
"CT": "NORTHEAST",
"ME": "NORTHEAST",
"MA": "NORTHEAST",
"NH": "NORTHEAST",
"RI": "NORTHEAST",
"VT": "NORTHEAST",
"NJ": "NORTHEAST",
"NY": "NORTHEAST",
"PA": "NORTHEAST",
"IL": "MIDWEST",
"IN": "MIDWEST",
"MI": "MIDWEST",
"OH": "MIDWEST",
"WI": "MIDWEST",
"IA": "MIDWEST",
"KS": "MIDWEST",
"MN": "MIDWEST",
"MO": "MIDWEST",
"NE": "MIDWEST",
"ND": "MIDWEST",
"SD": "MIDWEST",
"DE": "SOUTH",
"DC": "SOUTH",
"FL": "SOUTH",
"GA": "SOUTH",
"MD": "SOUTH",
"NC": "SOUTH",
"SC": "SOUTH",
"VA": "SOUTH",
"WV": "SOUTH",
"AL": "SOUTH",
"KY": "SOUTH",
"MS": "SOUTH",
"TN": "SOUTH",
"AR": "SOUTH",
"LA": "SOUTH",
"OK": "SOUTH",
"TX": "SOUTH",
"AZ": "WEST",
"CO": "WEST",
"ID": "WEST",
"MT": "WEST",
"NV": "WEST",
"NM": "WEST",
"UT": "WEST",
"WY": "WEST",
"AK": "WEST",
"CA": "WEST",
"HI": "WEST",
"OR": "WEST",
"WA": "WEST",
}
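# Hedged usage sketch (illustration only): counting how many states (plus DC)
# fall in each census region defined above.
def _demo_region_counts():
    from collections import Counter
    return Counter(census_regions.values())
# Expected: SOUTH 17, WEST 13, MIDWEST 12, NORTHEAST 9 (51 entries in total).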
|
import numpy as np
import scipy.special as sp
from scipy.stats import norm
import matplotlib.pyplot as pl
# Gaussian Process Regression with Censored Data Using
# Expectation Propagation
# Perry Groot, Peter Lucas
l = 1.0
s = 0.2
n = 2000
f = np.linspace(0.0, 2.0, n+1)
sqrt2 = np.sqrt(2.0)
sqrtpi = np.sqrt(np.pi)
cdf_integral_all = 0.5 * sp.erf(1.0/(sqrt2 * s)) + 0.5 + s / (sqrt2 * sqrtpi) * np.exp(-1.0/(2*s**2))
cdf_integral = l * sp.erf(l/(sqrt2 * s)) + s / (sqrt2 * sqrtpi) * (np.exp(-l**2/(2*s**2)) - 1.0)
print(cdf_integral, cdf_integral_all, cdf_integral / cdf_integral_all)
fig, ax = pl.subplots()
cdf = norm.cdf((l - f) / s)
ax.plot(f, cdf)
print(np.trapz(cdf, f))
print(np.trapz(cdf[0:n//2], f[0:n//2]) / np.trapz(cdf, f))
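# Hedged numerical cross-check (added for illustration): scipy.integrate.quad
# gives an independent estimate of the same CDF integrals that are computed
# analytically and via np.trapz above.
from scipy.integrate import quad
quad_all, _ = quad(lambda x: norm.cdf((l - x) / s), 0.0, 2.0)
quad_below_l, _ = quad(lambda x: norm.cdf((l - x) / s), 0.0, l)
print(quad_all, quad_below_l, quad_below_l / quad_all)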
|
"""Contains functions for doing the inverse and forward normal mode transforms.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
nm_trans: Uses matrix multiplication to do normal mode transformations.
nm_rescale: Uses matrix multiplication to do ring polymer contraction
or expansion.
nm_fft: Uses fast-Fourier transforms to do normal modes transformations.
Functions:
mk_nm_matrix: Makes a matrix to transform between the normal mode and bead
representations.
mk_rs_matrix: Makes a matrix to transform between one number of beads and
another. Higher normal modes in the case of an expansion are set to zero.
"""
__all__ = ['nm_trans', 'nm_rescale', 'nm_fft']
import numpy as np
from ipi.utils.messages import verbosity, info
def mk_nm_matrix(nbeads):
"""Gets the matrix that transforms from the bead representation
to the normal mode representation.
If we return from this function a matrix C, then we transform between the
bead and normal mode representation using q_nm = C . q_b, q_b = C.T . q_nm
Args:
nbeads: The number of beads.
"""
b2nm = np.zeros((nbeads,nbeads))
b2nm[0,:] = np.sqrt(1.0)
    for j in range(nbeads):
        for i in range(1, nbeads//2+1):
            b2nm[i,j] = np.sqrt(2.0)*np.cos(2*np.pi*j*i/float(nbeads))
        for i in range(nbeads//2+1, nbeads):
            b2nm[i,j] = np.sqrt(2.0)*np.sin(2*np.pi*j*i/float(nbeads))
    if (nbeads%2) == 0:
        b2nm[nbeads//2,0:nbeads:2] = 1.0
        b2nm[nbeads//2,1:nbeads:2] = -1.0
return b2nm/np.sqrt(nbeads)
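# Hedged sanity-check sketch (not part of i-PI): the bead -> normal-mode matrix
# is orthogonal, so C . C^T should be the identity and the transform preserves
# vector norms. The bead count is an arbitrary example value.
def _check_nm_matrix_orthogonal(nbeads=6):
    C = mk_nm_matrix(nbeads)
    return np.allclose(np.dot(C, C.T), np.eye(nbeads))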
def mk_rs_matrix(nb1, nb2):
"""Gets the matrix that transforms a path with nb1 beads into one with
nb2 beads.
If we return from this function a matrix T, then we transform between the
system with nb1 bead and the system of nb2 beads using q_2 = T . q_1
Args:
nb1: The initial number of beads.
nb2: The final number of beads.
"""
if (nb1 == nb2):
return np.identity(nb1,float)
elif (nb1 > nb2):
b1_nm = mk_nm_matrix(nb1)
nm_b2 = mk_nm_matrix(nb2).T
#builds the "reduction" matrix that picks the normal modes we want to keep
b1_b2 = np.zeros((nb2, nb1), float)
b1_b2[0,0] = 1.0
        for i in range(1, nb2//2+1):
            b1_b2[i,i] = 1.0
            b1_b2[nb2-i, nb1-i] = 1.0
        if (nb2 % 2 == 0):
            #if we are contracting down to an even number of beads, then we have to
            #pick just one of the last degenerate modes to match onto the single
            #stiffest mode in the new path
            b1_b2[nb2//2, nb1-nb2//2] = 0.0
rs_b1_b2 = np.dot(nm_b2, np.dot(b1_b2, b1_nm))
return rs_b1_b2*np.sqrt(float(nb2)/float(nb1))
else:
return mk_rs_matrix(nb2, nb1).T*(float(nb2)/float(nb1))
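# Hedged sketch (not part of i-PI): contracting a ring polymer and expanding it
# back keeps only the retained normal modes, so for a constant path (all beads
# equal, i.e. pure centroid mode) the round trip should be exact. Bead counts
# and the single-atom path are arbitrary example values.
def _check_rs_roundtrip(nb1=8, nb2=4):
    T_down = mk_rs_matrix(nb1, nb2)
    T_up = mk_rs_matrix(nb2, nb1)
    q = np.ones((nb1, 3))  # one atom, all beads at the same position
    return np.allclose(np.dot(T_up, np.dot(T_down, q)), q)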
class nm_trans:
"""Helper class to perform beads <--> normal modes transformation.
Attributes:
_b2nm: The matrix to transform between the bead and normal mode
representations.
_nm2b: The matrix to transform between the normal mode and bead
representations.
"""
def __init__(self, nbeads):
"""Initializes nm_trans.
Args:
nbeads: The number of beads.
"""
self._b2nm = mk_nm_matrix(nbeads)
self._nm2b = self._b2nm.T
def b2nm(self, q):
"""Transforms a matrix to the normal mode representation.
Args:
q: A matrix with nbeads rows, in the bead representation.
"""
return np.dot(self._b2nm,q)
def nm2b(self, q):
"""Transforms a matrix to the bead representation.
Args:
q: A matrix with nbeads rows, in the normal mode representation.
"""
return np.dot(self._nm2b,q)
class nm_rescale:
"""Helper class to rescale a ring polymer between different number of beads.
Attributes:
_b1tob2: The matrix to transform between a ring polymer with 'nbeads1'
beads and another with 'nbeads2' beads.
_b2tob1: The matrix to transform between a ring polymer with 'nbeads2'
beads and another with 'nbeads1' beads.
"""
def __init__(self, nbeads1, nbeads2):
"""Initializes nm_rescale.
Args:
nbeads1: The initial number of beads.
nbeads2: The rescaled number of beads.
"""
self._b1tob2 = mk_rs_matrix(nbeads1,nbeads2)
self._b2tob1 = self._b1tob2.T*(float(nbeads1)/float(nbeads2))
def b1tob2(self, q):
"""Transforms a matrix from one value of beads to another.
Args:
q: A matrix with nbeads1 rows, in the bead representation.
"""
return np.dot(self._b1tob2,q)
def b2tob1(self, q):
"""Transforms a matrix from one value of beads to another.
Args:
q: A matrix with nbeads2 rows, in the bead representation.
"""
return np.dot(self._b2tob1,q)
class nm_fft:
"""Helper class to perform beads <--> normal modes transformation
using Fast Fourier transforms.
Attributes:
fft: The fast-Fourier transform function to transform between the
bead and normal mode representations.
ifft: The inverse fast-Fourier transform function to transform
between the normal mode and bead representations.
qdummy: A matrix to hold a copy of the bead positions to transform
them to the normal mode representation.
qnmdummy: A matrix to hold a copy of the normal modes to transform
them to the bead representation.
nbeads: The number of beads.
natoms: The number of atoms.
"""
def __init__(self, nbeads, natoms):
"""Initializes nm_trans.
Args:
nbeads: The number of beads.
natoms: The number of atoms.
"""
self.nbeads = nbeads
self.natoms = natoms
try:
import pyfftw
info("Import of PyFFTW successful", verbosity.medium)
self.qdummy = pyfftw.n_byte_align_empty((nbeads, 3*natoms), 16, 'float32')
self.qnmdummy = pyfftw.n_byte_align_empty((nbeads//2+1, 3*natoms), 16, 'complex64')
self.fft = pyfftw.FFTW(self.qdummy, self.qnmdummy, axes=(0,), direction='FFTW_FORWARD')
self.ifft = pyfftw.FFTW(self.qnmdummy, self.qdummy, axes=(0,), direction='FFTW_BACKWARD')
except ImportError: #Uses standard numpy fft library if nothing better
#is available
info("Import of PyFFTW unsuccessful, using NumPy library instead", verbosity.medium)
self.qdummy = np.zeros((nbeads,3*natoms), dtype='float32')
self.qnmdummy = np.zeros((nbeads//2+1,3*natoms), dtype='complex64')
def dummy_fft(self):
self.qnmdummy = np.fft.rfft(self.qdummy, axis=0)
def dummy_ifft(self):
self.qdummy = np.fft.irfft(self.qnmdummy, n=self.nbeads, axis=0)
self.fft = lambda: dummy_fft(self)
self.ifft = lambda: dummy_ifft(self)
def b2nm(self, q):
"""Transforms a matrix to the normal mode representation.
Args:
q: A matrix with nbeads rows and 3*natoms columns,
in the bead representation.
"""
if self.nbeads == 1:
return q
self.qdummy[:] = q
self.fft()
if self.nbeads == 2:
return self.qnmdummy.real/np.sqrt(self.nbeads)
        nmodes = self.nbeads//2
self.qnmdummy /= np.sqrt(self.nbeads)
qnm = np.zeros(q.shape)
qnm[0,:] = self.qnmdummy[0,:].real
if self.nbeads % 2 == 0:
self.qnmdummy[1:-1,:] *= np.sqrt(2)
(qnm[1:nmodes,:], qnm[self.nbeads:nmodes:-1,:]) = (self.qnmdummy[1:-1,:].real, self.qnmdummy[1:-1,:].imag)
qnm[nmodes,:] = self.qnmdummy[nmodes,:].real
else:
self.qnmdummy[1:,:] *= np.sqrt(2)
(qnm[1:nmodes+1,:], qnm[self.nbeads:nmodes:-1,:]) = (self.qnmdummy[1:,:].real, self.qnmdummy[1:,:].imag)
return qnm
def nm2b(self, qnm):
"""Transforms a matrix to the bead representation.
Args:
qnm: A matrix with nbeads rows and 3*natoms columns,
in the normal mode representation.
"""
if self.nbeads == 1:
return qnm
if self.nbeads == 2:
self.qnmdummy[:] = qnm
self.ifft()
return self.qdummy*np.sqrt(self.nbeads)
        nmodes = self.nbeads//2
odd = self.nbeads - 2*nmodes # 0 if even, 1 if odd
qnm_complex = np.zeros((nmodes+1, len(qnm[0,:])), complex)
qnm_complex[0,:] = qnm[0,:]
if not odd:
(qnm_complex[1:-1,:].real, qnm_complex[1:-1,:].imag) = (qnm[1:nmodes,:], qnm[self.nbeads:nmodes:-1,:])
qnm_complex[1:-1,:] /= np.sqrt(2)
qnm_complex[nmodes,:] = qnm[nmodes,:]
else:
(qnm_complex[1:,:].real, qnm_complex[1:,:].imag) = (qnm[1:nmodes+1,:], qnm[self.nbeads:nmodes:-1,:])
qnm_complex[1:,:] /= np.sqrt(2)
self.qnmdummy[:] = qnm_complex
self.ifft()
return self.qdummy*np.sqrt(self.nbeads)
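# Hedged consistency sketch (not part of i-PI): the FFT-based transform should
# agree with the dense-matrix nm_trans implementation; qdummy/qnmdummy are
# single precision, so the comparison uses a loose tolerance. Bead and atom
# counts are arbitrary example values.
def _check_fft_matches_matrix(nbeads=6, natoms=2):
    q = np.random.random((nbeads, 3*natoms))
    dense = nm_trans(nbeads).b2nm(q)
    fast = nm_fft(nbeads, natoms).b2nm(q)
    return np.allclose(dense, fast, atol=1e-5)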
|
### Import required modules ###
import numpy
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
#import cdms2
### Define globals ###
res='c'
area_threshold = 1.0
# Zoom out for PSA
h = 12000  # height of satellite in km (converted to metres below)
lon_central = 235
lat_central = -60
### Plot the map ###
#map = Basemap(projection='spaeqd',boundinglat=-40,lon_0=180,resolution='l') # Polar Azimuthal Equidistant Projection
#map = Basemap(projection='splaea',boundinglat=-10,lon_0=90,resolution='l') # Polar Lambert Azimuthal Projection
map = Basemap(projection='nsper',lon_0=lon_central,lat_0=lat_central,satellite_height=h*1000.,resolution=res,area_thresh=area_threshold)
#plot coastlines, draw label meridians and parallels.
map.drawcoastlines()
map.drawparallels(numpy.arange(-90,90,30),labels=[1,0,0,0],color='grey',dashes=[1,3])
map.drawmeridians(numpy.arange(0,360,30),labels=[0,0,0,1],color='grey',dashes=[1,3])
## Simple plot (no data) ##
# fill continents 'coral' (with zorder=0), color wet areas 'aqua'
map.drawmapboundary(fill_color='#99ffff')
map.fillcontinents(color='#cc9966',lake_color='#99ffff')
### More complex plot ##
#
#fin = cdms2.open(,'r')
#tVar = fin(variable_list[row,col],time=(timmean[0],timmean[1]),squeeze=1)
#tVar_lon = tVar.getLongitude()[:]
#tVar_lat = tVar.getLatitude()[:]
#
#tVar = tVar - 273.16
#
## make 2-d grid of lons, lats
#lons, lats = np.meshgrid(longitudes,latitudes)
#
##set desired contour levels
#clevs = np.arange(960,1061,5)
#
## compute native x,y coordinates of grid.
#x, y = m(lons, lats)
#
#CS2 = m.contourf(x,y,slp,clevs,cmap=plt.cm.RdBu_r,animated=True)
#
#plt.show()
plt.savefig('/work/dbirving/processed/spatial_maps/Antarctica_zoomed_out.eps')
|
from app.routes.inventory import inventory
# Silence flake8 by referencing otherwise unused imports
__all__ = ['inventory']
|
from . import pkcs7, spc, oids, ctl
def guarded_ber_decode(data, asn1_spec=None):
from pyasn1.codec.ber import decoder as ber_decoder
from signify.exceptions import ParseError
from signify import _print_type
try:
result, rest = ber_decoder.decode(data, asn1Spec=asn1_spec)
except Exception as e:
raise ParseError("Error while parsing %s BER: %s" % (_print_type(asn1_spec), e))
if rest:
raise ParseError("Extra information after parsing %s BER" % _print_type(asn1_spec))
return result
def guarded_der_decode(data, asn1_spec=None):
from pyasn1.codec.der import decoder as der_decoder
from signify.exceptions import ParseError
from signify import _print_type
try:
result, rest = der_decoder.decode(data, asn1Spec=asn1_spec)
except Exception as e:
raise ParseError("Error while parsing %s DER: %s" % (_print_type(asn1_spec), e))
if rest:
raise ParseError("Extra information after parsing %s DER" % _print_type(asn1_spec))
return result
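# Hedged usage sketch (assumes pyasn1 is importable, as the helpers above
# already require): round-trip a simple INTEGER through DER encoding and
# decode it again with the guarded helper.
def _demo_guarded_der_decode():
    from pyasn1.type import univ
    from pyasn1.codec.der import encoder as der_encoder
    data = der_encoder.encode(univ.Integer(42))
    value = guarded_der_decode(data, asn1_spec=univ.Integer())
    return int(value) == 42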
|