content stringlengths 5 1.05M |
|---|
from distutils.core import setup

# Package metadata collected in one mapping and expanded into setup(),
# so each field is easy to scan and edit independently.
_PACKAGE_INFO = dict(
    name='pystream',  # distribution name (same as the package folder)
    packages=['pystream'],  # importable packages to ship
    version='0.2',  # bump on every release
    license='MIT',  # see https://help.github.com/articles/licensing-a-repository
    description='Java Stream Api analogue.',  # one-line summary shown on PyPI
    author='Yahor Paromau',
    author_email='yahor.paromau@gmail.com',
    url='https://github.com/RikiTikkiTavi/PyStream-API',  # project home page
    download_url='https://github.com/RikiTikkiTavi/PyStream-API/archive/v_01.tar.gz',  # source tarball
    keywords=['stream-api', 'stream', 'java stream-api'],  # PyPI search keywords
    classifiers=[
        'Development Status :: 4 - Beta',  # 3 - Alpha / 4 - Beta / 5 - Production/Stable
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
    ],
)

setup(**_PACKAGE_INFO)
|
from typing import TypeVar, Mapping, Any, List, Generic, Type, overload
from brigadier import Command, RedirectModifier, ImmutableStringReader
from brigadier.tree import CommandNode
# Generic type variables shared by the classes below:
S = TypeVar('S')  # the command-source type
T = TypeVar('T')  # the parsed result type of an argument
V = TypeVar('V')  # the requested type in get_argument-style lookups
# necessary for type hint linting to understand that the class exists >:(
class StringRange:
    # Minimal forward declaration so the annotations inside the real
    # StringRange definition below can reference the class name at
    # definition time (this file does not use
    # `from __future__ import annotations`).
    def __init__(self, start: int, end: int):
        self.start = start
        self.end = end
class StringRange:
    """A half-open span ``[start, end)`` of character indices into a string."""

    def __init__(self, start: int, end: int):
        self.start = start  # inclusive start index
        self.end = end      # exclusive end index

    @staticmethod
    def at(pos: int) -> "StringRange":
        """Return an empty range positioned at ``pos``."""
        return StringRange(pos, pos)

    @staticmethod
    def between(start: int, end: int) -> "StringRange":
        """Return the range covering ``[start, end)``."""
        return StringRange(start, end)

    @staticmethod
    def encompassing(a: "StringRange", b: "StringRange") -> "StringRange":
        """Return the smallest range containing both ``a`` and ``b``."""
        return StringRange(min(a.start, b.start), max(a.end, b.end))

    def get(self, string) -> str:
        """Slice this range out of ``string``.

        ``string`` may be a plain ``str`` or an ImmutableStringReader-like
        object exposing ``get_string()``.  Bug fix: the previous version
        used ``@overload`` incorrectly -- the second ``def`` replaced the
        reader overload at runtime, so passing a reader attempted to slice
        the reader object itself.
        """
        if hasattr(string, 'get_string'):
            string = string.get_string()
        return string[self.start:self.end]

    def is_empty(self) -> bool:
        """True when the range covers no characters."""
        return self.start == self.end

    def __len__(self):
        return self.end - self.start

    def __eq__(self, other):
        # Added alongside __hash__: previously hashing was value-based but
        # equality was identity-based, so equal ranges compared unequal.
        if not isinstance(other, StringRange):
            return NotImplemented
        return self.start == other.start and self.end == other.end

    def __str__(self):
        return 'StringRange{start=' + str(self.start) + ', end=' + str(self.end) + '}'

    def __hash__(self):
        return hash((self.start, self.end))
class ParsedArgument(Generic[S, T]):
    """One parsed command argument: the span it occupied in the raw input
    plus the parsed result value."""

    def __init__(self, start: int, end: int, result: T):
        self.range = StringRange.between(start, end)  # span in the raw input
        self.result = result  # the parsed value

    def __eq__(self, other):
        # Added for consistency with the value-based __hash__ below;
        # compares the span endpoints directly so it does not depend on
        # StringRange defining its own __eq__.
        if not isinstance(other, ParsedArgument):
            return NotImplemented
        return (self.range.start, self.range.end, self.result) == \
               (other.range.start, other.range.end, other.result)

    def __hash__(self):
        return hash((self.range, self.result))
class ParsedCommandNode(Generic[S]):
    # TODO: not yet implemented -- placeholder so CommandContext can be
    # annotated with List[ParsedCommandNode[S]].
    pass
# necessary for type hint linting to understand that the class exists >:(
class CommandContext(Generic[S]):
    # Forward declaration so the annotations inside the real
    # CommandContext definition below can reference the class name
    # at definition time.
    pass
class CommandContext(Generic[S]):
    """Everything known about one parsed command invocation: the source,
    the raw input, the parsed arguments, and the node chain that matched.

    Annotations referencing other module classes are quoted (lazy) so this
    class can be defined before those names are fully available.
    """

    def __init__(self, source: "S", input: str, arguments: "Mapping[str, ParsedArgument[S, Any]]",
                 command: "Command[S]", root_node: "CommandNode[S]",
                 nodes: "List[ParsedCommandNode[S]]", range: "StringRange",
                 child: "CommandContext[S]", modifier: "RedirectModifier[S]", forks: bool):
        self.source = source        # who/what is executing the command
        self.input = input          # the raw command string
        self.arguments = arguments  # name -> ParsedArgument
        self.command = command      # the command to run; may be None
        self.root_node = root_node  # root of the dispatch tree used to parse
        self.nodes = nodes          # nodes matched, in parse order
        self.range = range          # span of `input` this context covers
        self.child = child          # context of a redirected sub-command; may be None
        self.modifier = modifier    # redirect modifier; may be None
        self.forks = forks          # whether this redirect forks into many sources

    def copy_for(self, source: "S") -> "CommandContext[S]":
        """Return this context re-targeted at ``source``.

        Bug fix: the copy previously passed ``self.source`` through, so
        the requested new source was silently ignored.
        """
        if source == self.source:
            return self
        return CommandContext(source, self.input, self.arguments, self.command, self.root_node,
                              self.nodes, self.range, self.child, self.modifier, self.forks)

    def get_last_child(self) -> "CommandContext[S]":
        """Follow the ``child`` chain to the deepest redirected context."""
        result = self
        while result.child is not None:
            result = result.child
        return result

    def get_argument(self, name: str, clazz: "Type[V]") -> "V":
        """Return the parsed argument ``name``, checked to be a ``clazz`` instance.

        Raises:
            KeyError: if no argument with that name was parsed.
            TypeError: if the argument's value is not an instance of ``clazz``.
        """
        if name not in self.arguments:
            raise KeyError('No such argument \'' + name + '\' exists on this command')
        argument = self.arguments[name]
        result = argument.result
        if isinstance(result, clazz):
            return result
        raise TypeError('Argument ' + name + ' is defined as ' + str(type(result)) + ', not ' + str(clazz))

    def has_nodes(self) -> bool:
        """True when at least one command node was matched."""
        return len(self.nodes) != 0

    def __hash__(self):
        # Mirrors the Java-style `31 * h + term` accumulation.  Fixes:
        #  * the optional terms needed parentheses -- previously
        #    `31 * result + x if cond else 0` applied the conditional to the
        #    whole expression, resetting the hash to 0 whenever
        #    command/child was None;
        #  * `nodes` is a list and `arguments` is a mapping, both
        #    unhashable -- hash immutable snapshots of their contents.
        result = hash(self.source)
        result = 31 * result + hash(frozenset(self.arguments.items()))
        result = 31 * result + (hash(self.command) if self.command is not None else 0)
        result = 31 * result + hash(self.root_node)
        result = 31 * result + hash(tuple(self.nodes))
        result = 31 * result + (hash(self.child) if self.child is not None else 0)
        return result
|
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook statespace_sarimax_pymc3.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Fast Bayesian estimation of SARIMAX models
# ## Introduction
#
# This notebook will show how to use fast Bayesian methods to estimate
# SARIMAX (Seasonal AutoRegressive Integrated Moving Average with eXogenous
# regressors) models. These methods can also be parallelized across multiple
# cores.
#
# Here, fast methods means a version of Hamiltonian Monte Carlo called the
# No-U-Turn Sampler (NUTS) developed by Hoffmann and Gelman: see [Hoffman,
# M. D., & Gelman, A. (2014). The No-U-Turn sampler: adaptively setting path
# lengths in Hamiltonian Monte Carlo. Journal of Machine Learning Research,
# 15(1), 1593-1623.](https://arxiv.org/abs/1111.4246). As they say, "the
# cost of HMC per independent sample from a target distribution of dimension
# $D$ is roughly $\mathcal{O}(D^{5/4})$, which stands in sharp contrast with
# the $\mathcal{O}(D^{2})$ cost of random-walk Metropolis". So for problems
# of larger dimension, the time-saving with HMC is significant. However it
# does require the gradient, or Jacobian, of the model to be provided.
#
# This notebook will combine the Python libraries
# [statsmodels](https://www.statsmodels.org/stable/index.html), which does
# econometrics, and [PyMC3](https://docs.pymc.io/), which is for Bayesian
# estimation, to perform fast Bayesian estimation of a simple SARIMAX model,
# in this case an ARMA(1, 1) model for US CPI.
#
# Note that, for simple models like AR(p), base PyMC3 is a quicker way to
# fit a model; there's an [example
# here](https://docs.pymc.io/notebooks/AR.html). The advantage of using
# statsmodels is that it gives access to methods that can solve a vast range
# of statespace models.
#
# The model we'll solve is given by
#
# $$
# y_t = \phi y_{t-1} + \varepsilon_t + \theta_1 \varepsilon_{t-1}, \qquad
# \varepsilon_t \sim N(0, \sigma^2)
# $$
#
# with 1 auto-regressive term and 1 moving average term. In statespace
# form it is written as:
#
# $$
# \begin{align}
# y_t & = \underbrace{\begin{bmatrix} 1 & \theta_1 \end{bmatrix}}_{Z}
# \underbrace{\begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t}
# \end{bmatrix}}_{\alpha_t} \\
# \begin{bmatrix} \alpha_{1,t+1} \\ \alpha_{2,t+1} \end{bmatrix} & =
# \underbrace{\begin{bmatrix}
# \phi & 0 \\
# 1 & 0 \\
# \end{bmatrix}}_{T} \begin{bmatrix} \alpha_{1,t} \\ \alpha_{2,t}
# \end{bmatrix} +
# \underbrace{\begin{bmatrix} 1 \\ 0 \end{bmatrix}}_{R}
# \underbrace{\varepsilon_{t+1}}_{\eta_t} \\
# \end{align}
# $$
#
# The code will follow these steps:
# 1. Import external dependencies
# 2. Download and plot the data on US CPI
# 3. Simple maximum likelihood estimation (MLE) as an example
# 4. Definitions of helper functions to provide tensors to the library
# doing Bayesian estimation
# 5. Bayesian estimation via NUTS
# 6. Application to US CPI series
#
# Finally, Appendix A shows how to re-use the helper functions from step
# (4) to estimate a different state space model, `UnobservedComponents`,
# using the same Bayesian methods.
# ### 1. Import external dependencies
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm
import statsmodels.api as sm
import theano
import theano.tensor as tt
from pandas.plotting import register_matplotlib_converters
from pandas_datareader.data import DataReader
plt.style.use("seaborn")
register_matplotlib_converters()
# ### 2. Download and plot the data on US CPI
#
# We'll get the data from FRED:
# NOTE: requires network access to the FRED service.
cpi = DataReader("CPIAUCNS", "fred", start="1971-01", end="2018-12")
# Make the index an explicit monthly-start DatetimeIndex.
cpi.index = pd.DatetimeIndex(cpi.index, freq="MS")
# Define the inflation series that we'll use in analysis:
# quarterly mean of log CPI, differenced, scaled by 400
# (4 quarters x 100 -- an annualized percentage rate).
inf = np.log(cpi).resample("QS").mean().diff()[1:] * 400
inf = inf.dropna()
print(inf.head())
# Plot the series
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
ax.plot(inf.index, inf, label=r"$\Delta \log CPI$", lw=2)
ax.legend(loc="lower left")
plt.show()
# ### 3. Fit the model with maximum likelihood
#
# Statsmodels does all of the hard work of this for us - creating and
# fitting the model takes just two lines of code. The model order parameters
# correspond to auto-regressive, difference, and moving average orders
# respectively.
# Create an SARIMAX model instance - here we use it to estimate
# the parameters via MLE using the `fit` method, but we can
# also re-use it below for the Bayesian estimation
# order=(1, 0, 1): one AR lag, no differencing, one MA lag -- ARMA(1, 1).
mod = sm.tsa.statespace.SARIMAX(inf, order=(1, 0, 1))
res_mle = mod.fit(disp=False)  # disp=False silences the optimizer output
print(res_mle.summary())
# It's a good fit. We can also get the series of one-step ahead
# predictions and plot it next to the actual data, along with a confidence
# band.
#
predict_mle = res_mle.get_prediction()
predict_mle_ci = predict_mle.conf_int()
# The confidence-interval columns are named after the endogenous series.
lower = predict_mle_ci["lower CPIAUCNS"]
upper = predict_mle_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_mle.predicted_mean.plot(ax=ax,
                                style="r.",
                                label="One-step-ahead forecast")
ax.fill_between(predict_mle_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
# ### 4. Helper functions to provide tensors to the library doing Bayesian
# estimation
#
# We're almost on to the magic but there are a few preliminaries. Feel
# free to skip this section if you're not interested in the technical
# details.
# ### Technical Details
#
# PyMC3 is a Bayesian estimation library ("Probabilistic Programming in
# Python: Bayesian Modeling and Probabilistic Machine Learning with Theano")
# that is a) fast and b) optimized for Bayesian machine learning, for
# instance [Bayesian neural networks](https://docs.pymc.io/notebooks/bayesia
# n_neural_network_advi.html). To do all of this, it is built on top of a
# Theano, a library that aims to evaluate tensors very efficiently and
# provide symbolic differentiation (necessary for any kind of deep
# learning). It is the symbolic differentiation that means PyMC3 can use
# NUTS on any problem formulated within PyMC3.
#
# We are not formulating a problem directly in PyMC3; we're using
# statsmodels to specify the statespace model and solve it with the Kalman
# filter. So we need to put the plumbing of statsmodels and PyMC3 together,
# which means wrapping the statsmodels SARIMAX model object in a Theano-
# flavored wrapper before passing information to PyMC3 for estimation.
#
# Because of this, we can't use the Theano auto-differentiation directly.
# Happily, statsmodels SARIMAX objects have a method to return the Jacobian
# evaluated at the parameter values. We'll be making use of this to provide
# gradients so that we can use NUTS.
# #### Defining helper functions to translate models into a PyMC3 friendly
# form
#
# First, we'll create the Theano wrappers. They will be in the form of
# 'Ops', operation objects, that 'perform' particular tasks. They are
# initialized with a statsmodels `model` instance.
#
# Although this code may look somewhat opaque, it is generic for any state
# space model in statsmodels.
class Loglike(tt.Op):
    """Theano Op wrapping a statsmodels state space model's log-likelihood.

    Takes a vector of parameter values and returns the scalar
    log-likelihood; gradients are supplied through the companion ``Score``
    Op so that PyMC3's NUTS sampler can differentiate through the model.
    """

    itypes = [tt.dvector]  # input: the parameter vector
    otypes = [tt.dscalar]  # output: the scalar log-likelihood

    def __init__(self, model):
        self.model = model
        # Companion Op evaluating the score (gradient) at the parameters.
        self.score = Score(self.model)

    def perform(self, node, inputs, outputs):
        """Evaluate the model log-likelihood at the given parameters."""
        params, = inputs
        outputs[0][0] = np.array(self.model.loglike(params))

    def grad(self, inputs, g):
        """Return the vector-Jacobian product: g[0] times the score."""
        params, = inputs
        return [g[0] * self.score(params)]
class Score(tt.Op):
    """Theano Op wrapping the model's score function -- the gradient of the
    log-likelihood with respect to the parameter vector."""

    itypes = [tt.dvector]  # input: the parameter vector
    otypes = [tt.dvector]  # output: the gradient vector

    def __init__(self, model):
        self.model = model

    def perform(self, node, inputs, outputs):
        params, = inputs
        outputs[0][0] = self.model.score(params)
# ### 5. Bayesian estimation with NUTS
#
# The next step is to set the parameters for the Bayesian estimation,
# specify our priors, and run it.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
# Now for the fun part! There are three parameters to estimate: $\phi$,
# $\theta_1$, and $\sigma$. We'll use uninformative uniform priors for the
# first two, and an inverse gamma for the last one. Then we'll run the
# inference optionally using as many computer cores as I have.
# Construct an instance of the Theano wrapper defined above, which
# will allow PyMC3 to compute the likelihood and Jacobian in a way
# that it can make use of. Here we are using the same model instance
# created earlier for MLE analysis (we could also create a new model
# instance if we preferred)
loglike = Loglike(mod)
with pm.Model() as m:
    # Priors: uniform on the AR/MA coefficients (kept just inside the
    # stationarity/invertibility region), inverse gamma on the variance.
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    maL1 = pm.Uniform("ma.L1", -0.99, 0.99)
    sigma2 = pm.InverseGamma("sigma2", 2, 4)
    # convert variables to tensor vectors
    theta = tt.as_tensor_variable([arL1, maL1, sigma2])
    # use a DensityDist to "call" the wrapper Op with the parameter tensor
    pm.DensityDist("likelihood", loglike, observed=theta)
    # Draw samples
    trace = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,  # increase to parallelize chains across CPU cores
        compute_convergence_checks=False,
    )
# Note that the NUTS sampler is auto-assigned because we provided
# gradients. PyMC3 will use Metropolis or Slicing samplers if it does not
# find that gradients are available. There are an impressive number of draws
# per second for a "block box" style computation! However, note that if the
# model can be represented directly by PyMC3 (like the AR(p) models
# mentioned above), then computation can be substantially faster.
#
# Inference is complete, but are the results any good? There are a number
# of ways to check. The first is to look at the posterior distributions
# (with lines showing the MLE values):
# NOTE(review): tight_layout() here acts on the most recent figure from
# the MLE section -- likely a notebook-export artifact.
plt.tight_layout()
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
# Overlay the MLE point estimates on the posterior plots for comparison.
_ = pm.plot_trace(
    trace,
    lines=[(k, {}, [v]) for k, v in dict(res_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
# The estimated posteriors clearly peak close to the parameters found by
# MLE. We can also see a summary of the estimated values:
pm.summary(trace)
# Here $\hat{R}$ is the Gelman-Rubin statistic. It tests for lack of
# convergence by comparing the variance between multiple chains to the
# variance within each chain. If convergence has been achieved, the between-
# chain and within-chain variances should be identical. If $\hat{R}<1.2$ for
# all model parameters, we can have some confidence that convergence has
# been reached.
#
# Additionally, the highest posterior density interval (the gap between
# the two values of HPD in the table) is small for each of the variables.
#
# ### 6. Application of Bayesian estimates of parameters
#
# We'll now re-instigate a version of the model but using the parameters
# from the Bayesian estimation, and again plot the one-step-ahead forecasts.
# Retrieve the posterior means
params = pm.summary(trace)["mean"].values
# Construct results using these posterior means as parameter values
# (smooth() runs the Kalman filter/smoother at fixed parameters; no fitting).
res_bayes = mod.smooth(params)
predict_bayes = res_bayes.get_prediction()
predict_bayes_ci = predict_bayes.conf_int()
lower = predict_bayes_ci["lower CPIAUCNS"]
upper = predict_bayes_ci["upper CPIAUCNS"]
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf.plot(ax=ax, style="-", label="Observed")
# Plot predictions
predict_bayes.predicted_mean.plot(ax=ax,
                                  style="r.",
                                  label="One-step-ahead forecast")
ax.fill_between(predict_bayes_ci.index, lower, upper, color="r", alpha=0.1)
ax.legend(loc="lower left")
plt.show()
# ## Appendix A. Application to `UnobservedComponents` models
# We can reuse the `Loglike` and `Score` wrappers defined above to
# consider a different state space model. For example, we might want to
# model inflation as the combination of a random walk trend and
# autoregressive error term:
#
# $$
# \begin{aligned}
# y_t & = \mu_t + \varepsilon_t \\
# \mu_t & = \mu_{t-1} + \eta_t \\
# \varepsilon_t &= \phi \varepsilon_t + \zeta_t
# \end{aligned}
# $$
#
# This model can be constructed in Statsmodels with the
# `UnobservedComponents` class using the `rwalk` and `autoregressive`
# specifications. As before, we can fit the model using maximum likelihood
# via the `fit` method.
# Construct the model instance: a random-walk level component plus an
# AR(1) error component.
mod_uc = sm.tsa.UnobservedComponents(inf, "rwalk", autoregressive=1)
# Fit the model via maximum likelihood
res_uc_mle = mod_uc.fit()
print(res_uc_mle.summary())
# As noted earlier, the Theano wrappers (`Loglike` and `Score`) that we
# created above are generic, so we can re-use essentially the same code to
# explore the model with Bayesian methods.
# Set sampling params
ndraws = 3000  # number of draws from the distribution
nburn = 600  # number of "burn-in points" (which will be discarded)
# Here we follow the same procedure as above, but now we instantiate the
# Theano wrapper `Loglike` with the UC model instance instead of the
# SARIMAX model instance
loglike_uc = Loglike(mod_uc)
with pm.Model():
    # Priors on the two variances and the AR(1) coefficient.
    sigma2level = pm.InverseGamma("sigma2.level", 1, 1)
    sigma2ar = pm.InverseGamma("sigma2.ar", 1, 1)
    arL1 = pm.Uniform("ar.L1", -0.99, 0.99)
    # convert variables to tensor vectors
    theta_uc = tt.as_tensor_variable([sigma2level, sigma2ar, arL1])
    # use a DensityDist to "call" the wrapper Op with the parameter tensor
    pm.DensityDist("likelihood", loglike_uc, observed=theta_uc)
    # Draw samples
    trace_uc = pm.sample(
        ndraws,
        tune=nburn,
        return_inferencedata=True,
        cores=1,  # increase to parallelize chains across CPU cores
        compute_convergence_checks=False,
    )
# And as before we can plot the marginal posteriors. In contrast to the
# SARIMAX example, here the posterior modes are somewhat different from the
# MLE estimates.
# NOTE(review): tight_layout() again precedes the trace plot -- likely a
# notebook-export artifact.
plt.tight_layout()
# Note: the syntax here for the lines argument is required for
# PyMC3 versions >= 3.7
# For version <= 3.6 you can use lines=dict(res_mle.params) instead
_ = pm.plot_trace(
    trace_uc,
    lines=[(k, {}, [v]) for k, v in dict(res_uc_mle.params).items()],
    combined=True,
    figsize=(12, 12),
)
pm.summary(trace_uc)
# Retrieve the posterior means
params = pm.summary(trace_uc)["mean"].values
# Construct results using these posterior means as parameter values
# (smooth() runs the Kalman filter/smoother at fixed parameters; no fitting).
res_uc_bayes = mod_uc.smooth(params)
# One benefit of this model is that it gives us an estimate of the
# underlying "level" of inflation, using the smoothed estimate of $\mu_t$,
# which we can access as the "level" column in the results objects'
# `states.smoothed` attribute. In this case, because the Bayesian posterior
# mean of the level's variance is larger than the MLE estimate, its
# estimated level is a little more volatile.
# Graph
fig, ax = plt.subplots(figsize=(9, 4), dpi=300)
# Plot data points
inf["CPIAUCNS"].plot(ax=ax, style="-", label="Observed data")
# Plot estimate of the level term
res_uc_mle.states.smoothed["level"].plot(ax=ax, label="Smoothed level (MLE)")
res_uc_bayes.states.smoothed["level"].plot(ax=ax,
                                           label="Smoothed level (Bayesian)")
ax.legend(loc="lower left")
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
import sys
import clr
import System
sys.path.append(sys.exec_prefix)
# Load the DLR and IronPython assemblies required by the hosting APIs
# imported below.
clr.AddReference("Microsoft.Scripting")
clr.AddReference("Microsoft.Dynamic")
if clr.IsNetCoreApp:
    # On .NET Core these types live in separate facade assemblies.
    clr.AddReference("System.IO.FileSystem")
    clr.AddReference("System.Runtime.Extensions")
else:
    if System.Environment.Version.Major >=4:
        clr.AddReference("System.Core")
    else:
        # Pre-.NET 4 runtimes shipped the DLR core separately.
        clr.AddReference("Microsoft.Scripting.Core")
clr.AddReference("IronPython")
clr.AddReference("IronPython.Modules")
from Microsoft.Scripting import SourceCodeKind, ErrorSink
from Microsoft.Scripting.Hosting import ScriptRuntime
from Microsoft.Scripting.Hosting.Providers import HostingHelpers
from Microsoft.Scripting.Runtime import CompilerContext
from IronPython import PythonOptions
from IronPython.Hosting import Python
from IronPython.Runtime import PythonContext, ModuleOptions, Symbols
from IronPython.Compiler import Parser, PythonCompilerOptions
from IronPython.Compiler.Ast import SuiteStatement, FunctionDefinition
from System import Type, Array, UriBuilder
from System.Reflection import Assembly
from System.IO import Directory, Path, File
#--------------------------------------------------------------------------------------
# Class that takes a file and runs it in interactive mode using the hosting APIs
class FileConsole(object):
    """Runs a python file statement-by-statement in interactive mode --
    mimicking a user typing the file into the REPL -- via the
    IronPython / DLR hosting APIs."""

    def __init__(self, fileName):
        # Build a fresh IronPython runtime + engine for this file.
        scriptEnv = Python.CreateRuntime()
        self.fileName = fileName
        self.engine = scriptEnv.GetEngine("python")
        self.context = HostingHelpers.GetLanguageContext(self.engine)
        scriptEnv.LoadAssembly(Type.GetType("System.String").Assembly) #mscorlib.dll
        scriptEnv.LoadAssembly(UriBuilder().GetType().Assembly) #System.dll
        self.InitializePath()
        # Mirror the hosting process's executable/prefix into the engine's
        # sys module.
        executable = Assembly.GetEntryAssembly().Location
        prefix = Path.GetDirectoryName(executable)
        self.context.SystemState.executable = executable
        self.context.SystemState.exec_prefix = self.context.SystemState.prefix = prefix
        # Fabricate a __main__ module so the executed statements see the
        # usual top-level environment.
        import imp
        mod = imp.new_module('__main__')
        mod.__file__ = fileName
        mod.__builtins__ = sys.modules['__builtin__']
        self.context.SystemState.modules['__main__'] = mod
        self.mainScope = scriptEnv.CreateScope(mod.__dict__)

    def InitializePath(self):
        """Set the engine's search path: the CWD, the script's directory,
        the host binary's directory, its Lib/ folder, the dev std-lib
        checkout, and DLLs/ when present."""
        searchPath = []
        currentDir = Directory.GetCurrentDirectory()
        searchPath.append(currentDir)
        filePathDir = Path.GetDirectoryName(Path.Combine(currentDir, self.fileName))
        searchPath.append(filePathDir)
        entryDir = Path.GetDirectoryName(Assembly.GetEntryAssembly().Location)
        searchPath.append(entryDir)
        siteDir = Path.Combine(entryDir, "Lib")
        searchPath.append(siteDir)
        devStdLibDir = Path.Combine(entryDir, '../../External.LCA_RESTRICTED/Languages/IronPython/27/Lib')
        searchPath.append(devStdLibDir)
        dllsDir = Path.Combine(entryDir, "DLLs")
        if Directory.Exists(dllsDir):
            searchPath.append(dllsDir)
        self.engine.SetSearchPaths(Array[str](searchPath))

    def CreateASTFromFile(self, fileName):
        """Parse fileName and return (ast, source_text)."""
        completeCode = self.engine.CreateScriptSourceFromFile(fileName)
        sourceUnit = HostingHelpers.GetSourceUnit(completeCode)
        cc = CompilerContext(sourceUnit, PythonCompilerOptions(), ErrorSink.Default)
        parser = Parser.CreateParser(cc, PythonOptions())
        return parser.ParseFile(False), sourceUnit.GetCode()

    def GetCodeForStatement(self, codeText, statement):
        """Return the source text for one top-level statement.

        A decorated function definition's own span starts at `def`, so
        widen the span backwards to include its decorators."""
        decoratorStart, decoratorLength = -1, 0
        if isinstance(statement, FunctionDefinition):
            if (statement.Decorators != None and len(statement.Decorators) != 0):
                decoratorStart = min([x.Start.Index for x in statement.Decorators])
                decoratorLength = statement.Start.Index - decoratorStart
        return codeText.Substring( statement.Start.Index if decoratorStart == -1 else decoratorStart, statement.Span.Length + decoratorLength)

    def Run(self):
        """Execute the file one top-level statement at a time, as
        InteractiveCode, all in the shared __main__ scope."""
        ast, codeText = self.CreateASTFromFile(self.fileName)
        if isinstance(ast.Body, SuiteStatement):
            suiteStatement = ast.Body
            for statement in suiteStatement.Statements:
                code = self.GetCodeForStatement(codeText, statement)
                codeUnit = self.engine.CreateScriptSourceFromString(code + '\n\n', SourceCodeKind.InteractiveCode)
                # Result is unused; NOTE(review): presumably called for its
                # validation side effect before execution -- verify.
                codeProps = codeUnit.GetCodeProperties()
                codeUnit.Execute(self.mainScope)
#--------------------------------------------------------------------------------------
def run_interactive_main():
#if the commandline was invoked so: ipy run_interactive.py test_x.py then run just that one test.
testName = sys.argv[1] if len(sys.argv) > 1 else None
if testName:
testsToRun = Directory.GetFiles(Directory.GetCurrentDirectory() , testName)
else:
print "No test name provided"
sys.exit(-1)
allErrors = []
for test in testsToRun:
try:
print "\nRunning test in interactive mode - ", test
con = FileConsole(test)
con.Run()
except Exception, e:
print e, e.clsException
allErrors.append((test, sys.exc_info()[0], sys.exc_info()[1]))
if(allErrors):
print "\n##################################################################################"
print "Summary of all errors"
for file, type, message in allErrors:
print file, type, message
sys.exit(len(allErrors))
#--------------------------------------------------------------------------------------
if __name__ == "__main__":
run_interactive_main()
#-------------------------------------------------------------------------------------- |
import uuid
from django.conf import settings
from django.db import models
from django.db.models import signals
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from main_app.models import Niche
class BrandManagerProfile(models.Model):
    """Marks a Django auth User as a brand manager (one-to-one with User)."""
    # The related User doubles as this profile's primary key.
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)

    def __str__(self):
        # Human-readable form for the admin and shell (Django convention).
        return str(self.user)
class Brand(models.Model):
    """A brand that runs advertising campaigns."""
    # Random UUID primary key, fixed at creation.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=200)
    # Users allowed to manage this brand.
    managers = models.ManyToManyField(BrandManagerProfile)
    info = models.TextField()  # free-form description

    def __str__(self):
        # Human-readable form for the admin and shell (Django convention).
        return self.name
class Campaign(models.Model):
    """An advertising campaign owned by a Brand."""
    # Random UUID primary key, fixed at creation.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    name = models.CharField(max_length=200)
    brand = models.ForeignKey(Brand, on_delete=models.CASCADE)  # owning brand
    goals = models.TextField()
    target_audience = models.TextField()
    detailed_description = models.TextField()
    niches = models.ManyToManyField(Niche)  # market niches the campaign targets
    # NOTE(review): currency/units of the budget are not specified -- verify.
    budget = models.PositiveIntegerField()
    # auto_now/auto_now_add False are the defaults; kept for explicitness.
    submission_deadline = models.DateField(auto_now=False, auto_now_add=False,)
    posted = models.DateTimeField(auto_now=False, auto_now_add=True)  # set once on creation

    def __str__(self):
        # Human-readable form for the admin and shell (Django convention).
        return self.name
|
# constants.py
# Walker M. White (wmw2)
# November 12, 2014
"""Constants for Breakout
This module global constants for the game Breakout. These constants
need to be used in the model, the view, and the controller. As these
are spread across multiple modules, we separate the constants into
their own module. This allows all modules to access them."""
import colormodel
import sys
######### WINDOW CONSTANTS (all coordinates are in pixels) #########
#: the width of the game display
GAME_WIDTH = 480
#: the height of the game display
GAME_HEIGHT = 620
######### PADDLE CONSTANTS #########
#: the width of the paddle
PADDLE_WIDTH = 58
#: the height of the paddle
PADDLE_HEIGHT = 11
#: the distance of the (bottom of the) paddle from the bottom
PADDLE_OFFSET = 30
######### BRICK CONSTANTS #########
#: the horizontal separation between bricks
BRICK_SEP_H = 5
#: the vertical separation between bricks
BRICK_SEP_V = 4
#: the height of a brick
BRICK_HEIGHT = 8
#: the offset of the top brick row from the top
BRICK_Y_OFFSET = 70
#: the number of bricks per row
BRICKS_IN_ROW = 10
#: the number of rows of bricks, in range 1..10.
BRICK_ROWS = 10
#: the width of a brick, computed so BRICKS_IN_ROW bricks plus their
#: horizontal separations span the window (note: may be fractional
#: under Python 3's true division)
BRICK_WIDTH = GAME_WIDTH / BRICKS_IN_ROW - BRICK_SEP_H
######### BALL CONSTANTS #########
#: the diameter of the ball in pixels
BALL_DIAMETER = 18
######### GAME CONSTANTS #########
#: the number of attempts in a game
NUMBER_TURNS = 3
#: state before the game has started
STATE_INACTIVE = 0
#: state when we are initializing a new game
STATE_NEWGAME = 1
#: state when we are counting down to the ball serve
STATE_COUNTDOWN = 2
#: state when we are waiting for user to click the mouse
STATE_PAUSED = 3
#: state when the ball is in play and being animated
STATE_ACTIVE = 4
######### COMMAND LINE ARGUMENTS TO CHANGE NUMBER OF BRICKS IN ROW #########
"""sys.argv is a list of the command line arguments when you run
python. These arguments are everything after the word python. So
if you start the game typing
python breakout.py 3 4
Python puts ['breakout.py', '3', '4'] into sys.argv. Below, we
take advantage of this fact to change the constants BRICKS_IN_ROW
and BRICK_ROWS"""
try:
    # Exactly two extra arguments: bricks-per-row and number of rows.
    if (sys.argv is not None and len(sys.argv) == 3):
        bs_in_row = int(sys.argv[1])
        brick_rows = int(sys.argv[2])
        if (bs_in_row > 0 and brick_rows > 0):
            # ALTER THE CONSTANTS
            BRICKS_IN_ROW = bs_in_row
            BRICK_ROWS = brick_rows
            BRICK_WIDTH = GAME_WIDTH / BRICKS_IN_ROW - BRICK_SEP_H
except ValueError:
    # Non-numeric arguments: leave the constants alone (best-effort).
    # Previously a bare `except:` -- only int() can raise here, so catch
    # just ValueError rather than masking unrelated bugs.
    pass
######### ADD MORE CONSTANTS (PROPERLY COMMENTED) AS NECESSARY #########
FONT_SIZE=50 # point size of the on-screen message text
NUM_STATES=5 # total number of game states defined above
MESSAGE = 'Press any key to start'# welcome message shown before the game starts
BRICK_COLORS= [colormodel.RED, colormodel.RED, colormodel.ORANGE,
               colormodel.ORANGE, colormodel.YELLOW, colormodel.YELLOW,
               colormodel.GREEN, colormodel.GREEN, colormodel.CYAN,
               colormodel.CYAN] # colors of the brick rows, two rows per color
PADDLE_COLOR=colormodel.BLACK # paddle fill color
BALL_COLOR=colormodel.BLACK # ball fill color
RIGHT=5 # pixels added to the paddle x position per rightward step
LEFT=5# pixels moved per leftward step
# NOTE(review): LEFT is presumably subtracted from the paddle x position
# in the controller -- verify.
BALL_RADIUS=BALL_DIAMETER/2.0 # radius of the ball in pixels
LOST='Better luck next time!' # message shown when the game is lost
WON='Congratulations'# message shown when the game is won
#: state when the game is finished (won or lost)
STATE_COMPLETE=5
|
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, 2017 Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from fluids import *
from fluids.numerics import assert_close, assert_close1d, assert_close2d, isclose, linspace
from math import *
from fluids.constants import *
import pytest
def test_SA_partial_sphere():
    """Spot-check SA_partial_sphere against known values."""
    assert_close(SA_partial_sphere(1., 0.7), 2.199114857512855)
    # A full hemispherical head on a D=2 sphere: half a sphere, area 2*pi.
    assert_close(SA_partial_sphere(2, 1), 6.283185307179586)
def test_V_partial_sphere():
    """Spot-check V_partial_sphere, including the zero-height edge case."""
    assert_close(V_partial_sphere(1., 0.7), 0.4105014400690663)
    # Zero liquid height holds zero volume.
    assert V_partial_sphere(1., 0.0) == 0.0
def test_V_horiz_conical():
    # Two examples from [1]_, plus midway, full, and empty levels
    expected = [2041.1923581273443, 6180.540773905826, 3648.490668241736, 7296.981336483472, 0.0]
    computed = [V_horiz_conical(D=108., L=156., a=42., h=h)/231. for h in (36, 84, 54, 108, 0)]
    assert_close1d(computed, expected)
    # A liquid level above the diameter is invalid
    with pytest.raises(Exception):
        V_horiz_conical(D=108., L=156., a=42., h=109)
    # Head-only volume, specified by keyword and positionally
    head_kw = V_horiz_conical(D=108., L=156., a=42., h=84., headonly=True)/231.
    head_pos = V_horiz_conical(108., 156., 42., 84., headonly=True)/231.
    assert_close1d([head_kw, head_pos], [508.8239000645628]*2)
    assert V_horiz_conical(D=108., L=156., a=42., h=0, headonly=True) == 0.0
def test_V_horiz_ellipsoidal():
    # Two examples from [1]_, plus midway, full, and empty levels
    expected = [2380.9565415578145, 7103.445235921378, 4203.695769930696, 8407.391539861392, 0.0]
    computed = [V_horiz_ellipsoidal(D=108., L=156., a=42., h=h)/231. for h in (36, 84, 54, 108, 0)]
    assert_close1d(computed, expected)
    # Head-only volume, specified by keyword and positionally
    head_kw = V_horiz_ellipsoidal(D=108., L=156., a=42., h=84., headonly=True)/231.
    head_pos = V_horiz_ellipsoidal(108., 156., 42., 84., headonly=True)/231.
    assert_close1d([head_kw, head_pos], [970.2761310723387]*2)
    assert V_horiz_ellipsoidal(108., 156., 42., 0., headonly=True) == 0.0
def test_V_horiz_guppy():
    # Two examples from [1]_, plus midway, full, and empty levels
    expected = [1931.7208029476762, 5954.110515329029, 3412.8543046053724, 7296.981336483472, 0.0]
    computed = [V_horiz_guppy(D=108., L=156., a=42., h=h)/231. for h in (36, 84, 54, 108, 0)]
    assert_close1d(computed, expected)
    # Head-only volume, specified by keyword and positionally
    head_kw = V_horiz_guppy(D=108., L=156., a=42., h=36, headonly=True)/231.
    head_pos = V_horiz_guppy(108., 156., 42., 36, headonly=True)/231.
    assert_close1d([head_kw, head_pos], [63.266257496613804]*2)
    assert V_horiz_guppy(108., 156., 42., 0.0, headonly=True) == 0.0
def test_V_horiz_spherical():
    # Two examples from [1]_, plus midway, full, and empty levels
    expected = [2303.9615116986183, 6935.163365275476, 4094.025626387197, 8188.051252774394, 0.0]
    computed = [V_horiz_spherical(D=108., L=156., a=42., h=h)/231. for h in (36, 84, 54, 108, 0)]
    assert_close1d(computed, expected)
    assert V_horiz_spherical(D=108., L=156., a=42., h=0) == 0.0
    # a == R hits the z = 0 zero-division branch; a tiny perturbation of `a`
    # must produce essentially the same result
    reference = V_horiz_spherical(D=108., L=156., a=54, h=36, headonly=True)
    nudged = V_horiz_spherical(D=108., L=156., a=53.999999999, h=36, headonly=True)
    assert_close(reference, nudged, rtol=1e-10)
    # With z very small (but nonzero) the agreement is necessarily looser
    nudged = V_horiz_spherical(D=108., L=156., a=53.99999999, h=36, headonly=True)
    assert_close(reference, nudged, rtol=1e-7)
    # Exercise the numerical-integration path at its switch points. The
    # integral has a closed form, but evaluating it runs into a
    # zero-to-a-negative-integer-power error near a = 0.01*D, so the
    # implementation integrates numerically there; verify the result is
    # continuous across the switch boundary.
    straddling = [V_horiz_spherical(D=108., L=156., a=a, h=84.)/231. for a in (108*.009999999, 108*.01000001)]
    assert_close1d(straddling, [5201.54341872961, 5201.543461255985])
    # Head-only volume, specified by keyword and positionally
    head_kw = V_horiz_spherical(D=108., L=156., a=42., h=84., headonly=True)/231.
    head_pos = V_horiz_spherical(108., 156., 42., 84., headonly=True)/231.
    assert_close1d([head_kw, head_pos], [886.1351957493874]*2)
def test_V_horiz_torispherical():
    # Two examples from [1]_, plus midway, full, empty, and 1-inch levels,
    # covering all the code branches
    expected = [2028.626670842139, 5939.897910157917, 3534.9973314622794, 7069.994662924554, 0.0, 9.580013820942611]
    computed = [V_horiz_torispherical(D=108., L=156., f=1., k=0.06, h=h)/231. for h in [36, 84, 54, 108, 0, 1]]
    assert_close1d(computed, expected)
    # Head-only volume, specified by keyword and positionally
    head_kw = V_horiz_torispherical(D=108., L=156., f=1., k=0.06, h=36, headonly=True)/231.
    head_pos = V_horiz_torispherical(108., 156., 1., 0.06, 36, headonly=True)/231.
    assert_close1d([head_kw, head_pos], [111.71919144384525]*2)
    assert V_horiz_torispherical(108., 156., 1., 0.06, 0.0) == 0.0
def test_V_vertical_conical():
    # Two examples from [1]_, plus the empty and h == D levels
    expected = [250.67461381371024, 2251.175535772343, 0.0, 6516.560761446257]
    computed = [V_vertical_conical(132., 33., h)/231. for h in [24, 60, 0, 132]]
    assert_close1d(computed, expected)
    assert V_vertical_conical(132., 33., 0.0) == 0.0
def test_V_vertical_ellipsoidal():
    # Two examples from [1]_, plus the empty and h == D levels
    expected = [783.3581681678445, 2902.831611916969, 0.0, 7168.216837590883]
    computed = [V_vertical_ellipsoidal(132., 33., h)/231. for h in [24, 60, 0, 132]]
    assert_close1d(computed, expected)
    assert V_vertical_ellipsoidal(132., 33., 0.0) == 0.0
def test_V_vertical_spherical():
    # Two examples from [1]_, plus the empty and h == D levels
    expected = [583.6018352850442, 2658.4605833627343, 0.0, 6923.845809036648]
    computed = [V_vertical_spherical(132., 33., h)/231. for h in [24, 60, 0, 132]]
    assert_close1d(computed, expected)
    assert V_vertical_spherical(132., 33., 0.0) == 0.0
def test_V_vertical_torispherical():
    # Two examples from [1]_, plus the empty, 1, 22, and h == D levels
    expected = [904.0688283793511, 3036.7614412163075, 0.0, 1.7906624793188568, 785.587561468186, 7302.146666890221]
    computed = [V_vertical_torispherical(132., 1.0, 0.06, h)/231. for h in [24, 60, 0, 1, 22, 132]]
    assert_close1d(computed, expected)
    assert V_vertical_torispherical(132., 1.0, 0.06, 0.0) == 0.0
def test_V_vertical_conical_concave():
    # Three examples from [1]_, plus the empty and h == D levels
    expected = [251.15825565795188, 614.6068425492208, 1693.1654406426783, 0.0, 4428.278844757774]
    computed = [V_vertical_conical_concave(113., -33.0, h)/231 for h in [15., 25., 50., 0, 113]]
    assert_close1d(computed, expected)
    assert V_vertical_conical_concave(113., -33.0, 0.0) == 0.0
def test_V_vertical_ellipsoidal_concave():
    # Three examples from [1]_, plus the empty and h == D levels
    expected = [44.84968851034856, 207.6374468071692, 1215.605957384487, 0.0, 3950.7193614995826]
    computed = [V_vertical_ellipsoidal_concave(113., -33.0, h)/231 for h in [15., 25., 50., 0, 113]]
    assert_close1d(computed, expected)
    assert V_vertical_ellipsoidal_concave(113., -33, 0.0) == 0.0
def test_V_vertical_spherical_concave():
    # Three examples from [1]_, plus the empty and h == D levels
    expected = [112.81405437348528, 341.7056403375114, 1372.9286894955042, 0.0, 4108.042093610599]
    computed = [V_vertical_spherical_concave(113., -33.0, h)/231 for h in [15., 25., 50., 0, 113]]
    assert_close1d(computed, expected)
    assert V_vertical_spherical_concave(113., -33, 0.0) == 0.0
def test_V_vertical_torispherical_concave():
    # Three examples from [1]_, plus the empty and h == D levels
    expected = [103.88569287163769, 388.72142877582087, 1468.762358198084, 0.0, 4203.87576231318]
    computed = [V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=h)/231 for h in [15., 25., 50., 0, 113]]
    assert_close1d(computed, expected)
    assert V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=0.0) == 0.0
    # The code paths on either side of the 0 <= h < a2 condition must agree
    # at the boundary between them
    reference = V_vertical_torispherical_concave(D=113., f=0.71, k=0.16794375443150927, h=15)
    nudged = V_vertical_torispherical_concave(D=113., f=0.71, k=0.16794375443151, h=15)
    assert_close(reference, nudged, rtol=1e-14)
def test_geometry():
    # Ellipsoidal heads: hemispherical case and a slightly flattened one
    heads = [SA_ellipsoidal_head(2, 1), SA_ellipsoidal_head(2, 0.999)]
    assert_close1d(heads, [6.283185307179586, 6.278996936093318])
    # Elongated (a > R) ellipsoidal head
    assert_close(SA_ellipsoidal_head(2, 1.5), 8.459109081729984, rtol=1e-12)
    # A nearly-flat head must not trigger a zero division
    assert_close(SA_ellipsoidal_head(2, 1e-8), pi)
    assert_close(SA_conical_head(2, 1), 4.442882938158366)
    assert_close(SA_guppy_head(2, 1), 6.654000019110157)
    assert_close(SA_torispheroidal(D=2.54, f=1.039370079, k=0.062362205), 6.00394283477063, rtol=1e-12)
    # Total tank surface areas for assorted head combinations
    totals = [SA_tank(D=2, L=2)[0],
              SA_tank(D=1., L=0, sideA='ellipsoidal', sideA_a=2, sideB='ellipsoidal', sideB_a=2)[0],
              SA_tank(D=1., L=5, sideA='conical', sideA_a=2, sideB='conical', sideB_a=2)[0],
              SA_tank(D=1., L=5, sideA='spherical', sideA_a=0.5, sideB='spherical', sideB_a=0.5)[0]]
    assert_close1d(totals, [18.84955592153876, 10.124375616183064, 22.18452243965656, 18.84955592153876])
    # Torispherical heads: check all four returned areas
    areas = SA_tank(D=2.54, L=5, sideA='torispherical', sideB='torispherical', sideA_f=1.039370079, sideA_k=0.062362205, sideB_f=1.039370079, sideB_k=0.062362205)
    assert_close1d(list(areas), [51.90611237013163, 6.00394283477063, 6.00394283477063, 39.89822670059037])
    assert_close(SA_tank(D=1., L=5, sideA='guppy', sideA_a=0.5, sideB='guppy', sideB_a=0.5)[0], 19.034963277504044)
    # Torispherical head heights
    a_computed = [a_torispherical(D=96., f=0.9, k=0.2), a_torispherical(D=108., f=1., k=0.06)]
    assert_close1d(a_computed, [25.684268924767125, 18.288462280484797])
    # Horizontal configurations, compared with TankCalc - Ellipsoidal*2,
    # Ellipsoidal/None, spherical/conical, None/None. The final
    # guppy/torispherical case has no external check available.
    levels = [1, 2.5, 5, 7.5, 10]
    computed = [V_from_h(h=h, D=10., L=25., horizontal=True, sideA='ellipsoidal', sideB='ellipsoidal', sideA_a=2, sideB_a=2) for h in levels]
    assert_close1d(computed, [108.05249928250362, 416.5904542901302, 1086.4674593664702, 1756.34446444281, 2172.9349187329403])
    computed = [V_from_h(h=h, D=10., L=25., horizontal=True, sideA='ellipsoidal', sideA_a=2) for h in levels]
    assert_close1d(computed, [105.12034613915314, 400.22799255268336, 1034.1075818066402, 1667.9871710605971, 2068.2151636132803])
    expected = [104.20408244287965, 400.47607362329063, 1049.291946298991, 1698.107818974691, 2098.583892597982]
    computed = [V_from_h(h=h, D=10., L=25., horizontal=True, sideA='spherical', sideB='conical', sideA_a=2, sideB_a=2) for h in levels]
    assert_close1d(computed, expected)
    # Swapping the two heads end-for-end must give identical volumes
    swapped = [V_from_h(h=h, D=10., L=25., horizontal=True, sideB='spherical', sideA='conical', sideB_a=2, sideA_a=2) for h in levels]
    assert_close1d(swapped, expected)
    computed = [V_from_h(h=h, D=1.5, L=5., horizontal=True) for h in [0, 0.75, 1.5]]
    assert_close1d(computed, [0.0, 4.417864669110647, 8.835729338221293])
    expected = [104.68706323659293, 399.0285611453449, 1037.3160340613756, 1683.391972469731, 2096.854290344973]
    computed = [V_from_h(h=h, D=10., L=25., horizontal=True, sideA='guppy', sideB='torispherical', sideA_a=2, sideB_f=1., sideB_k=0.06) for h in levels]
    assert_close1d(computed, expected)
    swapped = [V_from_h(h=h, D=10., L=25., horizontal=True, sideB='guppy', sideA='torispherical', sideB_a=2, sideA_f=1., sideA_k=0.06) for h in levels]
    assert_close1d(swapped, expected)
    # A liquid level above the diameter is invalid
    with pytest.raises(Exception):
        V_from_h(h=7, D=1.5, L=5)
    # Unrecognized head names must raise
    with pytest.raises(Exception):
        V_from_h(h=2.6, D=10., L=25., horizontal=True, sideA='BADHEAD', sideB='torispherical', sideA_a=2, sideB_f=1., sideB_k=0.06)
    with pytest.raises(Exception):
        V_from_h(h=2.6, D=10., L=25., horizontal=True, sideA='torispherical', sideB='BADHEAD', sideA_a=2, sideB_f=1., sideB_k=0.06)
    # Vertical configurations, compared with TankCalc - conical*2, spherical*2,
    # ellipsoidal*2. Torispherical*2 has no check. None*2 checks.
    computed = [V_from_h(h=h, D=1.5, L=5., horizontal=False, sideA='conical', sideB='conical', sideA_a=2., sideB_a=1.) for h in [0, 1, 2, 5., 7, 7.2, 8]]
    assert_close1d(computed, [0.0, 0.14726215563702155, 1.1780972450961726, 6.4795348480289485, 10.013826583317465, 10.301282311120932, 10.602875205865551])
    computed = [V_from_h(h=h, D=8., L=10., horizontal=False, sideA='spherical', sideB='spherical', sideA_a=3., sideB_a=4.) for h in [0, 1.5, 3, 8.5, 13., 15., 16.2, 17]]
    assert_close1d(computed, [0.0, 25.91813939211579, 89.5353906273091, 365.99554414321085, 592.190215201676, 684.3435997069765, 718.7251897078633, 726.2315017548405])
    computed = [V_from_h(h=h, D=8., L=10., horizontal=False, sideA='ellipsoidal', sideB='ellipsoidal', sideA_a=3., sideB_a=4.) for h in [0, 1.5, 3, 8.5, 13., 15., 16.2, 17]]
    assert_close1d(computed, [0.0, 31.41592653589793, 100.53096491487338, 376.99111843077515, 603.1857894892403, 695.3391739945409, 729.7207639954277, 737.2270760424049])
    computed = [V_from_h(h=h, D=8., L=10., horizontal=False, sideA='torispherical', sideB='torispherical', sideA_a=1.3547, sideB_a=1.3547, sideA_f=1., sideA_k=0.06, sideB_f=1., sideB_k=0.06) for h in [0, 1.3, 9.3, 10.1, 10.7094, 12]]
    assert_close1d(computed, [0.0, 38.723353379954276, 440.84578224136413, 481.0581682073135, 511.68995321687544, 573.323556832692])
    computed = [V_from_h(h=h, D=1.5, L=5., horizontal=False) for h in [0, 2.5, 5]]
    assert_close1d(computed, [0, 4.417864669110647, 8.835729338221293])
    with pytest.raises(Exception):
        V_from_h(h=7, D=1.5, L=5., horizontal=False)
def test_TANK_cross_sectional_area():
    # Halfway up a vertical tank the cross section is the full circle
    tank = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='torispherical', sideB='same')
    assert_close(tank.A_cross_sectional(0.5*tank.h_max), 0.25*pi*tank.D**2)
def test_from_two_specs():
    # Rebuild a tank from its total volume and one cross-sectional area,
    # then verify the specs round-trip. Takes about 1 ms.
    base = TANK(horizontal=True, L=15, D=3)
    area = base.A_cross_sectional(1.5)
    rebuilt = TANK.from_two_specs(base.V_total, area, spec0_name='V', spec1_name='A_cross',
                                  h=1e-10, horizontal=False)
    assert_close(rebuilt.V_total, base.V_total)
    assert_close(base.A_cross_sectional(1.5), rebuilt.A_cross_sectional(1e-10))
def test_SA_partial():
    # Wetted area of a horizontal cylinder body plus its two flat ends.
    # Checked against
    # https://www.aqua-calc.com/calculate/volume-in-a-horizontal-cylinder
    wetted = SA_partial_cylindrical_body(L=120*inch, D=72*inch, h=24*inch) + 2*A_partial_circle(D=72*inch, h=24*inch)
    assert_close(wetted/(foot**2), (8.250207631*2+73.85756504))
    # Partial area of one circle, same external check
    assert_close(A_partial_circle(D=72, h=24), 1188.02989891)
    assert_close(A_partial_circle(D=72, h=72), 0.25*pi*72**2)
    assert_close(A_partial_circle(D=72, h=0), 0)
    # Clipping behavior at and just beyond the boundaries
    assert A_partial_circle(D=72, h=72*(1+1e-15)) == A_partial_circle(D=72, h=72)
    assert A_partial_circle(D=72, h=1e-9) == 0
    assert A_partial_circle(D=72, h=-1e-20) == 0
    assert_close(SA_partial_cylindrical_body(L=200.0, D=96., h=22.0), 19168.852890279868, rtol=1e-12)
    # A fully wetted body is pi*D*L
    assert_close(SA_partial_cylindrical_body(L=200.0, D=96., h=96), pi*96*200.0, rtol=1e-15)
    assert SA_partial_cylindrical_body(L=200.0, D=96., h=0) == 0
    assert SA_partial_cylindrical_body(L=200.0, D=96., h=-1e-14) == 0
    # Slightly over-full clips to the full-body area
    over_full = SA_partial_cylindrical_body(L=200.0, D=1., h=1+1e-15)
    assert_close(over_full, pi*200.0, rtol=1e-15)
def test_SA_partial_horiz_conical_head():
    # Wetted area with two conical heads at three liquid levels
    levels = [24*inch, 36*inch, 48*inch]
    expected_areas = [101.35826, 141.37167, 181.38508]
    for level, expected in zip(levels, expected_areas):
        wetted = (2*SA_partial_horiz_conical_head(D=72*inch, a=48*inch, h=level)
                 + SA_partial_cylindrical_body(D=72*inch, L=120*inch, h=level))
        assert_close(wetted/(foot**2), expected, rtol=4e-8)
    # Clipping at and beyond the level boundaries
    assert SA_partial_horiz_conical_head(D=72., a=48.0, h=0) == 0
    assert SA_partial_horiz_conical_head(D=72., a=48.0, h=-1e-16) == 0
    assert SA_partial_horiz_conical_head(D=72., a=48.0, h=72) == SA_partial_horiz_conical_head(D=72., a=48.0, h=72+1e-5)
    # A zero-depth head degenerates to a flat partial circle
    assert_close(SA_partial_horiz_conical_head(D=72., a=0, h=35),
                 A_partial_circle(D=72, h=35), rtol=1e-12)
    # Integration tests through the TANK interface
    tank = TANK(L=120*inch, D=72*inch, horizontal=True,
                sideA='conical', sideA_a=48*inch, sideB='same')
    for level, expected in zip(levels, expected_areas):
        assert_close(tank.SA_from_h(level)/foot**2, expected, rtol=1e-7)
    assert tank.SA_from_h(0) == 0.0
    assert_close(tank.SA_from_h(tank.h_max), tank.A, rtol=1e-14)
def test_SA_partial_horiz_spherical_head():
    # Wetted areas at three levels for two head depths
    L = 120*inch
    D = 72*inch
    a_values = [24*inch]*3 + [36*inch]*3
    h_values = [24*inch, 36*inch, 48*inch]*2
    expected = [99.49977, 135.08848, 170.67720, 111.55668, 150.79645, 190.03622]
    for a, h, SA_ft2 in zip(a_values, h_values, expected):
        wetted = (2*SA_partial_horiz_spherical_head(D=D, a=a, h=h)
                 + SA_partial_cylindrical_body(D=D, L=L, h=h))
        assert_close(wetted, SA_ft2*foot**2, rtol=4e-8)
    # Numerical-integral path; only ~1e-7 tolerance is available from the code
    assert_close(SA_partial_horiz_spherical_head(D=72., a=48.0, h=24.0),
                 2027.2672091672684, rtol=1e-7)
    # Clipping at and beyond the level boundaries
    assert SA_partial_horiz_spherical_head(D=72., a=48.0, h=1e-20) == 0
    assert SA_partial_horiz_spherical_head(D=72., a=48.0, h=-1e-12) == 0
    assert SA_partial_horiz_spherical_head(D=72., a=48.0, h=7200) == SA_partial_horiz_spherical_head(D=72., a=48.0, h=72)
    # Continuity as `a` crosses R
    assert_close(SA_partial_horiz_spherical_head(D=72., a=36+1e-11, h=22),
                 SA_partial_horiz_spherical_head(D=72., a=36, h=22), rtol=1e-8)
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='spherical', sideA_a=24*inch, sideB='same')
    assert_close(T1.SA_from_h(24*inch)/foot**2, 99.49977, rtol=1e-7)
    assert_close(T1.SA_from_h(36*inch)/foot**2, 135.08848, rtol=1e-7)
    assert_close(T1.SA_from_h(48*inch)/foot**2, 170.67720, rtol=1e-7)
    assert T1.SA_from_h(0) == 0.0
    # The full tank also goes through the numerical integral
    assert_close(T1.SA_from_h(T1.h_max), T1.A, rtol=1e-7)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='spherical', sideA_a=36*inch, sideB='same')
    assert_close(T2.SA_from_h(24*inch)/foot**2, 111.55668, rtol=1e-7)
    assert_close(T2.SA_from_h(36*inch)/foot**2, 150.79645, rtol=1e-7)
    assert_close(T2.SA_from_h(48*inch)/foot**2, 190.03622, rtol=1e-7)
    assert T2.SA_from_h(0) == 0.0
    assert_close(T2.SA_from_h(T2.h_max), T2.A, rtol=2e-12)
def test_SA_partial_horiz_guppy_head():
    # Wetted areas at three liquid levels
    L = 120*inch
    D = 72*inch
    levels = [24*inch, 36*inch, 48*inch]
    expected = [94.24500, 129.98330, 167.06207]
    for h, SA_ft2 in zip(levels, expected):
        wetted = (2*SA_partial_horiz_guppy_head(D=D, a=48*inch, h=h)
                 + SA_partial_cylindrical_body(D=D, L=L, h=h))
        assert_close(wetted, SA_ft2*foot**2, rtol=5e-8)
    # Clipping at and beyond the level boundaries
    assert SA_partial_horiz_guppy_head(D=72., a=48.0, h=1e-20) == 0
    assert SA_partial_horiz_guppy_head(D=72., a=48.0, h=-1e-12) == 0
    assert SA_partial_horiz_guppy_head(D=72., a=48.0, h=7200) == SA_partial_horiz_guppy_head(D=72., a=48.0, h=72)
    assert_close(SA_partial_horiz_guppy_head(D=72., a=48.0, h=24.0), 1467.8949780037, rtol=1e-8)
    # Exact closed-form value for the full head with a = R
    assert pi*72*inch/2*72*inch == SA_partial_horiz_guppy_head(D=72*inch, a=36*inch, h=72*inch)
    # Integration tests; the guppy head area is NOT CONSISTENT with the
    # tank's total area, so the full-height check is disabled below
    T1 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='guppy', sideA_a=48*inch, sideB='same')
    assert_close(T1.SA_from_h(24*inch)/foot**2, 94.24500, rtol=1e-7)
    assert_close(T1.SA_from_h(36*inch)/foot**2, 129.98330, rtol=1e-7)
    assert_close(T1.SA_from_h(48*inch)/foot**2, 167.06207, rtol=1e-7)
    assert T1.SA_from_h(0) == 0.0
    # assert_close(T1.SA_from_h(T1.h_max), T1.A, rtol=1e-12)
def test_SA_partial_horiz_ellipsoidal_head():
    # Wetted areas at three levels for three head depths
    L = 120*inch
    D = 72*inch
    h_values = [24*inch, 36*inch, 48*inch]*3
    a_values = [24*inch]*3 + [36*inch]*3 + [48*inch]*3
    expected = [102.59905, 138.74815, 174.89725,
                111.55668, 150.79645, 190.03622,
                121.09692, 163.71486, 206.33279]
    for a, h, SA_ft2 in zip(a_values, h_values, expected):
        wetted = (2*SA_partial_horiz_ellipsoidal_head(D=D, a=a, h=h)
                 + SA_partial_cylindrical_body(D=D, L=L, h=h))
        assert_close(wetted, SA_ft2*foot**2, rtol=5e-8)
    # Clipping at and beyond the level boundaries
    assert SA_partial_horiz_ellipsoidal_head(D=72., a=48.0, h=1e-20) == 0
    assert SA_partial_horiz_ellipsoidal_head(D=72., a=48.0, h=-1e-12) == 0
    assert SA_partial_horiz_ellipsoidal_head(D=72., a=48.0, h=7200) == SA_partial_horiz_ellipsoidal_head(D=72., a=48.0, h=72)
    assert_close(SA_partial_horiz_ellipsoidal_head(D=72., a=48.0, h=24.0), 3401.2336225352738, rtol=1e-11)
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='ellipsoidal', sideA_a=24*inch, sideB='same')
    assert_close(T1.SA_from_h(24*inch)/foot**2, 102.59905, rtol=1e-7)
    assert_close(T1.SA_from_h(36*inch)/foot**2, 138.74815, rtol=1e-7)
    assert_close(T1.SA_from_h(48*inch)/foot**2, 174.89725, rtol=1e-7)
    assert_close(T1.SA_from_h(T1.h_max), T1.A, rtol=1e-12)
    assert T1.SA_from_h(0) == 0.0
    T2 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='ellipsoidal', sideA_a=36*inch, sideB='same')
    assert_close(T2.SA_from_h(24*inch)/foot**2, 111.55668, rtol=1e-7)
    assert_close(T2.SA_from_h(36*inch)/foot**2, 150.79645, rtol=1e-7)
    assert_close(T2.SA_from_h(48*inch)/foot**2, 190.03622, rtol=1e-7)
    assert T2.SA_from_h(0) == 0.0
    assert_close(T2.SA_from_h(T2.h_max), T2.A, rtol=1e-12)
    T3 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='ellipsoidal', sideA_a=48*inch, sideB='same')
    assert_close(T3.SA_from_h(24*inch)/foot**2, 121.09692, rtol=1e-7)
    assert_close(T3.SA_from_h(36*inch)/foot**2, 163.71486, rtol=1e-7)
    assert_close(T3.SA_from_h(48*inch)/foot**2, 206.33279, rtol=1e-7)
    assert T3.SA_from_h(0) == 0.0
    assert_close(T3.SA_from_h(T3.h_max), T3.A, rtol=1e-12)
def test_SA_partial_horiz_torispherical_head():
    """Test SA_partial_horiz_torispherical_head against reference values,
    boundary clipping behavior, and the TANK integration interface."""
    # Nasty Python-2 only numerical issue in _SA_partial_horiz_torispherical_head_int_1 ; fixed
    # by ensuring numbers were complex
    assert_close(SA_partial_horiz_torispherical_head(D=1.8288, f=1.0, k=0.06, h=0.6095999999999999), 0.9491605631461236)
    # Python 2 issue with trig due to my own mistake
    assert_close(SA_partial_horiz_torispherical_head(D=1.8288, f=0.9, k=0.1, h=0.6095999999999999),
                 1.037030313486593, rtol=1e-6)
    L = 120*inch
    D = 72*inch
    # Ten cases: five with (f=1.0, k=0.06) and five with (f=0.9, k=0.1)
    h_values = [2.28*inch, 24*inch, 36*inch, 48*inch, 69.72*inch]
    h_values += [3*inch, 24*inch, 36*inch, 48*inch, 69*inch]
    SA_expect = [22.74924, 94.29092, 127.74876,
                 161.20660, 232.74828,
                 26.82339, 96.18257, 130.22802,
                 164.27347, 233.63265]
    SA_expect = [i*foot**2 for i in SA_expect]
    k_values = [.06]*5 + [.1]*5
    f_values = [1.0]*5 + [.9]*5
    # BUGFIX: this loop previously ran ``for i in range(9)`` although all four
    # parallel lists have ten entries, silently skipping the final
    # (f=0.9, k=0.1, h=69 inch) case; iterate over every case instead.
    # The expected value of the tenth case is confirmed by the equivalent
    # TANK-level assertion on T2 below.
    for h, f, k, SA_i in zip(h_values, f_values, k_values, SA_expect):
        SA = (2*SA_partial_horiz_torispherical_head(D=D, f=f, k=k, h=h)
              + SA_partial_cylindrical_body(D=D, L=L, h=h))
        assert_close(SA, SA_i, rtol=2e-7)
    # Precision points for the three regimes
    SA = SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=2)
    assert_close(SA, 80.54614956735351, rtol=1e-7)
    # Only have 1e-7 tolerance here due to numerical integration
    SA = SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=20)
    assert_close(SA, 1171.9138610357936, rtol=1e-7)
    SA = SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=71)
    assert_close(SA, 4784.441787378645, rtol=1e-7)
    # Error handling
    # Was a bug computing this
    SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=1e-20)
    assert 0 == SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=0)
    assert 0 == SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=-1e-12)
    assert SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=7200) == SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=72)
    # Check G returns a real number
    assert_close(SA_partial_horiz_torispherical_head(D=72., f=1, k=.06, h=1e-13), 3.859157404406146e-12, rtol=.1)
    # Torispherical tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='torispherical', sideA_f=1, sideA_k=.06, sideB='same')
    assert_close(T1.SA_from_h(2.28*inch)/foot**2, 22.74924, rtol=1e-7)
    assert_close(T1.SA_from_h(24*inch)/foot**2, 94.29092, rtol=1e-7)
    assert_close(T1.SA_from_h(36*inch)/foot**2, 127.74876, rtol=1e-7)
    assert_close(T1.SA_from_h(48*inch)/foot**2, 161.20660, rtol=1e-7)
    assert_close(T1.SA_from_h(69.72*inch)/foot**2, 232.74828, rtol=1e-7)
    assert 0.0 == T1.SA_from_h(0)
    assert_close(T1.SA_from_h(T1.h_max), T1.A, rtol=1e-12)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=True,
              sideA='torispherical', sideA_f=.9, sideA_k=.1, sideB='same')
    assert_close(T2.SA_from_h(3*inch)/foot**2, 26.82339, rtol=2e-7)
    assert_close(T2.SA_from_h(24*inch)/foot**2, 96.18257, rtol=1e-7)
    assert_close(T2.SA_from_h(36*inch)/foot**2, 130.22802, rtol=1e-7)
    assert_close(T2.SA_from_h(48*inch)/foot**2, 164.27347, rtol=1e-7)
    assert_close(T2.SA_from_h(69*inch)/foot**2, 233.63265, rtol=1e-7)
    assert 0.0 == T2.SA_from_h(0)
    assert_close(T2.SA_from_h(T2.h_max), T2.A, rtol=1e-12)
def test_SA_vert_flat_got_area():
    # A vertical tank with no heads specified uses flat ends
    flat = TANK(L=120, D=72, horizontal=False)
    assert_close(flat.SA_from_h(0), 4071.5040790523717, rtol=1e-15)
    assert_close(flat.SA_from_h(flat.h_max), 35286.36868512056, rtol=1e-15)
    # A vanishingly short body leaves essentially just the two flat ends
    thin = TANK(L=1e-100, D=72, horizontal=False)
    assert_close(thin.SA_from_h(0), 4071.5040790523717, rtol=1e-15)
    assert_close(thin.SA_from_h(thin.h_max), 2*4071.5040790523717, rtol=1e-15)
def test_SA_partial_vertical_conical_head():
    assert_close(SA_partial_vertical_conical_head(D=72., a=48.0, h=24.0), 1696.4600329384882)
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='conical', sideA_a=36*inch, sideB='same')
    assert_close(T1.SA_from_h(36*inch)/foot**2, 39.98595)
    assert_close(T1.SA_from_h(0)/foot**2, 0)
    assert_close(T1.SA_from_h(-1e-14)/foot**2, 0)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='conical', sideA_a=48*inch, sideB='same')
    for level, SA_ft2, tol in [(24*inch, 11.78097, 3e-7), (36*inch, 26.50719, 1e-7),
                               (48*inch, 47.12389, 1e-7), (60*inch, 65.97345, 1e-7),
                               (72*inch, 84.82300, 1e-7)]:
        assert_close(T2.SA_from_h(level)/foot**2, SA_ft2, rtol=tol)
    assert_close(T2.SA_from_h(0)/foot**2, 0, rtol=1e-7)
    assert_close(T2.SA_from_h(-1e-14)/foot**2, 0, rtol=1e-7)
    assert_close(T2.SA_from_h(T2.h_max), 26.26771571641428, rtol=1e-12)
    assert_close(T2.SA_from_h(T2.h_max*.95), 26.046081865057033, rtol=1e-12)
    # A zero-depth conical head must match a genuinely flat-ended tank
    T_flat = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='conical', sideA_a=0, sideB='same')
    T_ref = TANK(L=120*inch, D=72*inch, horizontal=False)
    for h in (0, T_flat.h_max*.1, T_flat.h_max*.4, T_flat.h_max*.9, T_flat.h_max):
        assert_close(T_flat.SA_from_h(h), T_ref.SA_from_h(h), rtol=1e-11)
def test_SA_partial_vertical_spherical_head():
    assert_close(SA_partial_vertical_spherical_head(72, a=24, h=12), 2940.5307237600464)
    # a = 0 with a tiny level must not hit the zero division
    assert_close(SA_partial_vertical_spherical_head(D=1, a=0.0, h=1e-100), 0.7853981633974483, rtol=1e-12)
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='spherical', sideA_a=24*inch, sideB='same')
    for level, SA_ft2 in [(12*inch, 20.42035), (24*inch, 40.84070), (48*inch, 78.53982)]:
        assert_close(T1.SA_from_h(level)/foot**2, SA_ft2, rtol=2e-7)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='spherical', sideA_a=36*inch, sideB='same')
    for level, SA_ft2 in [(18*inch, 28.27433), (36*inch, 56.54867), (60*inch, 94.24778)]:
        assert_close(T2.SA_from_h(level)/foot**2, SA_ft2, rtol=2e-7)
    assert_close(T2.SA_from_h(T2.h_max), 28.01889676417523, rtol=1e-10)
    assert T2.SA_from_h(0) == 0
    assert T2.SA_from_h(-1e-12) == 0
    # T2.SA_from_h(1e-320) # works :)
    # A zero-depth spherical head must match a genuinely flat-ended tank
    T_flat = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='spherical', sideA_a=0, sideB='same')
    T_ref = TANK(L=120*inch, D=72*inch, horizontal=False)
    for h in (0, T_flat.h_max*.1, T_flat.h_max*.4, T_flat.h_max*.9, T_flat.h_max):
        assert_close(T_flat.SA_from_h(h), T_ref.SA_from_h(h), rtol=1e-15)
def test_SA_partial_vertical_torispherical_head():
    # Continuity across the internal transition height
    assert_close(SA_partial_vertical_torispherical_head(D=1.8288, f=1, k=.06, h=0.2127198169675985*(1-1e-12)),
                 SA_partial_vertical_torispherical_head(D=1.8288, f=1, k=.06, h=0.2127198169675985*(1+1e-12)), rtol=1e-9)
    assert_close(SA_partial_vertical_torispherical_head(D=1.8288, f=1, k=.06, h=.2), 2.2981378579540053, rtol=1e-12)
    assert_close(SA_partial_vertical_torispherical_head(D=1.8288, f=1, k=.06, h=.3), 3.056637737809865, rtol=1e-12)
    # Zero and slightly-negative levels clip to zero
    assert SA_partial_vertical_torispherical_head(D=72*inch, f=1, k=.06, h=0) == 0
    assert SA_partial_vertical_torispherical_head(D=72*inch, f=1, k=.06, h=-1e-16) == 0
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='torispherical', sideA_f=1, sideA_k=.06, sideB='same')
    assert_close(T1.SA_from_h(2*inch)/foot**2, 6.28319, rtol=1e-6)
    assert_close(T1.SA_from_h(6*inch)/foot**2, 18.84956, rtol=2.5e-7)
    assert_close(T1.SA_from_h(12*inch)/foot**2, 33.19882, rtol=1e-7)
    assert_close(T1.SA_from_h(T1.sideA_a)/foot**2, 33.50098, rtol=1e-7)
    assert_close(T1.SA_from_h(36.19231*inch)/foot**2, 71.20010, rtol=1e-7)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='torispherical', sideA_f=.9, sideA_k=.1, sideB='same')
    for level, SA_ft2 in [(6*inch, 16.96460), (8*inch, 22.61947), (12*inch, 31.28983),
                          (14.91694*inch, 35.98024), (38.91694*inch, 73.67936)]:
        assert_close(T2.SA_from_h(level)/foot**2, SA_ft2, rtol=2e-7)
    assert_close(T2.SA_from_h(1), 6.911163458435757, rtol=1e-12)
    assert_close(T2.SA_from_h(T2.h_max), 24.197157590255674, rtol=1e-12)
    assert_close(T2.SA_from_h(T2.h_max*.9642342), 22.789489525238952, rtol=1e-12)
    assert T2.SA_from_h(0) == 0
    assert T2.SA_from_h(-1e-12) == 0
    # Cannot do flat tests with torispherical, it does not support it
def test_SA_partial_vertical_ellipsoidal_head():
    assert_close(SA_partial_vertical_ellipsoidal_head(D=72., a=48.0, h=24.0), 4675.237891376319, rtol=1e-12)
    # Integration tests through the TANK interface
    T1 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='ellipsoidal', sideA_a=24*inch, sideB='same')
    for level, SA_ft2 in [(12*inch, 24.71061), (24*inch, 44.50037), (48*inch, 82.19948)]:
        assert_close(T1.SA_from_h(level)/foot**2, SA_ft2)
    T2 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='ellipsoidal', sideA_a=36*inch, sideB='same')
    assert_close(T2.SA_from_h(18*inch)/foot**2, 28.27433, rtol=1.5e-7)
    assert_close(T2.SA_from_h(36*inch)/foot**2, 56.54867, rtol=1e-7)
    assert_close(T2.SA_from_h(60*inch)/foot**2, 94.24778, rtol=1e-7)
    T3 = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='ellipsoidal', sideA_a=48*inch, sideB='same')
    for level, SA_ft2 in [(24*inch, 32.46693), (48*inch, 69.46708), (72*inch, 107.16619)]:
        assert_close(T3.SA_from_h(level)/foot**2, SA_ft2, rtol=1e-7)
    assert_close(T3.SA_from_h(T3.h_max), 30.419215944509517, rtol=1e-12)
    assert T3.SA_from_h(0) == 0
    assert T3.SA_from_h(-1e-13) == 0
    # A zero-depth ellipsoidal head must match a genuinely flat-ended tank
    T_flat = TANK(L=120*inch, D=72*inch, horizontal=False, sideA='ellipsoidal', sideA_a=0, sideB='same')
    T_ref = TANK(L=120*inch, D=72*inch, horizontal=False)
    for h in (0, T_flat.h_max*.1, T_flat.h_max*.4, T_flat.h_max*.9, T_flat.h_max):
        assert_close(T_flat.SA_from_h(h), T_ref.SA_from_h(h), rtol=1e-11)
    # Numerical issue - unsolved, when `a` approaches `R`. The switch between
    # code paths is smooth if we zoom out but not when perturbed very finely
    low = SA_partial_vertical_ellipsoidal_head(72*inch, a=36.*inch*(1+1e-9), h=18*inch)
    high = SA_partial_vertical_ellipsoidal_head(72*inch, a=36.*inch*(1-1e-9), h=18*inch)
    assert_close(low, high, rtol=1e-8)
    # The issue has been identified to be occurring in the just-above-36 code
    with pytest.raises(Exception):
        low = SA_partial_vertical_ellipsoidal_head(72*inch, a=36.*inch*(1+1e-12), h=18*inch)
        high = SA_partial_vertical_ellipsoidal_head(72*inch, a=36.*inch*(1-1e-12), h=18*inch)
        assert_close(low, high, rtol=1e-8)
    # a = 0 with a tiny level degenerates to the full flat circle
    assert_close(0.25*pi*72**2*inch**2, SA_partial_vertical_ellipsoidal_head(72*inch, a=0.0, h=1e-20))
def test_SA_from_h_basics():
    """Check SA_from_h input validation and one simple horizontal-cylinder case."""
    # Bad side names
    with pytest.raises(ValueError):
        SA_from_h(h=7, D=1.5, L=5., horizontal=False, sideA='conical', sideB='NOTASIDE', sideA_a=2., sideB_a=1.)
    with pytest.raises(ValueError):
        SA_from_h(h=7, D=1.5, L=5., horizontal=False, sideB='conical', sideA='NOTASIDE', sideA_a=2., sideB_a=1.)
    # height above tank height
    with pytest.raises(ValueError):
        SA_from_h(h=70, D=1.5, L=5., horizontal=False, sideA='conical', sideB='conical', sideA_a=2., sideB_a=1.)
    # height above tank height
    with pytest.raises(ValueError):
        SA_from_h(h=15, D=1.5, L=5., horizontal=True, sideA=None, sideB=None)
    # Full horizontal cylinder: two flat end caps plus the lateral surface
    assert_close(SA_from_h(h=1.5, D=1.5, L=5., horizontal=True, sideA=None, sideB=None),
                 0.25*pi*1.5**2*2 + 1.5*5*pi, rtol=1e-13)
def test_pitch_angle_solver():
    """Every nonzero two-variable combination of a known (angle, pitch,
    pitch_parallel, pitch_normal) solution must reproduce the full set."""
    ans = [{'angle': 30.0, 'pitch': 2., 'pitch_parallel': 1.7320508075688774, 'pitch_normal': 1.},
           {'angle': 60.0, 'pitch': 2., 'pitch_parallel': 1., 'pitch_normal': 1.7320508075688774},
           {'angle': 45.0, 'pitch': 2., 'pitch_parallel': 1.414213562373095, 'pitch_normal': 1.414213562373095},
           {'angle': 90.0, 'pitch': 1., 'pitch_parallel': 0., 'pitch_normal': 1.},
           {'angle': 0.0, 'pitch': 1., 'pitch_parallel': 1., 'pitch_normal': 0.},
           ]
    for ans_set in ans:
        for k1, v1 in ans_set.items():
            for k2, v2 in ans_set.items():
                # Zero values carry no information and are skipped as inputs
                if k1 != k2 and v1 != 0 and v2 != 0:
                    angle, pitch, pitch_parallel, pitch_normal = pitch_angle_solver(**{k1:v1, k2:v2})
                    assert_close(ans_set['angle'], angle, atol=1e-16)
                    assert_close(ans_set['pitch'], pitch, atol=1e-16)
                    assert_close(ans_set['pitch_parallel'], pitch_parallel, atol=1e-16)
                    assert_close(ans_set['pitch_normal'], pitch_normal, atol=1e-16)
    # A single value underspecifies the geometry and must raise
    with pytest.raises(Exception):
        pitch_angle_solver(30)
def test_AirCooledExchanger():
    """Spot-check AirCooledExchanger construction and fin-area results."""
    # Full solution, exchanger in Serth
    AC = AirCooledExchanger(tube_rows=1, tube_passes=1, tubes_per_row=56, tube_length=10.9728,
                            tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                            angle=30, pitch=2.5*inch, fin_height=0.625*inch, tube_thickness=0.00338)
    assert_close(AC.A_fin_per_tube, 18.041542744557212)
    # Minimal solution
    AC = AirCooledExchanger(tube_rows=1, tube_passes=1, tubes_per_row=56, tube_length=10.9728,
                            tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                            angle=30, pitch=2.5*inch, fin_height=0.625*inch)
    # Omitting fin_height must raise
    with pytest.raises(Exception):
        AirCooledExchanger(tube_rows=1, tube_passes=1, tubes_per_row=56, tube_length=10.9728,
                           tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                           angle=30, pitch=2.5*inch)
    # test AC with geometry whose minimum area is lower on the diagonal plane
    AC = AirCooledExchanger(tube_rows=1, tube_passes=1, tubes_per_row=56, tube_length=10.9728,
                            tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                            angle=60, pitch=2.2*inch, fin_height=0.625*inch, tube_thickness=0.00338)
    assert_close(AC.A_diagonal_per_bundle, AC.A_min_per_bundle)
def test_AirCooledExchangerFull():
    """Exercise every derived property of a fully-specified AirCooledExchanger
    (multiple bundles per bay and parallel bays)."""
    AC = AirCooledExchanger(tube_rows=4, tube_passes=4, tubes_per_row=56, tube_length=10.9728,
                            tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                            angle=30, pitch=2.5*inch, fin_height=0.625*inch, tube_thickness=0.00338,
                            bundles_per_bay=2, parallel_bays=3)
    assert_close(AC.bare_length, 0.0022097999999999996)
    assert AC.tubes_per_bundle == 224
    assert AC.tubes_per_bay == 224*2
    assert AC.tubes == 224*2*3
    assert_close(AC.pitch_diagonal, 0.057238126497990836)
    assert_close(AC.A_bare_tube_per_tube, 0.875590523880476)
    assert_close(AC.A_bare_tube_per_row, AC.A_bare_tube_per_tube*AC.tubes_per_row)
    assert_close(AC.A_bare_tube_per_bundle, AC.A_bare_tube_per_tube*AC.tubes_per_bundle)
    assert_close(AC.A_bare_tube_per_bay, AC.A_bare_tube_per_tube*AC.tubes_per_bay)
    assert_close(AC.A_bare_tube, AC.A_bare_tube_per_tube*AC.tubes)
    assert_close(AC.A_tube_showing_per_tube, 0.7617637557760141)
    assert_close(AC.A_tube_showing_per_row, AC.A_tube_showing_per_tube*AC.tubes_per_row)
    assert_close(AC.A_tube_showing_per_bundle, AC.A_tube_showing_per_tube*AC.tubes_per_bundle)
    assert_close(AC.A_tube_showing_per_bay, AC.A_tube_showing_per_tube*AC.tubes_per_bay)
    assert_close(AC.A_tube_showing, AC.A_tube_showing_per_tube*AC.tubes)
    assert_close(AC.A_per_fin, 0.0041762830427215765)
    assert_close(AC.A_fin_per_tube, 18.041542744557212)
    assert_close(AC.A_fin_per_row, AC.A_fin_per_tube*AC.tubes_per_row)
    assert_close(AC.A_fin_per_bundle, AC.A_fin_per_tube*AC.tubes_per_bundle)
    assert_close(AC.A_fin_per_bay, AC.A_fin_per_tube*AC.tubes_per_bay)
    assert_close(AC.A_fin, AC.A_fin_per_tube*AC.tubes)
    assert_close(AC.A_per_tube, 18.803306500333225)
    assert_close(AC.A_per_row, AC.A_per_tube*AC.tubes_per_row)
    assert_close(AC.A_per_bundle, AC.A_per_tube*AC.tubes_per_bundle)
    assert_close(AC.A_per_bay, AC.A_per_tube*AC.tubes_per_bay)
    assert_close(AC.A, AC.A_per_tube*AC.tubes)
    assert_close(AC.A_increase, 21.47500000000001)
    assert_close(AC.A_diagonal_per_bundle, 34.05507419296123)
    assert_close(AC.A_normal_per_bundle, 1.365674687999997)
    # BUGFIX: this line previously compared A_normal_per_bundle with itself.
    # With this 30-degree staggered layout the normal plane is the smaller
    # flow area, so it must equal the reported minimum (cf. the diagonal
    # case checked in test_AirCooledExchanger).
    assert_close(AC.A_min_per_bundle, AC.A_normal_per_bundle)
    assert_close(AC.A_min_per_bay, AC.A_min_per_bundle*AC.bundles_per_bay)
    assert_close(AC.A_min, AC.A_min_per_bay*AC.parallel_bays)
    assert_close(AC.A_face_per_bundle, 19.858025)
    assert_close(AC.A_face_per_bay, AC.A_face_per_bundle*AC.bundles_per_bay)
    assert_close(AC.A_face, AC.A_face_per_bay*AC.parallel_bays)
    assert_close(AC.flow_area_contraction_ratio, 0.06877192982456128)
    assert_close(AC.Di, 0.018639999999999997)
    assert_close(AC.A_tube_flow, 0.00027288627771317794)
    assert_close(AC.tube_volume_per_tube, 0.0029943265480911587)
    assert_close(AC.tube_volume_per_row, AC.tube_volume_per_tube*AC.tubes_per_row)
    assert_close(AC.tube_volume_per_bundle, AC.tube_volume_per_tube*AC.tubes_per_bundle)
    assert_close(AC.tube_volume, AC.tube_volume_per_tube*AC.tubes)
    assert AC.channels == 56
    assert AC.pitch_str == 'triangular'
    assert AC.pitch_class == 'staggered'
    # test with corbels
    AC = AirCooledExchanger(tube_rows=4, tube_passes=4, tubes_per_row=56, tube_length=10.9728,
                            tube_diameter=1*inch, fin_thickness=0.013*inch, fin_density=10/inch,
                            angle=30, pitch=2.5*inch, fin_height=0.625*inch, tube_thickness=0.00338,
                            bundles_per_bay=2, parallel_bays=3, corbels=True)
    assert_close(AC.A_face_per_bundle, 19.683831599999998)
def test_geometry_tank():
    """TANK solver tests: V/h interconversion, solving for L/D/L_over_D,
    the 'same' side keyword, and default torispherical parameters."""
    V1 = TANK(D=1.2, L=4, horizontal=False).V_total
    assert_close(V1, 4.523893421169302)
    V2 = TANK(D=1.2, L=4, horizontal=False).V_from_h(.5)
    assert_close(V2, 0.5654866776461628)
    V3 = TANK(D=1.2, L=4, horizontal=False).h_from_V(.5)
    assert_close(V3, 0.44209706414415373)

    T1 = TANK(V=10, L_over_D=0.7, sideB='conical', sideB_a=0.5)
    # T1.set_table(dx=0.001)
    things_calc = T1.A, T1.A_sideA, T1.A_sideB, T1.A_lateral
    things = (24.94775907657148, 5.118555935958284, 5.497246519930003, 14.331956620683194)
    assert_close1d(things_calc, things)

    L1 = TANK(D=10., horizontal=True, sideA='conical', sideB='conical', V=500).L
    D1 = TANK(L=4.69953105701, horizontal=True, sideA='conical', sideB='conical', V=500).D
    L2 = TANK(L_over_D=0.469953105701, horizontal=True, sideA='conical', sideB='conical', V=500).L
    assert_close1d([L1, D1, L2], [4.699531057009146, 9.999999999999407, 4.69953105700979])

    L1 = TANK(D=10., horizontal=False, sideA='conical', sideB='conical', V=500).L
    D1 = TANK(L=4.69953105701, horizontal=False, sideA='conical', sideB='conical', V=500).D
    L2 = TANK(L_over_D=0.469953105701, horizontal=False, sideA='conical', sideB='conical', V=500).L
    assert_close1d([L1, D1, L2], [4.699531057009146, 9.999999999999407, 4.69953105700979])

    # Test L_over_D setting simple cases
    L1 = TANK(D=1.2, L_over_D=3.5, horizontal=False).L
    D1 = TANK(L=1.2, L_over_D=3.5, horizontal=False).D
    assert_close1d([L1, D1], [4.2, 0.342857142857])

    # Test torispherical a calculation
    V = TANK(L=1.2, L_over_D=3.5, sideA='torispherical', sideB='torispherical', sideA_f=1., sideA_k=0.06, sideB_f=1., sideB_k=0.06).V_total
    assert_close(V, 0.117318265914)

    # Test default a_ratio
    assert_close(0.25, TANK(V=10, L=10, sideA='conical', sideA_a_ratio=None).sideA_a_ratio)

    with pytest.raises(Exception):
        # Test overdefinition case
        TANK(V=10, L=10, D=10)
    with pytest.raises(Exception):
        # Test sides specified with V solving
        TANK(V=10, L=10, sideA='conical', sideB_a=0.5)

    # Couple points that needed some polishing
    base = TANK(D=10., horizontal=True, sideA_a_ratio=.25, sideB_f=1., sideB_k=0.06,
                sideA='conical', sideB='torispherical', V=500)
    forward = TANK(D=10.0, horizontal=True, sideA_a_ratio=.25, sideB_f=1., sideB_k=0.06,
                   sideA='conical', sideB='torispherical', L=base.L)
    assert_close(base.V, forward.V_total, rtol=1e-11)

    base = TANK(D=10., horizontal=True, sideB_a_ratio=.25, sideA_f=1., sideA_k=0.06,
                sideB='conical', sideA='torispherical', V=500)
    forward = TANK(D=10.0, horizontal=True, sideB_a_ratio=.25, sideA_f=1., sideA_k=0.06,
                   sideB='conical', sideA='torispherical', L=base.L)
    assert_close(base.V, forward.V_total, rtol=1e-11)

    # Same tank keyword
    # BUGFIX: the sideB_k checks below previously compared sideB_k with
    # itself, which is vacuously true; they now compare against sideA_k.
    T1 = TANK(V=10, L_over_D=0.7, sideB='conical', sideB_a=0.5, sideA='same')
    assert T1.sideB == T1.sideA
    assert T1.sideB_a == T1.sideA_a
    assert T1.sideB_f == T1.sideA_f
    assert T1.sideB_k == T1.sideA_k
    assert T1.sideB_a_ratio == T1.sideA_a_ratio

    T1 = TANK(D=10.0, horizontal=True, sideA_f=1., sideA_k=0.06, sideA='torispherical', L=3, sideB='same')
    assert T1.sideB == T1.sideA
    assert T1.sideB_a == T1.sideA_a
    assert T1.sideB_f == T1.sideA_f
    assert T1.sideB_k == T1.sideA_k
    assert T1.sideB_a_ratio == T1.sideA_a_ratio

    T1 = TANK(D=10.0, horizontal=True, sideB_f=1., sideB_k=0.06, sideB='torispherical', L=3, sideA='same')
    assert T1.sideB == T1.sideA
    assert T1.sideB_a == T1.sideA_a
    assert T1.sideB_f == T1.sideA_f
    assert T1.sideB_k == T1.sideA_k
    assert T1.sideB_a_ratio == T1.sideA_a_ratio

    # No spec at all
    T1 = TANK(D=10.0, horizontal=True, L=3, sideA='same')
    assert T1.sideB == T1.sideA
    assert T1.sideB_a == T1.sideA_a
    assert T1.sideB_f == T1.sideA_f
    assert T1.sideB_k == T1.sideA_k
    assert T1.sideB_a_ratio == T1.sideA_a_ratio
    assert T1.sideB_a == 0

    with pytest.raises(Exception):
        T1 = TANK(D=10.0, horizontal=True, L=3, sideA='same', sideB='same')

    # Default k, f
    T1 = TANK(D=10.0, horizontal=True, L=3, sideA='torispherical', sideB='torispherical')
    assert T1.sideB == T1.sideA
    assert T1.sideB_a == T1.sideA_a
    assert T1.sideB_f == T1.sideA_f
    assert T1.sideB_k == T1.sideA_k
    assert T1.sideB_a_ratio == T1.sideA_a_ratio
    assert T1.sideB_k == 0.06
    assert T1.sideB_f == 1.0
def test_TANK_issues():
    """Regression tests for previously-reported TANK bugs."""
    # GH issue 31
    Tk = TANK(L=3, D=5, horizontal=False, sideA='torispherical', sideA_f=1, sideA_k=0.1, sideB='torispherical', sideB_f=1, sideB_k=0.1) #DIN28011
    assert_close(Tk.V_total, Tk.V_from_h(Tk.h_max*.9999999999), rtol=1e-12)

    # Issue where checking sideA_a was for truthiness and not not None
    kwargs = {'L': 2.0, 'horizontal': False, 'L_over_D': None,
              'V': None, 'sideA': 'ellipsoidal', 'sideB': 'ellipsoidal',
              'sideA_a': 0.0, 'sideB_a': 1e-06, 'sideA_a_ratio': None,
              'sideB_a_ratio': None, 'sideA_f': None, 'sideA_k': None,
              'sideB_f': None, 'sideB_k': None}
    assert_close(TANK(D=.5, **kwargs).V_total, 0.39269921259841806, rtol=1e-11)

    # case that failed once
    kwargs = {'D': 0.5, 'L': 2.0, 'horizontal': False, 'L_over_D': None,
              'V': None, 'sideA': 'ellipsoidal', 'sideB': 'ellipsoidal',
              'sideA_a': 0.0, 'sideB_a': 0.0, 'sideA_a_ratio': None,
              'sideB_a_ratio': None, 'sideA_f': None, 'sideA_k': None,
              'sideB_f': None, 'sideB_k': None}
    TANK(**kwargs)
def assert_TANKs_equal(T1, T2):
    """Assert two TANK instances hold identical state.

    Numeric attributes are compared with assert_close; everything else
    must compare exactly equal.
    """
    other_attrs = vars(T2)
    for name, value in vars(T1).items():
        if isinstance(value, (float, int)):
            assert_close(value, other_attrs[name])
        else:
            assert value == other_attrs[name]
def test_add_thickness():
    """TANK.add_thickness must be equivalent to constructing a new tank with
    every dimension grown by the wall thickness."""
    t = 1e-4
    T1 = TANK(L=3, D=.6, sideA='ellipsoidal', sideA_a = .2, sideB='conical', sideB_a=0.5)
    T1 = T1.add_thickness(t)
    T2 = TANK(L=3+2*t, D=.6+2*t, sideA='ellipsoidal', sideA_a = .2+t, sideB='conical', sideB_a=0.5+t)
    assert_TANKs_equal(T1, T2)
    # Also add a test that there are no default values for `k` and `f` when the tank is not torispherical
    # and the `a` ratios are correctly calculated not default values
    for T in (T1, T2):
        assert T.sideA_f is None
        assert T.sideA_k is None
        assert T.sideB_f is None
        assert T.sideB_k is None
        assert_close(T.sideA_a_ratio, 0.3333888703765412)
        assert_close(T.sideB_a_ratio, 0.8332222592469177)

    t = .1
    T1 = TANK(L=3, D=.6, sideA='spherical', sideA_a = .2, sideB='guppy', sideB_a=0.5)
    T1 = T1.add_thickness(t)
    T2 = TANK(L=3+2*t, D=.6+2*t, sideA='spherical', sideA_a = .2+t, sideB='guppy', sideB_a=0.5+t)
    assert_TANKs_equal(T1, T2)
    for T in (T1, T2):
        assert T.sideA_f is None
        assert T.sideA_k is None
        assert T.sideB_f is None
        assert T.sideB_k is None
        assert_close(T.sideA_a_ratio, 0.375)
        assert_close(T.sideB_a_ratio, .75)

    # Torispherical as well
    t = .15311351231
    T1 = TANK(L=3, D=.6, sideA='torispherical', sideB='torispherical',
              sideA_f=0.9, sideA_k=0.17)
    T1 = T1.add_thickness(t)
    T2 = TANK(L=3+2*t, D=.6+2*t, sideA='torispherical', sideA_f=0.9, sideA_k=0.17, sideB='torispherical')
    assert_TANKs_equal(T1, T2)
@pytest.mark.slow
def test_geometry_tank_chebyshev():
    """Chebyshev-approximated h_from_V / V_from_h round trips."""
    # Test auto set Chebyshev table
    T = TANK(L=1.2, L_over_D=3.5)
    assert_close(T.h_from_V(T.V_total, 'chebyshev'), T.h_max)
    assert_close(T.h_from_V(.1, 'chebyshev'), 0.2901805880470152, rtol=1e-4)
    assert_close(T.h_from_V(.05, 'chebyshev'), 0.15830377515496144, rtol=1e-4)
    assert_close(T.h_from_V(.02, 'chebyshev'), 0.08101343184833742, rtol=1e-4)

    T = TANK(L=1.2, L_over_D=3.5)
    assert_close(T.V_from_h(T.h_max, 'chebyshev'), T.V_total)
@pytest.mark.slow
def test_geometry_tank_fuzz_h_from_V():
    """Cross-validate the different h<->V solution methods over the full
    tank height/volume range."""
    T = TANK(L=1.2, L_over_D=3.5, sideA='torispherical', sideB='torispherical', sideA_f=1., horizontal=True, sideA_k=0.06, sideB_f=1., sideB_k=0.06)
    T.set_chebyshev_approximators(deg_forward=100, deg_backwards=600)

    # test V_from_h - pretty easy to get right
    for h in linspace(0, T.h_max, 30):
        # It's the top and the bottom of the tank that works poorly
        V1 = T.V_from_h(h, 'full')
        V2 = T.V_from_h(h, 'chebyshev')
        assert_close(V1, V2, rtol=1E-7, atol=1E-7)

    with pytest.raises(Exception):
        T.V_from_h(1E-5, 'NOTAMETHOD')

    # reverse - the spline is also pretty easy, with a limited number of points
    # when the required precision is low
    T.set_table(n=150)
    for V in linspace(0, T.V_total, 30):
        h1 = T.h_from_V(V, 'brenth')
        h2 = T.h_from_V(V, 'spline')
        assert_close(h1, h2, rtol=1E-5, atol=1E-6)

        h3 = T.h_from_V(V, 'chebyshev')
        # Even with a 600-degree polynomial, there will be failures if N
        # is high enough, but the tolerance should just be lowered
        assert_close(h1, h3, rtol=1E-7, atol=1E-7)

    with pytest.raises(Exception):
        T.h_from_V(1E-5, 'NOTAMETHOD')
def test_basic():
    """Basic particle/shape property functions against known values."""
    psi = sphericity(10., 2.)
    assert_close(psi, 0.767663317071005)
    a_r = aspect_ratio(.2, 2.)
    assert_close(a_r, 0.1)
    f_circ = circularity(1.5, .1)
    assert_close(f_circ, 1884.9555921538756)

    A = A_cylinder(0.01, .1)
    assert_close(A, 0.0032986722862692833)
    V = V_cylinder(0.01, .1)
    assert_close(V, 7.853981633974484e-06)

    A = A_hollow_cylinder(0.005, 0.01, 0.1)
    assert_close(A, 0.004830198704894308)
    V = V_hollow_cylinder(0.005, 0.01, 0.1)
    assert_close(V, 5.890486225480862e-06)

    # A single-hole multiple-hole cylinder must match the hollow cylinder
    A = A_multiple_hole_cylinder(0.01, 0.1, [(0.005, 1)])
    assert_close(A, 0.004830198704894308)
    V = V_multiple_hole_cylinder(0.01, 0.1, [(0.005, 1)])
    assert_close(V, 5.890486225480862e-06)
def test_HelicalCoil():
    """HelicalCoil: equivalent input combinations give identical coils;
    invalid geometries raise."""
    # Each sufficient combination of specs must yield the same coil
    for kwargs in [{'Do': 30, 'H': 20, 'pitch': 5, 'Dt':2},
                   {'Do': 30, 'N': 4, 'pitch': 5, 'Dt':2},
                   {'Do': 30, 'N': 4, 'H': 20, 'Dt':2},
                   {'Do_total': 32, 'N': 4, 'H': 20, 'Dt':2},
                   {'Do_total': 32, 'N': 4, 'H_total': 22, 'Dt':2}]:
        a = HelicalCoil(Di=1.8, **kwargs)
        assert_close(a.N, 4)
        assert_close(a.H, 20)
        assert_close(a.H_total, 22)
        assert_close(a.Do_total, 32)
        assert_close(a.pitch, 5)
        assert_close(a.tube_length, 377.5212621504738)
        assert_close(a.surface_area, 2372.0360474917497)
        # Other parameters
        assert_close(a.curvature, 0.06)
        assert_close(a.helix_angle, 0.053001960689651316)
        assert_close(a.tube_circumference, 94.24777960769379)
        assert_close(a.total_inlet_area, 3.141592653589793)
        assert_close(a.total_volume, 1186.0180237458749)
        # with Di specified
        assert_close(a.Di, 1.8)
        assert_close(a.inner_surface_area, 2134.832442742575)
        assert_close(a.inlet_area, 2.5446900494077327)
        assert_close(a.inner_volume, 960.6745992341587)
        assert_close(a.annulus_area, 0.5969026041820604)
        assert_close(a.annulus_volume, 225.3434245117162)
    # Fusion 360 agrees with the tube length.
    # It says the SA should be 2370.3726964956063057
    # Hopefully its own calculation is flawed

    # Test successfully creating a helix with
    HelicalCoil(Di=1.8, Do=30, H=20, pitch=2, Dt=2)
    with pytest.raises(Exception):
        HelicalCoil(Di=1.8, Do=30, H=20, pitch=1.999, Dt=2)
    with pytest.raises(Exception):
        HelicalCoil(Di=1.8, Do=30, H=20, N=10.0001, Dt=2)

    # Test Dt < Do
    HelicalCoil(Do=10, H=30, N=2, Dt=10)
    with pytest.raises(Exception):
        HelicalCoil(Do=10, H=30, N=2, Dt=10.00000001)
    with pytest.raises(Exception):
        HelicalCoil(Do_total=20-1E-9, H=30, N=3., Dt=10.000)
def test_PlateExchanger():
    """PlateExchanger geometry: derived dimensions, areas, channel counts."""
    ex = PlateExchanger(amplitude=5E-4, wavelength=3.7E-3, length=1.2, width=.3, d_port=.05, plates=51)
    assert ex.plate_exchanger_identifier == 'L3.7A0.5B45-45'
    assert_close(ex.amplitude, 0.0005)
    assert_close(ex.a, 0.0005)
    assert_close(ex.b, 0.001)
    assert_close(ex.wavelength, 3.7E-3)
    assert_close(ex.pitch, 3.7E-3)
    assert ex.chevron_angle == 45
    assert ex.chevron_angles == (45, 45)
    assert ex.inclination_angle == 45
    assert_close(ex.plate_corrugation_aspect_ratio, 0.5405405405405406)
    assert_close(ex.gamma, 0.5405405405405406)
    assert_close(ex.plate_enlargement_factor, 1.1611862034509677)
    assert_close(ex.D_eq, 0.002)
    assert_close(ex.D_hydraulic, 0.0017223766473078426)
    assert_close(ex.length_port, 1.25)
    assert_close(ex.A_plate_surface, 0.41802703324234836)
    assert_close(ex.A_heat_transfer, 20.483324628875071)
    assert_close(ex.A_channel_flow, 0.0003)
    assert ex.channels == 50
    assert ex.channels_per_fluid == 25

    # Asymmetric chevron angles: (30, 60) reports an effective angle of 45
    ex = PlateExchanger(amplitude=5E-4, wavelength=3.7E-3, length=1.2, width=.3, d_port=.05, plates=51, chevron_angles=(30, 60))
    assert ex.chevron_angle == 45
    assert ex.chevron_angles == (30, 60)

    # Minimal specification must still construct without error
    ex = PlateExchanger(amplitude=5E-4, wavelength=3.7E-3)
def plate_enlargement_factor_numerical(amplitude, wavelength):
    """Compute the plate surface-enlargement factor by quadrature.

    Integrates the arc-length element of the sinusoidal corrugation over
    one wavelength and normalizes by the wavelength; used as the reference
    for the analytical approximations.
    """
    from scipy.integrate import quad
    gamma = 4.0*amplitude/wavelength
    amp_term = (gamma*pi/2.0)**2

    def arc_length_element(s):
        return (1.0 + amp_term*cos(2.0*pi/wavelength*s)**2)**0.5

    return quad(arc_length_element, 0.0, wavelength)[0]/wavelength
def test_plate_enhancement_factor():
    """Validate the approximate enlargement-factor formula against the VDI
    example value and against the numerical quadrature reference."""
    def plate_enlargement_factor_approx(amplitude, wavelength):
        # Approximate formula
        lambda1 = wavelength
        b = amplitude
        A = 2*pi*b/lambda1
        return 1/6.*(1 + (1 + A**2)**0.5 + 4*(1 + 0.5*A**2)**0.5)

    # 1.218 in VDI example
    phi = plate_enlargement_factor_approx(amplitude=0.002, wavelength=0.0126)
    assert_close(phi, 1.217825410973735)
    assert_close(phi, 1.218, rtol=1E-3)

    phi = plate_enlargement_factor_numerical(amplitude=0.002, wavelength=0.0126)
    assert_close(phi, 1.2149896289702244)
@pytest.mark.fuzz
@pytest.mark.slow
def test_plate_enhancement_factor_fuzz():
    """Cross-check the analytical enlargement factor against quadrature."""
    # Confirm it's correct to within 1E-7 over a coarse grid of geometries
    for amplitude in linspace(1E-5, 100, 3):
        for wavelength in linspace(1E-5, 100, 3):
            analytical = plate_enlargement_factor(amplitude, wavelength)
            numerical = plate_enlargement_factor_numerical(amplitude, wavelength)
            assert_close(analytical, numerical, rtol=1E-7)
def test_RectangularFinExchanger():
    """Plate-fin exchanger geometry, minimal and fully-defined forms."""
    PFE = RectangularFinExchanger(0.03, 0.001, 0.012)
    assert_close(PFE.fin_height, 0.03)
    assert_close(PFE.fin_thickness, 0.001)
    assert_close(PFE.fin_spacing, 0.012)

    # calculated values
    assert_close(PFE.channel_height, 0.029)
    assert_close(PFE.blockage_ratio, 0.8861111111111111)
    assert_close(PFE.fin_count, 83.33333333333333)
    assert_close(PFE.Dh, 0.01595)
    assert_close(PFE.channel_width, 0.011)

    # with layers, plate thickness, width, and length (fully defined)
    PFE = RectangularFinExchanger(0.03, 0.001, 0.012, length=1.2, width=2.401, plate_thickness=.005, layers=40)
    assert_close(PFE.A_HX_layer, 19.2)
    assert_close(PFE.layer_fin_count, 200)
    assert_close(PFE.A_HX, 768.0)
    assert_close(PFE.height, 1.4+.005)
    assert_close(PFE.volume, 4.048085999999999)
    assert_close(PFE.A_specific_HX, 189.71928956054794)
def test_RectangularOffsetStripFinExchanger():
    """Offset strip fin geometry and the alternative hydraulic-diameter models."""
    ROSFE = RectangularOffsetStripFinExchanger(fin_length=.05, fin_height=.01, fin_thickness=.003, fin_spacing=.05)
    assert_close(ROSFE.fin_length, 0.05)
    assert_close(ROSFE.fin_height, 0.01)
    assert_close(ROSFE.fin_thickness, 0.003)
    assert_close(ROSFE.fin_spacing, 0.05)
    assert_close(ROSFE.blockage_ratio, 0.348)
    assert_close(ROSFE.blockage_ratio_Kim, 0.34199999999999997)
    assert_close(ROSFE.alpha, 5)
    assert_close(ROSFE.delta, 0.06)
    assert_close(ROSFE.gamma, 0.06)
    assert_close(ROSFE.A_channel, 0.000329)
    # assert_close(ROSFE.SA_fin, 0.005574)
    assert_close(ROSFE.Dh, 0.011804808037316112)
    assert_close(ROSFE.Dh_Kays_London, 0.012185185185185186)
    assert_close(ROSFE.Dh_Joshi_Webb, 0.011319367879456085)

    # With layers, plate thickness, width (fully defined)
    # ROSFE = RectangularOffsetStripFinExchanger(fin_length=.05, fin_height=.01, fin_thickness=.003, fin_spacing=.05, length=1.2, width=2.401, plate_thickness=.005, layers=40)
    # assert_close(ROSFE.A_HX_layer, 0.267552)
def test_HyperbolicCoolingTower():
    # TODO: placeholder — no HyperbolicCoolingTower assertions implemented yet.
    pass
from rtamt.exception.stl.exception import STLException
from rtamt.exception.stl.exception import STLParseException
from rtamt.exception.stl.exception import STLOfflineException
from rtamt.exception.stl.exception import STLSpecificationException
from rtamt.exception.stl.exception import STLNotImplementedException
from rtamt.exception.ltl.exception import LTLNotImplementedException
from rtamt.exception.ltl.exception import LTLPastifyException
from rtamt.exception.ltl.exception import LTLException
from rtamt.exception.ltl.exception import LTLOfflineException
from rtamt.exception.ltl.exception import LTLParseException
from rtamt.exception.ltl.exception import LTLSpecificationException
from rtamt.exception.exception import RTAMTException
from rtamt.spec.stl.discrete_time.io_type import StlIOType
from rtamt.enumerations.options import Language, Semantics, TimeInterpretation
from rtamt.spec.stl.discrete_time.specification import STLDiscreteTimeSpecification
from rtamt.spec.stl.discrete_time.specification import STLDiscreteTimeSpecification as STLSpecification
from rtamt.spec.stl.dense_time.specification import STLDenseTimeSpecification
from rtamt.spec.stl.dense_time.specification import STLDenseTimeSpecification as STLCTSpecification
from rtamt.spec.ltl.discrete_time.specification import LTLDiscreteTimeSpecification
from rtamt.spec.ltl.discrete_time.specification import LTLDiscreteTimeSpecification as LTLSpecification
|
from typing import Optional
from starlette.config import Config
from starlette.datastructures import Secret
# Application settings resolved from the environment, with ".env" as fallback.
config = Config(env_file=".env")

# whether the application is in development mode.
DEBUG: bool = config("DEBUG", cast=bool, default=False)

# SQLAlchemy database URL.
DATABASE_URL: str = config("DATABASE_URL", cast=str)

# mail client host name.
MAIL_HOST: str = config("MAIL_HOST", cast=str)

# mail client port.
MAIL_PORT: int = config("MAIL_PORT", cast=int)

# mail client auth username.
MAIL_USERNAME: Optional[str] = config("MAIL_USERNAME", cast=str, default=None)

# mail client auth password (Secret keeps the value out of reprs/tracebacks).
MAIL_PASSWORD: Optional[Secret] = config("MAIL_PASSWORD", cast=Secret, default=None)

# mail client sender address.
MAIL_SENDER: Optional[str] = config("MAIL_SENDER", cast=str, default=None)

# celery broker URL.
CELERY_BROKER: str = config("CELERY_BROKER", cast=str)

# celery result backend URL.
CELERY_BACKEND: str = config("CELERY_BACKEND", cast=str)
|
import cv2
from sys import argv
from numpy import histogram, arange, linspace, concatenate
from matplotlib.pyplot import plot, show, xticks, yticks, title

# Plot per-channel RGB intensity histograms of an image.
# Image path comes from the first CLI argument, defaulting to the Pollock file.
# (Obfuscated binary/hex/octal literals replaced with plain decimals.)
art = argv[1] if len(argv) >= 2 else 'Pollock No. 5.png'
# OpenCV loads BGR; convert so channel 0/1/2 really are R/G/B.
img = cv2.cvtColor(cv2.imread(art), cv2.COLOR_BGR2RGB)

# 256 bins, one per 8-bit intensity level; histogram() returns (counts, edges).
hR, hG, hB = [histogram(img[..., channel], bins=256)[0] for channel in range(3)]
maxRGB = max(concatenate([hR, hG, hB]))

xticks(arange(0, 257, 32))
yticks(linspace(0, maxRGB, 10))
plot(hR, 'r', hG, 'g', hB, 'b')
title(art)
show()
|
import glob, os, shutil, random

# Split each shopping mall's product folders 50/50 into train/test sets,
# copying the jpg images into <output_dir>/{train,test}/<product_id>/.
SEED = 1

image_dir = "D:/data/fashion/image_retrieval/cafe24multi/images_mall_prd"
output_dir = "D:/data/fashion/image_retrieval/cafe24multi/images_split_train_test"
train_dir = os.path.join(output_dir, "train")
test_dir = os.path.join(output_dir, "test")

mall_id_list = os.listdir(image_dir)
malls_cnt = len(mall_id_list)

def _copy_product_images(prd_dirs, dest_root, mall_idx):
    """Copy every *.jpg of each product directory into dest_root/<product_id>."""
    total = len(prd_dirs)
    for j, prd_dir in enumerate(prd_dirs):
        # BUGFIX: the directory progress counter is over malls, so the
        # denominator is malls_cnt (it previously printed the product count).
        print("dir %d/%d, file %d/%d" % (mall_idx, malls_cnt, j, total))
        pd = os.path.basename(prd_dir)
        files = glob.glob(os.path.join(prd_dir, "*.jpg"))
        tmp_output_dir = os.path.join(dest_root, pd)
        if not os.path.isdir(tmp_output_dir):
            os.makedirs(tmp_output_dir)
        for f in files:
            shutil.copy(f, os.path.join(tmp_output_dir, os.path.basename(f)))

for i, mall_id in enumerate(mall_id_list):
    mall_dir = os.path.join(image_dir, mall_id)
    if not os.path.isdir(mall_dir):
        print("not dir")
        continue
    prds = glob.glob(os.path.join(mall_dir, "*"))
    test_cnt = int(len(prds) * 0.5)
    # Re-seed per mall so the split is deterministic and reproducible.
    random.seed(SEED)
    random.shuffle(prds)
    _copy_product_images(prds[:test_cnt], test_dir, i)
    _copy_product_images(prds[test_cnt:], train_dir, i)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 11:00:38 2018
@author: ddeng
"""
import pickle
import pdb
def read_file(file):
    """Parse an MPQA feature file into {video_id: {utterance_index: scores}}.

    Each line is space-separated:
    "<vid> <uttr> <pos_uttr> <neg_uttr> <pos_vid> <neg_vid> <words_uttr>".
    The utterance index is the trailing "_<n>" of the utterance file name
    (taken before its extension).  Scores are the four floats
    [pos_uttr, neg_uttr, pos_vid, neg_vid]; words_uttr is ignored.
    """
    main_dict = {}
    for line in file:
        line = line.strip('\n').strip('\t')
        vid, uttr, pos_uttr, neg_uttr, pos_vid, neg_vid, words_uttr = line.split(' ')
        uttr_index = uttr.split('.')[0].split('_')[-1]
        # setdefault replaces the non-idiomatic `vid not in main_dict.keys()` test
        main_dict.setdefault(vid, {})[uttr_index] = [
            float(pos_uttr), float(neg_uttr), float(pos_vid), float(neg_vid)]
    return main_dict
def parse_MPQA_feature():
    """Parse the train/validation MPQA transcript feature files and pickle
    the combined {'Train': ..., 'Validation': ...} dict."""
    train_feature_path = 'omg_TrainTranscripts_features.txt'
    valid_feature_path = 'omg_ValidationTranscripts_features.txt'
    data = {}
    # Context managers close the input files (previously leaked open handles)
    with open(train_feature_path, 'r') as train_file:
        data['Train'] = read_file(train_file)
    with open(valid_feature_path, 'r') as valid_file:
        data['Validation'] = read_file(valid_file)
    with open('../MPQA_Word_Feature.pkl', 'wb') as fout:
        pickle.dump(data, fout)
def parse_MPQA_feature_for_test():
    """Parse the test MPQA transcript feature file and pickle {'Test': ...}."""
    test_feature_path = 'omg_TestTranscripts_features.txt'
    data = {}
    # Context manager closes the input file (previously leaked an open handle)
    with open(test_feature_path, 'r') as test_file:
        data['Test'] = read_file(test_file)
    with open('../MPQA_Word_Feature_Test.pkl', 'wb') as fout:
        pickle.dump(data, fout)
if __name__ == '__main__':
    # Removed a leftover pdb.set_trace() that halted every run at startup.
    #parse_MPQA_feature()
    parse_MPQA_feature_for_test()
|
from utils.views import ContactViewSet, IssueViewSet
# (URL prefix, ViewSet) pairs; presumably registered with a DRF router in the
# project's URL configuration — confirm against the consuming module.
drf_routers = (
    ('contact', ContactViewSet),
    ('issue', IssueViewSet)
)
|
# Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from keystone.common import json_home
from keystone.common import wsgi
from keystone.contrib import oauth2
from keystone.contrib.oauth2 import controllers
# Partials that stamp every JSON Home relation with this extension's
# name and version, so route definitions below stay short.
build_resource_relation = functools.partial(
    json_home.build_v3_extension_resource_relation,
    extension_name='OS-OAUTH2', extension_version='1.0')

build_parameter_relation = functools.partial(
    json_home.build_v3_extension_parameter_relation,
    extension_name='OS-OAUTH2', extension_version='1.0')
class OAuth2Extension(wsgi.V3ExtensionRouter):
    """API Endpoints for the OAuth2 extension.

    The goal of this extension is to allow third-party service providers
    to acquire tokens with a limited subset of a user's roles for acting
    on behalf of that user. This is done using an oauth-similar flow and
    api.

    The API looks like::

      # Basic admin-only consumer crud
      POST /OS-OAUTH2/consumers
      GET /OS-OAUTH2/consumers
      PATCH /OS-OAUTH2/consumers/$consumer_id
      GET /OS-OAUTH2/consumers/$consumer_id
      DELETE /OS-OAUTH2/consumers/$consumer_id

      # User access token endpoint
      GET /users/$user_id/OS-OAUTH2/access_tokens
      GET /users/$user_id/OS-OAUTH2/access_tokens/{access_token_id}
      DELETE /users/$user_id/OS-OAUTH2/access_tokens/{access_token_id}  # revoke an access token

      # OAuth interfaces
      GET /OS-OAUTH2/authorize  # request authorization
      POST /OS-OAUTH2/authorize  # authorize a consumer
      POST /OS-OAUTH2/access_token  # create an access token
    """

    # URL prefix shared by the extension-rooted routes registered below.
    PATH_PREFIX = '/OS-OAUTH2'

    def add_routes(self, mapper):
        """Register every OS-OAUTH2 route and its JSON Home relation on mapper."""
        consumer_controller = controllers.ConsumerCrudV3()
        access_token_controller = controllers.AccessTokenEndpointV3()
        authorization_code_controller = controllers.AuthorizationCodeEndpointV3()
        oauth2_controller = controllers.OAuth2ControllerV3()

        # Admin only consumer CRUD
        self._add_resource(
            mapper, consumer_controller,
            path=self.PATH_PREFIX + '/consumers',
            get_action='list_consumers',
            post_action='create_consumer',
            rel=build_resource_relation(resource_name='consumers'))
        self._add_resource(
            mapper, consumer_controller,
            path=self.PATH_PREFIX + '/consumers/{consumer_id}',
            get_action='get_consumer',
            patch_action='update_consumer',
            delete_action='delete_consumer',
            rel=build_resource_relation(resource_name='consumer'),
            path_vars={
                'consumer_id':
                build_parameter_relation(parameter_name='consumer_id'),
            })

        # Resource Owner CRUD for Access Tokens
        self._add_resource(
            mapper, access_token_controller,
            path='/users/{user_id}' + self.PATH_PREFIX + '/access_tokens',
            get_action='list_access_tokens',
            rel=build_resource_relation(resource_name='access_tokens'),
            path_vars={
                'user_id':
                build_parameter_relation(parameter_name='user_id'),
            })
        self._add_resource(
            mapper, access_token_controller,
            path='/users/{user_id}' + self.PATH_PREFIX + '/access_tokens/{access_token_id}',
            get_action='get_access_token',
            delete_action='revoke_access_token',
            rel=build_resource_relation(resource_name='access_token'),
            path_vars={
                'user_id': build_parameter_relation(parameter_name='user_id'),
                'access_token_id': build_parameter_relation(parameter_name='access_token_id'),
            })

        # Resource Owner endpoint for Authorization Codes
        # NOTE(review): unlike access tokens, this path nests the user under
        # the extension prefix — confirm that asymmetry is intentional.
        self._add_resource(
            mapper, authorization_code_controller,
            path=self.PATH_PREFIX + '/users/{user_id}/authorization_codes',
            get_action='list_authorization_codes',
            rel=build_resource_relation(resource_name='authorization_code'),
            path_vars={
                'user_id':
                build_parameter_relation(parameter_name='user_id'),
            })

        # OAuth2 flow calls
        self._add_resource(
            mapper, oauth2_controller,
            path=self.PATH_PREFIX + '/authorize',
            post_action='create_authorization_code',
            get_action='request_authorization_code',
            rel=build_resource_relation(resource_name='authorization_code'))
        self._add_resource(
            mapper, oauth2_controller,
            path=self.PATH_PREFIX + '/access_token',
            post_action='create_access_token',
            rel=build_resource_relation(resource_name='access_tokens'))
|
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.associationproxy import association_proxy
from app.db.base_class import Base
class FilmActor(Base):
    """Film<->actor association row with a composite (film_id, actor_id) key."""
    __tablename__ = "film_actors"

    film_id = Column(Integer, ForeignKey("films.id"), primary_key=True)
    actor_id = Column(Integer, ForeignKey("actors.id"), primary_key=True)

    actor = relationship("Actor")
    # Proxy so FilmActor.actor_name reads/writes the related Actor's "name".
    actor_name = association_proxy("actor", attr="name")
|
import json
from functools import partial
import geopandas as gpd
import pandas
import pytest
import scipy
import xarray
from numpy.testing import assert_almost_equal
from packaging import version
from rasterio.enums import MergeAlg
from shapely.geometry import mapping
from shapely.wkt import loads
from geocube.api.core import make_geocube
from geocube.exceptions import VectorDataError
from geocube.rasterize import (
rasterize_image,
rasterize_points_griddata,
rasterize_points_radial,
)
from test.conftest import TEST_COMPARE_DATA_DIR, TEST_INPUT_DATA_DIR
# Target CRS for the GARS-cell tests (UTM zone 15N).
TEST_GARS_PROJ = "epsg:32615"
# One GARS grid cell (WGS84 lon/lat ring) used as the clipping geometry
# in most tests below.
TEST_GARS_POLY = loads(
    "POLYGON (("
    "-90.58343333333333 41.48343333333334, "
    "-90.59989999999999 41.48343333333334, "
    "-90.59989999999999 41.4999, "
    "-90.58343333333333 41.4999, "
    "-90.58343333333333 41.48343333333334"
    "))"
)
@pytest.mark.parametrize(
    "input_geodata",
    [
        str(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
        pandas.DataFrame(gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson")),
    ],
)
def test_make_geocube(input_geodata, tmpdir):
    """Basic rasterization of flat soil attributes from a path, a
    GeoDataFrame, or a plain DataFrame; output must match the stored
    reference grid and be writable to netCDF."""
    soil_attribute_list = [
        "om_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
        "cec7_r",
        "ph1to1h2o_r",
        "dbthirdbar_r",
        "awc_r",
    ]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        resolution=(-10, 10),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
@pytest.mark.parametrize(
    "input_geodata",
    [gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson")],
)
def test_make_geocube__categorical(input_geodata, tmpdir):
    """Categorical measurement: the string column is integer-encoded
    (int16) via categorical_enums; values outside the enum (e.g. "frank")
    are still handled, and the grid matches the stored reference."""
    input_geodata["soil_type"] = [
        "sand",
        "silt",
        "clay",
        "frank",
        "silt",
        "clay",
        "sand",
    ]
    out_grid = make_geocube(
        vector_data=input_geodata,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        resolution=(-10, 10),
        categorical_enums={"soil_type": ("sand", "silt", "clay")},
        fill=-9999.0,
    )
    assert out_grid.soil_type.dtype.name == "int16"
    # test writing to netCDF
    out_grid.to_netcdf(
        tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat_categorical.nc"
    )
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat_categorical.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
    ],
)
def test_make_geocube__interpolate_na(input_geodata, tmpdir):
    """Nearest-neighbour NaN interpolation (interpolate_na_method) fills
    gaps in the rasterized soil grid; result matches the stored reference."""
    soil_attribute_list = [
        "om_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
        "cec7_r",
        "ph1to1h2o_r",
        "dbthirdbar_r",
        "awc_r",
    ]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        resolution=(-10, 10),
        interpolate_na_method="nearest",
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(
        tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat_interpolate_na.nc"
    )
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat_interpolate_na.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
    ],
)
def test_make_geocube__like(input_geodata, tmpdir):
    """`like=` grid template: output grid inherits CRS/resolution/extent
    from the reference dataset and round-trips to match it."""
    soil_attribute_list = [
        "om_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
        "cec7_r",
        "ph1to1h2o_r",
        "dbthirdbar_r",
        "awc_r",
    ]
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        out_grid = make_geocube(
            vector_data=input_geodata,
            measurements=soil_attribute_list,
            like=xdc,
            fill=-9999.0,
        )
        # test writing to netCDF
        out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat.nc")
        xarray.testing.assert_allclose(out_grid, xdc)
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
    ],
)
def test_make_geocube__only_resolution(input_geodata, tmpdir):
    """Only `resolution` given: output stays in the input's original CRS
    and matches the stored original-CRS reference grid."""
    soil_attribute_list = [
        "om_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
        "cec7_r",
        "ph1to1h2o_r",
        "dbthirdbar_r",
        "awc_r",
    ]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        resolution=(-0.001, 0.001),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(
        tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat_original_crs.nc"
    )
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat_original_crs.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "time_vector_data.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "time_vector_data.geojson"),
    ],
)
def test_make_geocube__convert_time(input_geodata, tmpdir):
    """datetime_measurements columns are converted to seconds-since-epoch
    with a 0 fill value; units/_FillValue attrs are checked explicitly."""
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=["test_attr", "test_time_attr", "test_str_attr"],
        datetime_measurements=["test_time_attr"],
        resolution=(-0.00001, 0.00001),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("geocube_time") / "time_vector_data.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "time_vector_data.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    assert out_grid.test_time_attr.attrs["units"] == "seconds from 1970-01-01T00:00:00"
    assert out_grid.test_time_attr.attrs["_FillValue"] == 0
@pytest.mark.parametrize(
    "load_extra_kwargs",
    [
        {"output_crs": "epsg:4326"},
        {"resolution": (-10, 10)},
        {"align": (0, 0)},
        {"output_crs": "epsg:4326", "resolution": (-10, 10), "align": (0, 0)},
    ],
)
def test_make_geocube__like_error_invalid_args(load_extra_kwargs):
    """`like=` is mutually exclusive with output_crs/resolution/align:
    supplying any of them together must raise AssertionError."""
    soil_attribute_list = [
        "om_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
        "cec7_r",
        "ph1to1h2o_r",
        "dbthirdbar_r",
        "awc_r",
    ]
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        with pytest.raises(AssertionError):
            make_geocube(
                vector_data=TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
                measurements=soil_attribute_list,
                like=xdc,
                fill=-9999.0,
                **load_extra_kwargs,
            )
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson"),
    ],
)
def test_make_geocube__no_measurements(input_geodata, tmpdir):
    """Omitting `measurements` rasterizes every usable column; output still
    matches the full reference grid."""
    out_grid = make_geocube(
        vector_data=input_geodata,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        resolution=(-10, 10),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
def test_make_geocube__no_geom(tmpdir):
    """Without `geom`, the grid extent is derived from the vector data's
    own bounds; output matches the no-geom reference grid."""
    out_grid = make_geocube(
        vector_data=TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
        measurements=["sandtotal_r"],
        resolution=(-0.001, 0.001),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat_no_geom.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_flat_no_geom.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [
        gpd.GeoDataFrame(columns=["test_col", "geometry"]),
        gpd.GeoDataFrame(),
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson").drop(
            columns="geometry"
        ),
    ],
)
def test_make_geocube__invalid_gdf(input_geodata):
    """Empty frames or frames without a geometry column raise VectorDataError."""
    with pytest.raises(VectorDataError):
        make_geocube(vector_data=input_geodata, resolution=(-0.001, 0.001))
def test_make_geocube__no_resolution_error():
    """Neither `resolution` nor `like` given: must raise RuntimeError."""
    with pytest.raises(RuntimeError):
        make_geocube(
            vector_data=TEST_INPUT_DATA_DIR / "soil_data_flat.geojson",
            measurements=["sandtotal_r"],
            output_crs=TEST_GARS_PROJ,
            geom=json.dumps(mapping(TEST_GARS_POLY)),
            fill=-9999.0,
        )
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_group.geojson"),
    ],
)
def test_make_geocube__group_by(input_geodata, tmpdir):
    """`group_by` adds an extra dimension (soil horizon depth) to the cube;
    output must match the stored grouped reference grid."""
    soil_attribute_list = [
        "cokey",
        "mukey",
        "drclassdcd",
        "hzdept_r",
        "hzdepb_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
    ]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        group_by="hzdept_r",
        resolution=(-10, 10),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_group.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_group.geojson")],
)
def test_make_geocube__group_by__categorical(input_geodata, tmpdir):
    """Grouped cube with a categorical column: values are int16-encoded via
    categorical_enums (out-of-enum "bob" included); matches the reference."""
    input_geodata["soil_type"] = [
        "sand",
        "bob",
        "clay",
        "sand",
        "silt",
        "clay",
        "sand",
    ] * 11
    soil_attribute_list = ["sandtotal_r", "silttotal_r", "soil_type", "claytotal_r"]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        group_by="hzdept_r",
        resolution=(-10, 10),
        categorical_enums={"soil_type": ("sand", "silt", "clay")},
        fill=-9999.0,
    )
    assert out_grid.soil_type.dtype.name == "int16"
    # test writing to netCDF
    out_grid.to_netcdf(
        tmpdir.mkdir("make_geocube_soil") / "soil_grid_group_categorical.nc"
    )
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group_categorical.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_group.geojson"),
    ],
)
def test_make_geocube__group_by_like(input_geodata, tmpdir):
    """`group_by` combined with a `like=` grid template round-trips against
    the grouped reference dataset."""
    soil_attribute_list = [
        "cokey",
        "mukey",
        "drclassdcd",
        "hzdept_r",
        "hzdepb_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
    ]
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        out_grid = make_geocube(
            vector_data=input_geodata,
            measurements=soil_attribute_list,
            group_by="hzdept_r",
            like=xdc,
            fill=-9999.0,
        )
        # test writing to netCDF
        out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_group.nc")
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_group.geojson"),
    ],
)
def test_make_geocube__group_by_only_resolution(input_geodata, tmpdir):
    """Grouped cube with only `resolution` set keeps the input's original
    CRS; matches the grouped original-CRS reference grid."""
    soil_attribute_list = ["sandtotal_r", "silttotal_r", "claytotal_r"]
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=soil_attribute_list,
        group_by="hzdept_r",
        resolution=(-0.001, 0.001),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(
        tmpdir.mkdir("make_geocube_soil") / "soil_grid_grouped_original_crs.nc"
    )
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_grouped_original_crs.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "time_vector_data.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "time_vector_data.geojson"),
    ],
)
def test_make_geocube__group_by_time(input_geodata, tmpdir):
    """Grouping directly on a datetime measurement produces a time
    dimension; matches the stored time-grouped reference."""
    out_grid = make_geocube(
        vector_data=input_geodata,
        datetime_measurements=["test_time_attr"],
        resolution=(-0.00001, 0.00001),
        group_by="test_time_attr",
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_time") / "vector_time_data_group.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "vector_time_data_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "time_vector_data.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "time_vector_data.geojson"),
    ],
)
def test_make_geocube__group_by_convert_with_time(input_geodata, tmpdir):
    """Grouping on a non-time column while converting a separate datetime
    measurement: epoch-seconds units and 0 fill value are preserved."""
    out_grid = make_geocube(
        vector_data=input_geodata,
        datetime_measurements=["test_time_attr"],
        resolution=(-0.00001, 0.00001),
        group_by="test_attr",
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_time") / "vector_data_group.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "vector_data_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    assert out_grid.test_time_attr.attrs["units"] == "seconds from 1970-01-01T00:00:00"
    assert out_grid.test_time_attr.attrs["_FillValue"] == 0
    tmpdir.remove()
@pytest.mark.parametrize(
    "load_extra_kwargs",
    [
        {"output_crs": "epsg:4326"},
        {"resolution": (-10, 10)},
        {"align": (0, 0)},
        {"output_crs": "epsg:4326", "resolution": (-10, 10), "align": (0, 0)},
    ],
)
def test_make_geocube__group_by_like_error_invalid_args(load_extra_kwargs):
    """Grouped variant of the `like=` exclusivity check: combining `like`
    with output_crs/resolution/align must raise AssertionError."""
    soil_attribute_list = [
        "cokey",
        "mukey",
        "drclassdcd",
        "hzdept_r",
        "hzdepb_r",
        "sandtotal_r",
        "silttotal_r",
        "claytotal_r",
    ]
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        with pytest.raises(AssertionError):
            make_geocube(
                vector_data=TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
                measurements=soil_attribute_list,
                like=xdc,
                group_by="hzdept_r",
                fill=-9999.0,
                **load_extra_kwargs,
            )
@pytest.mark.parametrize(
    "input_geodata",
    [
        TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
        gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_group.geojson"),
    ],
)
def test_make_geocube__group_by_no_measurements(input_geodata, tmpdir):
    """Grouped cube without explicit `measurements`: all usable columns are
    rasterized; matches the grouped reference grid."""
    out_grid = make_geocube(
        vector_data=input_geodata,
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        group_by="hzdept_r",
        resolution=(-10, 10),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_group.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
def test_make_geocube__group_by__no_geom(tmpdir):
    """Grouped cube without `geom`: extent comes from the vector data's own
    bounds; matches the grouped no-geom reference grid."""
    out_grid = make_geocube(
        vector_data=TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
        measurements=["sandtotal_r"],
        group_by="hzdept_r",
        resolution=(-0.001, 0.001),
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_group_no_geom.nc")
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / "soil_grid_group_no_geom.nc",
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc)
    tmpdir.remove()
def test_make_geocube__group_by__no_resolution_error():
    """Grouped variant: missing both `resolution` and `like` raises RuntimeError."""
    with pytest.raises(RuntimeError):
        make_geocube(
            vector_data=TEST_INPUT_DATA_DIR / "soil_data_group.geojson",
            measurements=["sandtotal_r"],
            output_crs=TEST_GARS_PROJ,
            geom=json.dumps(mapping(TEST_GARS_POLY)),
            group_by="hzdept_r",
            fill=-9999.0,
        )
def test_make_geocube__new_bounds_crs():
    """WGS84 input reprojected to UTM (epsg:32614): the output grid bounds
    must land on the expected UTM coordinates."""
    utm_cube = make_geocube(
        vector_data=TEST_INPUT_DATA_DIR / "wgs84_geom.geojson",
        output_crs="epsg:32614",
        resolution=(-1, 1),
        fill=-9999.0,
    )
    assert_almost_equal(
        utm_cube.id.rio.bounds(), (1665478.0, 7018306.0, 1665945.0, 7018509.0)
    )
@pytest.mark.parametrize(
    "function,compare_name",
    [
        (rasterize_points_griddata, "rasterize_griddata_nearest.nc"),
        (
            partial(rasterize_points_griddata, rescale=True),
            "rasterize_griddata_rescale.nc",
        ),
        (
            partial(rasterize_points_griddata, method="cubic"),
            "rasterize_griddata_cubic.nc",
        ),
        (rasterize_points_radial, "rasterize_radial_linear.nc"),
        (partial(rasterize_image, merge_alg=MergeAlg.add), "rasterize_image_sum.nc"),
        (partial(rasterize_image, all_touched=True), "rasterize_unchanged.nc"),
    ],
)
@pytest.mark.xfail(
    version.parse(scipy.__version__) < version.parse("1.4.0"),
    reason="griddata behaves differently across versions",
)
def test_make_geocube__custom_rasterize_function(function, compare_name, tmpdir):
    """Each custom rasterize_function variant reproduces its stored
    reference grid (loose tolerances: interpolation is scipy-version
    sensitive, hence the xfail guard)."""
    input_geodata = TEST_INPUT_DATA_DIR / "time_vector_data.geojson"
    out_grid = make_geocube(
        vector_data=input_geodata,
        measurements=["test_attr", "test_time_attr", "test_str_attr"],
        resolution=(-0.00001, 0.00001),
        rasterize_function=function,
        fill=-9999.0,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("geocube_custom") / compare_name)
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / compare_name,
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc, rtol=0.1, atol=0.1)
@pytest.mark.parametrize(
    "function,compare_name",
    [
        (
            partial(rasterize_points_griddata, filter_nan=True),
            "rasterize_griddata_nearest_nodata.nc",
        ),
        (
            partial(rasterize_points_griddata, method="cubic", filter_nan=True),
            "rasterize_griddata_cubic_nodata.nc",
        ),
        (
            partial(rasterize_points_radial, filter_nan=True),
            "rasterize_radial_linear_nodata.nc",
        ),
        (
            partial(rasterize_image, merge_alg=MergeAlg.add, filter_nan=True),
            "rasterize_image_sum_nodata.nc",
        ),
    ],
)
@pytest.mark.xfail(
    version.parse(scipy.__version__) < version.parse("1.4.0"),
    reason="griddata behaves differently across versions",
)
def test_make_geocube__custom_rasterize_function__filter_null(
    function, compare_name, tmpdir
):
    """filter_nan=True variants on data containing nulls reproduce their
    nodata reference grids (loose tolerances, scipy-version xfail)."""
    input_geodata = TEST_INPUT_DATA_DIR / "point_with_null.geojson"
    out_grid = make_geocube(
        vector_data=input_geodata,
        resolution=(-0.00001, 0.00001),
        rasterize_function=function,
    )
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("geocube_custom") / compare_name)
    # test output data
    with xarray.open_dataset(
        TEST_COMPARE_DATA_DIR / compare_name,
        mask_and_scale=False,
        decode_coords="all",
    ) as xdc:
        xarray.testing.assert_allclose(out_grid, xdc, rtol=0.1, atol=0.1)
@pytest.mark.parametrize(
    "dtype,fill,expected_type",
    [
        ("uint16", 0, "uint16"),
        ("uint16", float("NaN"), "float32"),
        ("int32", 0, "int32"),
        ("int32", float("NaN"), "float64"),
        ("int64", 0, "float64"),
        ("int64", float("NaN"), "float64"),
    ],
)
def test_make_geocube__minimize_dtype(dtype, fill, expected_type, tmpdir):
    """Output dtype minimization: integer inputs keep their dtype with an
    integer fill, but are promoted to float when the fill is NaN (int64
    always promotes to float64)."""
    gdf = gpd.read_file(TEST_INPUT_DATA_DIR / "soil_data_flat.geojson")
    gdf["mask"] = 1
    gdf["mask"] = gdf["mask"].astype(dtype)
    out_grid = make_geocube(
        vector_data=gdf,
        measurements=["mask"],
        output_crs=TEST_GARS_PROJ,
        geom=json.dumps(mapping(TEST_GARS_POLY)),
        resolution=(-10, 10),
        fill=fill,
    )
    assert out_grid.mask.dtype.name == expected_type
    # test writing to netCDF
    out_grid.to_netcdf(tmpdir.mkdir("make_geocube_soil") / "soil_grid_flat_mask.nc")
|
from AutoRegPilot import Linear_Regression, Loess_Regression, Polynomial_Regression
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
"""
Line_Plotter.plot(plotlist="LR,PR,CF,LSS",title="Tablom")
Loess_Regression.Plotter()
"""
Signal = np.array(
[
100, 11, 101, 99, 105,
110, 110, 125, 115, 120,
120, 12, 127, 130, 133,
136, 140, 145, 147, 150,
170, 10, 170, 510, 510,
510, 155, 158, 140, 162,
165, 169, 175, 160, 177,
122, 159, 176, 130, 197,
10, 0, 0, 10, 0,
170, 10, 170, 510, 510,
130, 110, 125, 115, 120,
140, 155, 167, 230, 133,510, 155, 158, 140, 162,
165, 169, 175, 160, 177,
122, 159, 176, 130, 197,510, 155, 158, 140, 162,
165, 169, 175, 160, 177,
122, 159, 176, 130, 197,
]
)
Signal = Signal/max(Signal)
time_Array = list(range(len(Signal)))
time = np.array(time_Array)
degree = 5
RegLen = 52
t0_1= timer()
Poly1 = Polynomial_Regression.Reg_Line(Signal,time,degree = degree)
t1 = timer() - t0_1
t0_2 = timer()
Loess1 = Loess_Regression.Reg_Line(Signal,time,RegLen = RegLen)
t2 = timer() - t0_2
t0_3 = timer()
Linear1 = Linear_Regression.Reg_Line(Signal,time)
t3 = timer() - t0_3
Elapsed_Time = "Elaspsed Time of; Poly: %.3f, Loess: %.3f, Linear: %.3f "%(t1,t2,t3)
plt.scatter(time,Signal)
plt.plot(Poly1,"m")
plt.plot(Loess1,"r")
plt.plot(Linear1,"y")
Legend_Polynomial = "Poly R. R2 = %.3f" % (Polynomial_Regression.R2_Score(Signal,time,degree = degree ))
Legend_Linear = "Linear R. R2 = %.3f" % (Linear_Regression.R2_Score(Signal,time))
Legend_Loess = "Loess R. R2 = %.3f" % (Loess_Regression.R2_Score(Signal,time,RegLen = RegLen))
plt.legend([Legend_Polynomial,Legend_Loess,Legend_Linear])
plt.xlabel(Elapsed_Time)
plt.ylabel("Signal")
plt.show()
|
from setuptools import setup, find_packages

setup(
    name='ravop',
    version='0.1-alpha',
    packages=find_packages(),
    install_requires=[
        "numpy==1.20.1"
    ],
    # BUG FIX: a VCS link must use the "git+" scheme and carry an #egg=
    # fragment, otherwise pip cannot resolve it. NOTE(review):
    # dependency_links is deprecated in modern pip — prefer a PEP 508
    # direct reference in install_requires (e.g.
    # "ravcom @ git+https://github.com/ravenprotocol/ravcom.git@0.1-alpha").
    dependency_links=[
        "git+https://github.com/ravenprotocol/ravcom.git@0.1-alpha#egg=ravcom"
    ]
)
|
from unittest import TestCase, mock
import aiocometd_chat_demo.__main__ as main
from aiocometd_chat_demo.chat_service import ChatService
from aiocometd_chat_demo.channels import ChannelsModel
from aiocometd_chat_demo.conversation import ConversationModel
from aiocometd_chat_demo._metadata import VERSION, AUTHOR, AUTHOR_EMAIL, URL
class TestMain(TestCase):
    """Unit tests for the application entry point (aiocometd_chat_demo.__main__)."""

    @mock.patch("aiocometd_chat_demo.__main__.qmlRegisterUncreatableType")
    @mock.patch("aiocometd_chat_demo.__main__.qmlRegisterType")
    def test_register_types(self, register_type, register_uncreatable_type):
        """register_types() registers the creatable QML types and the
        uncreatable (context-supplied) model types."""
        main.register_types()
        register_type.assert_has_calls([
            mock.call(ConversationModel, "ChatDemo", 1, 0, "Conversation"),
            mock.call(ChatService, "ChatService", 1, 0, "ChatService")
        ], any_order=True)
        register_uncreatable_type.assert_has_calls([
            mock.call(ChannelsModel, "ChannelsModel", 1, 0,
                      "ChannelsModel",
                      "ChannelsModel can't be created in QML!"),
            mock.call(ConversationModel, "ConversationModel", 1, 0,
                      "ConversationModel",
                      "ConversationModel can't be created in QML!")
        ], any_order=True)

    @mock.patch("aiocometd_chat_demo.__main__.sys")
    @mock.patch("aiocometd_chat_demo.__main__.QQmlApplicationEngine")
    @mock.patch("aiocometd_chat_demo.__main__.register_types")
    @mock.patch("aiocometd_chat_demo.__main__.asyncio")
    @mock.patch("aiocometd_chat_demo.__main__.QEventLoop")
    @mock.patch("aiocometd_chat_demo.__main__.QGuiApplication")
    @mock.patch("aiocometd_chat_demo.__main__.logging")
    def test_main(self, logging_mod, gui_app_cls, event_loop_cls, asyncio_mod,
                  register_types_func, engine_cls, sys_mod):
        """main() wires up logging, the Qt app, the asyncio event loop,
        QML type registration, context properties and the root QML file,
        then runs the loop inside its context manager."""
        sys_mod.argv = []
        gui_app = mock.MagicMock()
        gui_app_cls.return_value = gui_app
        event_loop = mock.MagicMock()
        event_loop_cls.return_value = event_loop
        engine = engine_cls.return_value
        root_context = engine.rootContext.return_value
        main.main()
        logging_mod.basicConfig.assert_called_with(level=logging_mod.INFO)
        gui_app_cls.assert_called_with(["--style", main.QUICK_CONTROLS2_STYLE])
        event_loop_cls.assert_called_with(gui_app)
        asyncio_mod.set_event_loop.assert_called_with(event_loop)
        register_types_func.assert_called()
        engine_cls.assert_called()
        # package metadata is exposed to QML as context properties
        root_context.setContextProperty.assert_has_calls([
            mock.call("version", VERSION),
            mock.call("author", AUTHOR),
            mock.call("authorEmail", AUTHOR_EMAIL),
            mock.call("projectUrl", URL),
        ], any_order=True)
        engine.load.assert_called_with(main.MAIN_QML_PATH)
        # the event loop must be both entered/exited (context manager) and run
        event_loop.__enter__.assert_called()
        event_loop.__exit__.assert_called()
        event_loop.run_forever.assert_called()
|
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Div, HTML, Layout, Submit
from dal import autocomplete
from django.utils.translation import ugettext, ugettext_lazy
from maps.forms.base import BaseForm
from maps.model.institute import Institute
from maps.model.type import Type
class InstituteForm(BaseForm):
    """Create/update form for Institute entries.

    Renders name/location/info fields plus an HTML tree of institute types
    built from the ``Institute`` type hierarchy; the real ``institute_type``
    field is kept in a hidden Div and driven by the rendered tree.
    """

    class Meta:
        model = Institute
        fields = ('name', 'info', 'institute_location', 'institute_type')
        widgets = {
            'institute_location': autocomplete.ModelSelect2(
                url='maps-ac:place-autocomplete',
                attrs={'data-placeholder': ugettext_lazy('Type for getting available entries')})}

    def __init__(self, *args, **kwargs):
        # Modernized: zero-argument super() (the file already targets Python 3).
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.add_input(Submit('submit', ugettext('submit').capitalize()))
        # Pre-select the instance's current types when editing an existing record.
        instance = kwargs.get('instance')
        selected_ids = [o.id for o in instance.institute_type.all()] if instance else []
        # get_nodes_html is provided by BaseForm -- renders the type tree markup.
        nodes_html = self.get_nodes_html(
            Type.objects.get(name='Institute', parent=None), selected_ids)
        self.helper.layout = Layout(
            Div(
                HTML('<div class="form-header">' + ugettext('data').capitalize() + '</div>'),
                'name',
                'institute_location',
                css_class='form-float'),
            Div(
                HTML('<div class="form-header">' + ugettext('types').capitalize() + '</div>'),
                HTML(nodes_html),
                HTML('<div style="clear:both;"></div>'),
                'info'),
            Div('institute_type', css_class='hidden'))
|
# -*- coding: utf-8 -*-
# @Time : 2021/11/8 10:55
# @Author : yanqun.jiang
# @File : utils.py
import time
def print_run_time(func):
    """Decorator that measures and prints the wall-clock run time of *func*.

    Fixes: the wrapper now propagates *func*'s return value (it was
    previously dropped) and uses functools.wraps so the wrapped function's
    name/docstring survive decoration.

    :param func: the function whose run time should be measured
    :return: the wrapping function
    """
    import functools  # local import keeps the module's import block untouched

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        runtime = end_time - start_time
        print('>>>>>>>%s的程序运行时间为:%s' % (func.__name__, runtime))
        return result
    return wrapper
def print_df_head(func):
    """Decorator that prints the first 5 rows of the DataFrame returned by *func*.

    The decorated function must return a pandas-DataFrame-like object with a
    ``head()`` method; the return value is passed through unchanged.
    functools.wraps is applied for consistency with print_run_time.

    :param func: function returning a DataFrame-like object
    :return: the wrapping function
    """
    import functools  # local import keeps the module's import block untouched

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        df = func(*args, **kwargs)  # must return a DataFrame-like object
        print(df.head())
        return df
    return wrapper
|
import pandas as pd
import numpy as np
import succolib as sl
import inspect
###############################################################################
###############################################################################
def peakedDistMode(series):
    """Estimate the mode (peak position) of a peaked 1-D distribution.

    Two histogram passes: a coarse one (1e4 bins over a unit-wide window
    centred on the series mean) to locate the peak roughly, then a fine one
    (window of +-5 std around the rough peak, bin width 5e-5 std) to refine
    it. Returns 0 for degenerate series with at most one distinct value.
    """
    if len(series.unique()) <= 1:
        return 0
    # pass 1: coarse histogram around the mean
    mean = series.mean()
    coarse_counts = pd.cut(
        series, bins=np.arange(mean - 0.5, mean + 0.5, 0.0001)
    ).value_counts()
    rough_peak = coarse_counts.index[0].mid  # mid of the most populated bin
    # pass 2: fine histogram centred on the rough peak, scaled by the spread
    half_range = 5 * series.std()
    step = 0.00005 * series.std()
    fine_counts = pd.cut(
        series, bins=np.arange(rough_peak - half_range, rough_peak + half_range, step)
    ).value_counts()
    return fine_counts.index[0].mid
###############################################################################
###############################################################################
def aveVar(df, listInVar, nameAveVar):
    """Add to *df* a column *nameAveVar* holding the element-wise mean of
    the columns named in *listInVar*; prints the new column's stats and
    returns the (mutated) dataframe."""
    total = df[listInVar[0]].copy()
    for colName in listInVar[1:]:
        total = total + df[colName]
    df[nameAveVar] = total / len(listInVar)
    print("%s added to df -- (mean, std) = (%f, %f)" % (nameAveVar, df[nameAveVar].mean(), df[nameAveVar].std()))
    return df
###############################################################################
###############################################################################
def inHitCuts(df, layerMap):
    """Flag events with exactly one hit on each of the four input tracking
    layers (columns layerMap[0..3]) as boolSingleHitIn; returns df."""
    singleHit = df[layerMap[0]] == 1
    for layer in (layerMap[1], layerMap[2], layerMap[3]):
        singleHit = singleHit & (df[layer] == 1)
    df["boolSingleHitIn"] = singleHit
    # note: single-hit selection is not based on nHitIn but rather on input layers individually
    print("boolSingleHitIn added to df")
    return df
###############################################################################
###############################################################################
def outHitCuts(df, layerMap, outMultCut):
    """Flag single-hit events on the two output layers (boolSingleHitOut)
    and apply per-run output-multiplicity window cuts (boolLowHitOut /
    boolHighHitOut) from *outMultCut*; runs without an entry get True.
    Returns the (mutated) dataframe."""
    df["boolSingleHitOut"] = (df[layerMap[0]] == 1) & (df[layerMap[1]] == 1)
    # note: single-hit selection is not based on nHitOut but rather on output layers individually
    print("boolSingleHitOut added to df")
    print("--")
    for run in df["iRun"].unique():
        runRows = df["iRun"] == run
        print("run %s:" % run)
        if run in outMultCut:
            low = outMultCut[run][0]
            high = outMultCut[run][1]
            df.loc[runRows, "boolLowHitOut"] = df["nHitOut"] <= low
            df.loc[runRows, "boolHighHitOut"] = df["nHitOut"] >= high
            print("boolLowHitOut: output multiplicity lower window @ <= %f" % (low))
            print("boolHighHitOut: output multiplicity upper window @ >= %f" % (high))
        else:
            # no window defined for this run: keep every event
            df.loc[runRows, "boolLowHitOut"] = True
            df.loc[runRows, "boolHighHitOut"] = True
            print("no cuts defined on output multiplicity --> booleans always True")
    return df
###############################################################################
###############################################################################
def trackingAngleAlign(df, trackingMap, thCentres, thName, z, bThCut, thCut = {}):
    """Compute per-run raw tracking angles, align them (shift so the peak
    sits at zero), shift the downstream tracking modules accordingly, and
    optionally flag events inside an angular acceptance cut.

    :param df: event dataframe (must contain iRun and the trackingMap columns)
    :param trackingMap: 4 column names -- upstream x/y then downstream x/y
    :param thCentres: per-run pair of alignment shifts; None entries trigger
        automatic alignment via the distribution mode (peakedDistMode)
    :param thName: base name for the new angle columns (e.g. "thIn")
    :param z: per-run map of longitudinal detector positions
    :param bThCut: whether to apply the angular cut at all
    :param thCut: per-run cut parameters -- 4 values = rectangle,
        2 = ellipse half-axes, 1 = circle radius. Read-only here (the
        mutable default is never modified).
    :return: the (mutated) dataframe
    """
    for iRun in df["iRun"].unique():
        dfBool = df["iRun"] == iRun
        print("run %s:" % iRun)
        for i in range(2):
            axis = ["x", "y"]
            zU = z[iRun][trackingMap[i].replace("xRaw", "").replace("xCry"+str(i), "gonio")]  # replace arguments to deal with both input/output cases
            zD = z[iRun][trackingMap[i+2].replace("xRaw", "")]
            xRawU = df[dfBool][trackingMap[i]].values
            xRawD = df[dfBool][trackingMap[i+2]].values
            # raw angles
            df.loc[dfBool, thName+"Raw"+str(i)] = sl.zAngle(xRawD, zD, xRawU, zU)
            print("%sRaw%d added to df" % (thName, i))
            # shift value?
            if thCentres[iRun][i] is None:  # if no shift value is given (None), single-run angle distribution mode is used (function in .modules)
                centre = peakedDistMode(df[dfBool][thName+"Raw"+str(i)])
                print("trying to align %s layers (%s & %s) with %sRaw%d mode: %.10f" % (axis[i], trackingMap[i], trackingMap[i+2], thName, i, centre))
            else:
                centre = thCentres[iRun][i]
                print("aligning %s layers (%s & %s) with the value given in the settings: %.10f" % (axis[i], trackingMap[i], trackingMap[i+2], centre))
            # shift downstream modules
            df.loc[dfBool, trackingMap[i].replace("Raw", "")] = xRawU  # actually upstream module is not shifted -- copied with new name (trivially in itself) in input (output) analysis
            df.loc[dfBool, trackingMap[i+2].replace("Raw", "")] = xRawD - centre*(zD - zU)
            # shift raw angles
            df.loc[dfBool, thName+str(i)] = df[dfBool][thName+"Raw"+str(i)].values - centre
        # input angle selection (if required)
        # name of the variable is "bool" + thName.replace("th", "") + "Aligned"
        if bThCut:
            if iRun in thCut:
                if len(thCut[iRun]) == 4:  # rectangular cut
                    xCutL = thCut[iRun][0]
                    xCutR = thCut[iRun][1]
                    yCutL = thCut[iRun][2]
                    yCutR = thCut[iRun][3]
                    cutX = (df[dfBool][thName+"0"].values > xCutL) & (df[dfBool][thName+"0"].values < xCutR)
                    cutY = (df[dfBool][thName+"1"].values > yCutL) & (df[dfBool][thName+"1"].values < yCutR)
                    df.loc[dfBool, "bool%sAligned" % thName.replace("th", "")] = cutX & cutY
                    print("bool%sAligned: rectangle centered in 0 with hor. (ver.) side %f (%f) (edges excluded)" % (thName.replace("th", ""), abs(xCutR-xCutL), abs(yCutR-yCutL)))
                elif len(thCut[iRun]) == 2:  # elliptical cut (different x & y axes)
                    xCut = thCut[iRun][0]
                    yCut = thCut[iRun][1]
                    df.loc[dfBool, "bool%sAligned" % thName.replace("th", "")] = (df[dfBool][thName+"0"].values / xCut)**2 + (df[dfBool][thName+"1"].values / yCut)**2 < 1
                    print("bool%sAligned: ellipse centered in 0 with hor. (ver.) half-axis %f (%f) (edge excluded)" % (thName.replace("th", ""), xCut, yCut))
                elif len(thCut[iRun]) == 1:  # circular cut -- radius as only parameter
                    # BUG FIX: unwrap the 1-element list -- the old code passed
                    # the whole list to the %f format, raising a TypeError.
                    rCut = thCut[iRun][0]
                    df.loc[dfBool, "bool%sAligned" % thName.replace("th", "")] = (df[dfBool][thName+"0"].values / rCut)**2 + (df[dfBool][thName+"1"].values / rCut)**2 < 1
                    print("bool%sAligned: circle centered in 0 with radius %f (edge excluded)" % (thName.replace("th", ""), rCut))
                else:
                    df.loc[dfBool, "bool%sAligned" % thName.replace("th", "")] = True
                    print("no cut defined for %s angle (cut list size mismatch) --> bool%sAligned always True" % (thName, thName.replace("th", "")))
            else:
                df.loc[dfBool, "bool%sAligned" % thName.replace("th", "")] = True
                print("no cut defined for %s angle (run not in th%sCut) --> bool%sAligned always True" % (thName, thName.replace("th", ""), thName.replace("th", "")))
    return df
###############################################################################
###############################################################################
def inputTrackingProj(df, inTrackingMap, z, xCryCut):
    """Project the input tracking onto the crystal and forward-calo planes.

    For every run, the upstream/downstream tracking modules listed in
    inTrackingMap are extrapolated (via sl.zProj) to the goniometer and
    forward-calorimeter longitudinal positions; a fiducial selection on the
    crystal surface is then computed when a cut is defined for the run.

    Args:
        df: input dataframe -- modified in place and also returned.
        inTrackingMap: 4-entry sequence with the upstream (0, 1) and
            downstream (2, 3) tracking-module names, horizontal view first.
        z: per-run dictionary of longitudinal positions (must include the
            "gonio" and "caloFwd" entries).
        xCryCut: per-run [xMin, xMax, yMin, yMax] crystal fiducial cut.

    Returns:
        df with xCry0/1, xCaloFwd0/1 and boolInCry(0/1) columns added.
    """
    for run in df["iRun"].unique():
        inRun = df["iRun"] == run
        for iView in range(2):
            upName, downName = inTrackingMap[iView], inTrackingMap[iView + 2]
            zUp, zDown = z[run][upName], z[run][downName]
            xUp, xDown = df[inRun]["x" + upName], df[inRun]["x" + downName]
            # extrapolate the input track to the crystal (goniometer) plane...
            df.loc[inRun, "xCry%d" % iView] = sl.zProj(xDown, zDown, xUp, zUp, z[run]["gonio"])
            # ...and to the forward-calorimeter plane
            df.loc[inRun, "xCaloFwd%d" % iView] = sl.zProj(xDown, zDown, xUp, zUp, z[run]["caloFwd"])
        # crystal fiducial selection (only if a cut is defined for this run)
        if run in xCryCut:
            xCut = [xCryCut[run][0], xCryCut[run][1]]
            yCut = [xCryCut[run][2], xCryCut[run][3]]
            df.loc[inRun, "boolInCry0"] = (df["xCry0"] > xCut[0]) & (df["xCry0"] < xCut[1])
            df.loc[inRun, "boolInCry1"] = (df["xCry1"] > yCut[0]) & (df["xCry1"] < yCut[1])
            df.loc[inRun, "boolInCry"] = df["boolInCry0"] & df["boolInCry1"]
            print("run %s: boolInCry(0/1): (%f < x < %f) & (%f < y < %f)" % (run, xCut[0], xCut[1], yCut[0], yCut[1]))
        else:
            for name in ("boolInCry0", "boolInCry1", "boolInCry"):
                df.loc[inRun, name] = True
            print("run %s: no cut defined on crystal fiducial area --> boolInCry(0/1) always True" % run)
        print("--")
    # input-beam summary, printed once for the whole dataset
    for iView, axis in enumerate(["x", "y"]):
        print("%s axis" % axis)
        print("final input angle %s: (mean, std) = (%f, %f)" % (df["thIn%d" % iView].name, df["thIn%d" % iView].mean(), df["thIn%d" % iView].std()))
        print("final beam projections: x%s, x%s, xCry%d, xCaloFwd%d" % (inTrackingMap[iView], inTrackingMap[iView + 2], iView, iView))
    return df
###############################################################################
###############################################################################
def trackingAngleDelta(df):
    """Add the output-minus-input deflection angle per view.

    Creates thDelta0 (horizontal) and thDelta1 (vertical) as thOut - thIn
    and returns the updated dataframe.
    """
    for view in (0, 1):
        df["thDelta%d" % view] = df["thOut%d" % view] - df["thIn%d" % view]
    return df
###############################################################################
###############################################################################
def outputTrackingPrint(df, outTrackingMap):
    """Print a per-view summary of the output angles and deflections.

    Nothing is added to df; this only reports mean/std of thOut and
    thDelta for the horizontal (0) and vertical (1) views.
    """
    for view, axis in enumerate(["x", "y"]):
        thOut = df["thOut%d" % view]
        thDelta = df["thDelta%d" % view]
        print("%s axis" % axis)
        print("final output angle %s: (mean, std) = (%f, %f)" % (thOut.name, thOut.mean(), thOut.std()))
        print("final beam projections: xCry%d, x%s" % (view, outTrackingMap[view]))
        print("output-input angle delta %s: (mean, std) = (%f, %f)" % (thDelta.name, thDelta.mean(), thDelta.std()))
###############################################################################
###############################################################################
def gonioPair(df, gonioMap):
    """Create xGonio<DOF> from xGonioRaw<DOF>, optionally pairing DOFs.

    For every xGonioRaw* column a corresponding xGonio* column is always
    added.  If the DOF appears in gonioMap and its companion variable is
    available in df, the raw value is corrected by the companion values
    (mean-shifted or as-is, depending on the map flag) scaled by the map
    factor; otherwise the raw values are copied unchanged.

    gonioMap format: {dof: (companionColumn, bShiftViaMean, factor)}.
    """
    rawPrefix = "xGonioRaw"
    for dof in [col.replace(rawPrefix, "") for col in df.columns if rawPrefix in col]:
        raw = df[rawPrefix + dof]
        if dof not in gonioMap:
            df["xGonio%s" % dof] = raw
            print("xGonioRaw%s copied into xGonio%s with no modifications (not in gonioMap)" % (dof, dof))
            continue
        companion, bShift, factor = gonioMap[dof][0], gonioMap[dof][1], gonioMap[dof][2]
        if companion not in df.columns:
            # recall: a xGonio variable is always created for each xGonioRaw
            # variable -- if no pairing can be performed, simply xGonioX=xGonioRawX
            df["xGonio%s" % dof] = raw
            print("xGonioRaw%s copied into xGonio%s with no modifications (%s not in df)" % (dof, dof, companion))
        elif bShift:
            df["xGonio%s" % dof] = raw + (df[companion] - df[companion].mean()) * factor
            print("xGonioRaw%s paired to %s (shifted via its mean) with factor %E --> xGonio%s" % (dof, companion, factor, dof))
        else:
            df["xGonio%s" % dof] = raw + df[companion] * factor
            print("xGonioRaw%s paired to %s (as it is in df) with factor %E --> xGonio%s" % (dof, companion, factor, dof))
    return df
###############################################################################
###############################################################################
def equalise(df, lsDigiCh, equalMap):
    """Equalise the digitizer pulse heights channel by channel.

    For each run and each channel in lsDigiCh, digiPH<ch> is computed from
    digiPHRaw<ch>: if an equalisation entry exists in equalMap the stored
    function is applied (with its extra arguments), otherwise the raw
    values are copied as they are.

    equalMap format: {run: {channel: (function, [extraArgs...])}}.
    """
    for run in df["iRun"].unique():
        inRun = df["iRun"] == run
        print("run %s:" % run)
        if run not in equalMap:
            print("digiPH* = digiPHRaw* (all var. in lsDigiCh), i.e. not equalised (run not in equalMap)")
            for ch in lsDigiCh:
                df.loc[inRun, "digiPH" + ch] = df["digiPHRaw" + ch]
            continue
        for ch in lsDigiCh:
            if ch in equalMap[run]:
                func, extraArgs = equalMap[run][ch][0], equalMap[run][ch][1]
                # reconstruct a printable form of the (lambda) function from
                # its source, stopping at the ", 'end'" marker if present
                src = inspect.getsource(func)
                funcStr = (src.partition("lambda ")[1] + src.partition("lambda ")[2]).partition(", 'end'")[0]
                print("digiPHRaw%s --> digiPH%s via %s" % (ch, ch, funcStr))
                df.loc[inRun, "digiPH" + ch] = func(df["digiPHRaw" + ch], *extraArgs)
            else:
                print("digiPH%s = digiPHRaw%s, i.e. not equalised (not in equalMap)" % (ch, ch))
                df.loc[inRun, "digiPH" + ch] = df["digiPHRaw" + ch]
    return df
###############################################################################
###############################################################################
def defineDigiBooleans(df, lsDigiCh, digiPHCut, digiTimeCut, bDigiTime):
    """Add per-channel boolean selections on digitizer PH and time.

    For each run, boolDigiPH<ch> (boolDigiTime<ch>) is True strictly inside
    the (min, max) interval given in digiPHCut (digiTimeCut); channels or
    runs with no cut defined get an always-True boolean.  Time booleans are
    only created for channels flagged as having a time in bDigiTime.

    Cut map format: {run: {channel: (min, max)}}.
    """
    for run in df["iRun"].unique():
        inRun = df["iRun"] == run
        print("run %s:" % run)
        phCutsDone = []
        timeCutsDone = []
        # pulse-height interval boolean
        if run in digiPHCut:
            for ch in lsDigiCh:
                if ch in digiPHCut[run]:
                    lo, hi = digiPHCut[run][ch][0], digiPHCut[run][ch][1]
                    df.loc[inRun, "boolDigiPH" + ch] = (df["digiPH" + ch] > lo) & (df["digiPH" + ch] < hi)
                    phCutsDone.append(ch)
                else:
                    df.loc[inRun, "boolDigiPH" + ch] = True
            print("cuts added to df: boolDigiPH + %s" % str(phCutsDone))
        else:
            print("run not in digiPHCut --> boolDigiPH... always True for all the channels")
            for ch in lsDigiCh:
                df.loc[inRun, "boolDigiPH" + ch] = True
        # time interval boolean (only for channels whose time is available)
        if run in digiTimeCut:
            for ch in lsDigiCh:
                if not bDigiTime[ch]:
                    continue
                if ch in digiTimeCut[run]:
                    lo, hi = digiTimeCut[run][ch][0], digiTimeCut[run][ch][1]
                    df.loc[inRun, "boolDigiTime" + ch] = (df["digiTime" + ch] > lo) & (df["digiTime" + ch] < hi)
                    timeCutsDone.append(ch)
                else:
                    df.loc[inRun, "boolDigiTime" + ch] = True
            print("cuts added to df: boolDigiTime + %s" % str(timeCutsDone))
        else:
            print("run not in digiTimeCut --> boolDigiTime... always True for all the channels whose time is available")
            for ch in lsDigiCh:
                if bDigiTime[ch]:
                    df.loc[inRun, "boolDigiTime" + ch] = True
    return df
###############################################################################
###############################################################################
def caloSum(df, bPHCalo0, lsDigiChCalo, caloName, bOverwrite=True):
    """Build PHCalo<caloName> as the sum of the single-channel digiPH columns.

    For each run, PHCalo<caloName> is (re)computed as the sum of
    df["digiPH"+ch] over the channels listed in lsDigiChCalo[run].  The sum
    is only performed when the column is not already present (bPHCalo0
    False) or when overwriting is requested (bOverwrite, True by default),
    and only when a non-empty channel list exists for the run.

    Args:
        df: input dataframe -- modified in place and also returned.
        bPHCalo0: whether PHCalo<caloName> was already in the input df.
        lsDigiChCalo: {run: [channel names]} to be summed.
        caloName: calorimeter tag appended to the output column name.
        bOverwrite: overwrite a pre-existing PHCalo<caloName> if True.

    Returns:
        (df, bPHCalo) with bPHCalo = {run: bool} telling, run by run,
        whether PHCalo<caloName> is available after this call.
    """
    bPHCalo = {}
    for iRun in df["iRun"].unique():
        dfBool = df["iRun"] == iRun
        print("run %s:" % iRun)
        if not bPHCalo0:
            print("PHCalo%s not already in df --> can be created" % caloName)
            strChange = "added to"
        else:
            if bOverwrite:
                print("PHCalo%s already in df with (mean, std) = (%f, %f) --> can be overwritten" % (caloName, df[dfBool]["PHCalo"+caloName].mean(), df[dfBool]["PHCalo"+caloName].std()))
                strChange = "overwritten in"
            else:
                print("PHCalo%s already in df with (mean, std) = (%f, %f) --> keeping it" % (caloName, df[dfBool]["PHCalo"+caloName].mean(), df[dfBool]["PHCalo"+caloName].std()))
        # PHCalo... (according to caloName) only created if
        # - not already in df
        # - a priori existing in df, but overwriting is required (True by default)
        if (not bPHCalo0) | (bPHCalo0 & bOverwrite):
            if iRun in lsDigiChCalo:
                if len(lsDigiChCalo[iRun])>0:
                    # sum computed over the full df, then assigned to this run's rows
                    df.loc[dfBool, "PHCalo"+caloName] = sum([df["digiPH" + s] for s in lsDigiChCalo[iRun]])
                    bPHCalo.update({iRun: True})
                    print("PHCalo%s %s df" % (caloName, strChange))
                else:
                    bPHCalo.update({iRun: False})
                    print("PHCalo%s not %s df (list of calo. channels empty for this run)" % (caloName, strChange))
                    if bPHCalo0:
                        print("(despite raw PHCalo%s not being removed from df)" % caloName)
            else:
                bPHCalo.update({iRun: False})
                print("PHCalo%s not %s df (run not in list of calo. channels)" % (caloName, strChange))
                if bPHCalo0:
                    print("(despite raw PHCalo%s not being removed from df)" % caloName)
        else:
            # pre-existing column deliberately kept --> still flagged as available
            bPHCalo.update({iRun: True})
    # note: if PHCalo... already in df + bOverwrite=True + computation not doable for some reason
    # --> original raw df variable not removed from df, but bPHCalo=False (run by run)
    return df, bPHCalo
###############################################################################
###############################################################################
def caloTimeBool(df, bPHCalo, lsDigiChCalo, bDigiTime, caloName):
    """Add boolTimeCalo<caloName>: OR of the single-channel time booleans.

    For each run, if PHCalo<caloName> was built from single channels
    (bPHCalo[run] True and a non-empty channel list exists for the run),
    boolTimeCalo<caloName> is the OR of boolDigiTime<ch> over the channels
    whose time is available (per bDigiTime); otherwise it is set to True
    everywhere for that run.

    Args:
        df: input dataframe -- modified in place and also returned.
        bPHCalo: {run: bool}, whether PHCalo<caloName> is available per run.
        lsDigiChCalo: {run: [channels]} summed into PHCalo<caloName>.
        bDigiTime: {channel: bool}, whether the channel time exists in df.
        caloName: calorimeter tag appended to column names.

    Returns:
        df with the boolTimeCalo<caloName> column added.
    """
    for iRun in df["iRun"].unique():
        dfBool = df["iRun"] == iRun
        print("run %s:" % iRun)
        # the two original duplicated fallback branches are consolidated here;
        # the per-channel OR is only possible when all three conditions hold
        if bPHCalo[iRun] and (iRun in lsDigiChCalo) and len(lsDigiChCalo[iRun]) > 0:
            lsTimeAvail = [s for s in lsDigiChCalo[iRun] if bDigiTime[s]]
            print("%d time entries found in df for Calo%s channels --> boolTimeCalo%s added to df (OR between channels)" % (len(lsTimeAvail), caloName, caloName))
            # start from False and OR in every channel whose time is available
            df.loc[dfBool, "boolTimeCalo"+caloName] = False
            for iCh in lsTimeAvail:
                df.loc[dfBool, "boolTimeCalo"+caloName] = df["boolTimeCalo"+caloName] | df["boolDigiTime"+iCh]
        else:
            # no per-channel information --> selection kept always True;
            # message distinguishes "PHCalo missing" from "not built from channels"
            if bPHCalo[iRun]:
                print("requested PHCalo%s not calculated from single channels --> boolTimeCalo%s always True" % (caloName, caloName))
            else:
                print("requested PHCalo%s not available --> boolTimeCalo%s always True" % (caloName, caloName))
            df.loc[dfBool, "boolTimeCalo"+caloName] = True
    return df
###############################################################################
###############################################################################
def calibrate(df, bE0, calibMap, caloName, bOverwrite=True):
    """Convert PHCalo<caloName> into a calibrated energy E<caloName>.

    E<caloName> is (re)computed, run by run, by applying the calibration
    function stored in calibMap to the PHCalo<caloName> column.  It is only
    created when it is not already in df (bE0 False) or when overwriting is
    requested (bOverwrite, True by default), and requires both a calibration
    entry for the run and the PHCalo column to exist in df.

    Args:
        df: input dataframe -- modified in place and also returned.
        bE0: whether E<caloName> was already present in the input df.
        calibMap: {run: (function, [extraArgs...])} calibration functions.
        caloName: calorimeter tag appended to column names.
        bOverwrite: overwrite a pre-existing E<caloName> if True.

    Returns:
        (df, bE) with bE = {run: bool} success flag -- every run now gets an
        entry, so callers can index bE safely.
    """
    bE = {}
    for iRun in df["iRun"].unique():
        dfBool = df["iRun"] == iRun
        print("run %s:" % iRun)
        if not bE0:
            print("E%s not already in df --> can be created" % caloName)
            strChange = "added to"
        else:
            if bOverwrite:
                print("E%s already in df with (mean, std) = (%f, %f) --> can be overwritten" % (caloName, df[dfBool]["E"+caloName].mean(), df[dfBool]["E"+caloName].std()))
                strChange = "overwritten in"
            else:
                print("E%s already in df with (mean, std) = (%f, %f) --> keeping it" % (caloName, df[dfBool]["E"+caloName].mean(), df[dfBool]["E"+caloName].std()))
        # E... (according to caloName) only created if
        # - not already in df or a priori existing in df, but overwriting is required (True by default)
        # - calibration function is defined (for each run)
        # - total detector PH is available in df
        if (not bE0) | (bE0 & bOverwrite):
            if iRun in calibMap:
                if "PHCalo"+caloName in df.columns:
                    func = calibMap[iRun][0]
                    args = [df["PHCalo" + caloName]] + calibMap[iRun][1]
                    # printable form of the (lambda) calibration function,
                    # stopping at the ", 'end'" marker if present
                    funcSrc = inspect.getsource(func)
                    funcStr = (funcSrc.partition("lambda ")[1]+funcSrc.partition("lambda ")[2]).partition(", 'end'")[0]
                    df.loc[dfBool, "E" + caloName] = func(*args)
                    bE.update({iRun: True})
                    print("E%s %s df -- obtained via %s" % (caloName, strChange, funcStr))
                else:
                    # bug fix: this branch previously left bE without an entry
                    # for the run (KeyError downstream) and printed the wrong
                    # reason ("run not in list of calo. channels")
                    bE.update({iRun: False})
                    print("E%s not %s df (PHCalo%s not in df)" % (caloName, strChange, caloName))
                    if bE0:
                        print("(despite raw E%s not being removed from df)" % caloName)
            else:
                bE.update({iRun: False})
                print("E%s not %s df (calib. function not defined for this run)" % (caloName, strChange))
                if bE0:
                    print("(despite raw E%s not being removed from df)" % caloName)
        else:
            bE.update({iRun: True})
    # note: if E... already in df + bOverwrite=True + computation not doable for some reason
    # --> original raw df variable not removed from df, but bE=False (run by run)
    return df, bE
from expungeservice.models.disposition import DispositionStatus
from tests.factories.crawler_factory import CrawlerFactory
from tests.fixtures.case_details import CaseDetails
from tests.fixtures.john_doe import JohnDoe
from expungeservice.models.record import Record
from expungeservice.util import DateWithFuture as date_class
def test_search_function():
    """End-to-end crawl of a three-case record: case order, charge counts,
    and parsed dispositions (incl. UNKNOWN when dispositions are missing)."""
    record = CrawlerFactory.create(
        JohnDoe.RECORD,
        {
            "X0001": CaseDetails.CASE_X1,
            "X0002": CaseDetails.CASE_WITHOUT_FINANCIAL_SECTION,
            "X0003": CaseDetails.CASE_WITHOUT_DISPOS,
        },
    )
    # sorting by date results in the order X0003, X0002, X0001
    assert record.__class__ == Record
    assert len(record.cases) == 3
    assert len(record.cases[2].charges) == 3
    assert len(record.cases[1].charges) == 1
    assert len(record.cases[0].charges) == 3
    assert record.cases[2].charges[0].disposition.ruling == "Convicted - Failure to Appear"
    assert record.cases[2].charges[0].disposition.date == date_class(2017, 6, 12)
    assert record.cases[2].charges[1].disposition.ruling == "Dismissed"
    assert record.cases[2].charges[1].disposition.date == date_class(2017, 6, 12)
    assert record.cases[2].charges[2].disposition.ruling == "Dismissed"
    assert record.cases[2].charges[2].disposition.date == date_class(2017, 6, 12)
    assert record.cases[1].charges[0].disposition.ruling == "Dismissed"
    assert record.cases[1].charges[0].disposition.date == date_class(1992, 4, 30)
    # the case without dispositions yields UNKNOWN status for every charge
    # (these three checks were previously duplicated verbatim -- deduplicated)
    for charge in record.cases[0].charges:
        assert charge.disposition.status == DispositionStatus.UNKNOWN
def test_a_blank_search_response():
    """A record built from an empty search result contains no cases."""
    empty_record = CrawlerFactory.create(JohnDoe.BLANK_RECORD, {})
    assert len(empty_record.cases) == 0
def test_single_charge_conviction():
    """A single-case record parses its only charge with full details."""
    record = CrawlerFactory.create(JohnDoe.SINGLE_CASE_RECORD, {"CASEJD1": CaseDetails.CASEJD1})
    assert len(record.cases) == 1
    assert len(record.cases[0].charges) == 1
    charge = record.cases[0].charges[0]
    assert charge.name == "Loading Zone"
    assert charge.statute == "29"
    assert charge.level == "Violation Unclassified"
    assert charge.date == date_class(2008, 9, 4)
    assert charge.disposition.ruling == "Convicted"
    assert charge.disposition.date == date_class(2008, 11, 18)
def test_nonzero_balance_due_on_case():
    """The case carrying a financial section reports its outstanding balance."""
    record = CrawlerFactory.create(
        JohnDoe.RECORD,
        {
            "X0001": CaseDetails.CASE_X1,
            "X0002": CaseDetails.CASE_WITHOUT_FINANCIAL_SECTION,
            "X0003": CaseDetails.CASE_WITHOUT_DISPOS,
        },
    )
    balance = record.cases[2].summary.get_balance_due()
    assert balance == 1516.80
def test_zero_balance_due_on_case():
    """A case without outstanding fines reports a zero balance."""
    record = CrawlerFactory.create(JohnDoe.SINGLE_CASE_RECORD, {"CASEJD1": CaseDetails.CASEJD1})
    only_case = record.cases[0]
    assert only_case.summary.get_balance_due() == 0
|
from tsdb.dictdb import *
from tsdb.tsdb_client import TSDBClient
from tsdb.tsdb_server import *
from tsdb.tsdb_serialization import *
from tsdb.tsdb_ops import *
from tsdb.tsdb_rest_client import TSDB_REST_Client |
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# Adapted from https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
# which has the following license...
# https://github.com/MIC-DKFZ/nnDetection/blob/main/LICENSE
#
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
# Adapted from https://github.com/cocodataset/cocoapi
# which has the following license...
# https://github.com/cocodataset/cocoapi/blob/master/license.txt
# Copyright (c) 2014, Piotr Dollar and Tsung-Yi Lin
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
"""
This script is almost same with https://github.com/MIC-DKFZ/nnDetection/blob/main/nndet/evaluator/detection/coco.py
The changes include 1) code reformatting, 2) docstrings.
"""
import logging as logger
import time
from typing import Dict, List, Sequence, Tuple, Union
import numpy as np
class COCOMetric:
def __init__(
self,
classes: Sequence[str],
iou_list: Sequence[float] = (0.1, 0.5, 0.75),
iou_range: Sequence[float] = (0.1, 0.5, 0.05),
max_detection: Sequence[int] = (1, 5, 100),
per_class: bool = True,
verbose: bool = True,
):
"""
Class to compute COCO metrics
Metrics computed includes,
- mAP over the IoU range specified by `iou_range` at last value of `max_detection`
- AP values at IoU thresholds specified by `iou_list` at last value of `max_detection`
- AR over max detections thresholds defined by `max_detection` (over iou range)
Args:
classes (Sequence[str]): name of each class (index needs to correspond to predicted class indices!)
iou_list (Sequence[float]): specific thresholds where ap is evaluated and saved
iou_range (Sequence[float]): (start, stop, step) for mAP iou thresholds
max_detection (Sequence[int]): maximum number of detections per image
verbose (bool): log time needed for evaluation
Example:
.. code-block:: python
from monai.data.box_utils import box_iou
from monai.apps.detection.metrics.coco import COCOMetric
from monai.apps.detection.metrics.matching import matching_batch
# 3D example outputs of one image from detector
val_outputs_all = [
{"boxes": torch.tensor([[1,1,1,3,4,5]],dtype=torch.float16),
"labels": torch.randint(3,(1,)),
"scores": torch.randn((1,)).absolute()},
]
val_targets_all = [
{"boxes": torch.tensor([[1,1,1,2,6,4]],dtype=torch.float16),
"labels": torch.randint(3,(1,))},
]
coco_metric = COCOMetric(
classes=['c0','c1','c2'], iou_list=[0.1], max_detection=[10]
)
results_metric = matching_batch(
iou_fn=box_iou,
iou_thresholds=coco_metric.iou_thresholds,
pred_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_outputs_all],
pred_classes=[val_data_i["labels"].numpy() for val_data_i in val_outputs_all],
pred_scores=[val_data_i["scores"].numpy() for val_data_i in val_outputs_all],
gt_boxes=[val_data_i["boxes"].numpy() for val_data_i in val_targets_all],
gt_classes=[val_data_i["labels"].numpy() for val_data_i in val_targets_all],
)
val_metric_dict = coco_metric(results_metric)
print(val_metric_dict)
"""
self.verbose = verbose
self.classes = classes
self.per_class = per_class
iou_list_np = np.array(iou_list)
_iou_range = np.linspace(
iou_range[0], iou_range[1], int(np.round((iou_range[1] - iou_range[0]) / iou_range[2])) + 1, endpoint=True
)
self.iou_thresholds = np.union1d(iou_list_np, _iou_range)
self.iou_range = iou_range
# get indices of iou values of ious range and ious list for later evaluation
self.iou_list_idx = np.nonzero(iou_list_np[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
self.iou_range_idx = np.nonzero(_iou_range[:, np.newaxis] == self.iou_thresholds[np.newaxis])[1]
if (
not (self.iou_thresholds[self.iou_list_idx] == iou_list_np).all()
or not (self.iou_thresholds[self.iou_range_idx] == _iou_range).all()
):
raise ValueError(
"Require self.iou_thresholds[self.iou_list_idx] == iou_list_np and "
"self.iou_thresholds[self.iou_range_idx] == _iou_range."
)
self.recall_thresholds = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
self.max_detections = max_detection
def __call__(self, *args, **kwargs) -> Tuple[Dict[str, float], Union[Dict[str, np.ndarray], None]]:
"""
Compute metric. See :func:`compute` for more information.
Args:
*args: positional arguments passed to :func:`compute`
**kwargs: keyword arguments passed to :func:`compute`
Returns:
Dict[str, float]: dictionary with scalar values for evaluation
Dict[str, np.ndarray]: dictionary with arrays, e.g. for visualization of graphs
"""
return self.compute(*args, **kwargs)
def check_number_of_iou(self, *args) -> None:
"""
Check if shape of input in first dimension is consistent with expected IoU values
(assumes IoU dimension is the first dimension)
Args:
args: array like inputs with shape function
"""
num_ious = len(self.get_iou_thresholds())
for arg in args:
if arg.shape[0] != num_ious:
raise ValueError(
f"Require arg.shape[0] == len(self.get_iou_thresholds()). Got arg.shape[0]={arg.shape[0]}, "
f"self.get_iou_thresholds()={self.get_iou_thresholds()}."
)
def get_iou_thresholds(self) -> Sequence[float]:
"""
Return IoU thresholds needed for this metric in an numpy array
Returns:
Sequence[float]: IoU thresholds [M], M is the number of thresholds
"""
return list(self.iou_thresholds)
def compute(self, results_list: List[Dict[int, Dict[str, np.ndarray]]]) -> Tuple[Dict[str, float], None]:
"""
Compute COCO metrics
Args:
results_list (List[Dict[int, Dict[str, np.ndarray]]]): list with results per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
- `dtMatches`: matched detections [T, D], where T = number of
thresholds, D = number of detections
- `gtMatches`: matched ground truth boxes [T, G], where T = number
of thresholds, G = number of ground truth
- `dtScores`: prediction scores [D] detection scores
- `gtIgnore`: ground truth boxes which should be ignored
[G] indicate whether ground truth should be ignored
- `dtIgnore`: detections which should be ignored [T, D],
indicate which detections should be ignored
Returns:
Dict[str, float], dictionary with coco metrics
"""
if self.verbose:
logger.info("Start COCO metric computation...")
tic = time.time()
dataset_statistics = self._compute_statistics(results_list=results_list) # Dict[str, Union[np.ndarray, List]]
if self.verbose:
toc = time.time()
logger.info(f"Statistics for COCO metrics finished (t={(toc - tic):0.2f}s).")
results = {}
results.update(self._compute_ap(dataset_statistics))
results.update(self._compute_ar(dataset_statistics))
if self.verbose:
toc = time.time()
logger.info(f"COCO metrics computed in t={(toc - tic):0.2f}s.")
return results, None
def _compute_ap(self, dataset_statistics: Dict[str, Union[np.ndarray, List]]) -> Dict[str, float]:
"""
Compute AP metrics
Args:
dataset_statistics (List[Dict[int, Dict[str, np.ndarray]]]): list with result s per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
- `dtMatches`: matched detections [T, D], where T = number of
thresholds, D = number of detections
- `gtMatches`: matched ground truth boxes [T, G], where T = number
of thresholds, G = number of ground truth
- `dtScores`: prediction scores [D] detection scores
- `gtIgnore`: ground truth boxes which should be ignored
[G] indicate whether ground truth should be ignored
- `dtIgnore`: detections which should be ignored [T, D],
indicate which detections should be ignored
"""
results = {}
if self.iou_range: # mAP
key = (
f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
f"MaxDet_{self.max_detections[-1]}"
)
results[key] = self._select_ap(dataset_statistics, iou_idx=self.iou_range_idx, max_det_idx=-1)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = (
f"{cls_str}_"
f"mAP_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
f"MaxDet_{self.max_detections[-1]}"
)
results[key] = self._select_ap(
dataset_statistics, iou_idx=self.iou_range_idx, cls_idx=cls_idx, max_det_idx=-1
)
for idx in self.iou_list_idx: # AP@IoU
key = f"AP_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], max_det_idx=-1)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = f"{cls_str}_" f"AP_IoU_{self.iou_thresholds[idx]:.2f}_" f"MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ap(dataset_statistics, iou_idx=[idx], cls_idx=cls_idx, max_det_idx=-1)
return results
def _compute_ar(self, dataset_statistics: Dict[str, Union[np.ndarray, List]]) -> Dict[str, float]:
"""
Compute AR metrics
Args:
dataset_statistics (List[Dict[int, Dict[str, np.ndarray]]]): list with result s per image (in list)
per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
- `dtMatches`: matched detections [T, D], where T = number of
thresholds, D = number of detections
- `gtMatches`: matched ground truth boxes [T, G], where T = number
of thresholds, G = number of ground truth
- `dtScores`: prediction scores [D] detection scores
- `gtIgnore`: ground truth boxes which should be ignored
[G] indicate whether ground truth should be ignored
- `dtIgnore`: detections which should be ignored [T, D],
indicate which detections should be ignored
"""
results = {}
for max_det_idx, max_det in enumerate(self.max_detections): # mAR
key = f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_MaxDet_{max_det}"
results[key] = self._select_ar(dataset_statistics, max_det_idx=max_det_idx)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = (
f"{cls_str}_"
f"mAR_IoU_{self.iou_range[0]:.2f}_{self.iou_range[1]:.2f}_{self.iou_range[2]:.2f}_"
f"MaxDet_{max_det}"
)
results[key] = self._select_ar(dataset_statistics, cls_idx=cls_idx, max_det_idx=max_det_idx)
for idx in self.iou_list_idx: # AR@IoU
key = f"AR_IoU_{self.iou_thresholds[idx]:.2f}_MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ar(dataset_statistics, iou_idx=idx, max_det_idx=-1)
if self.per_class:
for cls_idx, cls_str in enumerate(self.classes): # per class results
key = f"{cls_str}_" f"AR_IoU_{self.iou_thresholds[idx]:.2f}_" f"MaxDet_{self.max_detections[-1]}"
results[key] = self._select_ar(dataset_statistics, iou_idx=idx, cls_idx=cls_idx, max_det_idx=-1)
return results
@staticmethod
def _select_ap(
dataset_statistics: dict,
iou_idx: Union[int, List[int], np.ndarray, None] = None,
cls_idx: Union[int, Sequence[int], None] = None,
max_det_idx: int = -1,
) -> float:
"""
Compute average precision
Args:
dataset_statistics (dict): computed statistics over dataset
- `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
detection thresholds
- `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
- `precision`: Precision values at specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
- `scores`: Scores corresponding to specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
iou_idx: index of IoU values to select for evaluation(if None, all values are used)
cls_idx: class indices to select, if None all classes will be selected
max_det_idx (int): index to select max detection threshold from data
Returns:
np.ndarray: AP value
"""
prec = dataset_statistics["precision"]
if iou_idx is not None:
prec = prec[iou_idx]
if cls_idx is not None:
prec = prec[..., cls_idx, :]
prec = prec[..., max_det_idx]
return float(np.mean(prec))
@staticmethod
def _select_ar(
dataset_statistics: dict,
iou_idx: Union[int, Sequence[int], None] = None,
cls_idx: Union[int, Sequence[int], None] = None,
max_det_idx: int = -1,
) -> float:
"""
Compute average recall
Args:
dataset_statistics (dict): computed statistics over dataset
- `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
detection thresholds
- `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
- `precision`: Precision values at specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
- `scores`: Scores corresponding to specified recall thresholds
[num_iou_th, num_recall_th, num_classes, num_max_detections]
iou_idx: index of IoU values to select for evaluation(if None, all values are used)
cls_idx: class indices to select, if None all classes will be selected
max_det_idx (int): index to select max detection threshold from data
Returns:
np.ndarray: recall value
"""
rec = dataset_statistics["recall"]
if iou_idx is not None:
rec = rec[iou_idx]
if cls_idx is not None:
rec = rec[..., cls_idx, :]
rec = rec[..., max_det_idx]
if len(rec[rec > -1]) == 0:
return -1.0
return float(np.mean(rec[rec > -1]))
    def _compute_statistics(
        self, results_list: List[Dict[int, Dict[str, np.ndarray]]]
    ) -> Dict[str, Union[np.ndarray, List]]:
        """
        Compute statistics needed for COCO metrics (mAP, AP of individual classes, mAP@IoU_Thresholds, AR)
        Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
        Args:
            results_list (List[Dict[int, Dict[str, np.ndarray]]]): list with results per image (in list)
                per category (dict). Inner Dict contains multiple results obtained by :func:`box_matching_batch`.
                - `dtMatches`: matched detections [T, D], where T = number of
                    thresholds, D = number of detections
                - `gtMatches`: matched ground truth boxes [T, G], where T = number
                    of thresholds, G = number of ground truth
                - `dtScores`: prediction scores [D] detection scores
                - `gtIgnore`: ground truth boxes which should be ignored
                    [G] indicate whether ground truth should be ignored
                - `dtIgnore`: detections which should be ignored [T, D],
                    indicate which detections should be ignored
        Returns:
            dict: computed statistics over dataset
                - `counts`: Number of thresholds, Number recall thresholds, Number of classes, Number of max
                    detection thresholds
                - `recall`: Computed recall values [num_iou_th, num_classes, num_max_detections]
                - `precision`: Precision values at specified recall thresholds
                    [num_iou_th, num_recall_th, num_classes, num_max_detections]
                - `scores`: Scores corresponding to specified recall thresholds
                    [num_iou_th, num_recall_th, num_classes, num_max_detections]
        """
        num_iou_th = len(self.iou_thresholds)
        num_recall_th = len(self.recall_thresholds)
        num_classes = len(self.classes)
        num_max_detections = len(self.max_detections)
        # -1 for the precision of absent categories
        precision = -np.ones((num_iou_th, num_recall_th, num_classes, num_max_detections))
        recall = -np.ones((num_iou_th, num_classes, num_max_detections))
        scores = -np.ones((num_iou_th, num_recall_th, num_classes, num_max_detections))
        for cls_idx, cls_i in enumerate(self.classes):  # for each class
            for max_det_idx, max_det in enumerate(self.max_detections):  # for each maximum number of detections
                # gather per-image results for this class; images without this
                # class are simply skipped
                results = [r[cls_idx] for r in results_list if cls_idx in r]  # len is num_images
                if len(results) == 0:
                    logger.warning(f"WARNING, no results found for coco metric for class {cls_i}")
                    continue
                # truncate each image's detections to max_det before pooling
                dt_scores = np.concatenate([r["dtScores"][0:max_det] for r in results])
                # different sorting method generates slightly different results.
                # mergesort is used to be consistent as Matlab implementation.
                inds = np.argsort(-dt_scores, kind="mergesort")
                dt_scores_sorted = dt_scores[inds]
                # r['dtMatches'] [T, R], where R = sum(all detections)
                dt_matches = np.concatenate([r["dtMatches"][:, 0:max_det] for r in results], axis=1)[:, inds]
                dt_ignores = np.concatenate([r["dtIgnore"][:, 0:max_det] for r in results], axis=1)[:, inds]
                self.check_number_of_iou(dt_matches, dt_ignores)
                gt_ignore = np.concatenate([r["gtIgnore"] for r in results])
                num_gt = np.count_nonzero(gt_ignore == 0)  # number of ground truth boxes (non ignored)
                if num_gt == 0:
                    logger.warning(f"WARNING, no gt found for coco metric for class {cls_i}")
                    continue
                # ignore cases need to be handled differently for tp and fp
                tps = np.logical_and(dt_matches, np.logical_not(dt_ignores))
                fps = np.logical_and(np.logical_not(dt_matches), np.logical_not(dt_ignores))
                # cumulative tp/fp counts ordered by descending score
                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float32)
                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float32)
                for th_ind, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):  # for each threshold th_ind
                    tp, fp = np.array(tp), np.array(fp)
                    r, p, s = _compute_stats_single_threshold(tp, fp, dt_scores_sorted, self.recall_thresholds, num_gt)
                    recall[th_ind, cls_idx, max_det_idx] = r
                    precision[th_ind, :, cls_idx, max_det_idx] = p
                    # corresponding score thresholds for recall steps
                    scores[th_ind, :, cls_idx, max_det_idx] = s
        return {
            "counts": [num_iou_th, num_recall_th, num_classes, num_max_detections],  # [4]
            "recall": recall,  # [num_iou_th, num_classes, num_max_detections]
            "precision": precision,  # [num_iou_th, num_recall_th, num_classes, num_max_detections]
            "scores": scores,  # [num_iou_th, num_recall_th, num_classes, num_max_detections]
        }
def _compute_stats_single_threshold(
tp: np.ndarray,
fp: np.ndarray,
dt_scores_sorted: np.ndarray,
recall_thresholds: Union[np.ndarray, Sequence[float]],
num_gt: int,
) -> Tuple[float, np.ndarray, np.ndarray]:
"""
Compute recall value, precision curve and scores thresholds
Adapted from https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py
Args:
tp (np.ndarray): cumsum over true positives [R], R is the number of detections
fp (np.ndarray): cumsum over false positives [R], R is the number of detections
dt_scores_sorted (np.ndarray): sorted (descending) scores [R], R is the number of detections
recall_thresholds (Sequence[float]): recall thresholds which should be evaluated
num_gt (int): number of ground truth bounding boxes (excluding boxes which are ignored)
Returns:
- float, overall recall for given IoU value
- np.ndarray, precision values at defined recall values
[RTH], where RTH is the number of recall thresholds
- np.ndarray, prediction scores corresponding to recall values
[RTH], where RTH is the number of recall thresholds
"""
num_recall_th = len(recall_thresholds)
rc = tp / num_gt
# np.spacing(1) is the smallest representable epsilon with float
pr = tp / (fp + tp + np.spacing(1))
if len(tp):
recall = rc[-1]
else:
# no prediction
recall = 0
# array where precision values nearest to given recall th are saved
precision = np.zeros((num_recall_th,))
# save scores for corresponding recall value in here
th_scores = np.zeros((num_recall_th,))
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist()
precision = precision.tolist()
# smooth precision curve (create box shape)
for i in range(len(tp) - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
# get indices to nearest given recall threshold (nn interpolation!)
inds = np.searchsorted(rc, recall_thresholds, side="left")
try:
for save_idx, array_index in enumerate(inds):
precision[save_idx] = pr[array_index]
th_scores[save_idx] = dt_scores_sorted[array_index]
except BaseException:
pass
return recall, np.array(precision), np.array(th_scores)
|
import os
import omlet.utils as U
from pytorch_lightning.callbacks import Callback
class SourceCodeBackup(Callback):
    """Lightning callback that snapshots the experiment's source code into the
    experiment directory when the trainer initializes.
    """
    # only the master (rank-0) process should perform the backup
    master_only = True

    def __init__(self,
                 exp_dir,
                 source_dir,
                 include_pattern=('*.py', '*.sh'),
                 subdir='code'):
        """
        Args:
            exp_dir: experiment directory; the backup is written to
                ``<exp_dir>/<subdir>``
            source_dir: directory containing the source code to back up;
                may be absolute or relative to this file
            include_pattern: glob patterns of files to copy
            subdir: subdirectory of ``exp_dir`` that receives the copy
        """
        self.record_dir = os.path.join(os.path.expanduser(exp_dir), subdir)
        self.source_dir = self._get_code_path(source_dir)
        assert os.path.exists(self.source_dir), \
            'source code dir "{}" does not exist'.format(self.source_dir)
        self.include_pattern = include_pattern

    def _get_code_path(self, path):
        """Handles both abspath and relative path."""
        path = os.path.expanduser(path)
        if os.path.isabs(path):
            return path
        # relative to the current file (eval('__file__') was equivalent,
        # just obfuscated)
        path = os.path.join(os.path.dirname(__file__), path)
        return os.path.abspath(path)

    def on_init_start(self, trainer):
        U.f_mkdir(self.record_dir)
        U.f_copytree(
            self.source_dir, self.record_dir,
            include=self.include_pattern, exist_ok=True
        )
        # bug fix: print() does not interpolate '{}' placeholders; the message
        # previously printed the literal braces followed by the two paths
        print(
            'Backed up source code {} to {}'.format(
                self.source_dir, self.record_dir)
        )
|
import json
import shutil
import tarfile
import tempfile
from multiprocessing import Pool, cpu_count
from pathlib import Path
from typing import List, Union
import click
import pandas as pd
import rouge
import torch
from models import Model
from reader import ReviewTest, OptimusTest
from tokenizer import Tokenizer
from util import avg, overlap, oracle, load_tokenizer, load_data, build_model, BAD_WORDS, powerset
def brute_force_gen(model: Model,
                    data: Union[ReviewTest, OptimusTest],
                    tgt_tokenizer: Tokenizer,
                    num_beams: int = 4,
                    bad_words_ids: List[int] = None,
                    split: int = 1, ):
    """Generate one candidate summary per subset of each instance's reviews.

    For every instance, the reviews are encoded to latent vectors, the latents
    are averaged over each subset in the powerset of reviews, and one summary
    is decoded per subset.

    Args:
        model: trained summarization model; ``model(**x).q.loc`` is used as the
            per-review latent codes (presumably the posterior means — confirm
            against the Model implementation)
        data: evaluation set yielding dicts with at least "reviews" and "summary"
        tgt_tokenizer: tokenizer used to decode generated token ids
        num_beams: beam-search width passed to ``model.generate``
        bad_words_ids: token ids forbidden during generation
        split: number of chunks to split the latent batch into, bounding the
            size of each ``generate`` call

    Returns:
        A list with one entry per instance; each entry is a list of dicts,
        one per review subset, containing the selected reviews, all reviews,
        the reference summary, the generated text and the subset indices.
    """
    outs = []
    for i, x in enumerate(data):
        # one latent vector per review of this instance
        z_raw = model(**x).q.loc
        idxes = powerset(z_raw.size(0))
        # average the latent codes over each subset of reviews
        zs = torch.stack([z_raw[idx].mean(dim=0) for idx in idxes])
        gens = []
        # generate in chunks to limit peak memory
        for z in torch.split(zs, len(idxes) // split):
            g = model.generate(z, num_beams=num_beams, bad_words_ids=bad_words_ids)
            gens.extend(tgt_tokenizer.decode(g))
        outs.append([{"selected": [x["reviews"][i] for i in idx],
                      "reviews": x["reviews"],
                      "summary": x["summary"],
                      "predicted": gen,
                      "idx": idx} for idx, gen in zip(idxes, gens)])
    return outs
@click.command()
@click.argument("log_dir_or_file", type=click.Path(exists=True))
@click.option("--split", type=click.INT, default=1)
def main(log_dir_or_file, split):
    """Run brute-force subset generation and COOP selection on a trained model.

    LOG_DIR_OR_FILE is either a training log directory or a .tar.gz archive
    of one; it must contain config.json and pytorch_model.bin.
    """
    log_dir_or_file = Path(log_dir_or_file)
    tempdir = None
    if not log_dir_or_file.is_dir():
        # Extract archive
        # NOTE(review): extractall on an untrusted archive is vulnerable to
        # path traversal ("tarslip") — acceptable only for trusted inputs.
        tempdir = tempfile.mkdtemp()
        with tarfile.open(log_dir_or_file, "r:gz") as archive:
            archive.extractall(tempdir)
        log_dir = Path(tempdir)
    else:
        log_dir = Path(log_dir_or_file)
    config = json.load(open(log_dir / "config.json"))
    src_tokenizer, tgt_tokenizer = load_tokenizer(config)
    _, dev, test = load_data(config, src_tokenizer, tgt_tokenizer)
    bad_words_ids = tgt_tokenizer.get_ids(BAD_WORDS)
    if config["model"]["type"].lower() == "bimeanvae" and config["data_dir"].endswith("amzn"):
        # The amzn dataset often includes the pronoun I without prefix. To avoid the issue, this tweak is applied.
        bad_words_ids.extend(tgt_tokenizer.get_ids("I", no_prefix=True))
    model = build_model(config).eval()
    # map_location keeps CPU loading working when trained on GPU
    model.load_state_dict(torch.load(log_dir / "pytorch_model.bin", map_location=lambda storage, loc: storage))
    if torch.cuda.is_available():
        model.cuda()
    dev_gen = brute_force_gen(model, dev, tgt_tokenizer, bad_words_ids=bad_words_ids, split=split)
    test_gen = brute_force_gen(model, test, tgt_tokenizer, bad_words_ids=bad_words_ids, split=split)
    coop = {}
    evaluator = rouge.Rouge(metrics=["rouge-n", "rouge-l"], max_n=2, limit_length=False, apply_avg=True)
    with Pool(cpu_count()) as p:
        # score every subset with each selection criterion (avg/overlap/oracle)
        for func in (avg, overlap, oracle):
            name = func.__name__
            coop[name] = {}
            for key, val in (("dev", dev_gen), ("test", test_gen)):
                coop_score = p.map(func, val)
                # for each instance, pick the subset index maximizing the score
                index = list(range(len(val[0])))
                index = [max(index, key=lambda x: s[x]) for s in coop_score]
                selected = [v[i] for i, v in zip(index, val)]
                rouge_score = evaluator.get_scores(
                    [x["predicted"] for x in selected], [x["summary"] for x in selected])
                rouge_score = {"_".join((metric, k)): v for metric, vs in rouge_score.items() for k, v in
                               vs.items()}
                coop[name][key] = {
                    "coop_score": coop_score,
                    "index": index,
                    "rouge": rouge_score}
            df = pd.DataFrame({k: coop[name][k]["rouge"] for k in ("dev", "test")})
            df.sort_index(inplace=True)
            print(name)
            print(df)
    # Clean-up
    # NOTE(review): results are persisted only when the input was a directory;
    # for archive input the extracted tempdir (and coop.json) is discarded.
    if tempdir is not None:
        shutil.rmtree(tempdir, ignore_errors=True)
    else:
        json.dump(coop, open(log_dir / "coop.json", "w"))
if __name__ == '__main__':
    main()
|
from __future__ import annotations
from typing import TYPE_CHECKING
import random
from itertools import product
from py_rete.bind_node import BindNode
from py_rete.filter_node import FilterNode
from py_rete.ncc_node import NccPartnerNode
from py_rete.ncc_node import NccNode
from py_rete.negative_node import NegativeNode
from py_rete.join_node import JoinNode
from py_rete.pnode import PNode
from py_rete.common import WME
from py_rete.common import V
from py_rete.common import Match
from py_rete.fact import Fact
from py_rete.alpha import AlphaMemory
from py_rete.beta import ReteNode
from py_rete.beta import BetaMemory
from py_rete.conditions import Cond
from py_rete.conditions import Ncc
from py_rete.conditions import Neg
from py_rete.conditions import Filter
from py_rete.conditions import Bind
from py_rete.production import Production
if TYPE_CHECKING: # pragma: no cover
from typing import Optional
from typing import Generator
from typing import Dict
from typing import Tuple
from typing import List
from typing import Set
from typing import Union
from typing import Hashable
class ReteNetwork:
    """
    A Rete Network to store all the facts and productions to compute matches.
    """
    def __init__(self):
        # Alpha memories hashed by the (identifier, attribute, value) constant
        # test of a condition; '*' marks a wildcard position.
        self.alpha_hash: Dict[
            Tuple[Hashable, Hashable, Hashable], AlphaMemory] = {}
        self.beta_root = ReteNode()
        self.buf = None
        self.pnodes: List[PNode] = []
        self.working_memory: Set[WME] = set()
        self.facts: Dict[str, Fact] = {}
        self.fact_counter: int = 0
        self.production_counter: int = 0
        self.productions: Set[Production] = set()
    def run(self, n: int = 10) -> None:
        """
        Fires up to n rules, chosen at random. After each rule is fired the
        facts are updated and new matches computed; stops early when no
        matches remain.
        """
        while n > 0:
            matches = list(self.matches)
            if len(matches) <= 0:
                break
            match = random.choice(matches)
            match.fire()
            n -= 1
    def __repr__(self):
        """Human-readable dump of productions, facts (nested facts shown by
        id) and raw working-memory elements."""
        output = 'Productions:\n'
        for p in self.productions:
            output += "{}: {}\n".format(p.id, p)
        output += "\nFacts:\n"
        for fid in self.facts:
            copy = self.facts[fid].duplicate()
            for k in copy:
                if isinstance(copy[k], Fact):
                    copy[k] = copy[k].id
            output += "{}: {}\n".format(fid, copy)
        output += "\nWMEs:\n"
        for wme in self.working_memory:
            output += "{}\n".format(wme)
        return output
    def add_fact(self, fact: Fact) -> None:
        """
        Adds a fact to the network, assigning it a fresh "f-N" id. Nested
        facts are added recursively and referenced by id in the stored copy.
        """
        if fact.id is not None:
            raise ValueError("Fact already has an id, cannot add")
        copy = fact.duplicate()
        for k in copy:
            if isinstance(copy[k], Fact):
                if copy[k].id is None:
                    self.add_fact(copy[k])
                copy[k] = copy[k].id
        fact.id = "f-{}".format(self.fact_counter)
        copy.id = fact.id
        self.fact_counter += 1
        self.facts[fact.id] = fact
        for wme in copy.wmes:
            self.add_wme(wme)
    def remove_fact(self, fact: Fact) -> None:
        """
        Removes a fact (and all of its WMEs) from the network and clears
        its id.
        """
        if fact.id is None or fact.id not in self.facts:
            raise ValueError("Fact has no id or does not exist in network.")
        if fact.id in self.facts:
            del self.facts[fact.id]
        self.remove_wme_by_fact_id(fact.id)
        fact.id = None
    def get_fact_by_id(self, fact_id: str) -> Fact:
        """Looks up a fact by its "f-N" id; raises KeyError if absent."""
        return self.facts[fact_id]
    def update_fact(self, fact: Fact) -> None:
        """Re-adds the fact so its WMEs reflect its current contents."""
        # TODO: Figure out a fancy way to only update part of the fact
        self.remove_fact(fact)
        self.add_fact(fact)
    def remove_wme_by_fact_id(self, identifier: str) -> None:
        """Removes every WME whose identifier matches the given fact id."""
        to_remove = [wme for wme in self.working_memory if wme.identifier ==
                     identifier]
        for wme in to_remove:
            self.remove_wme(wme)
    def get_new_match(self) -> Optional[Match]:
        """Pops and returns a single not-yet-consumed match, or None."""
        for pnode in self.pnodes:
            if pnode.new:
                t = pnode.pop_new_token()
                return Match(pnode, t)
        return None
    @property
    def new_matches(self) -> Generator[Match, None, None]:
        """Yields matches that appeared since they were last consumed."""
        for pnode in self.pnodes:
            for t in pnode.new:
                yield Match(pnode, t)
    @property
    def matches(self) -> Generator[Match, None, None]:
        """Yields all currently active matches across all productions."""
        for pnode in self.pnodes:
            for t in pnode.activations:
                yield Match(pnode, t)
    @property
    def wmes(self) -> Set[WME]:
        """The set of working-memory elements currently in the network."""
        return self.working_memory
    def add_production(self, prod: Production) -> None:
        """
        Adds a production to the ReteNetwork, building (or sharing) the beta
        network for each of its condition sets.
        """
        if prod.id is not None:
            raise ValueError("Production already has an id, cannot add")
        prod.id = "p-{}".format(self.production_counter)
        prod._rete_net = self
        self.production_counter += 1
        self.productions.add(prod)
        for conds in prod.get_rete_conds():
            current_node = self.build_or_share_network_for_conditions(
                self.beta_root, conds, [])
            p_node = self.build_or_share_p(current_node, prod)
            self.pnodes.append(p_node)
            prod.p_nodes.append(p_node)
    def remove_production(self, prod: Production) -> None:
        """
        Removes a production and deletes its pnodes (and any ancestors that
        become unused) from the network.
        """
        if prod.id is None:
            raise ValueError("Production has no id, cannot remove.")
        # Remove production
        self.productions.remove(prod)
        for pnode in prod.p_nodes:
            self.delete_node_and_any_unused_ancestors(pnode)
            self.pnodes.remove(pnode)
        prod.id = None
        prod.p_nodes = []
    def add_wme(self, wme: WME) -> None:
        """Adds a WME, activating every alpha memory whose key matches it."""
        if wme in self.working_memory:
            return
        # all 8 constant/wildcard combinations for the WME's triple
        keys = product([wme.identifier, '*'],
                       [wme.attribute, '*'],
                       [wme.value, '*'])
        for key in keys:
            if key in self.alpha_hash:
                self.alpha_hash[key].activation(wme)
        self.working_memory.add(wme)
    def remove_wme(self, wme: WME) -> None:
        """Removes a WME and retracts every token and join result that
        depended on it."""
        # swap in the stored (canonical) WME object so the bookkeeping
        # unlinked below belongs to the network's own instance
        for stored_wme in self.working_memory:
            if wme == stored_wme:
                wme = stored_wme
        for am in wme.amems:
            am.items.remove(wme)
            if not am.items:
                # alpha memory went empty: right-unlink its join nodes
                for node in am.successors:
                    if (isinstance(node, JoinNode) and
                            not isinstance(node, NegativeNode)):
                        node.parent.children.remove(node)
        for t in wme.tokens:
            t.delete_token_and_descendents()
        for jr in wme.negative_join_results:
            jr.owner.join_results.remove(jr)
            if not jr.owner.join_results:
                # negative condition now satisfied: propagate the owner token
                if jr.owner.node and jr.owner.node.children is not None:
                    for child in jr.owner.node.children:
                        child.left_activation(jr.owner, None, jr.owner.binding)
        self.working_memory.remove(wme)
    def build_or_share_alpha_memory(self, condition):
        """
        Returns the alpha memory for the condition's constant tests, creating
        it (pre-seeded with matching WMEs) when it does not exist yet.
        :type condition: Condition
        :rtype: AlphaMemory
        """
        # variables become wildcards in the hash key
        id_test = '*'
        attr_test = '*'
        value_test = '*'
        if not isinstance(condition.identifier, V):
            id_test = condition.identifier
        if not isinstance(condition.attribute, V):
            attr_test = condition.attribute
        if not isinstance(condition.value, V):
            value_test = condition.value
        key = (id_test, attr_test, value_test)
        if key in self.alpha_hash:
            return self.alpha_hash[key]
        self.alpha_hash[key] = AlphaMemory()
        self.alpha_hash[key].key = key
        # seed the new memory with matching WMEs already in working memory
        for w in self.working_memory:
            if condition.test(w):
                self.alpha_hash[key].activation(w)
        return self.alpha_hash[key]
    def build_or_share_join_node(self, parent: BetaMemory, amem: AlphaMemory,
                                 condition: Cond) -> JoinNode:
        """Returns an equivalent existing join node or builds a new one."""
        for child in parent.all_children:
            if (type(child) == JoinNode and child.amem == amem and
                    child.condition == condition):
                return child
        node = JoinNode(children=[], parent=parent, amem=amem,
                        condition=condition)
        parent.children.append(node)
        parent.all_children.append(node)
        amem.successors.append(node)
        amem.reference_count += 1
        node.update_nearest_ancestor_with_same_amem()
        # unlink the node on whichever side currently has no items
        if not parent.items:
            amem.successors.remove(node)
        elif not amem.items:
            parent.children.remove(node)
        return node
    def build_or_share_negative_node(self, parent: JoinNode, amem: AlphaMemory,
                                     condition: Neg) -> NegativeNode:
        """Returns an equivalent existing negative node or builds a new one."""
        for child in parent.children:
            if (isinstance(child, NegativeNode) and child.amem == amem and
                    child.condition == condition):
                return child
        node = NegativeNode(parent=parent, amem=amem, condition=condition)
        parent.children.append(node)
        amem.successors.append(node)
        amem.reference_count += 1
        node.update_nearest_ancestor_with_same_amem()
        self.update_new_node_with_matches_from_above(node)
        # right-unlink when the node holds no items
        if not node.items:
            amem.successors.remove(node)
        return node
    def build_or_share_beta_memory(self, parent: ReteNode) -> BetaMemory:
        """Returns parent's beta-memory child, creating it if necessary."""
        for child in parent.children:
            # if isinstance(child, BetaMemory): # Don't include subclasses
            if type(child) == BetaMemory:
                return child
        node = BetaMemory(parent=parent)
        parent.children.append(node)
        self.update_new_node_with_matches_from_above(node)
        return node
    def build_or_share_p(self, parent: ReteNode, prod: Production) -> PNode:
        """Returns parent's production node, creating it if necessary."""
        for child in parent.children:
            if isinstance(child, PNode):
                return child
        node = PNode(production=prod, parent=parent)
        parent.children.append(node)
        self.update_new_node_with_matches_from_above(node)
        return node
    def build_or_share_ncc_nodes(self, parent: JoinNode, ncc: Ncc,
                                 earlier_conds: List[Cond]
                                 ) -> NccNode:
        """Builds (or shares) the NCC node / partner pair for a negated
        conjunctive condition, including its subnetwork."""
        bottom_of_subnetwork = self.build_or_share_network_for_conditions(
            parent, ncc, earlier_conds)
        for child in parent.children:
            if (isinstance(child, NccNode) and child.partner.parent ==
                    bottom_of_subnetwork):
                return child
        ncc_partner = NccPartnerNode(parent=bottom_of_subnetwork)
        ncc_node = NccNode(partner=ncc_partner, children=[], parent=parent)
        ncc_partner.ncc_node = ncc_node
        # the NCC node goes first so it is activated before parent's other
        # children
        parent.children.insert(0, ncc_node)
        bottom_of_subnetwork.children.append(ncc_partner)
        ncc_partner.number_of_conditions = ncc.number_of_conditions
        # update the NCC node first so it has tokens for the partner to match
        self.update_new_node_with_matches_from_above(ncc_node)
        self.update_new_node_with_matches_from_above(ncc_partner)
        return ncc_node
    def build_or_share_filter_node(self, parent: ReteNode,
                                   f: Filter) -> FilterNode:
        """Returns an equivalent filter node or builds a new one."""
        for child in parent.children:
            if isinstance(child, FilterNode) and child.func == f.func:
                return child
        node = FilterNode([], parent, f.func, self)
        parent.children.append(node)
        return node
    def build_or_share_bind_node(self, parent: ReteNode, b: Bind) -> BindNode:
        """Returns an equivalent bind node or builds a new one."""
        for child in parent.children:
            if (isinstance(child, BindNode) and child.func == b.func and
                    child.bind == b.to):
                return child
        node = BindNode([], parent, b.func, b.to, self)
        parent.children.append(node)
        return node
    def build_or_share_network_for_conditions(self, parent: ReteNode,
                                              rule: Union[Ncc, List[Cond]],
                                              earlier_conds: List[Cond]
                                              ) -> ReteNode:
        """Builds the beta-network chain for a sequence of conditions,
        dispatching on condition type, and returns its bottom-most node."""
        current_node = parent
        conds_higher_up = earlier_conds
        for cond in rule:
            if isinstance(cond, Cond) and not isinstance(cond, Neg):
                current_node = self.build_or_share_beta_memory(current_node)
                am = self.build_or_share_alpha_memory(cond)
                current_node = self.build_or_share_join_node(current_node, am,
                                                             cond)
            elif isinstance(cond, Neg):
                am = self.build_or_share_alpha_memory(cond)
                current_node = self.build_or_share_negative_node(current_node,
                                                                 am, cond)
            elif isinstance(cond, Ncc):
                current_node = self.build_or_share_ncc_nodes(current_node,
                                                             cond,
                                                             conds_higher_up)
            elif isinstance(cond, Filter):
                current_node = self.build_or_share_filter_node(current_node,
                                                               cond)
            elif isinstance(cond, Bind):
                current_node = self.build_or_share_bind_node(current_node,
                                                             cond)
            conds_higher_up.append(cond)
        return current_node
    def update_new_node_with_matches_from_above(self, new_node: ReteNode
                                                ) -> None:
        """Primes a freshly built node with the matches its parent already
        holds, dispatching on the parent's node type."""
        parent = new_node.parent
        if parent == self.beta_root:
            new_node.left_activation(None, None, {})
        elif (isinstance(parent, BetaMemory) and
                not isinstance(parent, (NccNode, NegativeNode))):
            for tok in parent.items:
                new_node.left_activation(token=tok)
        elif (isinstance(parent, JoinNode) and
                not isinstance(parent, NegativeNode)):
            # temporarily route the join node's output to only the new node
            saved_list_of_children = parent.children
            parent.children = [new_node]
            for item in parent.amem.items:
                parent.right_activation(item)
            parent.children = saved_list_of_children
        elif isinstance(parent, NegativeNode):
            for token in parent.items:
                if not token.join_results:
                    new_node.left_activation(token, None, token.binding)
        elif isinstance(parent, NccNode):
            for token in parent.items:
                if not token.ncc_results:
                    new_node.left_activation(token, None, token.binding)
        elif isinstance(parent, (BindNode, FilterNode)):
            # replay the parent's own inputs with only the new node attached
            saved_list_of_children = parent.children
            parent.children = [new_node]
            self.update_new_node_with_matches_from_above(parent)
            parent.children = saved_list_of_children
    def delete_alpha_memory(self, amem: AlphaMemory):
        """Drops an alpha memory from the hash once nothing references it."""
        del self.alpha_hash[amem.key]
    def delete_node_and_any_unused_ancestors(self, node: ReteNode):
        """Deletes a node, its tokens, and recursively any ancestor left
        without children."""
        if isinstance(node, NccNode):
            self.delete_node_and_any_unused_ancestors(node.partner)
        if isinstance(node, BetaMemory):
            for item in node.items:
                item.delete_token_and_descendents()
        if isinstance(node, NccPartnerNode):
            for item in node.new_result_buffer:
                item.delete_token_and_descendents()
        if isinstance(node, JoinNode) and not isinstance(node, NegativeNode):
            if not node.right_unlinked:
                node.amem.successors.remove(node)
            node.amem.reference_count -= 1
            if node.amem.reference_count == 0:
                self.delete_alpha_memory(node.amem)
            if not node.left_unlinked:
                node.parent.children.remove(node)
            node.parent.all_children.remove(node)
            if not node.parent.all_children:
                self.delete_node_and_any_unused_ancestors(node.parent)
        elif node.parent:
            node.parent.children.remove(node)
            if not node.parent.children:
                self.delete_node_and_any_unused_ancestors(node.parent)
|
import time
from enum import IntEnum
from typing import Tuple, Any
import pygame as pg
from pygame.surface import Surface
from bomber_monkey.features.board.board import Board
from bomber_monkey.features.board.board_display_system import BoardDisplaySystem
from bomber_monkey.features.board.board_system import BoardSystem
from bomber_monkey.features.bomb.bomb_explosion_system import BombExplosionSystem, ExplosionPropagationSystem
from bomber_monkey.features.bomb.bomb_sound_system import BombSoundSystem
from bomber_monkey.features.destruction.destruction_system import DestructionSystem
from bomber_monkey.features.destruction.protection_system import ProtectionSystem
from bomber_monkey.features.display.image_display_system import ImageDisplaySystem
from bomber_monkey.features.display.score_display_system import PlayerScoreDisplaySystem
from bomber_monkey.features.display.sprite_display_system import SpriteDisplaySystem, SpriteSetDisplaySystem
from bomber_monkey.features.display.startup_count_down_display_system import StartupCountDownDisplaySystem
from bomber_monkey.features.display.title_bar_display_system import TitleBarDisplaySystem
from bomber_monkey.features.items.banana import BananaSystem
from bomber_monkey.features.items.immunity import ImmunityItemSystem
from bomber_monkey.features.items.reverse_control import ReserveControlItemSystem
from bomber_monkey.features.items.speed_down import SpeedDownItemSystem
from bomber_monkey.features.items.speed_up import SpeedUpItemSystem
from bomber_monkey.features.items.stronger import StrongerItemSystem
from bomber_monkey.features.lifetime.lifetime_system import LifetimeSystem
from bomber_monkey.features.physics.collision import Collision
from bomber_monkey.features.physics.collision_physic import PlayerCollisionWithDTPhysic
from bomber_monkey.features.physics.physic_system import PhysicSystem
from bomber_monkey.features.controller.controller_system import ControllerSystem
from bomber_monkey.features.player.crunch import CrunchSystem, NoCrunchSystem
from bomber_monkey.features.player.player import Player
from bomber_monkey.features.player.player_animator import PlayerAnimatorSystem
from bomber_monkey.features.player.players_config import PlayersConfig
from bomber_monkey.features.tile.tile_killer_system import TileKillerSystem
from bomber_monkey.game_config import GameConfig
from bomber_monkey.game_factory import GameFactory
from bomber_monkey.game_scores import GameScores, GameRoundResult
from bomber_monkey.states.app_state import AppState, AppTransitions
from python_ecs.ecs import Simulator
class GameState(AppState):
    """Main in-game application state.

    Builds the board, the players and the ECS systems, then advances the
    simulation one frame per call to :meth:`run` and reports transitions
    (pause, round end, game end) to the application state machine.
    """
    def __init__(self,
                 conf: GameConfig,
                 scores: GameScores,
                 screen: Surface,
                 player_config: PlayersConfig
                 ):
        super().__init__()
        self.conf = conf
        self.transition = None
        # keep the running scores across rounds; fresh scores for a new game
        self.scores = scores if scores is not None else GameScores(player_config.nb_players)
        self._sim = Simulator(context=self)
        self.sim.reset()
        self._board = GameFactory.create_board(self.sim)
        self.clock = pg.time.Clock()
        self.start_time = time.time()
        # -1 means "not currently paused"
        self.pause_start_time = -1
        self.paused_time = 0
        for slot, input_mapping in player_config.slot_and_input_mapping:
            GameFactory.create_player(self.sim, slot, input_mapping)
        # gameplay/logic systems, run in this order every frame
        systems = [
            ControllerSystem(),
            BoardSystem(),
            PhysicSystem(PlayerCollisionWithDTPhysic()),
            BombExplosionSystem(),
            ExplosionPropagationSystem(),
            TileKillerSystem(lambda body: GameFactory.create_item(self.sim, body)),
            DestructionSystem(),
            ProtectionSystem(),
            BananaSystem(),
            ImmunityItemSystem(self.conf),
            SpeedUpItemSystem(self.conf),
            SpeedDownItemSystem(self.conf),
            ReserveControlItemSystem(),
            StrongerItemSystem(),
            CrunchSystem(self.conf),
            NoCrunchSystem(),
            PlayerAnimatorSystem(),
            LifetimeSystem()
        ]
        # rendering and audio systems, run after the logic systems
        display_systems = [
            BoardDisplaySystem(self.conf, screen),
            TitleBarDisplaySystem(self.conf, screen),
            PlayerScoreDisplaySystem(screen),
            ImageDisplaySystem(self.conf, screen),
            SpriteDisplaySystem(self.conf, screen, 0),
            SpriteSetDisplaySystem(self.conf, screen, 0),
            SpriteDisplaySystem(self.conf, screen, 1),
            SpriteSetDisplaySystem(self.conf, screen, 1),
            StartupCountDownDisplaySystem(screen),
            BombSoundSystem(),
        ]
        # init simulation (ECS)
        self.sim.reset_systems([
            *systems,
            *display_systems,
        ])
        # collisions are transient: clear them at the start of every frame
        self.sim.start_hooks.append(lambda sim: sim.clear_components(Collision))
    @property
    def sim(self):
        # the ECS simulator driving this game state
        return self._sim
    @property
    def board(self) -> Board:
        # the game board created for this round
        return self._board
    @property
    def game_elapsed_time(self):
        # wall-clock time played, excluding time spent in pause
        return time.time() - self.start_time - self.paused_time
    def pause_game(self):
        """Records the pause start and requests the pause-menu transition."""
        self.pause_start_time = time.time()
        self.transition = (AppTransitions.PAUSE_MENU, self)
    def run(self) -> Tuple[IntEnum, Any]:
        """Advances one frame and returns the next (transition, payload)."""
        if self.pause_start_time > 0:
            # we are getting out of a pause
            self.paused_time += time.time() - self.pause_start_time
            self.pause_start_time = -1
        self.sim.update()
        pg.display.flip()
        self.clock.tick(self.conf.MAX_FPS)
        # no players left: draw; one player left: that player wins the round
        if len(self.board.players) == 0:
            return AppTransitions.ROUND_END, GameRoundResult(self.scores, None)
        if len(self.board.players) == 1:
            winner: Player = self.board.players[0].get(Player)
            self.scores.scores[winner.player_id] += 1
            if self.scores.scores[winner.player_id] == self.conf.winning_score:
                return AppTransitions.GAME_END, GameRoundResult(self.scores, winner.player_id)
            return AppTransitions.ROUND_END, GameRoundResult(self.scores, winner.player_id)
        transition = self.transition
        self.transition = None
        return transition
|
import matplotlib.pyplot as plt
from transform_images import unscale
def view_dataset(images):
    """Show the first 144 images on a 12x12 grid, filled column by column."""
    grid = 12
    _, axes = plt.subplots(grid, grid, sharex=True, sharey=True, figsize=(12, 12))
    for idx in range(grid * grid):
        ax = axes[idx % grid, idx // grid]
        ax.axis("off")
        ax.imshow(images[idx])
    plt.subplots_adjust(wspace=0, hspace=0)
def view_samples(epoch, samples, nrows, ncols, figsize=(5, 5)):
    """Plot the generated samples of one epoch on an nrows x ncols grid."""
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
                             sharey=True, sharex=True)
    for ax, sample in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # un-normalize before display
        ax.imshow(unscale(sample), aspect='equal')
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
def view_epoch_samples(samples, figsize=(5, 5)):
    """Plot one sample (index 3) from each epoch on a 12-column grid.

    Args:
        samples: sequence of per-epoch sample batches
        figsize: matplotlib figure size

    Returns:
        (fig, axes) of the created subplot grid
    """
    epochs = len(samples)
    ncols = 12
    # NOTE: an epoch count not divisible by 12 drops the remainder
    nrows = epochs // ncols
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
                             sharey=True, sharex=True)
    # (removed leftover debug print of len(samples))
    for ax, s in zip(axes.flatten(), samples):
        ax.axis('off')
        img = unscale(s[3])
        ax.imshow(img, aspect='equal')
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
def view_losses(losses):
    """Plot discriminator and generator training losses over time."""
    plt.subplots()
    disc_curve = losses.T[0]
    gen_curve = losses.T[1]
    plt.plot(disc_curve, label='Discriminator', alpha=0.5)
    plt.plot(gen_curve, label='Generator', alpha=0.5)
    plt.title("Training Losses")
    plt.legend()
|
import os, sys
from matplotlib import pyplot as plot
import numpy as np
import matplotlib
from analysis_config import *
numFigs = 0  # running count of matplotlib figures created by this module
numTrials = 0  # total number of fault-injection trials in the database
numTrialsInj = 0  # number of trials in which a fault was actually injected
colors = ['g','b', 'y', 'm', 'c', 'r']  # shared trend colors for all charts
# short and long display names for the fault classifications
TYPES = ["Arith-FP", "Pointer", "Arith-Fix", "Ctrl-Loop", "Ctrl-Branch"]
TYPES_LONG = ["Floating-Point", "Pointer", "Fixed-Point", "Control-Loop", "Control-Branch"]
# maps a classification name (including legacy aliases) to its index in TYPES
typeIdx= {"Arith-FP":0, "Arithmetic":0, "Control": 6, "Pointer":1, "Arith-Fix":2, "Control-Loop":3, "Control-Branch":4}
nClassifications = len(TYPES)
def initVis(c):
    """Perform database operations to cache some values needed in later
    analysis.
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    """
    # NOTE: this module uses Python 2 print statements throughout
    global numTrialsInj, numTrials
    # 1. * converts the counts to float so later ratios are not truncated
    c.execute("SELECT * FROM trials WHERE trials.numInj > 0")
    numTrialsInj = 1. * len(c.fetchall())
    c.execute("SELECT * FROM trials")
    numTrials = 1. * len(c.fetchall())
    if numTrialsInj == 0:
        print "No injections found to visualize..."
        sys.exit(1)
    print "Visualizing ", numTrials, " fault injection trials ("\
        , numTrialsInj,") with faults"
# wrapper around creating a pie chart
def barchart(bins, values, xlabel, ylabel, title, name=None, ticks=None):
    """Wrapper around the matplotlib bar function. Useful when we need bars
    from multiple trends.
    Parameters
    ----------
    bins : np.array
        x values of where the bars should reside 1-D array
    values : np.array
        heights of the bars 2-D array [# trends][ len(bins)]
    xlabel : str
        label to display on the x axis
    ylabel : str
        label to display on the y axis
    title : str
        title of the figure
    name : list str
        trend names of length len(values) # trends
    ticks : list str
        x axis ticks to display of length len(bins)
    See Also
    ----------
    histo()
    """
    global numFigs
    plot.figure(numFigs)
    width = 1./len(values)
    # identity comparison with None per PEP 8 (was `== None`)
    if name is None:
        name = ["" for _ in range(len(values))]
    # `trend` replaces the old loop variable `set`, which shadowed the builtin;
    # `range` replaces Python-2-only `xrange` and works on both versions
    for trend in range(0, len(values)):
        plot.bar(bins+width*trend, values[trend][:], width, label=name[trend], color=colors[trend])
    plot.xlabel(xlabel)
    plot.ylabel(ylabel)
    plot.legend(fancybox = True, shadow=True)
    if ticks is not None:
        plot.xticks(bins+.5, ticks, rotation=60)
    plot.title(title)
    plot.tight_layout()
    numFigs += 1
def piechart(percents, labels, title):
    """Draw a labeled pie chart with bold white percentage text.

    Parameters
    ----------
    percents : array like
        values to plot as percentages
    labels : list of str
        trend names corresponding to the percentages
    title : str
        title of the pie chart
    """
    global numFigs
    plot.figure(numFigs)
    patches, texts, autotexts = plot.pie(percents, labels=labels, autopct='%1.1f%%', colors=colors)
    # Make the in-wedge percentage labels readable on colored wedges.
    for txt in autotexts:
        txt.set_color('w')
        txt.set_weight('bold')
    plot.title(title)
    # Widen the x range by a quarter on each side so labels are not clipped.
    xmin, xmax, ymin, ymax = plot.axis()
    pad = (xmax - xmin) / 4.
    plot.axis((xmin - pad, xmax + pad, ymin, ymax))
    plot.tight_layout()
    numFigs += 1
def histo(values, bins, xlabel, ylabel, title, ticks=None, label=None, c=None):
    """Draw a single-trend bar chart.

    Parameters
    ----------
    values : array like
        1-D bar heights
    bins : array like
        1-D x positions of the bars
    xlabel, ylabel, title : str
        axis labels and figure title
    ticks : list of str, optional
        x-axis tick labels, one per bin
    label : str, optional
        legend label for the trend
    c : char, optional
        trend color (only used together with `label`)

    See Also
    ---------
    barchart
    """
    global numFigs
    plot.figure(numFigs)
    bar_width = .8
    # Label/color are applied only when both were supplied, as before.
    bar_kwargs = {"align": "center"}
    if c is not None and label is not None:
        bar_kwargs["label"] = label
        bar_kwargs["color"] = c
    plot.bar(bins, values, bar_width, **bar_kwargs)
    plot.xlabel(xlabel)
    plot.ylabel(ylabel)
    if ticks is not None:
        plot.xticks(bins, ticks, rotation=60)
    plot.legend(loc='upper left', fancybox=True, shadow=True)
    plot.title(title)
    plot.tight_layout()
    numFigs += 1
def visClassifications(c, moreDetail=None):
    """Graphs of what types of faults were injected. Classification based
    on FlipIt classification.
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    moreDetail : list of str
        function names to generate extra analysis of injections inside them
    Notes
    ----------
    More detail currently not implemented.
    """
    # One bucket per classification plus a final bucket for unknown types.
    typeBuckets = np.zeros(nClassifications + 1)
    bits = np.zeros((nClassifications, 64))
    c.execute("SELECT site, bit FROM injections")
    injs = c.fetchall()
    if len(injs) == 0:
        print "Error in visClassifications: No Injections\n"
        return
    maximum = max(injs)[0] +1
    # Per-type histogram over injection sites; filled below but never plotted.
    locs = np.zeros((nClassifications, maximum))
    c.execute("SELECT type FROM sites INNER JOIN injections ON sites.site = injections.site")
    types = c.fetchall()
    # NOTE(review): assumes `types` rows line up 1:1 with `injs` rows -- both
    # queries must return injections in the same order; confirm.
    for i in range(len(injs)):
        type = types[i][0]
        site = injs[i][0]
        bit = injs[i][1]
        #print type
        #print typeIdx
        if type in typeIdx:
            idx = typeIdx[type]
            if idx == 6:
                # Sentinel 6 ("Control") is folded into Control-Branch (4).
                print "Warning: mapping type (", type,\
                    ") to type ( Control-Branch )"
                idx = 4
            typeBuckets[idx] += 1
            locs[idx][site] += 1
            bits[idx][bit] += 1
        else:
            print "VIZ: not classified = ", i
            typeBuckets[nClassifications] += 1
    fracs = typeBuckets/np.sum(typeBuckets)
    # Drop the trailing "unclassified" bucket from the pie chart.
    piechart(fracs[0:-1], TYPES_LONG, "Classification of Injections Based on Type")
    barchart(np.linspace(0,64,num=64), bits, "Bit Location", "Frequency", "Injected bit", TYPES)
    plot.xlim((0,64))
def visFunctions(c, moreDetail=None):
    """Graphs percentages of what functions faults were injected into,
    plus a stacked bar chart of the injection-type mix per function.
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    moreDetail : list of str
        function names to generate extra analysis of injections inside them
    """
    global numFigs
    c.execute("SELECT DISTINCT function FROM sites INNER JOIN injections ON injections.site = sites.site")
    funcs = c.fetchall()
    values = []
    # Per-function injection counts (floats so the division below is exact).
    for i in funcs:
        c.execute("SELECT COUNT(trial) FROM injections INNER JOIN sites ON sites.site = injections.site AND sites.function = ?", i)
        values.append(1. * c.fetchone()[0])
    piechart(np.array(values)/sum(values), [i[0] for i in funcs], "Injected Functions")
    ind = 0
    width = .5
    fig = plot.figure(numFigs)
    ax = plot.subplot(111)
    for i in funcs:
        i = i[0]
        c.execute("SELECT type FROM sites INNER JOIN injections ON sites.site = injections.site AND sites.function = ?", (i,))
        types = c.fetchall()
        tot = float(len(types))
        # NOTE(review): the np.zeros assignment is dead -- it is immediately
        # replaced by the list comprehension on the next line.
        per = np.zeros(nClassifications)
        per = [ 0 for i in xrange(nClassifications)]
        for t in types:
            #per[typeIdx[t[0]]] += 1.
            idx = typeIdx[t[0]]
            if idx == 6:
                # Fold the "Control" sentinel into Control-Branch.
                print "Warning: mapping type ( Control ) to type "\
                    "( Control-Branch )"
                idx = 4
            per[idx] += 1
        per = np.array(per)/tot * 100
        btm = 0
        # `legend` is rebuilt each iteration; only the last function's bar
        # handles are used for the legend below (all functions share colors).
        legend = []
        for t in xrange(0,nClassifications):
            p1 = ax.bar(ind, per[t], width, align='center', color=colors[t], bottom=btm)
            btm += per[t]
            legend.append(p1)
        ind += 1
    ax.set_xticks(np.arange(len(funcs)))
    ax.set_xticklabels([f[0] for f in funcs], rotation=60, ha='center')
    numFigs += 1
    ax.set_ylim((0,100))
    ax.set_ylabel("Percent")
    # shrink graph to add legend and title at top
    plot.tight_layout()
    box = ax.get_position()
    ax.set_position([box.x0, box.y0,
        box.width, box.height * 0.8])
    ax.legend(legend, TYPES, loc='upper center', bbox_to_anchor=(0.5, 1.15),
        fancybox=True, shadow=True, ncol=5)
    plot.setp(plot.gca().get_legend().get_texts(), fontsize='x-small')
    plot.text(0.5, 1.15, "Breakdown of Injection Type per Function",
        horizontalalignment='center', fontsize=14, transform = ax.transAxes)
    # more detail for a function creates an html file with the source
    # code colored based on injection percentage
    if moreDetail != None:
        visInjectionsInCode(c, moreDetail)
def visInjectionsInCode(c, functions):
    """Creates an html file ("more.html") with the source code colored based
    on injection percentages.
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    functions : list of str
        function names to generate extra analysis of injections inside them
    """
    outfile = open("more.html", 'w')
    outfile.write("<!DOCTYPE html>\n<html>\n<body>\n")
    for func in functions:
        # grab all injections in this function
        c.execute("SELECT file, line FROM sites INNER JOIN injections ON sites.site = injections.site AND sites.function = ?", (func,))
        result = c.fetchall()
        if len(result) == 0:
            print "Warning (visInjectionsInCode): no injections in target function -- ", func
            continue
        # locate the min and max source line num to shrink output file size
        # we only want to show the section of code that we inject in
        lines = [i[1] for i in result]
        file = result[0][0]
        # Prefer the last row's file name when the first is an LLVM dump.
        if ".LLVM.txt" in file:
            file = result[-1][0]
        minimum = np.min(lines)-1
        minimum = minimum if minimum >= 0 else 0
        maximum = np.max(lines)+1
        bins = np.arange(minimum, maximum+1)
        values, bins = np.histogram(lines, bins, density=False) # <------------ check here
        bins = np.arange(minimum, maximum)
        values = 1.*values/np.sum(values)*100 # percents
        histo(values, bins, "Source Line Number", "Percent",\
            "Injections mapped to source line numbers for function: " + func)
        outfile.write("<h1>" + func + "()</h1>\n<table>\n")
        if minimum == 0:
            # Line 0 means the injection could not be mapped to source; skip
            # ahead to the first real source line.
            outfile.write("Unable to assign " + str(values[0]) + "\% of injections to source code.\n")
            minimum = np.min(np.trim_zeros(lines)) - 1
            values = values[minimum:]
        outfile.write("<table>\n")
        if os.path.isfile(file):
            srcPath = ""
            if not os.path.isfile(srcPath+file):
                print "Warning (visInjectionsInCode): source file not found -- ", srcPath + file
                continue
            print "\nRelating injections to source code in file: ", srcPath+file
            FILE = open(srcPath+file, "r")
            function = FILE.readlines()[minimum:maximum]
            FILE.close()
            # Emit one color-coded table row per source line; row color
            # darkens with the injection percentage (see getColor).
            for i in range(1,len(function)):
                color = "bgcolor=\"" + getColor(values[i]) +"\""
                outfile.write("<tr " + color +">\n<td>"+ str(minimum+i) +\
                    "</td>\n<td><code>" + str2html(function[i-1]) + "</code></td>\n<td>"\
                    + str(values[i]) + "</td>\n</tr>\n")
            outfile.write("</table>\n")
    outfile.write("</body>\n</html>\n")
    outfile.close()
def str2html(s):
    """Replace '&', '<', and '>' with their HTML entity equivalents.

    Parameters
    ----------
    s : str
        string to convert to a valid html string to display properly

    Notes
    ----------
    '&' must be escaped first so already-produced entities are not
    double-escaped. The previous version replaced each character with
    itself (the entities had been lost), making it a no-op.
    """
    return s.replace("&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")
def getColor(x):
    """Select an html color based on 0 <= x <= 100.

    Parameters
    ----------
    x : float
        percent of injections to color visually; the higher the percent the
        darker the color

    Returns
    ----------
    html color name useful for classifying
    """
    # Thresholds are checked from darkest to lightest; first match wins.
    thresholds = ((75, "red"), (50, "orange"), (25, "yellow"), (5, "lime"))
    for cutoff, color in thresholds:
        if x >= cutoff:
            return color
    return "white"
# graphs percentage of trials that crashed
def visCrashes(c):
    """Graph percentage of trials that crashed, and bit location and type
    of the corresponding injection
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    Notes
    ----------
    Only considers trials with injections when calculating percentages
    """
    bits = np.zeros((nClassifications, 64))
    c.execute("SELECT type FROM sites INNER JOIN injections ON sites.site = injections.site INNER JOIN trials ON trials.trial = injections.trial AND trials.crashed = 1")
    crash = c.fetchall()
    crashed = float(len(crash))
    c.execute("SELECT site, bit FROM injections INNER JOIN trials ON injections.trial = trials.trial AND trials.crashed = 1")
    sitesBits = c.fetchall()
    # NOTE(review): assumes `crash` rows line up 1:1 with `sitesBits` rows --
    # both queries must return crashed injections in the same order; confirm.
    for i in range(len(sitesBits)):
        type = crash[i][0]
        bit = sitesBits[i][1]
        #bits[typeIdx[type]][bit] += 1
        idx = typeIdx[type]
        if idx == 6:
            # Fold the "Control" sentinel into Control-Branch.
            print "Warning: mapping type ( Control ) to type "\
                "( Control-Branch )"
            idx = 4
        bits[idx][bit] += 1
    piechart([(numTrialsInj - crashed)/numTrialsInj, crashed/numTrialsInj],\
        ["Didn't Crash", "Crashed"], "Unexpected Termination")
    barchart(np.linspace(0,64,num=64), bits, "Bit Location", "Frequency", "Unexpected Termination: Injected bit", TYPES)
    plot.legend(loc='upper left', fancybox = True, shadow=True)
    plot.xlim((0,64))
    # One extra single-trend chart per classification that actually crashed.
    for i in range(nClassifications):
        if np.sum(bits[i][:]) > 0:
            histo(bits[i][:], np.linspace(0, 64, num=64), "Bit Location", "Frequency", "Unexpected Termination", None, TYPES[i], colors[i])
            plot.xlim((0,64))
    #plot.legend('upper left')
# graphs percent of trials that threw at least 1 assert
def visAsserts(c):
    """Graphs percent of trials that threw at least 1 assert
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    Notes
    ----------
    Only considers trials with injections when calculating percentages
    """
    # Signal number 6 is SIGABRT, which a failed assert raises.
    # NOTE(review): other queries in this module use table "signals" (plural);
    # confirm whether "signal" here is the intended table name.
    c.execute("SELECT DISTINCT trial from signal WHERE num == 6")
    asserts = len(c.fetchall())
    # NOTE(review): the second fraction uses numTrials in the numerator but
    # numTrialsInj in the denominator, so the slices need not sum to 1; verify.
    piechart([asserts/numTrialsInj, (numTrials - asserts)/numTrialsInj],\
        ["Failed Assert(s)", "Didn't Assert"], "Trials with Injections Asserting")
# graphs percent of trials that generated at a certian signal type that is reported in the output file
def visSignals(c):
    """Graphs the percent of trials that generated a certain signal type
    reported in the output file
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    Notes
    ----------
    If a trial generates multiple signals, e.g. each rank asserts, we regard
    this as a single signal for the trial.
    """
    numSigs = 0.
    sigs = {}
    # Distinct (trial, num) pairs: a trial raising the same signal repeatedly
    # counts once, but a trial raising two different signals counts twice.
    c.execute("SELECT DISTINCT trial, num FROM signals")
    #build histogram for what signals were raised
    signals = c.fetchall()
    for pair in signals:
        s = pair[1]
        numSigs += 1
        if s in sigs:# and s != 11:
            sigs[s] += 1
        else:
            sigs[s] = 1.
    # NOTE(review): "No Signal" uses numTrials in the numerator with the
    # numTrialsInj denominator -- fractions need not sum to 1; verify intent.
    fracs = [(numTrials - numSigs)/numTrialsInj]
    labels = ["No Signal"]
    for s in sigs:
        fracs.append(sigs[s]/numTrialsInj)
        labels.append("Signal " + str(s))
    c.execute("SELECT DISTINCT trial from signals")
    unique = len(c.fetchall())
    piechart(fracs, labels, str(unique) + " Trials Signaling")
def visDetections(c, moreDetail=None):
    """Graph the fraction of injections that raised a detection.

    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    moreDetail : list of str
        function names to generate extra analysis of detections inside them

    Notes
    ----------
    Assumes one injection per trial.
    TODO: add graphs for more detail option
    """
    #if moreDetail != None:
    #    print "TODO: implement more detail option for 'visDetections'"
    c.execute("SELECT COUNT(trial) FROM trials WHERE detection = 1")
    num_detected = float(c.fetchone()[0])
    c.execute("SELECT SUM(numInj) FROM trials")
    total_inj = float(c.fetchone()[0])
    detected_frac = num_detected / total_inj
    missed_frac = (total_inj - num_detected) / total_inj
    piechart([detected_frac, missed_frac],
        ["Detected", "Didn't Detect"],
        "Number of Trials with Detection (" + str(num_detected) + ")")
def visDetectedInjections(c, moreDetail=None):
    """Graphs bit locations and type of what injections were detected
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    moreDetail : list of str
        function names to generate extra analysis of detections inside them
    Notes
    ----------
    TODO: add graphs for more detail option
    TODO: visualize injection sites detected
    TODO: visualize injection types detected
    Allows for user custom visualizations after first 'plot.show()'
    """
    #if moreDetail != None:
    #    print "TODO: implement more detail option for 'visDetectedInjections'"
    bits = np.zeros((nClassifications,64))
    c.execute("SELECT site, bit FROM injections INNER JOIN trials ON injections.trial = trials.trial AND trials.detection = 1")
    injs = c.fetchall()
    c.execute("SELECT type FROM sites INNER JOIN injections ON sites.site = injections.site INNER JOIN trials ON injections.trial = trials.trial AND trials.detection = 1")
    types = c.fetchall()
    # NOTE(review): assumes `types` rows line up 1:1 with `injs` rows; confirm
    # both queries return detected injections in the same order.
    for i in range(len(injs)):
        type = types[i][0]
        site = injs[i][0]
        bit = injs[i][1]
        #bits[typeIdx[type]][bit] += 1
        idx = typeIdx[type]
        if idx == 6:
            # Fold the "Control" sentinel into Control-Branch.
            print "Warning: mapping type ( Control ) to type "\
                "( Control-Branch )"
            idx = 4
        bits[idx][bit] += 1
    # `site` is collected but not yet visualized (see TODOs above).
    barchart(np.linspace(0,64,num=64), bits, "Injected bit", "Frequency", "Detected Injection Bit Location", TYPES)
    plot.xlim((0,64))
def visDetectionLatency(c):
    """Visualizes the detection latency of an injection in the form of
    a bar chart with the x-axis as number of instructions executed after
    injection.
    Parameters
    ----------
    c : object
        sqlite3 database handle that is open to a valid filled database
    Notes
    ----------
    Assumes the user modified the latency value in the detections table. It can
    be calculated by
    'LLVM_dynamic_inst_of_detection - LLVM_dynamic_inst_of_injection'.
    The latter can be obtained from the injection table for the trial, and the
    former can be obtained at detection time through the FlipIt API call
    'FLIPIT_GetDynInstCount()'.
    """
    #TODO: Extend to look at latency for each detector
    c.execute("SELECT latency FROM detections")
    # Bucket edges: -1 (unknown), exact counts 0-5, then coarse magnitude bins.
    buckets = [-1, 0, 1, 2, 3, 4, 5, 10, 1e2, 1e3, 1e9, 1e13]
    data = [ i[0] for i in c.fetchall()]
    # density=False requests raw counts; the deprecated 'normed' kwarg was
    # removed in NumPy 1.24 and the rest of this module already uses 'density'.
    values, bins = np.histogram(data, buckets, density=False)
    xlabel = "# of instrumented LLVM instructions till detection"
    ylabel = "Frequency"
    title = "Detection Latency"
    ticks = ["-1", "0", "1", "2", "3", "4", "5->", "10->", "1e2->", "1e3->", "1e9->"]
    # Re-bin to consecutive integer positions so the ticks line up one-per-bar.
    bins = np.arange(0,11)
    histo(values, bins, xlabel, ylabel, title, ticks)
|
from bitcoin import *
import hashlib
import json
from time import time
import requests
from src.proof_of_work import ProofOfWork
from src.mempool import Mempool
from src.network import Network
class Blockchain:
    """A minimal proof-of-work blockchain.

    Blocks are plain dicts chained by SHA-256 hashes. Pending transactions
    live in the mempool until mined into a block; consensus replaces the
    local chain with the longest valid chain found among neighbour nodes.
    """

    def __init__(self):
        self.chain = []
        # Instantiate mempool
        self.mempool = Mempool()
        # Instantiate the network of nodes which are connected to his node
        self.network = Network()
        # Create the genesis block (fixed previous hash, nonce 0)
        self.create_block(
            nonce=0,
            previous_block_hash='86a4be451d0e4ae83bcd72e1eb5308b19a4b270f95c25d752927341f7632a1cc'
        )

    def create_block(self, nonce, previous_block_hash=None, transactions_of_block=None):
        """
        Create a new block and append it to the chain.
        :param nonce: <int> The nonce calculated by the Proof of Work algorithm
        :param previous_block_hash: (Optional) <str> Hash of previous block;
            defaults to the hash of the current chain tip
        :param transactions_of_block: <list> Transactions included in the block
        :return: block: <dict> Created and appended block
        """
        block = {
            'index': len(self.chain) + 1,
            'timestamp': time(),
            'nonce': nonce,
            'transactions_hash': self.mempool.hash(transactions_of_block),
            'previous_block_hash': previous_block_hash or self.hash(self.chain[-1]),
            'transactions': transactions_of_block
        }
        # Reset the mempool -- its pending transactions are now in the block
        self.mempool.current_transactions = []
        # Add the new block to the end of the chain
        self.chain.append(block)
        return block

    @staticmethod
    def hash(block):
        """
        Calculate a SHA-256 hash of a block.
        :param block: <dict> Block
        :return: <str> Hash of the block
        """
        # Order the block keys to avoid inconsistent hashes
        block_encoded = json.dumps(block, sort_keys=True).encode()
        return hashlib.sha256(block_encoded).hexdigest()

    @property
    def last_block(self):
        """The most recently appended block (the chain tip)."""
        return self.chain[-1]

    def reach_consensus(self):
        """
        Algorithm used to reach consensus in the network.
        The current chain of the node will be replaced if
        a longer valid chain exists in the network.
        :return: <bool> True if chain was replaced, False if not
        """
        neighbour_nodes = self.network.nodes
        longest_chain_length = len(self.chain)
        longer_chain = None
        # Query all the nodes in the network and request their chain
        # NOTE(review): requests.get has no timeout -- a dead node can block
        # consensus indefinitely; consider passing timeout=.
        for node in neighbour_nodes:
            response = requests.get(f'http://{node}/chain')
            if response.status_code == 200:
                # Parse the body once instead of twice
                payload = response.json()
                length = payload['length']
                chain = payload['chain']
                # Check if the chain is longer and valid
                if length > longest_chain_length and self.valid_chain(chain):
                    longest_chain_length = length
                    longer_chain = chain
        # Replace the chain if there is a longer and valid chain in the network
        if longer_chain:
            self.chain = longer_chain
            return True
        return False

    def valid_chain(self, chain):
        """
        Validate a given blockchain by checking the hash, the Proof of Work and the transactions for each block.
        :param chain: <list> The blockchain
        :return: <bool> True if valid, False if not
        """
        previous_block = chain[0]
        block_index = 1
        while block_index < len(chain):
            current_block = chain[block_index]
            # Validate the hash of the block
            if current_block['previous_block_hash'] != self.hash(previous_block):
                return False
            # Validate the Proof of Work
            if not ProofOfWork.valid_proof(current_block['transactions_hash'],
                                           self.hash(previous_block),
                                           current_block['nonce']):
                return False
            # Validate the transactions of the block
            if current_block['transactions']:
                coinbase_transactions = 0
                for current_transaction in current_block['transactions']:
                    # Count the amount of coinbase transactions in the block
                    if current_transaction['sender'] == "0":
                        coinbase_transactions += 1
                    if not self.valid_transaction(current_transaction, chain, block_index):
                        return False
                # If the block contains more than one coinbase transaction return False
                if coinbase_transactions > 1:
                    return False
            previous_block = current_block
            block_index += 1
        return True

    def valid_transaction(self, signed_transaction, chain, block_index):
        """
        Validate the transaction on the blockchain.
        First the signature of the transaction is verified then it is
        checked if the sender had enough funds at the time of the transaction.
        :param signed_transaction: <dict> Signed transaction
        :param chain: <list> The blockchain
        :param block_index: <int> Index of a block
        :return: <bool> True if the transaction is valid, False if not
        """
        sender = signed_transaction['sender']
        amount = signed_transaction['amount']
        # Sender "0" marks a coinbase (mining reward) transaction
        if sender == "0":
            # Check if the miner rewarded himself more than 10 coins
            if amount < 0 or amount > 10:
                return False
            else:
                return True
        else:
            # Verify the signature of the transaction
            if self.valid_signature(signed_transaction):
                # The transaction is invalid if the amount is negative
                if amount < 0:
                    return False
                # Get the balance of the sender at the time of the transaction
                balance = self.address_balance_at_block_index(sender, chain, block_index)
                if amount <= balance:
                    return True
                else:
                    return False
            else:
                return False

    @staticmethod
    def valid_signature(signed_transaction):
        """
        Verify the signature of a signed transaction.
        :param signed_transaction: <dict> Signed transaction contains sender, recipient, amount and signature
        :return: <bool> True if the signature is valid, False if not
        """
        transaction_content = {
            'sender': signed_transaction['sender'],
            'recipient': signed_transaction['recipient'],
            'amount': signed_transaction['amount']
        }
        # Order the transaction to avoid inconsistent hashes
        transaction_encoded = json.dumps(transaction_content, sort_keys=True).encode()
        transaction_hash = hashlib.sha256(transaction_encoded).hexdigest()
        signature = signed_transaction['signature']
        # Get the public key of the sender which is needed for the verification.
        # ecdsa_recover raises on malformed signatures; treat any failure as an
        # invalid signature (was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt).
        try:
            public_key_sender = ecdsa_recover(transaction_hash, signature)
        except Exception:
            return False
        return ecdsa_verify(transaction_hash, signature, public_key_sender)

    @staticmethod
    def address_balance_at_block_index(address, chain, block_index):
        """
        Calculate the balance of an address up to a certain block
        index by adding inputs to the balance and subtracting outputs.
        :param address: <str> Address
        :param chain: <list> The blockchain
        :param block_index: <int> The index of a block (exclusive upper bound)
        :return: balance: <int> Balance of an address
        """
        # Start at 1: the genesis block carries no spendable transactions
        index = 1
        balance = 0
        while index < block_index:
            current_block = chain[index]
            if current_block['transactions']:
                transactions = current_block['transactions']
                for current_transaction in transactions:
                    if current_transaction['sender'] == address:
                        balance -= current_transaction['amount']
                    if current_transaction['recipient'] == address:
                        balance += current_transaction['amount']
            index += 1
        return balance

    @staticmethod
    def address_transactions_at_block_index(address, chain, block_index):
        """
        Get the sent and received transactions of an address up to a
        certain block index and calculate the number of transactions
        :param address: <str> Address
        :param chain: <list> The blockchain
        :param block_index: <int> The index of a block (exclusive upper bound)
        :return: number_of_transactions: <int> Total number of transactions,
                 send_transactions: <list> List of sent transactions,
                 received_transactions: <list> List of received transactions
        """
        index = 1
        number_of_transactions = 0
        send_transactions = []
        received_transactions = []
        while index < block_index:
            current_block = chain[index]
            if current_block['transactions']:
                transactions = current_block['transactions']
                for current_transaction in transactions:
                    if current_transaction['sender'] == address:
                        send_transactions.append(current_transaction)
                        number_of_transactions += 1
                    elif current_transaction['recipient'] == address:
                        received_transactions.append(current_transaction)
                        number_of_transactions += 1
            index += 1
        return number_of_transactions, send_transactions, received_transactions
|
from django.contrib import admin
from rest_framework.authtoken.models import Token
from rangefilter.filter import DateRangeFilter
from room.models import Room, Scheduling
from room.forms import SchedulingForm
# Title shown at the top of every Django admin page.
admin.site.site_header = 'Administração Agendador'
@admin.register(Room)
class RoomAdmin(admin.ModelAdmin):
    """Admin configuration for meeting rooms."""
    # Columns shown in the Room change list.
    list_display = ('name', 'capacity', 'description')
    # Fields searched by the admin search box.
    search_fields = ('name',)
@admin.register(Scheduling)
class SchedulingAdmin(admin.ModelAdmin):
    """Admin configuration for room bookings, with date-range filtering."""
    # Custom form that presumably validates booking constraints -- see room.forms.
    form = SchedulingForm
    # Sidebar filters: date-range pickers on start and end dates.
    list_filter = (('start_date', DateRangeFilter), ('end_date', DateRangeFilter))
    # Columns shown in the Scheduling change list.
    list_display = ('start_date', 'end_date', 'room', 'username', 'email')
    # Fields searched by the admin search box.
    search_fields = ('username', 'email')
import datetime

# Print January 2, 2000; date objects print in ISO YYYY-MM-DD form.
print(datetime.date(year=2000, month=1, day=2))
|
from celery import Celery

# Shared Celery application. autofinalize=False defers finalization so
# configuration and tasks can still be attached before the app is used.
celery = Celery(__name__, autofinalize=False)
|
import json
from openpyxl import Workbook
from openpyxl.styles import Font, Border, Side, PatternFill, colors, Alignment

# Entity-type tag prefixes, in the order used for the per-class counters and
# for the rows of the summary sheet.
PREFIXES = ['ROCK', 'TECT', 'ALTE', 'PHYS', 'CHEM', 'CHRO', 'MINE', 'DEPO', 'DEEP', 'ELEM']

# Prediction file: one "token gold_tag predicted_tag" triple per line;
# sentence boundaries are lines with fewer than three fields.
res_file = open('../models/lstm_crf/results_dde/score/testa.preds.txt', encoding='utf-8')
wb = Workbook()
wb.create_sheet('分类准确率,召回率,f1值')
ws = wb['Sheet']
ws1 = wb['分类准确率,召回率,f1值']
# data[i] = [gold occurrences, correct predictions, wrong-entity predictions,
#            'O' predictions (misses)] for PREFIXES[i]
data = [[0, 0, 0, 0] for _ in range(len(PREFIXES))]
red_fill = PatternFill("solid", fgColor="CCCCFF")  # fill used to flag wrong predictions
lines = []
while True:
    line = res_file.readline()
    if not line:
        break
    line = line.split()
    lines.append(line)
    word_tags = line
    if len(word_tags) < 3:
        continue
    # One loop replaces the ten copy-pasted per-class blocks of the original.
    for class_idx, prefix in enumerate(PREFIXES):
        if prefix in word_tags[1]:
            data[class_idx][0] += 1
            if word_tags[1] == word_tags[2]:
                data[class_idx][1] += 1      # exact tag match
            elif word_tags[2] != 'O':
                data[class_idx][2] += 1      # predicted a different entity tag
            else:
                data[class_idx][3] += 1      # predicted 'O' (missed entity)
# Close the handle (the original leaked it).
res_file.close()
# First sheet: token / gold / prediction triples, one 3-row band per sentence;
# mismatched predictions are highlighted.
row = 1
col = 1
for i in range(len(lines)):
    sp = lines[i]
    print(sp)
    if len(sp) > 1:
        ws.cell(row, col).value = sp[0]
        ws.cell(row+1, col).value = sp[1]
        ws.cell(row+2, col).value = sp[2]
        if sp[1] != sp[2]:
            ws.cell(row+2, col).fill = red_fill
        col += 1
    else:
        # Sentence boundary: start a new band of rows.
        row += 5
        col = 1
# Summary sheet: precision, recall and F1 per entity class.
ws1.cell(1, 1).value = '类别'
ws1.cell(1, 2).value = '准确率'
ws1.cell(1, 3).value = '召回率'
ws1.cell(1, 4).value = 'f1 值'
for i, prefix in enumerate(PREFIXES):
    ws1.cell(i + 2, 1).value = prefix
for i in range(len(PREFIXES)):
    if data[i][1] == 0:
        # No correct prediction for this class: report zeros to avoid a
        # division by zero below.
        ws1.cell(i+2, 2).value = 0
        ws1.cell(i+2, 3).value = 0
        ws1.cell(i+2, 4).value = 0
    else:
        p = data[i][1] / (data[i][1] + data[i][2])
        r = data[i][1] / (data[i][1] + data[i][3])
        ws1.cell(i+2, 2).value = p
        ws1.cell(i+2, 3).value = r
        ws1.cell(i+2, 4).value = 2*p*r / (p+r)
wb.save('./out_data/ner_train_result.xlsx')
|
from sim.evaluation.metric.PacketMetric import *
from sim.utils.helper import *
class MetricEvaluation:
    """Thin driver that prints a simulation run's configuration and delegates
    the actual evaluation to PacketMetric."""
    def __init__(self):
        pass
    def execute(self,
                sqr_nodes,
                connectivity,
                randomize_boot,
                sec_before_inject,
                sec_after_inject,
                inject_node,
                distance,
                filenamebase):
        """Echo the run parameters and run PacketMetric on the same inputs.

        Parameters mirror PacketMetric.execute; `filenamebase` is the common
        prefix for the run's output files.
        """
        # NOTE(review): logfilename is only printed here; PacketMetric receives
        # filenamebase and presumably derives the log name itself -- confirm.
        logfilename = filenamebase + ".log"
        print "="*40
        print "Executing MetricEvaluation:"
        print "SQR(nodes)\t\t", sqr_nodes
        print "connectivity\t\t", connectivity
        print "randomize_boot\t\t", randomize_boot
        print "sec_before_inject\t", sec_before_inject
        print "sec_after_inject\t", sec_after_inject
        print "inject_node\t\t", inject_node
        print "distance\t\t", distance
        print "logfilename\t\t", logfilename
        print "="*40
        pm = PacketMetric()
        pm.execute(sqr_nodes,
                   connectivity,
                   randomize_boot,
                   sec_before_inject,
                   sec_after_inject,
                   inject_node,
                   distance,
                   filenamebase)
|
# Store features in the database:
# - build a dictionary mapping word id -> word
# - compute TF-IDF scores for each document as (id, score) pairs, mapping ids
#   back to words through the dictionary
# - each document/sentence thus becomes words plus TF-IDF scores; these are
#   the features persisted to the database
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import re
from wordcloud import WordCloud
import nltk
from nltk import word_tokenize, sent_tokenize
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.corpus import wordnet
from gensim import corpora, models
from gensim.models import TfidfModel
import gensim
nltk.download('wordnet')
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
import sys
import os
import shutil
import json
from datetime import datetime
from utils.database import NewsDatabase
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
DATA_PATH = os.path.join(BASE_DIR,'data')
class DataFeatureGenerator():
    """Builds gensim dictionary / bag-of-words / TF-IDF features from
    pre-processed articles and optionally persists them to disk and the
    NewsDatabase."""

    def __init__(self, config):
        """Store the config dict; `config['Storefeatures']` controls whether
        process() persists its artifacts."""
        print('DataFeatureGenerator instantiated')
        self.__config = config
        # Paths of artifact files written by __storeFeatures.
        self.artifactsList = []

    def process(self, df_articles):
        """Build features from df_articles['processed_content'] (an iterable
        of token lists).

        :return: [id2word, corpus, model, processeddata] on success, None on
            any failure.
        """
        try:
            print('Generating features')
            id2word = gensim.corpora.Dictionary(df_articles['processed_content'])
            # Preview the first 10 dictionary entries.
            # NOTE(review): Dictionary.iteritems() exists in gensim 3.x but was
            # removed in gensim 4.0 (use items()) -- confirm the pinned version.
            count = 0
            for key, val in id2word.iteritems():
                print(key,'-->', val)
                count += 1
                if count >= 10:
                    break
            print('show top 10 words by count')
            processeddata = df_articles['processed_content']
            corpus = [id2word.doc2bow(text) for text in processeddata]
            print('gensim bag of words (wordid : wordcount) \n',corpus[:10])
            model = gensim.models.TfidfModel(corpus,id2word)
            print('gensim Tfidf data (wordid : Tfidf value) \n')
            corpus_tfidf = model[corpus]
            for doc in corpus_tfidf[:10]:
                print(doc)
            if self.__config['Storefeatures']:
                self.__storeFeatures(id2word,corpus,model,processeddata)
            return [id2word,corpus,model,processeddata]
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            print(sys.exc_info())
            return None

    def __storeFeatures(self, id2word, corpus, model, processeddata):
        """Save the feature artifacts under data/<today>/ and record their
        paths in the database.

        :return: <bool> True on success, False on any failure.
        """
        print('storing features file paths into database')
        try:
            # Save features into per-day files, replacing any existing ones.
            today = datetime.today().strftime('%Y-%m-%d')
            filepath = os.path.join(DATA_PATH, today)
            if not os.path.exists(filepath):
                os.mkdir(filepath)
            strid2wordfile = os.path.join(filepath,'id2word.dic')
            if os.path.isfile(strid2wordfile):
                os.remove(strid2wordfile)
            strcorpusfile = os.path.join(filepath,'corpus.mm')
            if os.path.isfile(strcorpusfile):
                os.remove(strcorpusfile)
            strmodelfile = os.path.join(filepath,'tfidfmodel.model')
            if os.path.isfile(strmodelfile):
                os.remove(strmodelfile)
            processeddatafile = os.path.join(filepath,'processeddata.csv')
            if os.path.isfile(processeddatafile):
                os.remove(processeddatafile)
            id2word.save(strid2wordfile)
            corpora.MmCorpus.serialize(strcorpusfile,corpus)
            model.save(strmodelfile)
            processeddata.to_csv(processeddatafile)
            self.artifactsList.append(strid2wordfile)
            self.artifactsList.append(strcorpusfile)
            self.artifactsList.append(strmodelfile)
            self.artifactsList.append(processeddatafile)
            NewsDatabase.dumpFeatures([strid2wordfile,strcorpusfile,strmodelfile,processeddatafile])
            return True
        # Narrowed from a bare `except:` (same rationale as in process()).
        except Exception:
            print(sys.exc_info())
            return False
|
#!/usr/bin/env python
'''
Parses input/output formats,
manages transformations
'''
import csv
import re
import sys
from numpy import array
import numpy as np
import pandas as pd
from pandas import *
from . import config
from . import distance
from . import store
from . import stats
from . import HSIC
def wrap_features(txt, width=40):
    """Strip taxonomy rank prefixes (k__, p__, c__, o__, f__, g__, s__) from
    *txt* and return the last two '|'-separated levels joined by a space, or
    the single level if there is only one.

    `width` is kept for interface compatibility; the text is no longer
    line-wrapped (the textwrap call was already disabled), so the dead
    `import textwrap` has been removed.
    """
    for prefix in ('s__', 'g__', 'f__', 'o__', 'c__', 'p__', 'k__'):
        txt = txt.replace(prefix, '')
    # Split into taxonomy levels, dropping empty segments from doubled pipes.
    parts = [val for val in str(txt).split("|") if len(val) > 0]
    if len(parts) > 1:
        return parts[-2] + " " + parts[-1]
    return parts[0]
def substitute_special_characters(txt):
    """Normalize *txt* for display: newlines and semicolons become
    underscores, doubled underscores are collapsed (two passes), and all
    remaining underscores become spaces."""
    cleaned = re.sub(r'[\n;]', '_', txt)
    cleaned = cleaned.replace('__', '_').replace('__', '_')
    return cleaned.replace('_', ' ')
def load(file):
    """Load a tab-delimited file (or pass a DataFrame through) as a numpy
    array of strings.

    Leading '#' comment lines are skipped, except that the final comment is
    kept as a header row when it has the same column count as the first data
    row. Exits the process if the file cannot be opened.
    """
    # DataFrames are already parsed; just hand back the underlying array.
    if isinstance(file, pd.DataFrame):
        return file.values
    import io
    try:
        handle = io.open(file, encoding='utf-8')
    except EnvironmentError:
        sys.exit("Error: Unable to read file: " + file)
    reader = csv.reader(handle, dialect="excel-tab")
    rows = []
    comments = []
    # Collect comment lines until the first data line.
    for record in reader:
        if re.match("#", record[0]):
            comments.append(record)
        else:
            rows = [record]
            break
    # Promote the last comment to a header row if the widths match.
    if comments:
        candidate_header = comments[-1]
        if len(candidate_header) == len(rows[0]):
            rows = [candidate_header, rows[0]]
    # Consume the remainder of the file.
    for record in reader:
        rows.append(record)
    handle.close()
    return np.array(rows)
class Input:
    """
    Parser class for input
    Handles missing values, data type transformations
    * `CON` <- continous
    * `CAT` <- categorical
    * `BIN` <- binary
    * `LEX` <- lexical

    Loads the two input tables, parses feature names/headers, restricts both
    tables to their shared sample columns, discretizes the data and removes
    low-entropy features.  Results are retrieved via `get()`.
    """
    def __init__(self, strFileName1, strFileName2=None, var_names=True, headers=False):
        # Data type labels used throughout the pipeline
        self.continuous = "CON"
        self.categorical = "CAT"
        self.binary = "BIN"
        self.lexical = "LEX"
        # Boolean indicators
        self.varNames = var_names
        self.headers = headers
        # Initialize data structures
        self.strFileName1 = strFileName1
        # A single input file is compared against itself
        self.strFileName2 = strFileName1 if strFileName2 is None else strFileName2
        self.discretized_dataset1 = None
        self.discretized_dataset2 = None
        self.orginal_dataset1 = None
        self.orginal_dataset2 = None
        self.outName1 = None
        self.outName2 = None
        self.outType1 = None
        self.outType2 = None
        self.outHead1 = None
        self.outHead2 = None
        self._load()
        self._parse()
        self._filter_to_common_columns()
        print ("Discretizing is started using: %s style for filtering features with low entropy!" % config.strDiscretizing)
        self._discretize()
        self._remove_low_entropy_features()
        # BUG FIX: the second clause previously re-tested outName1, so a
        # second dataset with fewer than two features was never rejected here.
        if len(self.outName1) <2 or len(self.outName2) <2:
            sys.exit("--- HAllA to continue needs at lease two features in each dataset!!!\n--- Please repeat the one feature or provide the -a AllA option in the command line to do pairwise alla-against-all test!!")
        store.smart_decisoin()
        if store.bypass_discretizing():
            try:
                self.orginal_dataset1= np.asarray(self.orginal_dataset1, dtype = float)
                self.orginal_dataset2= np.asarray(self.orginal_dataset2, dtype = float)
                self._transform_data()
                #self.discretized_dataset1 = self.orginal_dataset1
                #self.discretized_dataset2 = self.orginal_dataset2
            except Exception:  # narrowed from bare except; any failure exits with a hint
                sys.exit("--- Please check your data types and your similarity metric!")
        self._check_for_semi_colon()
    def get(self):
        """Return [(discretized, original, names, types, header)] for both datasets."""
        return [(self.discretized_dataset1, self.orginal_dataset1, self.outName1, self.outType1, self.outHead1),
                (self.discretized_dataset2, self.orginal_dataset2, self.outName2, self.outType2, self.outHead2)]
    def _load(self):
        """Read both input files (or DataFrames) into numpy arrays."""
        self.orginal_dataset1 = load(self.strFileName1)
        self.orginal_dataset2 = load(self.strFileName2)
    def _check_for_semi_colon(self):
        # check the names of features that HAllA uses to make sure they don't have ; which
        # is special character to separate features in output files
        for i in range(len(self.outName1)):
            if ";" in self.outName1[i]:
                print ("Feature names warning!")
                print (self.outName1[i])
                sys.exit("In the first dataset, your feature (row) names contains ; which is the special character HAllA uses for separating features,\n \
Please replace it with another character such as _")
        for i in range(len(self.outName2)):
            if ";" in self.outName2[i]:
                print ("Feature names warning!")
                print (self.outName2[i])
                sys.exit("In the second dataset, your feature (row) names contains ; which is the special character HAllA uses for separating features,\n \
Please replace it with another character such as _")
    def _discretize(self):
        """Discretize both datasets with the configured binning style."""
        self.discretized_dataset1 = stats.discretize(self.orginal_dataset1, style = config.strDiscretizing, data_type = config.data_type[0])
        self.discretized_dataset2 = stats.discretize(self.orginal_dataset2, style = config.strDiscretizing, data_type = config.data_type[1])
    def _parse(self):
        """Split the raw arrays into data rows, feature names, per-feature types and headers."""
        def __parse(pArray, bVar, bHeaders):
            aOut = []
            aNames = []
            used_names = []
            aTypes = []
            aHeaders = None
            # Parse header if indicated by user or "#"
            if bHeaders or re.match("#",str(pArray[0,0])):
                aHeaders = list(pArray[0,1:])
                pArray = pArray[1:]
            # Parse variable names
            if bVar:
                aNames = list(pArray[:, 0])
                aNames = list(map(str, aNames))
                if config.format_feature_names:
                    aNames = list(map(wrap_features, aNames))
                    aNames = list(map(substitute_special_characters, aNames))
                pArray = pArray[:, 1:]
            #replace missing charaters with nan
            #pArray[pArray == config.missing_char] = 'NaN'
            #print pArray
            # # Parse data types, missing values, and whitespace
            if config.missing_method:
                # NOTE(review): sklearn.preprocessing.Imputer was removed in
                # scikit-learn 0.22 — this path requires an old sklearn; confirm.
                from sklearn.preprocessing import Imputer
                imp = Imputer(missing_values=config.missing_char, strategy=config.missing_method, axis=1)
                #imp.fit(pArray)
            for i, line in enumerate(pArray):
                # * If the line is not full, replace the Nones with nans *
                #*****************************************************************************************************
                #line = list(map(lambda x: 'NaN' if x == config.missing_char else x, line)) ###### np.nan Convert missings to nans
                if all([val == config.missing_char for val in line]):
                    # if all values in a feature are missing values then skip the feature
                    print ('All missing value in' , aNames[i])
                    continue
                if not aNames:
                    aNames.append(i)
                #aOut.append(line)
                try:
                    if config.missing_method:
                        line = array(imp.fit_transform(line.reshape(1,-1)))[0]
                    aTypes.append("CON")
                except ValueError:
                    line = line # we are forced to conclude that it is implicitly categorical, with some lexical ordering
                    aTypes.append("LEX")
                used_names.append(aNames[i])
                aOut.append(line)
            # if there is categorical data then do HAllA with AllA style of
            # finding the BH threshold using all p-values
            if "LEX" in aTypes:
                config.do_alla_halla = True
            return aOut, used_names, aTypes, aHeaders
        self.orginal_dataset1, self.outName1, self.outType1, self.outHead1 = __parse(self.orginal_dataset1, self.varNames, self.headers)
        self.orginal_dataset2, self.outName2, self.outType2, self.outHead2 = __parse(self.orginal_dataset2, self.varNames, self.headers)
        config.data_type[0] = self.outType1
        config.data_type[1] = self.outType2
    def _filter_to_common_columns(self):
        """
        Make sure that the data are well-formed
        """
        assert(len(self.orginal_dataset1) == len(self.outType1))
        assert(len(self.orginal_dataset2) == len(self.outType2))
        if self.outName1:
            assert(len(self.orginal_dataset1) == len(self.outName1))
        if self.outName2:
            assert(len(self.orginal_dataset2) == len(self.outName2))
        if self.outHead1:
            assert(len(self.orginal_dataset1[0]) == len(self.outHead1))
        if self.outHead2:
            assert(len(self.orginal_dataset2[0]) == len(self.outHead2))
        # If sample names are included in headers in both files,
        # check that the samples are in the same order
        if self.outHead1 and self.outHead2:
            header1="\t".join(self.outHead1)
            header2="\t".join(self.outHead2)
            #print header1, header2
            #if not (header1.lower() == header2.lower()):
            #+
            #"." + " \n File1 header: " + header1 + "\n" +
            #" File2 header: " + header2)
        # identical retry kept from the original (deterministic call; retry has no effect)
        try:
            df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1)
        except Exception:
            df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1)
        try:
            df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2)
        except Exception:
            df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2)
        #print df1.columns.isin(df2.columns)
        #print df2.columns.isin(df1.columns)
        l1_before = len(df1.columns)
        l2_before = len(df2.columns)
        # remove samples/columns with all NaN/missing values
        # First change missing value to np.nan for pandas
        # (np.nan instead of np.NAN, which was removed in NumPy 2.0)
        df1[df1==config.missing_char] =np.nan
        df2[df2==config.missing_char] =np.nan
        df1 = df1.dropna( axis=1, how='all')
        df2 = df2.dropna( axis=1, how='all')
        l1_after = len(df1.columns)
        l2_after = len(df2.columns)
        # replace np.NaN's with 'NaN'
        df1[df1.isnull()] = 'NaN'
        df2[df2.isnull()] = 'NaN'
        if l1_before > l1_after:
            print ("--- %d samples/columns with all missing values have been removed from the first dataset " % (l1_before- l1_after))
        if l2_before > l2_after:
            print ("--- %d samples/columns with all missing values have been removed from the second dataset " % (l2_before- l2_after))
        # Keep common samples/columns between two data frame
        df1 = df1.loc[: , df1.columns.isin(df2.columns)]
        df2 = df2.loc[: , df2.columns.isin(df1.columns)]
        # reorder df1 columns as the columns order of df2
        df1 = df1.loc[:, df2.columns]
        self.orginal_dataset1 = df1.values
        self.orginal_dataset2 = df2.values
        #print self.orginal_dataset1
        #print HSIC.HSIC_pval(df1.values,df2.values, p_method ='gamma', N_samp =1000)
        self.outName1 = list(df1.index)
        self.outName2 = list(df2.index)
        self.outHead1 = df1.columns
        self.outHead2 = df2.columns
        print(("The program uses %s common samples between the two data sets based on headers")%(str(df1.shape[1])))
        if len(self.orginal_dataset1[0]) != len(self.orginal_dataset2[0]):
            sys.exit("Have you provided --header option to use sample/column names for shared sample/columns.")
    def _remove_low_variant_features(self):
        """Drop features whose variance is at or below config.min_var (currently unused in __init__)."""
        # identical retry kept from the original (deterministic call; retry has no effect)
        try:
            df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1, dtype=float)
        except Exception:
            df1 = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1, dtype=float)
        try:
            df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2, dtype=float)
        except Exception:
            df2 = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2, dtype=float)
        l1_before = len(df1.index)
        l2_before = len(df2.index)
        df1 = df1[df1.var(axis=1) > config.min_var]
        df2 = df2[df2.var(axis=1) > config.min_var]
        l1_after = len(df1.index)
        l2_after = len(df2.index)
        if l1_before > l1_after:
            print ("--- %d features with variation equal or less than %.3f have been removed from the first dataset " % (l1_before- l1_after, config.min_var))
        if l2_before > l2_after:
            print ("--- %d features with variation equal or less than %.3f have been removed from the second dataset " % (l2_before- l2_after, config.min_var))
        self.orginal_dataset1 = df1.values
        self.orginal_dataset2 = df2.values
        self.outName1 = list(df1.index)
        self.outName2 = list(df2.index)
        assert(len(self.orginal_dataset1[0]) == len(self.orginal_dataset2[0]))
    def _remove_low_entropy_features(self):
        """Drop features whose (discretized) entropy is at or below the configured thresholds."""
        df1 = pd.DataFrame(self.discretized_dataset1, index = self.outName1, columns = self.outHead1)
        df1_org = pd.DataFrame(self.orginal_dataset1, index = self.outName1, columns = self.outHead1)
        df2 = pd.DataFrame(self.discretized_dataset2, index = self.outName2, columns = self.outHead2)
        df2_org = pd.DataFrame(self.orginal_dataset2, index = self.outName2, columns = self.outHead2)
        l1_before = len(df1.index)
        l2_before = len(df2.index)
        # filter for only features with entropy greater that the threshold;
        # the same mask is applied to the original (undiscretized) frame
        temp_df1 = df1
        df1 = df1[df1.apply(stats.get_enropy, 1) > config.entropy_threshold1]
        df1_org = df1_org[temp_df1.apply(stats.get_enropy, 1) > config.entropy_threshold1]
        temp_df2 = df2
        df2 = df2[df2.apply(stats.get_enropy, 1) > config.entropy_threshold2]
        df2_org = df2_org[temp_df2.apply(stats.get_enropy, 1) > config.entropy_threshold2]
        l1_after = len(df1.index)
        l2_after = len(df2.index)
        if l1_before > l1_after:
            print ("--- %d features with entropy equal or less than %.3f have been removed from the first dataset " % ((l1_before- l1_after), config.entropy_threshold1))
        if l2_before > l2_after:
            print ("--- %d features with entropy equal or less than %.3f have been removed from the second dataset " % ((l2_before- l2_after), config.entropy_threshold2))
        self.discretized_dataset1 = df1.values
        self.orginal_dataset1 = df1_org.values
        self.discretized_dataset2 = df2.values
        self.orginal_dataset2 = df2_org.values
        self.outName1 = list(df1.index)
        self.outName2 = list(df2.index)
        try:
            print ("--- %d features and %d samples are used from first dataset" % (l1_after, len(self.discretized_dataset1[0])))
        except IndexError:
            sys.exit("WARNING! No feature in the first dataset after filtering.")
        try:
            print ("--- %d features and %d samples are used from second dataset" % (l2_after, len(self.discretized_dataset2[0])))
        except IndexError:
            sys.exit("WARNING! No feature in the second dataset after filtering.")
        assert(len(self.discretized_dataset1[0]) == len(self.discretized_dataset2[0]))
    def _transform_data(self):
        """Scale both original datasets with the configured transform method."""
        scale = config.transform_method
        self.orginal_dataset1 = stats.scale_data(self.orginal_dataset1, scale = scale)
        self.orginal_dataset2 = stats.scale_data(self.orginal_dataset2, scale = scale)
#print(self.orginal_dataset1)
|
import cProfile
import os
import gzip
import csv
import time
from collections import defaultdict
from itertools import combinations
from pybloomfilter import BloomFilter
#from pybloom import ScalableBloomFilter, BloomFilter #pybloom used cryptographic hashes in a bloom filter. This is a bad idea.
import numpy
class BloomKmerFinder():
    """
    Finds all kmers that show up more than a certain number of times. Can choose to ignore dimerized reads
    or do only dimerized reads. Useful for finding common kmers in unmapped reads. We use a bloom filter
    to do this, so it is very fast, but requires a few GB of ram to keep the filter in memory.
    """
    def __init__(self, params, k, exclude_monomers=False, exclude_dimers=False):
        self.params = params
        self.k = k #kmer for global matching
        self.primer_k = 15 #kmer for primer matching
        self.exclude_monomers = exclude_monomers
        self.exclude_dimers = exclude_dimers
        self.run_dimer_detector = False
        # NOTE(review): capacity is passed as the float 1e9 — pybloomfilter
        # presumably coerces it, but an int literal would be safer; confirm.
        self.bloom = BloomFilter(1e9, 0.01, None)
        self.count_map = defaultdict(int)  # kmer -> times seen beyond the first
        if exclude_monomers or exclude_dimers:
            self.run_dimer_detector = True
        self.reads_read = 0 # how many lines we've looked at
        self.dimer_reads_read = 0 # how many lines we've looked at with at least 2 primers
        self.monomer_reads_read = 0 # how many lines we've looked at with exactly 1 primer
        self.out_stats_file = open(os.path.join(self.params.output_dir,'count_stats'), 'w')
    def reverse_complement(self, seq):
        """Return the reverse complement of seq (A/C/G/T/N only)."""
        rev_map = {'A':'T','C':'G','G':'C','T':'A', 'N':'N'}
        new_seq = ''.join([rev_map[x] for x in seq]) #sub letters
        new_seq = new_seq[::-1] # reverse it
        return new_seq
    def parse_probe_list(self):
        """
        Creates probe map, which is a map of probe names to sequence.
        """
        probe_map = {}
        with open(self.params.probe_list, 'r') as f:
            c = csv.reader(f, delimiter="\t")
            for line in c:
                probe_map[line[0]] = line[1].upper()
        return probe_map
    def build_kmer_map(self, probe_map, k):
        """
        Builds a map from kmer to probenames that have this kmer.
        Also does reverse complements.
        """
        kmer_map = defaultdict(set)
        for (probe, seq) in probe_map.items():
            seq_rc = self.reverse_complement(seq)
            # NOTE(review): range(len(seq)-k) skips the final kmer of each
            # sequence (classic extraction uses len(seq)-k+1) — confirm intended.
            for i in range(0, len(seq)-k):
                kmer_map[seq[i:i+k]].add(probe)
                kmer_map[seq_rc[i:i+k]].add(probe+"rc")
        return kmer_map
    def run_matcher(self, input_file, kmer_map):
        """
        Goes through a fastq, and registers all the kmers in it.
        """
        if input_file is None: # we don't require the input files... one of them can be undefined
            return
        debug = 0  # reads with no primer match at all
        with open(input_file, 'r') as f:
            counter = 0 # 0: header 1: sequence 2: junk 3: quality
            for line in f:
                if counter == 1:  # sequence line of the 4-line fastq record
                    line = line.upper()
                    if self.run_dimer_detector:
                        probe_matches = self.find_matches(line.strip(), kmer_map)
                        if len(probe_matches) > 1 and not self.exclude_dimers:
                            self.dimer_reads_read += 1
                            self.register_kmers(line.strip())
                        elif len(probe_matches) == 1 and not self.exclude_monomers:
                            self.monomer_reads_read += 1
                            self.register_kmers(line.strip())
                        elif len(probe_matches) == 0:
                            debug += 1
                            self.register_kmers(line.strip())
                    else:
                        self.register_kmers(line.strip())
                    self.reads_read += 1
                counter += 1
                counter = counter % 4
        print('{} dimer: {}'.format(input_file, self.dimer_reads_read))
        print('{} monomer: {}'.format(input_file, self.monomer_reads_read))
        print('{} none: {}'.format(input_file, debug))
        print('{} total: {}'.format(input_file, self.reads_read))
    def register_kmers(self, read):
        """
        Adds the read and its reverse complement to our bloom filter, and if we have seen it before,
        adds it to the count map. The idea is that the bloom filter can approximately determine
        if we've seen something before or not, and to the count map are added all kmers that the bloom
        filter reports that we've seen before.
        """
        for i in range(0, len(read)-self.k):
            seq = read[i:i+self.k]
            seq_rc = self.reverse_complement(seq)
            if self.bloom.add(seq):
                self.count_map[seq]+=1
            if self.bloom.add(seq_rc):
                self.count_map[seq_rc]+=1
    def find_matches(self, line, kmer_map):
        """
        For a single read, reports all found primers
        """
        in_primer = None
        matches = []
        for i in range(0, len(line)-self.primer_k):
            sub_seq = line[i:i+self.primer_k]
            if in_primer is None: #we are not currently in a primer.
                if len(kmer_map[sub_seq]) == 1: # If we see a uniquely mappable kmer, we enter a primer.
                    (in_primer,) = kmer_map[sub_seq]
                    matches.append(in_primer)
                else: # Otherwise, we continue
                    continue
            else: # we are in the middle of seeing a primer sequence
                if in_primer in kmer_map[sub_seq]: # we see this primer again, and are thus still reading it. Continue.
                    continue
                elif len(kmer_map[sub_seq]) == 1: # We no longer see our current primer, but this sequence is mappable to another primer. We are now in a different primer.
                    (in_primer,) = kmer_map[sub_seq]
                    matches.append(in_primer)
                else: # We aren't in our current primer, and aren't uniquely in a different primer.
                    in_primer = None
        return matches
    def output_stats(self, kmer_map):
        """
        We print the top-two unique maximal strings, and then a sorted list of all kmers that appear
        at least twice in our reads.
        """
        sorted_map = sorted(self.count_map.items(), key=lambda x: -x[1])
        if not sorted_map:  # FIX: no repeated kmer was ever seen — nothing to report
            return
        first_string = self.extend(sorted_map[0][0], self.count_map)
        # FIX: defaults guard the case where every remaining kmer belongs to
        # first_string, so the loop below never breaks (previously NameError).
        second_string = ''
        second_score = 0
        for (kmer, count) in sorted_map[1:]:
            if kmer not in first_string and self.reverse_complement(kmer) not in first_string:
                second_string = self.extend(kmer, self.count_map)
                second_score = count
                break
        self.out_stats_file.write("{}\t{}\n".format(sorted_map[0][1], first_string))
        self.out_stats_file.write("{}\t{}\n".format(second_score, second_string))
        for (kmer, count) in sorted_map:
            probe_matches = self.find_matches(kmer, kmer_map)
            if len(probe_matches) == 0:
                self.out_stats_file.write("{}\t{}\n".format(kmer, count))
            else:
                self.out_stats_file.write("{}\t{}\t{}\n".format(probe_matches, kmer, count))
    def extend(self, seed, kmer_map):
        """
        Given a kmer, we greedily extend it in both directions by looking for kmers that differ by 1 on either side. We add
        the new kmer if its count is at least half of our peak kmer.
        """
        final_string = [seed]
        value = kmer_map[seed]
        forward_extend = True
        current_seed = seed
        while forward_extend:
            extender = current_seed[1:]
            new_kmers = [extender+x for x in 'ACGT']
            new_scores = [kmer_map[x] for x in new_kmers]
            if numpy.max(new_scores)>value*0.5: #we extend
                new_kmer = new_kmers[numpy.argmax(new_scores)]
                if new_kmer == current_seed: #we hit a pathological (recursive) read
                    forward_extend = False
                final_string.append(new_kmer[-1])
                current_seed = new_kmer
            else:
                forward_extend = False
        reverse_extend = True
        current_seed = seed
        while reverse_extend:
            extender = current_seed[:-1]
            new_kmers = [x+extender for x in 'ACGT']
            new_scores = [kmer_map[x] for x in new_kmers]
            if numpy.max(new_scores)>value*0.5: #we extend
                new_kmer = new_kmers[numpy.argmax(new_scores)]
                if new_kmer == current_seed: #we hit a pathological read
                    reverse_extend = False
                final_string = [new_kmer[0]]+final_string
                current_seed = new_kmer
            else:
                reverse_extend = False
        return ''.join(final_string)
    def run(self):
        """
        Main execution function for kmer finder
        """
        if self.run_dimer_detector:
            probe_map = self.parse_probe_list()
            kmer_map = self.build_kmer_map(probe_map, self.primer_k) #kmer-map for dimer detection
        else:
            kmer_map = defaultdict(set)
        for input_file in [self.params.input_file1, self.params.input_file2]:
            self.run_matcher(input_file, kmer_map)
        self.output_stats(kmer_map)
        self.out_stats_file.close()
if __name__ == '__main__':
    # CLI entry point: count frequent kmers in one or two fastq files,
    # optionally excluding reads matched as primer monomers/dimers.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--probe_list', type=str, help="Needed if you want to filter by dimers. tsv with 2 columns: (probe_name, sequence). If you are looking for adapters or other short sequences, they should be added to the probe list.")
    parser.add_argument('--kmer', type=int, default=30, help="How big a fragment size to count")
    parser.add_argument('--input_file1', help="A fastq file with reads to analyze")
    parser.add_argument('--input_file2', help="Another fastq file (Optional)")
    parser.add_argument('--output_dir')
    parser.add_argument('--exclude_monomers', dest='exclude_monomers', action='store_true', help="Whether we exclude primer monomers from kmer counting")
    parser.set_defaults(exclude_monomers=False)
    parser.add_argument('--exclude_dimers', dest='exclude_dimers', action='store_true', help="Whether we exclude primer dimers from kmer counting")
    parser.set_defaults(exclude_dimers=False)
    params = parser.parse_args()
    bloomy = BloomKmerFinder(params, params.kmer, params.exclude_monomers, params.exclude_dimers)
    # Time the full run and report elapsed seconds
    start = time.time()
    bloomy.run()
    print(time.time()-start)
from .settings import *
# Placeholder key overriding the base settings — presumably a test/CI
# settings module; never use this value in production.
SECRET_KEY = 'NOTREALLY'
|
def new2(n,operations):
    """Simulate n seats: "L" seats a newcomer in the lowest-numbered free
    seat (no-op when full), "C<seat>" vacates that seat.

    Returns the final occupancy list (1 = taken, 0 = free).
    """
    seats = [0] * n
    for op in operations:
        if "L" in op:
            # Take the first free seat, if any (renamed loop var: the
            # original shadowed the outer iteration variable).
            for idx in range(n):
                if seats[idx] == 0:
                    seats[idx] = 1
                    break
        elif "C" in op:
            # int(op[1:]) supports multi-digit seat numbers; the original
            # int(op[1]) only handled seats 0-9. Single-digit input behaves
            # identically.
            seats[int(op[1:])] = 0
    return seats
# Demo: two seats, three arrivals (the third finds no free seat), then seat 1 is vacated.
print(new2(2,["L","L","L","C1"]))
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP
#
# Author: Federico Ceratto <federico.ceratto@hpe.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import designate.tests
from designate import exceptions
from designate.backend.agent_backend import impl_djbdns
from designate.tests.unit.agent import backends
class DjbdnsAgentBackendTestCase(designate.tests.TestCase):
    """Unit tests for the djbdns agent backend (impl_djbdns.DjbdnsBackend)."""
    @mock.patch.object(impl_djbdns.DjbdnsBackend, '_check_dirs')
    def setUp(self, mock_check_dirs):
        # _check_dirs is patched so constructing the backend does not
        # require the djbdns directory layout on disk.
        super(DjbdnsAgentBackendTestCase, self).setUp()
        self.CONF.set_override('masters', ['127.0.0.1:5354'], 'service:agent')
        self.backend = impl_djbdns.DjbdnsBackend('foo')
    def test_start_backend(self):
        # start() should be a no-op that does not raise
        self.backend.start()
    def test_stop_backend(self):
        # stop() should be a no-op that does not raise
        self.backend.stop()
    def test_init(self):
        # Verify resolver setup and the default djbdns paths
        self.assertTrue(hasattr(self.backend, '_resolver'))
        self.assertEqual(1, self.backend._resolver.timeout)
        self.assertEqual(1, self.backend._resolver.lifetime)
        self.assertEqual(['127.0.0.1'], self.backend._resolver.nameservers)
        self.assertEqual(
            '/var/lib/djbdns/root/data.cdb',
            self.backend._tinydns_cdb_filename
        )
        self.assertEqual(
            '/var/lib/djbdns/datafiles',
            self.backend._datafiles_dir
        )
        self.assertEqual(
            '/var/lib/djbdns/datafiles/%s.zonedata',
            self.backend._datafiles_path_tpl
        )
        self.assertEqual([('127.0.0.1', 5354)], self.backend._masters)
    @mock.patch.object(impl_djbdns.DjbdnsBackend, '_check_dirs')
    def test_init_no_masters(self, mock_check_dirs):
        # Constructing without AXFR masters must raise exceptions.Backend
        self.CONF.set_override('masters', [], 'service:agent')
        self.assertRaisesRegex(
            exceptions.Backend,
            'Missing agent AXFR masters',
            impl_djbdns.DjbdnsBackend, 'foo'
        )
    def test_find_zone_serial(self):
        # A successful SOA query returns the serial from the answer
        class Data(object):
            serial = 3
        self.backend._resolver = mock.Mock()
        self.backend._resolver.query.return_value = [Data(), ]
        serial = self.backend.find_zone_serial('example.com')
        self.assertEqual(3, serial)
    def test_find_zone_serial_error(self):
        # Resolver failures are swallowed and reported as None
        self.backend._resolver = mock.Mock()
        self.backend._resolver.query.side_effect = RuntimeError('foo')
        serial = self.backend.find_zone_serial('example.com')
        self.assertIsNone(serial)
    @mock.patch('designate.backend.agent_backend.impl_djbdns.execute')
    def test_create_zone(self, mock_execute):
        # AXFR and cdb rebuild are mocked out; create_zone must not raise
        self.backend._perform_axfr_from_minidns = mock.Mock()
        self.backend._rebuild_data_cdb = mock.Mock()
        zone = backends.create_dnspy_zone('example.org')
        self.backend.create_zone(zone)
    def test_update_zone(self):
        self.backend._perform_axfr_from_minidns = mock.Mock()
        self.backend._rebuild_data_cdb = mock.Mock()
        zone = backends.create_dnspy_zone('example.org')
        self.backend.update_zone(zone)
    @mock.patch('designate.backend.agent_backend.impl_djbdns.os.remove')
    def test_delete_zone(self, mock_rm):
        # delete_zone removes the per-zone datafile
        self.backend._rebuild_data_cdb = mock.Mock()
        self.backend.delete_zone('foo')
        mock_rm.assert_called_once_with(
            '/var/lib/djbdns/datafiles/foo.zonedata'
        )
    @mock.patch('designate.backend.agent_backend.impl_djbdns.os.remove')
    def test_exception_filter(self, mock_os_remove):
        # A None zone name is translated into exceptions.Backend
        self.backend._rebuild_data_cdb = mock.Mock()
        self.assertRaises(
            exceptions.Backend,
            self.backend.delete_zone, None
        )
    @mock.patch('designate.backend.agent_backend.impl_djbdns.os.remove')
    def test_exception_filter_pass_through(self, mock_os_remove):
        # An exceptions.Backend raised internally propagates unchanged
        self.backend._rebuild_data_cdb = mock.Mock()
        mock_os_remove.side_effect = exceptions.Backend
        self.assertRaises(
            exceptions.Backend,
            self.backend.delete_zone, 'foo'
        )
|
"""
A context processor which adds the value of the
``COMMENTS_MODERATE_AFTER`` setting to each ``RequestContext`` in
which it is applied.
"""
from template_utils.context_processors import settings_processor
# Context processor exposing settings.COMMENTS_MODERATE_AFTER to templates.
comment_moderation = settings_processor('COMMENTS_MODERATE_AFTER')
|
"""Generate JSON-LD contexts
"""
import logging
import os, re
import sys
from typing import Union, TextIO, List, Optional, Dict
import click
from jsonasobj import JsonObj, as_json, as_dict
from prefixcommons import curie_util as cu
from metamodel.metamodel import SchemaDefinition, SlotDefinition, ClassDefinition, Element, Definition, \
ClassDefinitionName, SlotDefinitionName, TypeDefinitionName
from metamodel.utils.builtins import DEFAULT_BUILTIN_TYPE_NAME, builtin_names, builtin_uri, Builtin
from metamodel.utils.formatutils import camelcase, underscore, be
from metamodel.utils.generator import Generator
class ContextGenerator(Generator):
    """Generate a JSON-LD ``@context`` for a metamodel SchemaDefinition.

    Prefixes, class and slot mappings are accumulated while visiting the
    schema and emitted as a single JSON object in ``end_schema``.
    """
    generatorname = os.path.basename(__file__)
    generatorversion = "0.0.2"
    valid_formats = ['json']
    # Slots are visited globally via visit_slot, not per enclosing class
    visit_all_class_slots = False
    def __init__(self, schema: Union[str, TextIO, SchemaDefinition], fmt: str='json') -> None:
        super().__init__(schema, fmt)
        self.prefixmap = dict()          # accumulated @context entries
        self.slot_class_maps = dict()    # class name -> class @id mapping
        self.curi_maps: List[Dict[str, str]] = []
    def visit_schema(self):
        # Add the list of curi maps
        for curie_map in self.schema.default_curi_maps:
            self.curi_maps.append(cu.read_biocontext(curie_map))
        # Add any explicitly declared prefixes
        for prefix in self.schema.prefixes.values():
            self.prefixmap[prefix.local_name] = prefix.prefix_uri
        # Add any prefixes explicitly declared
        self.add_id_prefixes(self.schema)
        # Add the default prefix
        base_prefix = self.default_uri()
        if base_prefix:
            self.prefixmap['@vocab'] = base_prefix
            self.prefixmap['@base'] = base_prefix
    def end_schema(self) -> None:
        # Provenance header embedded in the emitted context document
        comments = f'''Auto generated from {self.schema.source_file} by {self.generatorname} version: {self.generatorversion}
Generation date: {self.schema.generation_date}
Schema: {self.schema.name}
id: {self.schema.id}
description: {be(self.schema.description)}
license: {be(self.schema.license)}
'''
        context = JsonObj(comments=comments)
        # Class mappings are merged into the prefix map before emission
        for k, v in self.slot_class_maps.items():
            self.prefixmap[k] = v
        context['@context'] = self.prefixmap
        print(as_json(context))
    def visit_class(self, cls: ClassDefinition) -> bool:
        # Record a CamelCase context entry for the class if it has mappings
        class_def = {}
        cn = camelcase(cls.name)
        self.add_mappings(cls, class_def)
        if class_def:
            self.slot_class_maps[cn] = class_def
        # We don't bother to visit class slots - just all slots
        return False
    def visit_slot(self, aliased_slot_name: str, slot: SlotDefinition) -> None:
        # Only non-aliased slots contribute @type/@container information
        slot_def = {}
        sn = underscore(slot.name)
        if not slot.alias:
            rng = self.grounded_slot_range(slot)
            if rng != DEFAULT_BUILTIN_TYPE_NAME:
                builtin_rng_uri = builtin_uri(rng)
                # Object-valued ranges map to "@id"; literal ranges keep their URI
                slot_def['@type'] = builtin_rng_uri \
                    if builtin_rng_uri and builtin_names.get(rng, None) not in (Builtin.uri, Builtin.anytype) else "@id"
            if slot.multivalued:
                slot_def['@container'] = '@list'
        self.add_mappings(slot, slot_def)
        if slot_def:
            self.prefixmap[sn] = slot_def
    def add_prefix(self, ncname: str) -> None:
        """ Look up ncname and add it to the prefix map if necessary
        @param ncname: name to add
        """
        if ncname not in self.prefixmap:
            uri = cu.expand_uri(ncname + ':', self.curi_maps)
            if uri and '://' in uri:
                self.prefixmap[ncname] = uri
            else:
                # Fall back to a placeholder URI so the context stays well-formed
                print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
                self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
    def get_uri(self, ncname: str) -> Optional[str]:
        """ Get the URI associated with ncname
        @param ncname:
        """
        uri = cu.expand_uri(ncname + ':', self.curi_maps)
        return uri if uri and uri.startswith('http') else None
    def add_id_prefixes(self, element: Element) -> None:
        # Register every id_prefix declared on the element
        for id_prefix in element.id_prefixes:
            self.add_prefix(id_prefix)
    def add_mappings(self, defn: Definition, target: Dict) -> None:
        """ Process any mappings in defn, adding all of the mappings prefixes to the namespace map and
        add a link to the first mapping to the target
        @param defn: Class or Slot definition
        @param target: context target
        """
        self.add_id_prefixes(defn)
        for mapping in defn.mappings:
            if '://' in mapping:
                # Absolute URI mapping is used directly
                target['@id'] = mapping
            else:
                # CURIE mapping: must be exactly "prefix:local"
                if ':' not in mapping or len(mapping.split(':')) != 2:
                    raise ValueError(f"Definition {defn.name} = unrecognized mapping: {mapping}")
                ns = mapping.split(':')[0]
                self.add_prefix(ns)
                target['@id'] = defn.mappings[0]
# Command-line entry point: click validates that yamlfile exists and that
# --format/-f is one of ContextGenerator.valid_formats.
@click.command()
@click.argument("yamlfile", type=click.Path(exists=True, dir_okay=False))
@click.option("--format", "-f", default='json', type=click.Choice(ContextGenerator.valid_formats), help="Output format")
def cli(yamlfile, format):
    """ Generate jsonld @context definition from biolink model """
    print(ContextGenerator(yamlfile, format).serialize())
|
"""Write a version of a palindrome recogniser that accepts a file name
from the user, reads each line, and prints the line to the screen if it
is a palindrome."""
# Default input file read by palindrome_recognizer below.
file_name = 'palindromes.txt'
def palindrome_recognizer(file_name):
    """Print every non-empty line of *file_name* that is a palindrome.

    Implements the exercise described in the module docstring (the original
    body was a `pass` stub).  Prints a message instead of raising when the
    file cannot be read, so the demo script below stays runnable.
    Returns None.
    """
    try:
        with open(file_name) as handle:
            for line in handle:
                text = line.strip()
                # A palindrome reads the same forwards and backwards
                if text and text == text[::-1]:
                    print(text)
    except OSError as err:
        print("Could not read {}: {}".format(file_name, err))
# Runs the recognizer on the default file and prints its return value (None).
print(palindrome_recognizer(file_name))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Han"
__email__ = "liuhan132@foxmail.com"
import random
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
class ValidDocBatchSampler(Sampler):
    """Batch sampler ordering samples by descending valid-doc count.

    With ``strict=True``, only batches whose members all share the same
    valid-doc count are kept.  With ``shuffle=True``, batch order is
    shuffled but the (possibly partial) last batch stays last.
    """
    def __init__(self, valid_doc_nums, batch_size, shuffle=True, strict=True):
        super(ValidDocBatchSampler, self).__init__(valid_doc_nums)
        self.valid_doc_nums = valid_doc_nums
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.strict = strict
    def __iter__(self):
        # Sort by descending count; the random field breaks ties differently each epoch.
        # np.float64 instead of np.float_, which was removed in NumPy 2.0.
        lengths = np.array(
            [(-l, np.random.random()) for l in self.valid_doc_nums],
            dtype=[('l1', np.int_), ('rand', np.float64)]
        )
        indices = np.argsort(lengths, order=('l1', 'rand'))
        batches = [indices[i:i + self.batch_size]
                   for i in range(0, len(indices), self.batch_size)]
        if self.strict:
            # Keep only batches where every member has the same count
            batches = list(filter(
                lambda x: sum([self.valid_doc_nums[i] for i in x]) == self.valid_doc_nums[x[0]] * len(x),
                batches
            ))
        if not batches:
            # FIX: strict filtering can drop every batch; the original then
            # crashed on batches[-1] with an IndexError.
            return iter([])
        last = batches[-1]
        if self.shuffle:
            batches = batches[:len(batches) - 1]
            np.random.shuffle(batches)
            batches.append(last)
        return iter([i for batch in batches for i in batch])
    def __len__(self):
        # NOTE(review): reports the dataset size, not the number of indices
        # actually yielded after strict filtering — confirm callers expect this.
        return len(self.valid_doc_nums)
class DialogTurnsSampler(Sampler):
    """Yields ``iter_num`` randomly chosen length-sorted batches of dialog indices.

    Dialogs are bucketed into batches by descending turn count; each
    iteration draws ``iter_num`` batches with replacement.
    """
    def __init__(self, turns_num, batch_size, iter_num):
        super(DialogTurnsSampler, self).__init__(turns_num)
        self.turns_num = turns_num
        self.batch_size = batch_size
        self.iter_num = iter_num
    def __iter__(self):
        # Sort by descending turn count; the random field shuffles ties.
        # np.float64 instead of np.float_, which was removed in NumPy 2.0.
        lengths = np.array(
            [(-l, np.random.random()) for l in self.turns_num],
            dtype=[('l1', np.int_), ('rand', np.float64)]
        )
        indices = np.argsort(lengths, order=('l1', 'rand'))
        batches = [indices[i:i + self.batch_size]
                   for i in range(0, len(indices), self.batch_size)]
        # Draw iter_num batch indices with replacement
        batches_i = iter(torch.randint(high=len(batches), size=(self.iter_num,), dtype=torch.int64))
        return iter([i for batch_i in batches_i for i in batches[batch_i]])
    def __len__(self):
        # Upper bound: the final chosen batch may be partial
        return self.iter_num * self.batch_size
class GenBatchSampleIter:
    """Endless random-batch iterator over a dataset.

    Each ``next()`` draws ``batch_size`` distinct random items and returns
    ``collect_fun(batch)``.
    """
    def __init__(self, dataset, batch_size, collect_fun):
        self.batch_size = batch_size
        self.dataset = dataset
        self.collect_fun = collect_fun
    def __iter__(self):
        # FIX: the class defined __next__ without __iter__, so it could not
        # be used in a for-loop or passed to iter().
        return self
    def __len__(self):
        return len(self.dataset)
    def __next__(self):
        # Sample without replacement within a single batch
        batch_idx = random.sample(range(len(self.dataset)), self.batch_size)
        batch_data = [self.dataset[i] for i in batch_idx]
        return self.collect_fun(batch_data)
|
import numpy as np, argparse, pickle
import matplotlib; matplotlib.use('agg')
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, average_precision_score
import pdb
def loadData(path):
    """Load pickled predictions and flatten them for PR computation.

    Drops column 0 (the NA/no-relation class) from both the one-hot
    labels and the logits, then flattens each to a 1-D array.

    :param path: path to a pickle with keys 'y_hot' and 'logit_list'.
    :returns: (y_true, y_scores) flat numpy arrays of equal length.
    """
    # Context manager closes the handle; the original left it dangling.
    with open(path, 'rb') as f:
        preds = pickle.load(f)
    y_hot = np.array(preds['y_hot'])
    logit_list = np.array(preds['logit_list'])
    # x[1:] skips column 0 of every row before flattening.
    y_hot_new = np.reshape(np.array([x[1:] for x in y_hot]), (-1))
    logit_list_new = np.reshape(np.array([x[1:] for x in logit_list]), (-1))
    return y_hot_new, logit_list_new
def plotPR(dataset):
    """Plot a precision-recall curve for the model named by the global
    ``args.name`` against saved baselines for *dataset*, and save it to
    ./results/<name>/plot_pr.pdf.

    NOTE(review): relies on the module-global `args` created in the
    __main__ block — cannot be called before argparse has run.
    """
    y_true, y_scores = loadData('./results/{}/precision_recall.pkl'.format(args.name))
    precision,recall,threshold = precision_recall_curve(y_true,y_scores)
    area_under = average_precision_score(y_true, y_scores)
    baselines_path = './baselines_pr/{}/'.format(dataset)
    print('Area under the curve: {:.3}'.format(area_under))
    # Our model's curve, drawn in red on top of the baselines.
    plt.plot(recall[:], precision[:], label=args.name, color ='red', lw=1, marker = 'o', markevery = 0.1, ms = 6)
    if dataset == 'riedel_nyt':
        # The NYT dataset ships more baselines and uses a zoomed viewport.
        base_list = ['BGWA', 'PCNN+ATT', 'PCNN', 'MIMLRE', 'MultiR', 'Mintz']
        color = ['purple', 'darkorange', 'green', 'xkcd:azure', 'orchid', 'cornflowerblue']
        marker = ['d', 's', '^', '*', 'v', 'x', 'h']
        plt.ylim([0.3, 1.0])
        plt.xlim([0.0, 0.45])
    else:
        base_list = ['BGWA', 'PCNN+ATT', 'PCNN']
        color = ['purple', 'darkorange', 'green']
        marker = ['d', 's', '^']
    # Baseline curves are precomputed numpy arrays stored on disk.
    for i, baseline in enumerate(base_list):
        precision = np.load(baselines_path + baseline + '/precision.npy')
        recall = np.load(baselines_path + baseline + '/recall.npy')
        plt.plot(recall, precision, color = color[i], label = baseline, lw=1, marker = marker[i], markevery = 0.1, ms = 6)
    plt.xlabel('Recall', fontsize = 14)
    plt.ylabel('Precision', fontsize = 14)
    plt.legend(loc="upper right", prop = {'size' : 12})
    plt.grid(True)
    plt.tight_layout()
    plt.show()  # no-op under the 'agg' backend selected at import time
    plot_path = './results/{}/plot_pr.pdf'.format(args.name)
    plt.savefig(plot_path)
    print('Precision-Recall plot saved at: {}'.format(plot_path))
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    # -name selects the ./results/<name>/ directory to read and write.
    parser.add_argument('-name', default='pretrained_reside')
    # -dataset selects which baseline set and axis limits plotPR uses.
    parser.add_argument('-dataset', default='riedel_nyt')
    # Module-global on purpose: plotPR reads args.name directly.
    args = parser.parse_args()
    plotPR(args.dataset)
""" An example to demonstrate the use of partitions.
(Working, but not up-to-date, as in not using latest functionalities.)
"""
from grom import Genome
Genome.DEBUG = False
e = Genome("examples/src/kdl.gb", "examples/test.gb")
print(e, end="\n\n\n")
e.partition([
("hello", range(0X0, 0x6543)),
("world", range(0x6543, 0x12345)),
("!", range(0x12345, e.size))
])
print(e, end="\n\n\n")
e.partition([
("int", range(0x0, 0x100)),
("head", range(0x100, 0x150)),
("game", range(0x150, e.size))
])
print(e, end="\n\n\n")
p = e.idof("game") # retrieve partition "2: game"
e.mutate(.12, 1, [p]) # only mutate the 'game' partition
e.save().start()
|
import collections
import heapq
import numpy as np
import pandas as pd
import scipy.stats as ss
class EpidemicModel(object):
    """
    Every one of the epidemic models below inherits from this class, which
    defines the infrastructure of the event-by-event principle. The class
    supplies the exposure process dependent on the model states, and can
    be subclassed to use any number of stationary processes for defining
    events and state changes. Events are defined as methods with an
    '_EVENT_' prefix. The base class supplies a basic SIR model without
    initialization, with events for exposure and recovery.
    """
    # Names of the stationary waiting-time processes; subclasses override
    # these and receive one frozen scipy rv per name in __init__ (same order).
    stationary_processes = 'recovery',
    states = 'S', 'I', 'R'

    def __init__(self, beta, *process_rvs, population, init_exposed, **kwargs):
        """
        :param beta: exposure rate parameter.
        :param process_rvs: one frozen scipy.stats rv per entry of
            ``stationary_processes``, in the same order.
        :param population: total population size.
        :param init_exposed: number of initially exposed/infected hosts.
        :param kwargs: extra model parameters (e.g. prob_dead), attached
            verbatim as attributes for subclasses to read.
        """
        for key, value in kwargs.items():
            setattr(self, f'{key}', value)
        for name, rv in zip(self.stationary_processes, process_rvs):
            setattr(self, f'_{name}_iter', self._rv_generator(rv))
        self.beta = beta
        self.population = population
        self.inv_population = 1/population  # hoisted: used on every exposure
        self.init_exposed = init_exposed
        self.state_container = collections.namedtuple(
            'State', self.states
        )
        self.time = 0.0
        self.event_list = list()  # heap of (time, event-method-name) pairs
        # Unit-rate exponential stream; rescaled by the current exposure
        # rate in _insert_exposure_event.
        self._exposure_iter = self._rv_generator(ss.expon())
        self._init_model()

    def __next__(self):
        """Pop and execute the next event; return (event, time, state)."""
        try:
            self.time, event = self._pop()
        except IndexError:
            # Empty event queue: the process has run out of events.
            raise StopIteration
        method = getattr(self, event)
        method()
        # The exposure rate depends on S and I, so the next candidate
        # exposure is re-drawn after every state change.
        self._insert_exposure_event()
        return event, self.time, self.state

    def __iter__(self):
        return self

    def run_until(self, t):
        """
        Returns a generator, iterating through events until some specified
        time point.
        """
        it = iter(self)
        try:
            while True:
                event, time, state = next(it)
                if time >= t:
                    break
                yield event, time, state
        except StopIteration:
            pass

    def _push(self, el):
        heapq.heappush(self.event_list, el)

    def _pop(self):
        return heapq.heappop(self.event_list)

    def _insert_exposure_event(self):
        """
        Inserts the next exposure event if such an event takes place
        before the next scheduled event.
        """
        if self.I == 0 or self.S == 0:
            return
        # Guard: with an empty queue any exposure time is admissible; the
        # original indexed event_list[0] here and raised IndexError.
        dt = self.event_list[0][0] - self.time if self.event_list else float('inf')
        rate = self.beta * self.S * self.I * self.inv_population
        exposure_time = next(self._exposure_iter)/rate
        if exposure_time < dt:
            self._push((self.time + exposure_time, '_EVENT_exposure'))

    @property
    def state(self):
        """Current compartment counts as a named tuple over ``states``."""
        return self.state_container(*[getattr(self, s) for s in self.states])

    def _rv_generator(self, rv):
        """
        Returns a generator over the scipy-stats random variable, drawing
        20_000 numbers at a time, since batched sampling is more efficient.
        """
        while True:
            yield from rv.rvs(size=20_000)

    def _EVENT_exposure(self):
        # S -> I; the new infection immediately gets a recovery time.
        recovery_time = next(getattr(self, '_recovery_iter'))
        self.S -= 1
        self.I += 1
        self._push((self.time + recovery_time, '_EVENT_recovery'))

    def _EVENT_recovery(self):
        self.I -= 1
        self.R += 1
class SIR(EpidemicModel):
    """Plain SIR model: susceptible -> infected -> recovered."""

    stationary_processes = ('recovery',)
    states = ('S', 'I', 'R')

    def _init_model(self):
        # Everyone starts susceptible except the initially infected.
        self.S = self.population - self.init_exposed
        self.I = self.init_exposed
        self.R = 0
        self.event_list = []
        # One pending recovery per initially infected host, then the
        # first candidate exposure (same RNG draw order as before).
        recovery_times = getattr(self, '_recovery_iter')
        for _ in range(self.init_exposed):
            self._push((next(recovery_times), '_EVENT_recovery'))
        self._insert_exposure_event()
class SIRS(EpidemicModel):
    """SIR with waning immunity: recovered hosts eventually return to
    susceptible via the 'mutation' process."""

    stationary_processes = ('recovery', 'mutation')
    states = ('S', 'I', 'R')

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.I = self.init_exposed
        self.R = 0
        self.event_list = []
        # One pending recovery per initially infected host (identical
        # RNG draw order to the original), then the first exposure.
        recovery_times = getattr(self, '_recovery_iter')
        for _ in range(self.init_exposed):
            self._push((next(recovery_times), '_EVENT_recovery'))
        self._insert_exposure_event()

    def _EVENT_recovery(self):
        # Infected -> recovered, and schedule the later loss of immunity.
        self.I -= 1
        self.R += 1
        waning_delay = next(getattr(self, '_mutation_iter'))
        self._push((self.time + waning_delay, '_EVENT_mutation'))

    def _EVENT_mutation(self):
        # Recovered -> susceptible (immunity has waned).
        self.R -= 1
        self.S += 1
class SIRD(EpidemicModel):
    """SIR with deaths: each infection ends in recovery or death, chosen
    by a Bernoulli draw with probability ``prob_dead`` (supplied as a
    keyword argument)."""

    stationary_processes = ('recovery', 'death')
    states = ('S', 'I', 'R', 'D')

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.I = self.init_exposed
        self.R = 0
        self.D = 0
        self.event_list = []
        # Bernoulli stream deciding each infection's outcome.
        self._binom_iter = self._rv_generator(ss.bernoulli(p=self.prob_dead))
        # Schedule one outcome (death or recovery) per initial infection,
        # then the first candidate exposure (RNG draw order preserved).
        for _ in range(self.init_exposed):
            self._add_death_or_recovery()
        self._insert_exposure_event()

    def _EVENT_exposure(self):
        # Susceptible -> infected, with its outcome scheduled immediately.
        self.S -= 1
        self.I += 1
        self._add_death_or_recovery()

    def _EVENT_death(self):
        self.I -= 1
        self.D += 1

    def _add_death_or_recovery(self):
        # One Bernoulli draw picks the branch; one duration draw times it.
        if next(self._binom_iter):
            delay = next(getattr(self, '_death_iter'))
            self._push((self.time + delay, '_EVENT_death'))
        else:
            delay = next(getattr(self, '_recovery_iter'))
            self._push((self.time + delay, '_EVENT_recovery'))
class SR_SIR(EpidemicModel):
    """SIR model with scheduled vaccinations: from time ``begin_vaccine``
    onwards, ``vaccine_rate(t)`` susceptibles per day are moved directly
    to R (both supplied as keyword arguments)."""
    stationary_processes = 'recovery',
    states = 'S', 'I', 'R'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.I = self.init_exposed
        self.R = 0
        self.event_list = list()
        # Push recoveries for inital onto queue
        for i in range(self.init_exposed):
            recovery_time = next( getattr(self, '_recovery_iter') )
            self._push((recovery_time, '_EVENT_recovery'))
        # Push vaccine event into queue
        self._push((self.begin_vaccine, '_EVENT_determine_vaccines'))
        self._insert_exposure_event()

    def _EVENT_determine_vaccines(self):
        # Stop scheduling vaccinations once no infections remain.
        if not self.I >= 1:
            return
        # Spread today's doses evenly over the next day (times strictly
        # after "now"), then reschedule this planning event for tomorrow.
        vaccines_per_day = int(self.vaccine_rate(self.time))
        vaccine_times = np.linspace(0, 1, num=vaccines_per_day + 1,
                                    endpoint=False)
        for time in vaccine_times[1:]:
            self._push(((self.time + time, '_EVENT_vaccine')))
        self._push(((self.time + 1, '_EVENT_determine_vaccines')))

    def _EVENT_vaccine(self):
        # Vaccination: S -> R; a no-op once no susceptibles remain.
        if self.S <= 0:
            return
        self.S -= 1
        self.R += 1
class SEIR(EpidemicModel):
    """SEIR model: exposure is followed by an incubation period (E)
    before the host becomes infectious (I)."""
    stationary_processes = 'incubation', 'recovery'
    states = 'S', 'E', 'I', 'R'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.E = self.init_exposed
        self.I = 0
        self.R = 0
        self.event_list = list()
        # Push incubations for inital onto queue
        for i in range(self.init_exposed):
            incubation_time = next( getattr(self, '_incubation_iter') )
            self._push((incubation_time, '_EVENT_incubation'))
        # No _insert_exposure_event() here: I == 0 initially, so it
        # would return immediately anyway.

    def _EVENT_exposure(self):
        # S -> E, with an incubation period scheduled immediately.
        self.S -= 1
        self.E += 1
        incubation_time = next( getattr(self, '_incubation_iter') )
        self._push( (self.time + incubation_time, '_EVENT_incubation'))

    def _EVENT_incubation(self):
        # E -> I; the now-infectious host gets a recovery time.
        self.E -= 1
        self.I += 1
        recovery_time = next( getattr(self, '_recovery_iter') )
        self._push( (self.time + recovery_time, '_EVENT_recovery' ))

    def _EVENT_recovery(self):
        self.I -= 1
        self.R += 1
class SR_SEIRSD(EpidemicModel):
    """SEIRSD with vaccinations: incubation, death-or-recovery outcomes,
    waning immunity (mutation), and scheduled vaccinations starting at
    ``begin_vaccine`` with daily rate ``vaccine_rate(t)`` (``prob_dead``,
    ``begin_vaccine`` and ``vaccine_rate`` come in as keyword args)."""
    stationary_processes = 'incubation', 'recovery', 'death', 'mutation'
    states = 'S', 'E', 'I', 'R', 'D'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.E = self.init_exposed
        self.I = 0
        self.R = 0
        self.D = 0
        self.event_list = list()
        # Bernoulli stream deciding death vs. recovery per infection.
        self._binom_iter = self._rv_generator(ss.bernoulli(p=self.prob_dead))
        # Push incubations for inital onto queue
        for i in range(self.init_exposed):
            incubation_time = next( getattr(self, '_incubation_iter') )
            self._push((incubation_time, '_EVENT_incubation'))
        # Push vaccine event into queue
        self._push((self.begin_vaccine, '_EVENT_determine_vaccines'))

    def _EVENT_exposure(self):
        # S -> E with an incubation period.
        self.S -= 1
        self.E += 1
        incubation_time = next( getattr(self, '_incubation_iter') )
        self._push( (self.time + incubation_time, '_EVENT_incubation'))

    def _EVENT_incubation(self):
        # E -> I; the outcome (death or recovery) is decided and scheduled.
        self.E -= 1
        self.I += 1
        self._add_death_or_recovery()

    def _EVENT_recovery(self):
        # I -> R; immunity wanes later via the mutation event.
        self.I -= 1
        self.R += 1
        mutation_time = next( getattr(self, '_mutation_iter') )
        self._push((self.time + mutation_time, '_EVENT_mutation'))

    def _EVENT_death(self):
        self.I -= 1
        self.D += 1

    def _EVENT_determine_vaccines(self):
        # Stop scheduling vaccinations once no infections remain.
        if not self.I >= 1:
            return
        # Spread today's doses evenly over the next day, then reschedule
        # this planning event for tomorrow.
        vaccines_per_day = int(self.vaccine_rate(self.time))
        vaccine_times = np.linspace(0, 1, num=vaccines_per_day + 1,
                                    endpoint=False)
        for time in vaccine_times[1:]:
            self._push(((self.time + time, '_EVENT_vaccine')))
        self._push(((self.time + 1, '_EVENT_determine_vaccines')))

    def _EVENT_vaccine(self):
        if self.S <= 0:
            return
        # Vaccinated hosts also lose immunity eventually: a mutation
        # (R -> S) event is scheduled for them as well.
        mutation_time = next( getattr(self, '_mutation_iter') )
        self._push((self.time + mutation_time, '_EVENT_mutation'))
        self.S -= 1
        self.R += 1

    def _EVENT_mutation(self):
        # R -> S: loss of immunity.
        self.R -= 1
        self.S += 1

    def _add_death_or_recovery(self):
        # One Bernoulli draw decides the branch, one duration draw times it.
        does_die = next(self._binom_iter)
        if does_die:
            dead_time = next( getattr(self, '_death_iter') )
            self._push( (self.time + dead_time, '_EVENT_death' ))
        else:
            recovery_time = next( getattr(self, '_recovery_iter') )
            self._push( (self.time + recovery_time, '_EVENT_recovery' ))
class Ebola_SEIRSD(EpidemicModel):
    """SEIRSD variant: tracks cumulative cases (C), lets immunity wane
    after recovery (mutation), and changes beta once at ``beta_change``
    (``prob_dead``, ``beta_change`` and ``new_beta`` arrive as keyword
    arguments)."""
    stationary_processes = 'incubation', 'recovery', 'death', 'mutation'
    states = 'S', 'E', 'I', 'R', 'D', 'C'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.E = self.init_exposed
        self.I = 0
        self.R = 0
        self.D = 0
        # Total cases
        self.C = 0
        self.event_list = list()
        # Bernoulli stream deciding death vs. recovery per infection.
        self._binom_iter = self._rv_generator(ss.bernoulli(p=self.prob_dead))
        # Push incubations for inital onto queue
        for i in range(self.init_exposed):
            incubation_time = next( getattr(self, '_incubation_iter') )
            self._push((incubation_time, '_EVENT_incubation'))
        # Push time of beta change into queue
        self._push((self.beta_change, '_EVENT_beta_change'))

    def _EVENT_exposure(self):
        # S -> E with an incubation period.
        self.S -= 1
        self.E += 1
        incubation_time = next( getattr(self, '_incubation_iter') )
        self._push( (self.time + incubation_time, '_EVENT_incubation'))

    def _EVENT_incubation(self):
        # E -> I; C counts every infection becoming symptomatic.
        self.E -= 1
        self.I += 1
        self.C += 1
        self._add_death_or_recovery()

    def _EVENT_recovery(self):
        # I -> R; immunity wanes later via the mutation event.
        self.I -= 1
        self.R += 1
        mutation_time = next( getattr(self, '_mutation_iter') )
        self._push((self.time + mutation_time, '_EVENT_mutation'))

    def _EVENT_death(self):
        self.I -= 1
        self.D += 1

    def _EVENT_mutation(self):
        # R -> S: loss of immunity.
        self.R -= 1
        self.S += 1

    def _add_death_or_recovery(self):
        # One Bernoulli draw decides the branch, one duration draw times it.
        does_die = next(self._binom_iter)
        if does_die:
            dead_time = next( getattr(self, '_death_iter') )
            self._push( (self.time + dead_time, '_EVENT_death' ))
        else:
            recovery_time = next( getattr(self, '_recovery_iter') )
            self._push( (self.time + recovery_time, '_EVENT_recovery' ))

    def _EVENT_beta_change(self):
        # One-shot intervention: swap in the post-change exposure rate.
        self.beta = self.new_beta
class Covid_SEIRD(EpidemicModel):
    """SEIRD model with a one-off change of the exposure rate ``beta``
    at time ``beta_change`` (``beta_change``, ``new_beta`` and
    ``prob_dead`` are supplied as keyword arguments)."""

    stationary_processes = 'incubation', 'recovery', 'death'
    states = 'S', 'E', 'I', 'R', 'D'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.E = self.init_exposed
        self.I = 0
        self.R = 0
        self.D = 0
        self.event_list = list()
        # Bernoulli stream deciding death vs. recovery per infection.
        self._binom_iter = self._rv_generator(ss.bernoulli(p=self.prob_dead))
        # Push incubations for the initially exposed onto the queue.
        for i in range(self.init_exposed):
            incubation_time = next(getattr(self, '_incubation_iter'))
            self._push((incubation_time, '_EVENT_incubation'))
        # Schedule the one-shot beta change (e.g. a lockdown).
        self._push((self.beta_change, '_EVENT_beta_change'))

    def _EVENT_exposure(self):
        # S -> E, with an incubation period scheduled immediately.
        self.S -= 1
        self.E += 1
        incubation_time = next(getattr(self, '_incubation_iter'))
        self._push((self.time + incubation_time, '_EVENT_incubation'))

    def _EVENT_incubation(self):
        # E -> I; the outcome (death or recovery) is decided and
        # scheduled right away. (Dead commented-out code removed here.)
        self.E -= 1
        self.I += 1
        self._add_death_or_recovery()

    def _EVENT_recovery(self):
        self.I -= 1
        self.R += 1

    def _EVENT_death(self):
        self.I -= 1
        self.D += 1

    def _add_death_or_recovery(self):
        """Draw the infection outcome and push the matching event."""
        does_die = next(self._binom_iter)
        if does_die:
            dead_time = next(getattr(self, '_death_iter'))
            self._push((self.time + dead_time, '_EVENT_death'))
        else:
            recovery_time = next(getattr(self, '_recovery_iter'))
            self._push((self.time + recovery_time, '_EVENT_recovery'))

    def _EVENT_beta_change(self):
        # One-shot intervention: swap in the post-change exposure rate.
        self.beta = self.new_beta
class Plague_SEIRD(EpidemicModel):
    """SEIRD model with a cumulative case counter (C) and a one-off beta
    change at ``beta_change`` (``prob_dead``, ``beta_change`` and
    ``new_beta`` are supplied as keyword arguments). Unlike the SEIRSD
    variants, recovery here is permanent (no mutation process)."""
    stationary_processes = 'incubation', 'recovery', 'death'
    states = 'S', 'E', 'I', 'R', 'D', 'C'

    def _init_model(self):
        self.S = self.population - self.init_exposed
        self.E = self.init_exposed
        self.I = 0
        self.R = 0
        self.D = 0
        # Total cases
        self.C = 0
        self.event_list = list()
        # Bernoulli stream deciding death vs. recovery per infection.
        self._binom_iter = self._rv_generator(ss.bernoulli(p=self.prob_dead))
        # Push incubations for inital onto queue
        for i in range(self.init_exposed):
            incubation_time = next( getattr(self, '_incubation_iter') )
            self._push((incubation_time, '_EVENT_incubation'))
        # Push time of beta change into queue
        self._push((self.beta_change, '_EVENT_beta_change'))

    def _EVENT_exposure(self):
        # S -> E with an incubation period.
        self.S -= 1
        self.E += 1
        incubation_time = next( getattr(self, '_incubation_iter') )
        self._push( (self.time + incubation_time, '_EVENT_incubation'))

    def _EVENT_incubation(self):
        # E -> I; C counts every infection becoming symptomatic.
        self.E -= 1
        self.I += 1
        self.C += 1
        self._add_death_or_recovery()

    def _EVENT_recovery(self):
        self.I -= 1
        self.R += 1

    def _EVENT_death(self):
        self.I -= 1
        self.D += 1

    def _EVENT_beta_change(self):
        # One-shot intervention: swap in the post-change exposure rate.
        self.beta = self.new_beta

    def _add_death_or_recovery(self):
        # One Bernoulli draw decides the branch, one duration draw times it.
        does_die = next(self._binom_iter)
        if does_die:
            dead_time = next( getattr(self, '_death_iter') )
            self._push( (self.time + dead_time, '_EVENT_death' ))
        else:
            recovery_time = next( getattr(self, '_recovery_iter') )
            self._push( (self.time + recovery_time, '_EVENT_recovery' ))
|
import json
import time
from collector.collector import Collector
class TransactionCollector(Collector):
    """Collector that continuously polls an Ethereum node via web3 and
    stores a summary of every transaction into the database."""

    def __init__(self, logger, web3, db):
        super().__init__(logger, web3, db)

    def __get_transaction(self, from_block, to_block):
        """
        Fetch blocks [from_block, to_block) and persist one summary dict
        per transaction (batched insert per block).
        TODO:
            parallel execute for performance or async/await
        """
        self.logger.info('Get block from {0} to {1}'.format(from_block, to_block))
        for idx in range(from_block, to_block, 1):
            block = self.web3.eth.getBlock(idx)
            transactions = block['transactions']
            timestamp = block['timestamp']
            self.logger.info(
                'block {}, parent {}, txn {} count, {} timestamp'.format(
                    idx,
                    block['parentHash'].hex(),
                    len(transactions),
                    timestamp
                )
            )
            txn_list = []
            for t in transactions:
                # One extra RPC round-trip per transaction hash.
                t_info = self.web3.eth.getTransaction(t)
                summary_txn = dict()
                summary_txn['blockNumber'] = t_info['blockNumber']
                summary_txn['hash'] = t_info['hash'].hex()
                summary_txn['from'] = t_info['from']
                summary_txn['to'] = t_info['to']
                summary_txn['gasPrice'] = t_info['gasPrice']
                summary_txn['value'] = t_info['value']
                # Block timestamp is reused for all txns in the block.
                summary_txn['timestamp'] = timestamp
                txn_list.append(summary_txn)
            self.db.insert_txns(txn_list)

    def _validate_last_transaction(self):
        # Placeholder: presumably meant to verify the last stored
        # transaction on resume — currently a no-op.
        pass

    def pull(self):
        """Run forever: poll the chain head every 2 s and collect the
        transactions of each newly appeared block range."""
        self.logger.info('Start to collect all transactions.')
        latest_block = self.db.get_latest_block()
        if latest_block is None:
            # First run: start from the current chain head, not block 0.
            latest_block = self.web3.eth.blockNumber
            self.logger.info('It is a first pull transaction.')
        else:
            # Resume just after the last block already stored.
            latest_block += 1
            self.logger.info('Pulled lastest block: {0}'.format(latest_block))
        self._validate_last_transaction()
        while True:
            new_latest_block = self.web3.eth.blockNumber
            self.logger.info(
                "Ethereum lastest block: {0}".format(new_latest_block)
            )
            if latest_block == new_latest_block:
                # Nothing new yet; poll again in 2 seconds.
                time.sleep(2)
                continue
            self.__get_transaction(
                latest_block,
                new_latest_block,
            )
            latest_block = new_latest_block
            time.sleep(2)
|
# Copyright 2019 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
#
from c7n_kube.query import QueryResourceManager, TypeInfo
from c7n_kube.provider import resources
@resources.register('volume')
class PersistentVolume(QueryResourceManager):
    """Cloud Custodian resource for Kubernetes PersistentVolumes."""
    class resource_type(TypeInfo):
        group = 'Core'
        version = 'V1'
        namespaced = False  # PVs are cluster-scoped objects
        patch = 'patch_persistent_volume'
        delete = 'delete_persistent_volume'
        enum_spec = ('list_persistent_volume', 'items', None)
@resources.register('volume-claim')
class PersistentVolumeClaim(QueryResourceManager):
    """Cloud Custodian resource for Kubernetes PersistentVolumeClaims
    (namespaced, hence the all-namespaces list call)."""
    class resource_type(TypeInfo):
        group = 'Core'
        version = 'V1'
        patch = 'patch_namespaced_persistent_volume_claim'
        delete = 'delete_namespaced_persistent_volume_claim'
        enum_spec = ('list_persistent_volume_claim_for_all_namespaces', 'items', None)
|
import os.path
from . import *
class TestCommand(IntegrationTest):
    """Integration tests for the '08_commands' example: each target is
    built and its output matched against a multiline-anchored regex."""
    def __init__(self, *args, **kwargs):
        super().__init__(os.path.join(examples_dir, '08_commands'), *args,
                         **kwargs)

    def test_hello(self):
        self.assertRegex(self.build('hello'), r'(?m)^\s*hello$')

    def test_world(self):
        self.assertRegex(self.build('world'), r'(?m)^\s*world$')

    def test_script(self):
        self.assertRegex(self.build('script'), r'(?m)^\s*hello, world!$')
        # The script target is also expected to create this output file.
        self.assertExists(output_file('file'))

    def test_alias(self):
        # 'hello-world' aliases both targets, so both lines must appear.
        output = self.build('hello-world')
        self.assertRegex(output, r'(?m)^\s*hello$')
        self.assertRegex(output, r'(?m)^\s*world$')
@skip_if_backend('msbuild')
class TestRunExecutable(IntegrationTest):
    """Integration tests for the 'run_executable' example; skipped on
    the MSBuild backend (see decorator)."""
    def __init__(self, *args, **kwargs):
        super().__init__('run_executable', *args, **kwargs)

    def test_env_run(self):
        # Presumably produced by the default build run in setup — confirm.
        self.assertExists(output_file('file.txt'))

    def test_cxx(self):
        self.assertRegex(self.build('cxx'), r'(?m)^\s*hello from c\+\+!$')

    def test_java(self):
        self.assertRegex(self.build('java'), r'(?m)^\s*hello from java!$')

    def test_java_classlist(self):
        self.assertRegex(self.build('java-classlist'),
                         r'(?m)^\s*hello from java!$')

    def test_python(self):
        self.assertRegex(self.build('python'), r'(?m)^\s*hello from python!$')
|
import sqlite3 as sq3
class SQLCursor():
    """Context manager wrapping an sqlite3 connection and cursor.

    Usage::

        with SQLCursor('db.sqlite3') as c:
            c.execute('SELECT 1')

    The connection is opened on __enter__ and closed on __exit__. If
    opening fails, both handles are left reset to None.
    """

    def __init__(self, filename):
        self.filename = filename
        self.connection = None
        self.cursor = None

    def cleanup(self):
        """Close the connection (if open) and reset both handles."""
        if self.connection is not None:
            self.connection.close()
        self.connection = None
        self.cursor = None

    def open(self):
        """Open the database; on failure leave the object cleanly reset."""
        try:
            self.connection = sq3.connect(self.filename)
            self.cursor = self.connection.cursor()
        except sq3.Error:
            # Fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit and hid unrelated bugs;
            # only sqlite errors are handled here.
            self.cleanup()

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, *args):
        self.cleanup()

    def execute(self, query):
        return self.cursor.execute(query)

    def executemany(self, query, argtab):
        return self.cursor.executemany(query, argtab)

    def getcursor(self):
        return self.cursor

    def getconnection(self):
        return self.connection
|
import importlib
import itertools
from collections import defaultdict
from pathlib import Path
import click
import numpy as np
import pandas as pd
import rich_click
from loguru import logger
from sqlite_utils import Database
from .. import DATA_DIR
from ..utils import determine_file_name
from .etl import generate_etl_commands, generate_update_commands
from .utils import RichClickCommand, RichClickGroup
# Registry for rich-click's grouped help screen; the etl entry is filled
# in with the generated commands at the bottom of this module.
rich_click.core.COMMAND_GROUPS = {"phl-budget-data etl": []}
# Root click group; subcommands (save, etl, update) attach to it below.
@click.group(cls=RichClickGroup)
@click.version_option()
def main() -> None:
    """Main command-line interface for working with City of Philadelphia budget data."""
    pass
@main.command(cls=RichClickCommand)
@click.option("--output", type=click.Path(exists=False), help="Output folder.")
@click.option("--save-sql", is_flag=True, help="Whether to save SQL databases.")
def save(output: click.Path = None, save_sql: bool = False) -> None:
    """Save the processed data products."""
    # NOTE(review): at call time `output` is a str or None — click.Path
    # is the option's converter, not the runtime type of the argument.
    if output is None:
        output = DATA_DIR / "processed"
    else:
        output = Path(output)
    # One subfolder of CSVs per data family.
    for tag in ["spending", "qcmr"]:
        # Output folder
        output_folder = output / tag
        if not output_folder.exists():
            output_folder.mkdir(parents=True)
        # Get the module holding the processed-data loaders for this tag
        mod = importlib.import_module(f"..etl.{tag}.processed", __package__)
        # Loop over each data loader (every attribute named load*)
        for name in dir(mod):
            if name.startswith("load"):
                # The function
                f = getattr(mod, name)
                # Required params: loaders carrying a `model` attribute
                # appear to enumerate allowed values per required field
                # via a pydantic-style schema — confirm in the etl module.
                if hasattr(f, "model"):
                    # Get the params
                    schema = f.model.schema()
                    params = {
                        k: schema["properties"][k]["enum"] for k in schema["required"]
                    }
                    # Do all iterations of params: one CSV per combination
                    for param_values in list(itertools.product(*params.values())):
                        kwargs = dict(zip(schema["required"], param_values))
                        data = f(**kwargs)
                        # The filename encodes the parameter combination
                        filename = determine_file_name(f, **kwargs).name
                        output_file = output_folder / filename
                        logger.info(f"Saving {output_file}")
                        data.to_csv(output_file, index=False)
                else:
                    # Parameterless loader: a single CSV
                    filename = determine_file_name(f).name
                    output_file = output_folder / filename
                    logger.info(f"Saving {output_file}")
                    f().to_csv(output_file, index=False)
    # Save databases too
    if save_sql:
        logger.info("Saving SQL databases")
        # Determine datasets: group every processed CSV by parent folder
        datasets = defaultdict(list)
        for f in list((DATA_DIR / "processed").glob("**/*.csv")):
            key = f.parts[-2]
            datasets[key].append(f)
        # Loop over each database (one .db file per folder key)
        for key in datasets:
            # Create the database
            filename = DATA_DIR / "sql" / (key + ".db")
            db = Database(filename)
            # Add each dataset as a table named after the CSV stem
            for f in datasets[key]:
                data = pd.read_csv(f).replace(np.nan, None).to_dict(orient="records")
                db[f.stem].insert_all(data)
        logger.info("...done")
# Empty group: its subcommands are attached by generate_etl_commands()
# at the bottom of this module.
@main.group(cls=RichClickGroup)
def etl():
    """Run the ETL pipeline for the specified data source (development installation only)."""
    pass
# Empty group: its subcommands are attached by generate_update_commands()
# at the bottom of this module.
@main.group(cls=RichClickGroup)
def update():
    """Parse the City's website to scrape and update City of
    Philadelphia budget data (development installation only)."""
    pass
# Generate the ETL commands and format the CLI help screen
rich_click.core.COMMAND_GROUPS["phl-budget-data etl"] = generate_etl_commands(etl)

# Generate update commands (attached to the `update` group in place)
generate_update_commands(update)
|
class HeroDoesNotExistError(Exception):
    """Raised when the specified hero does not exist."""
|
""" __init__.py."""
from .nbplugins_watch import nbplugins_watch, _watch_n_reload # noqa: F401
__version__ = "0.0.6"
VERSION = __version__.split(".")
|
# =============================================================================
#
# Copyright (c) Kitware, Inc.
# All rights reserved.
# See LICENSE.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the above copyright notice for more information.
#
# =============================================================================
import smtk
from smtk import common, attribute
if __name__ == '__main__':
    # datetime and os are imported but unused below — kept as-is.
    import datetime
    import os
    import sys

    errcode = 0  # 0 = all checks passed; -1 after any failed check

    # SMTK DateTime instance (starts unset)
    smtk_dt = smtk.common.DateTime()
    # SMTK TimeZone instance, pinned to US Eastern Standard Time (UTC-5)
    smtk_zone = smtk.common.TimeZone()
    smtk_zone.setPosixString('EST-5')

    # SMTK DateTimeZonePair
    sp = smtk.common.DateTimeZonePair()
    print("smtk_dt", smtk_dt)
    sp.setDateTime(smtk_dt)
    sp.setTimeZone(smtk_zone)

    # Convert to python datetime - an unset DateTime should yield None
    dt1 = sp.to_python_datetime()
    if not dt1 is None:
        print('Empty DateTimeZonePair should convert to None')
        errcode = -1

    # Set datetime with time zone: 2016-11-16 16:46:22.033 EST
    smtk_dt.setComponents(smtk_zone, 2016, 11, 16, 16, 46, 22, 33)
    sp.setDateTime(smtk_dt)

    # Check python datetime in local (EST) time
    dt2 = sp.to_python_datetime()
    dt_string2 = dt2.strftime('%Y-%m-%d %H:%M:%S .%f')
    expected2 = '2016-11-16 16:46:22 .033000'
    if dt_string2 != expected2:
        print('Wrong local datetime, should be %s not %s' %
              (expected2, dt_string2))
        errcode = -1

    # Check python datetime with utc option (EST + 5 hours)
    dt3 = sp.to_python_datetime(True)
    dt_string3 = dt3.strftime('%Y-%m-%d %H:%M:%S .%f')
    expected3 = '2016-11-16 21:46:22 .033000'
    if dt_string3 != expected3:
        print('Wrong UTC datetime, should be %s not %s' %
              (expected3, dt_string3))
        errcode = -1

    sys.exit(errcode)
|
# Generated by Django 3.2.6 on 2021-08-08 01:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the app_config app; normally
    # regenerated via `makemigrations` rather than edited by hand.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='App',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('value', models.CharField(max_length=200)),
                # PROTECT: an App cannot be deleted while rows reference it
                ('app', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app_config.app')),
            ],
        ),
        migrations.CreateModel(
            name='ConfigRequest',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('name', models.CharField(max_length=200)),
                ('value', models.CharField(max_length=200)),
                ('app', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='app_config.app')),
            ],
        ),
    ]
|
# Client settings
BASE_URL = "https://api.mopinion.com"
TOKEN_PATH = "/token"
LATEST_VERSION = "2.0.0"

# Some settings for dataclasses
# NOTE(review): the lowercase 'l' in '..._lEVELS' looks like a typo, but
# these are public module constants — keep the names until all importers
# are updated in the same change.
VERBOSITY_lEVELS = ["quiet", "normal", "full"]
ITERATE_VERBOSITY_lEVELS = ["normal", "full"]
VERSIONS = ["1.18.14", "2.0.0"]  # API versions the client accepts
CONTENT_NEGOTIATIONS = ["application/json", "application/x-yaml"]
|
"""
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: Exception class that should be used when an issue occurs with the ACS framework
(sequencer, parser ...)
:since: 2013/09/05
:author: ssavrimoutou
"""
from acs.ErrorHandling.AcsBaseException import AcsBaseException
class AcsToolException(AcsBaseException):
    """
    Exception class for I{Acs} framework exceptions (sequencer,
    parser, ...).
    """

    SEQUENCER_ERROR = "Unexpected exception occurred in sequencer"
    """
    Define problems encountered during sequencer execution.
    """

    XML_PARSING_ERROR = "Xml parsing error"
    """
    Define problems encountered during xml file parsing.
    """

    PHONE_OUTPUT_ERROR = "Phone output error"
    """
    The value corresponding to an output phone error.
    """

    HOST_OPERATION_TIMEOUT = "A timeout has occurred"
    """
    A host operation has timeout.
    """

    def __init__(self, generic_error_msg, specific_msg=None):
        """
        Initializes this instance.

        :type generic_error_msg: str
        :param generic_error_msg: this object's generic error message.

        :type specific_msg: str
        :param specific_msg: specific additional error message.
            Optional parameter, defaults to C{None}.
        """
        AcsBaseException.__init__(self, generic_error_msg, specific_msg)
        # All tool errors map to the BLOCKED verdict (_BLOCKED is
        # presumably defined on AcsBaseException — confirm).
        self._error_code = self._BLOCKED
|
from __future__ import print_function
from .generalized import Scraper
class Baidu(Scraper):
    """Scraper class for Baidu web and news search."""

    def __init__(self):
        Scraper.__init__(self)
        self.url = 'https://www.baidu.com/s'
        self.newsURL = 'http://news.baidu.com/ns'
        self.defaultStart = 0
        self.queryKey = 'wd'   # Baidu's query-string parameter
        self.startKey = 'pn'   # Baidu's pagination parameter
        self.name = 'baidu'

    @staticmethod
    def parse_response(soup):
        """ Parse the web-search response and return the result links.
        Returns: urls (list)
            [{'title': ..., 'link': ...}, ...]
        """
        urls = []
        for div in soup.findAll('div', {'class': 'result'}):
            # Robustness fix: skip malformed result blocks lacking the
            # expected <h3><a> structure (the original raised
            # AttributeError on them).
            anchor = div.h3.a if div.h3 is not None else None
            if anchor is None:
                continue
            title = anchor.getText()
            url = anchor['href']
            urls.append({'title': title, 'link': url})
        print('Baidu parsed: ' + str(urls))
        return urls

    @staticmethod
    def parse_news_response(soup):
        """ Parse the news-search response and return the result links.
        Returns: urls (list)
            [{'title': ..., 'link': ...}, ...]
        """
        urls = []
        for h3 in soup.findAll('h3', {'class': 'c-title'}):
            # Same guard as parse_response for headlines without a link.
            anchor = h3.a
            if anchor is None:
                continue
            title = anchor.getText()
            link = anchor.get('href')
            urls.append({'title': title, 'link': link})
        print('Baidu parsed: ' + str(urls))
        return urls
|
"""
pygments.lexers.berry
~~~~~~~~~~~~~~~~~~~~~
Lexer for Berry.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words, default, include, bygroups
from pygments.token import Text, Comment, Whitespace, Operator, Keyword, Name, String, Number, Punctuation
__all__ = ['BerryLexer']
# Matches one line including its trailing newline.
# NOTE(review): unused by BerryLexer below — confirm before removing.
line_re = re.compile('.*?\n')
class BerryLexer(RegexLexer):
    """
    For `berry <http://github.com/berry-lang/berry>`_ source code.

    .. versionadded:: 2.12.0
    """
    name = 'Berry'
    aliases = ['berry', 'be']
    filenames = ['*.be']
    mimetypes = ['text/x-berry', 'application/x-berry']

    # An identifier: a word character sequence not starting with a digit.
    _name = r'\b[^\W\d]\w*'

    tokens = {
        'root': [
            include('whitespace'),
            include('numbers'),
            include('keywords'),
            # Declarations: keyword + whitespace + declared name.
            (rf'(def)(\s+)({_name})', bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (rf'\b(class)(\s+)({_name})', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (rf'\b(import)(\s+)({_name})', bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            include('expr')
        ],
        'expr': [
            (r'[^\S\n]+', Whitespace),
            (r'\.\.|[~!%^&*+=|?:<>/-]', Operator),
            (r'[(){}\[\],.;]', Punctuation),
            include('controls'),
            include('builtins'),
            include('funccall'),
            include('member'),
            include('name'),
            include('strings')
        ],
        'whitespace': [
            (r'\s+', Whitespace),
            (r'#-(.|\n)*?-#', Comment.Multiline),  # block comments: #- ... -#
            (r'#.*?$', Comment.Single)
        ],
        'keywords': [
            (words((
                'as', 'break', 'continue', 'import', 'static', 'self', 'super'),
                suffix=r'\b'), Keyword.Reserved),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(var|def)\b', Keyword.Declaration)
        ],
        'controls': [
            (words((
                'if', 'elif', 'else', 'for', 'while', 'do', 'end', 'break',
                'continue', 'return', 'try', 'except', 'raise'),
                suffix=r'\b'), Keyword)
        ],
        'builtins': [
            (words((
                'assert', 'bool', 'input', 'classname', 'classof', 'number', 'real',
                'bytes', 'compile', 'map', 'list', 'int', 'isinstance', 'print',
                'range', 'str', 'super', 'module', 'size', 'issubclass', 'open',
                'file', 'type', 'call'),
                suffix=r'\b'), Name.Builtin)
        ],
        'numbers': [
            (r'0[xX][a-fA-F0-9]+', Number.Hex),
            (r'-?\d+', Number.Integer),
            (r'(-?\d+\.?|\.\d)\d*([eE][+-]?\d+)?', Number.Float)
        ],
        'name': [
            (_name, Name)
        ],
        # Identifier directly followed by '(' is a call.
        'funccall': [
            (rf'{_name}(?=\s*\()', Name.Function, '#pop')
        ],
        # Identifier preceded by '.' and not followed by '(' is an attribute.
        'member': [
            (rf'(?<=\.){_name}\b(?!\()', Name.Attribute, '#pop')
        ],
        'strings': [
            (r'"([^\\]|\\.)*?"', String.Double, '#pop'),
            (r'\'([^\\]|\\.)*?\'', String.Single, '#pop')
        ]
    }
|
__author__ = 'Nicholas C Pandolfi'
import os
import sys
from multiprocessing import Process
from .lrtools import addspace, removespace, checkdate
from itertools import islice
BUFFERSIZE = 1024 ** 2
class NotCompiledError(Exception):
    """Raised when a '.lrdict' index file is loaded before it has been built."""
def construct(filename):
    """Build a '.lrdict' line-offset index next to *filename*.

    Writes one fixed-width character offset per source line (plus a leading
    '0' record) so a line can later be looked up with a single seek.

    Returns:
        (index filename, width of one index record including its newline)
    """
    dirname = os.path.splitext(filename)[0] + '.lrdict'
    # Pad every offset to the width of the largest possible offset so records
    # are fixed-size and therefore seekable.
    maxlength = len(str(os.path.getsize(filename)))
    chars = 0
    # 'with' guarantees both handles are closed (the original leaked the
    # source handle, and the index handle too on any error).
    with open(dirname, 'w+') as dirfile, open(filename) as source:
        dirfile.write(addspace('0', maxlength) + '\n')
        for line in source:
            # NOTE(review): len(line) already includes the '\n'; the extra +1
            # looks aimed at '\r\n' sources — confirm against lrtools readers.
            chars += len(line) + 1
            dirfile.write(addspace(str(int(chars)), maxlength) + '\n')
    return dirname, int(maxlength + 1)
def visconstruct(filename, increment=0):
    """Like construct(), but prints progress percentages while indexing.

    increment: decimal places used when rounding the printed percentage
        (0 -> whole percents).

    Returns:
        (index filename, width of one index record including its newline)
    """
    percent = 0.0
    size = os.path.getsize(filename)
    dirname = os.path.splitext(filename)[0] + '.lrdict'
    # Reuse the size fetched above instead of calling getsize() twice.
    maxlength = len(str(size))
    chars = 0
    # 'with' guarantees both handles are closed (the original leaked them).
    with open(dirname, 'w+') as dirfile, open(filename) as source:
        dirfile.write(addspace('0', maxlength) + '\n')
        for line in source:
            current = round(chars / size * 100, increment)
            if percent != current:
                percent = current
                print('status: {}%'.format(current))
            chars += len(line) + 1
            dirfile.write(addspace(str(int(chars)), maxlength) + '\n')
    return dirname, int(maxlength + 1)
def load(dirname, maxlength):
    """Open a '.lrdict' index and return a record-lookup closure.

    The returned callable maps a 1-based record number to the stored offset
    string; the index handle stays open for the lifetime of the closure.
    Raises NotCompiledError if the index does not exist yet.
    """
    try:
        index = open(dirname)
    except FileNotFoundError:
        raise NotCompiledError(
            "file '{}' does not exist, original file may need to be compiled first".format(dirname))

    def ref_function(number):
        # Records are fixed width: maxlength characters each (newline included).
        index.seek((number - 1) * maxlength + (number - 1))
        return removespace(index.readline()[:-1])

    return ref_function
def build(filename):
    """Index *filename* and immediately return its line-offset lookup function."""
    dirname, width = construct(filename)
    return load(dirname, width)
def cnlcount(filename, buffer=BUFFERSIZE):
    """Count characters and newlines in *filename* by streaming fixed chunks.

    Returns a dict with total characters ('chars'), newline count ('lines'),
    and mean characters per line ('cpl'). 'cpl' is 0.0 for a file with no
    newlines instead of raising ZeroDivisionError as the original did.
    """
    newlines = 0
    chars = 0
    # 'with' closes the handle even if read() raises (original leaked it).
    with open(filename) as source:
        while True:
            content = source.read(buffer)
            if not content:
                break
            newlines += content.count('\n')
            chars += len(content)
    cpl = chars / newlines if newlines else 0.0
    return {'chars': chars, 'lines': newlines, 'cpl': cpl}
def precompile(filename, newprocess=True):
    """Build the index for *filename*, in a background process when requested."""
    if not newprocess:
        construct(filename)
        return
    Process(target=construct, args=(filename,)).start()
def stackcompile(filelist, pool=True):
    """Index every file in *filelist*; 'pool' selects background processes."""
    # 'pool' maps directly onto precompile's 'newprocess' flag.
    for filename in filelist:
        precompile(filename, newprocess=pool)
def dircompile(directory, supported=None, notcompile=None, pool=True, treecompile=False):
    """Index every file in *directory* whose extension appears in *supported*.

    supported: list of file extensions (with the leading dot) to index.
    notcompile: names/paths to skip.
    pool: when True, index files in background processes (see stackcompile).
    treecompile: when True, walk the whole tree instead of one directory.
    """
    # None defaults instead of mutable [] defaults shared between calls.
    supported = [] if supported is None else supported
    notcompile = [] if notcompile is None else notcompile
    if not isinstance(supported, list):
        raise ValueError("'supported' parameter must consist of a list of file extensions")
    tocompile = []
    if treecompile:
        # os.walk already yields paths rooted at *directory*; joining the
        # directory on again (as the original did) produced 'dir/dir/...'
        # paths that never exist on disk.
        candidates = ((os.path.join(dirname, eachfile),) * 2
                      for dirname, _, eachfiles in os.walk(directory)
                      for eachfile in eachfiles)
    else:
        candidates = ((name, os.path.join(directory, name))
                      for name in os.listdir(directory))
    for filename, fullpath in candidates:
        extension = os.path.splitext(filename)[1]
        if (extension in supported) and (filename not in notcompile):
            tocompile.append(fullpath)
    stackcompile(tocompile, pool)
cache = {}  # Global line cache (copen path): maps filename -> tuple of its lines.


def cachebuild(filename, overwrite=False):
    """Ensure *filename*'s lines are in the global cache; return (count, lines).

    overwrite: when True, re-read the file even if it is already cached.
    """
    # Single condition replaces the original nested if/else pair; behavior is
    # identical: (re)read when missing or explicitly asked to.
    if overwrite or filename not in cache:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(filename) as source:
            cache[filename] = tuple(source.readlines())
    lines = cache[filename]
    return len(lines), lines
def getline(filename, line, reload=False):
    """Return 1-based line *line* of *filename* via the global line cache."""
    _, lines = cachebuild(filename, overwrite=reload)
    return lines[line - 1]
def getonce(filename, linenumber, useislice=True):
    """Return 1-based line *linenumber* of *filename* without caching it.

    useislice: when True use itertools.islice (fast path, raises StopIteration
        past EOF); otherwise scan with enumerate and return None past EOF.
    """
    # 'with' closes the handle on every path; the original leaked it in the
    # enumerate path and relied on GC in the islice path.
    with open(filename) as source:
        if useislice:
            return next(islice(source, linenumber - 1, linenumber))
        for offset, line in enumerate(source, start=1):
            if offset == linenumber:
                return line
    return None
def clearcache(dontclear=()):
    """Drop every entry from the global cache except those in *dontclear*.

    The default is an immutable tuple instead of the original mutable [].
    """
    global cache
    # Collect first, then delete: never mutate a dict while iterating it.
    doomed = [entry for entry in cache if entry not in dontclear]
    for entry in doomed:
        del cache[entry]
|
from experimaestro import param, config, option
from onir import datasets
# qlen/dlen/count become config attributes on the class via experimaestro's
# @param declarations (read below as self.qlen / self.dlen / self.count).
@param('qlen', default=5)
@param('dlen', default=500)
@param('count', default=10000)
@config()
class RandomDataset(datasets.Dataset):
    """
    Dataset producing random samples, used for controlled tests in scripts/perf_benchmark.py
    """

    def __init__(self, config, logger, vocab, random):
        # 'random' is stored for build_record; presumably a numpy RandomState
        # (rand/randint API used below) — TODO confirm with callers.
        super().__init__(config, logger, vocab)
        self.random = random

    def run(self, fmt="dict"):
        # Synthetic dataset: there is no underlying run file.
        return None

    def qrels(self, fmt="dict"):
        # Synthetic dataset: there are no relevance judgments.
        return None

    def all_doc_ids(self):
        # No persistent documents exist.
        return []

    def all_query_ids(self):
        # No persistent queries exist.
        return []

    def build_record(self, fields, **initial_values):
        # Build one random record containing the requested fields; query_*
        # fields get self.qlen tokens, doc_* fields self.dlen, others length 1.
        result = dict(initial_values)
        for field in sorted(fields):
            l = 1
            if field.startswith('query_'):
                l = self.qlen
            elif field.startswith('doc_'):
                l = self.dlen
            if field in ('runscore', 'relscore'):
                result[field] = self.random.rand()
            elif field.endswith('_len'):
                result[field] = l
            elif field.endswith('_tok'):
                # Token ids drawn uniformly over the vocabulary.
                result[field] = list(self.random.randint(1, self.vocab.lexicon_size(), size=l))
            elif field.endswith('_idf') or field.endswith('_score'):
                result[field] = list(self.random.rand(l))
            else:
                raise ValueError(f'unsupported field: {field}')
        return result

    def record_iter(self,
                    fields: set,
                    source: str,  # one of ['run', 'qrels']
                    minrel: None,  # integer indicating the minimum relevance score, or None for unfiltered
                    shuf: bool = True,
                    random=None,
                    inf: bool = False):
        # Yields self.count random records per pass; loops forever when inf=True.
        self.logger.warn(f'source={source} minrel={minrel} and shuf={shuf} do not apply to RandomDataset')
        first = True
        while first or inf:
            first = False
            for _ in range(self.count):
                yield self.build_record(fields)
|
from tournament_of_lulz.modules.top_images.model_top_images import ModelTopImages
from tournament_of_lulz.modules.top_images.view_top_images import ViewTopImages
def get(db_connection, data):
    """Render the top-images view for a paginated request.

    *data* may carry 'start' (offset, default 0) and 'limit' (page size,
    default 10, hard-capped at 32).
    """
    start = int(data.get('start', 0))
    limit = min(int(data.get('limit', 10)), 32)
    model = ModelTopImages(db_connection)
    model.load_top_images(start, limit)
    return ViewTopImages(model).render()
|
class BaseItem(object):
    """Minimal base class for items; exists only to anchor the hierarchy."""

    def __init__(self):
        # Cooperative initialization for multiple-inheritance subclasses.
        super().__init__()
|
# encoding: utf-8
__author__ = "Dimitrios Karkalousos"
import argparse
import gc
import pathlib
import sys
import time
from collections import defaultdict
from typing import Any, Dict, Tuple, Union
import numpy as np
import torch
from torch.utils.data import DataLoader
from mridc import save_reconstructions, rss
from mridc.data.mri_data import SliceDataset
from mridc.data.subsample import create_mask_for_mask_type
from mridc.data.transforms import center_crop_to_smallest, UnetDataTransform
from mridc.nn.e2evn import NormUnet
from scripts.train_cirim import build_optim
torch.backends.cudnn.benchmark = False
def load_model(
    checkpoint_file: str, device: str
) -> Tuple[Any, Union[torch.nn.DataParallel, NormUnet], torch.optim.Optimizer]:
    """
    Loads the model from the checkpoint file.

    Args:
        checkpoint_file: Path to the checkpoint file.
        device: cuda or cpu

    Returns:
        Checkpoint, UNet model, optimizer.
    """
    checkpoint = torch.load(checkpoint_file, map_location=device)
    # The training-time argparse namespace is stored inside the checkpoint.
    train_args = checkpoint["args"]
    model = NormUnet(
        in_chans=train_args.in_chans,  # number of channels in input image
        out_chans=train_args.out_chans,  # number of channels in output image
        chans=train_args.chans,  # number of channels in intermediate layers
        num_pools=train_args.num_pools,  # number of pooling operations in the encoder/decoder
        drop_prob=train_args.drop_prob,  # dropout probability
        padding_size=train_args.padding_size,  # padding size
        normalize=train_args.normalize,  # normalize the input image
    ).to(device)
    if train_args.data_parallel:
        model = torch.nn.DataParallel(model)  # type: ignore
    model.load_state_dict(checkpoint["model"])
    optimizer = build_optim(train_args, model.parameters())  # type: ignore
    optimizer.load_state_dict(checkpoint["optimizer"])
    return checkpoint, model, optimizer
def run_unet(
    model: NormUnet, data_loader: DataLoader, output_type: str, device: str, progress_bar_refresh: int
) -> Dict[str, np.ndarray]:
    """
    Runs the model on the data loader and returns the reconstructions.

    Args:
        model: Normalized Unet
        data_loader: torch.utils.data.DataLoader
        output_type: SENSE or RSS (case-insensitive)
        device: cuda or cpu
        progress_bar_refresh: Refresh rate of the progress bar.

    Returns:
        Dictionary with the reconstructions, keyed by filename.
    """
    model.eval()
    model.to(device)
    # Create a dictionary to store the results
    output = defaultdict(list)
    # Bug fix: the CLI declares choices ("SENSE", "RSS"), but the original
    # comparison only accepted lowercase "rss", so --output_type RSS always
    # hit the ValueError branch. Normalize once up front.
    output_type = output_type.upper()
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            image, target, fname, slice_num, _, _, _ = data
            # Move the data to the correct device
            image = image.to(device)
            target = target.to(device)
            # Run the model
            estimate = model.forward(image.unsqueeze(1)).squeeze(1)
            if output_type == "SENSE":
                estimate = torch.view_as_complex(estimate)
            elif output_type == "RSS":
                estimate = rss(estimate, dim=1)
            else:
                raise ValueError(f"Unknown output_type: {output_type}")
            target, estimate = center_crop_to_smallest(target, estimate)
            output[fname[0]].append((slice_num, estimate.cpu()))
        gc.collect()
        torch.cuda.empty_cache()
        # update the progress bar
        if i % progress_bar_refresh == 0:
            sys.stdout.write("\r[{:5.2f}%]".format(100 * (i + 1) / len(data_loader)))
            sys.stdout.flush()
    sys.stdout.write("\n")
    sys.stdout.flush()
    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)]) for fname, slice_preds in output.items()
    }
    return reconstructions
def main(args):
    """
    Main function.

    Args:
        args: Arguments from the command line.

    Returns:
        None
    """
    data_loader = DataLoader(
        dataset=SliceDataset(
            root=args.data_path,
            sense_root=args.sense_path,
            challenge=args.challenge,
            transform=UnetDataTransform(
                # --no_mask passes mask_func=False so k-space is left as-is
                # (prospectively undersampled data); otherwise build a mask.
                mask_func=False
                if args.no_mask
                else create_mask_for_mask_type(args.mask_type, args.center_fractions, args.accelerations),
                shift_mask=args.shift_mask,
                normalize_inputs=args.normalize_inputs,
                crop_size=args.crop_size,
                crop_before_masking=args.crop_before_masking,
                kspace_zero_filling_size=args.kspace_zero_filling_size,
                fft_type=args.fft_type,
                output_type=args.output_type,
                use_seed=False,
            ),
            sample_rate=args.sample_rate,
            mask_root=args.mask_path,
        ),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=False,
    )
    # load the model (checkpoint and optimizer are discarded at inference time)
    _, model, _ = load_model(args.checkpoint, args.device)
    init_start = time.perf_counter()
    # TODO: change print to logger
    print("Reconstructing...")
    reconstructions = run_unet(model, data_loader, args.output_type, args.device, args.progress_bar_refresh)
    print("Saving...")
    save_reconstructions(reconstructions, args.out_dir)
    print("Finished! It took", time.perf_counter() - init_start, "s \n")
def create_arg_parser():
    """
    Creates an ArgumentParser to read the arguments.

    Returns:
        argparse.ArgumentParser: An ArgumentParser object.
    """
    parser = argparse.ArgumentParser(description="UNET")
    add = parser.add_argument  # local alias: every option below goes through it

    # Required positional paths.
    add("data_path", type=pathlib.Path, help="Path to the data folder")
    add("checkpoint", type=pathlib.Path, help="Path to the checkpoint file")
    add("out_dir", type=pathlib.Path, help="Path to the output folder")

    # Optional data locations.
    add("--sense_path", type=pathlib.Path, help="Path to the sense folder")
    add("--mask_path", type=pathlib.Path, help="Path to the mask folder")
    add(
        "--data-split",
        choices=["val", "test", "test_v2", "challenge"],
        help='Which data partition to run on: "val" or "test"',
    )
    add(
        "--challenge",
        type=str,
        choices=["singlecoil", "multicoil"],
        default="multicoil",
        help="Which challenge to run",
    )
    add("--sample_rate", type=float, default=1.0, help="Sample rate for the data")
    add("--batch_size", type=int, default=1, help="Batch size for the data loader")

    # Masking configuration.
    add(
        "--no_mask",
        action="store_true",
        help="Toggle to turn off masking. This can be used for prospectively undersampled data.",
    )
    add(
        "--mask_type",
        choices=("random", "gaussian2d", "equispaced"),
        default="gaussian2d",
        type=str,
        help="Type of k-space mask",
    )
    add("--accelerations", nargs="+", default=[10, 10], type=int, help="Acceleration rates to use for masks")
    add("--center_fractions", nargs="+", default=[0.7, 0.7], type=float, help="Number of center lines to use in mask")
    add("--shift_mask", action="store_true", help="Shift the mask")

    # Transform / output configuration.
    add("--normalize_inputs", action="store_true", help="Normalize the inputs")
    add("--crop_size", nargs="+", help="Size of the crop to apply to the input")
    add("--crop_before_masking", action="store_true", help="Crop before masking")
    add("--kspace_zero_filling_size", nargs="+", help="Size of zero-filling in kspace")
    add("--output_type", choices=("SENSE", "RSS"), default="SENSE", type=str, help="Type of output to save")
    add("--fft_type", type=str, default="orthogonal", help="Type of FFT to use")

    # Runtime configuration.
    add("--progress_bar_refresh", type=int, default=10, help="Progress bar refresh rate")
    add("--num_workers", type=int, default=4, help="Number of workers for the data loader")
    add("--data_parallel", action="store_true", help="If set, use multiple GPUs using data parallelism")
    add("--device", type=str, default="cuda", help="Which device to run on")
    return parser
if __name__ == "__main__":
main(create_arg_parser().parse_args(sys.argv[1:]))
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def override_formerly_core_masked_filter(*args, **kwargs):
    """Replacement filter: ignores all input and returns a fixed marker string."""
    return 'hello from overridden formerly_core_masked_filter'
class FilterModule(object):
    """Ansible filter plugin that exposes the overriding filter."""

    def filters(self):
        """Map filter names to their callables (Ansible plugin contract)."""
        return {'formerly_core_masked_filter': override_formerly_core_masked_filter}
|
import numpy as np
import os
import glob
import pandas as pd
import cv2
from xml.etree import ElementTree
import random
import shutil
import glob
from PIL import Image
for dirname in os.listdir("tmp"):
print(dirname)
for fpath in glob.glob(os.path.join("fullsizeimg",dirname,"original","*")):
img=Image.open(fpath)
img_resize = img.resize((300, 300),Image.LANCZOS)
img_resize.save(os.path.join("300_300img",dirname+"_"+os.path.basename(fpath))) |
# coding: utf-8
import asyncio
import itchat
from .config import (LOG_TEMPLATE_START_SEND, LOG_TEMPLATE_SEND_FAILED,
LOG_TEMPLATE_UPLOAD_FAILED, LOG_TEMPLATE_DOWNLOAD_FAILED)
from .logger import logger
from .chatroom import chatroom
from .cralwer import get_sticker_urls
from .util import get_file
async def send_image_by_url(url):
    """Download the image at *url*, upload it to WeChat, and post it.

    Each stage logs its own failure and aborts the pipeline early.
    """
    logger.verbose(LOG_TEMPLATE_START_SEND.format(url))
    try:
        payload = await get_file(url)
    except Exception as exc:
        logger.error(LOG_TEMPLATE_DOWNLOAD_FAILED.format(url))
        logger.error(exc)
        return
    try:
        upload = itchat.upload_file(fileDir='tmp.gif', isPicture=False, file_=payload)
    except Exception as exc:
        logger.error(LOG_TEMPLATE_UPLOAD_FAILED.format(url))
        logger.error(exc)
        return
    try:
        chatroom.send_image(fileDir='tmp.gif', mediaId=upload['MediaId'])
    except Exception as exc:
        logger.error(LOG_TEMPLATE_SEND_FAILED.format(url))
        logger.error(exc)
async def send_image_by_urls(urls):
    """Send every image in *urls* concurrently.

    Uses asyncio.gather instead of asyncio.wait: passing bare coroutines to
    wait() was deprecated in Python 3.8 and removed in 3.11, and wait() also
    raises ValueError on an empty list, while gather() is a harmless no-op.
    """
    await asyncio.gather(*(send_image_by_url(url) for url in urls))
async def send_stickers_by_query(query):
    """Look up sticker URLs matching *query* and post them all."""
    urls = await get_sticker_urls(query)
    await send_image_by_urls(urls)
async def send_animated_stickers_by_query(query):
    """Look up animated (gif) sticker URLs matching *query* and post them all."""
    urls = await get_sticker_urls(query, filetype='gif')
    await send_image_by_urls(urls)
|
import airflow.utils.dates
from airflow import DAG
from airflow.sensors.filesystem import FileSensor
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash import BashOperator
# Configuration for the monitored camera share.
source_name = 'camera1'
source_path = 'camera'
file_pattern = '*.jpeg'
destentation_path = 'output'  # NOTE(review): typo for "destination"; kept to avoid churn.

dag = DAG(
    dag_id=f'{source_name}_collector_v3',
    description="Monitor NFS share for newly saved images ...",
    start_date=airflow.utils.dates.days_ago(1),
    schedule_interval="@hourly",
    default_args={"depends_on_past": True},
)

# Bug fix: these operators are not created inside a `with dag:` block, so
# without an explicit dag= they were never attached to the DAG.
start_task = DummyOperator(task_id='start', dag=dag)
stop_task = DummyOperator(task_id='stop', dag=dag)

wait_for_new_files = FileSensor(
    task_id='wait_for_new_file',
    # Bug fix: the path string was missing its f-prefix, so the sensor
    # watched the literal path '/opt/share/{source_path}/{file_pattern}'.
    filepath=f'/opt/share/{source_path}/{file_pattern}',
    poke_interval=30,
    dag=dag
)

copy_new_files = BashOperator(
    task_id='copy_new_files',
    bash_command=(
        # Bug fix: {{{{ds}}}} renders as {{ds}} so Airflow's Jinja templating
        # substitutes the execution date; the original f'{{ds}}' produced a
        # literal '{ds}' directory name. A space after '&&' keeps the command
        # readable when the fragments are concatenated.
        f'mkdir -p /opt/share/{destentation_path}/{source_name}/{{{{ds}}}} && '
        f'cp -rf /opt/share/{source_path}/{file_pattern} /opt/share/{destentation_path}/{source_name}/{{{{ds}}}} && '
        f'rm /opt/share/{source_path}/{file_pattern}'
    ),
    dag=dag
)

# Linear pipeline: wait for files, copy them away, then finish.
start_task >> wait_for_new_files >> copy_new_files >> stop_task
|
from visual import *
# Two-body gravitational simulation rendered with VPython ("visual").
giant = sphere()
giant.pos = vector(-1e11,0,0)
giant.radius = 2e10
giant.color = color.red
giant.mass = 2e30
# Initial momentum; the dwarf gets the exact opposite so total momentum is zero.
giant.p = vector(0, 0, -1e4) * giant.mass

dwarf = sphere()
dwarf.pos = vector(1.5e11,0,0)
dwarf.radius = 1e10
dwarf.color = color.yellow
dwarf.mass = 1e30
dwarf.p = -giant.p

# Attach a trail curve to each body so the orbits stay visible.
for a in [giant, dwarf]:
    a.orbit = curve(color=a.color, radius = 2e9)

dt = 86400  # time step: one day, in seconds

# Render loop (runs forever), capped at 100 iterations per second.
while 1:
    rate(100)
    dist = dwarf.pos - giant.pos
    # Newtonian gravity (6.7e-11 is G); the vector points from giant to dwarf,
    # so adding it to giant.p and subtracting from dwarf.p is attractive.
    force = 6.7e-11 * giant.mass * dwarf.mass * dist / mag(dist)**3
    giant.p = giant.p + force*dt
    dwarf.p = dwarf.p - force*dt
    for a in [giant, dwarf]:
        a.pos = a.pos + a.p/a.mass * dt
        a.orbit.append(pos=a.pos)
|
#!/usr/bin/env python3
# ------------------------------------------------------------------------
'''
This is a sample project that is added to the site. The idea here is
that nothing in this project can syntax error (down) the site, as it is
imported under a try: except clause, and it is calling only the
URL registration functions
'''
# ------------------------------------------------------------------------
import os, sys, random, datetime, time
# Add URL handlers; too simple, but it communicates the idea
def got_aa(config, url, query, req, templ):
    """URL handler for /aa: echo the url and query string for demonstration."""
    return "AA file " + url + " " + str(query) + " "
def got_bb(config, url, query, req, templ):
    """URL handler for /bb: echo the url and query string for demonstration."""
    return "bb file " + url + " " + str(query) + " "
# ------------------------------------------------------------------------
# Add Mock calendar
def mock_cal_func(strx, context):
    '''
    Mock calendar. Does nothing but presents a calendar looking user interface
    '''
    from calendar import monthrange
    try:
        # Static calendar chrome: prev/next arrows and a title bar.
        content = '''<table width=100% border=1>
<tr><td colspan=7>
<table width=100% border=0 bgcolor=#cccccc>
<tr>
<td align=center colspan=1>
<a href=?forw=1><<</a>
<td align=center colspan=5>
<b>APP THREE
<td align=center colspan=1>
<a href=?back=1>>></a>
<tr bgcolor=#cccccc>
<td align=center colspan=7>
(mock Calendar)
</table>
'''
        dt2 = datetime.datetime.now()
        # dt = first day of the current month; its weekday tells how many
        # leading cells of the grid stay blank.
        dt = datetime.datetime(dt2.year, dt2.month, 1)
        #print("dt", dt, "wd:", dt.weekday())
        mon = dt.weekday()
        # Number of days in the current month.
        rrr = monthrange(dt2.year, dt2.month)[1]
        #print("mock_cal_func() wd:", dt.weekday(), "rrr", rrr)
        content += "<tr><td colspan=7>"
        cnt = 0; cnt2 = 0;
        wday = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        content += "<tr>"
        for cc in range(7):
            content += "<td> <font size=-1>" + wday[cc]
        # 5 rows x 7 columns of day cells; cnt counts grid cells, cnt2 days.
        for aa in range(5):
            content += "<tr>"
            colx = "#ffffff"
            for bb in range(7):
                # NOTE(review): 'dt.month == dt.month' is a tautology; dt2.month
                # was probably intended (same value here, since dt is built from
                # dt2). Also colx stays highlighted for the rest of the row.
                if dt.year == dt2.year and dt.month == dt.month and dt2.day == cnt:
                    #print("today", dt2.day)
                    colx = "#dddddd"
                #aa*7 + bb + 1
                cnt += 1
                if cnt > rrr:
                    content += "<td> <font size=-1>"
                else:
                    if cnt > mon:
                        # Day cell with a ?cal-YYYY-M-D link; randomly mark some
                        # days "active" with an asterisk for demo purposes.
                        content += "<td bgcolor=" + colx + "> <font size=-1>" + "<a href=?cal-" + \
                            str(dt.year) + "-" + str(dt.month) + "-" + str(cnt) + \
                            ">" + str(cnt2+1) + "</a>"
                        if random.randint(0, 255) % 4 == 0:
                            content += "*"
                        else:
                            content += " "
                    else:
                        content += "<td bgcolor=" + colx + "> <font size=-1>" + " "
                cnt2 += 1
        content += "</table>Active marked by asterisk (*)"
    except:
        # NOTE(review): bare except hides real errors, and 'content' may be
        # unbound below if the failure happened before its first assignment —
        # consider narrowing to specific exceptions.
        print("Exception on two", sys.exc_info())
    return content
# ------------------------------------------------------------------------
# Add all the functions for the urls; this function is called
# When the url is accessed
sys.path.append("../")
from wsgi_global import add_one_url
add_one_url("/aa", got_aa)
add_one_url("/bb", got_bb)
# ------------------------------------------------------------------------
# Add all the functions and the macro names here
# Simply refer to the macro in the html temple, and it will get called
# and the output substituted
from wsgi_global import add_one_func
add_one_func("app3", mock_cal_func)
# EOF |
import socket
import struct
import time
import threading
import select
# sysfs device exposed by the kernel firewall module:
# /sys/class/<class_name>/<manip_device_name>/<manip_attr_name>
class_name = 'fw'
manip_device_name = 'manipulations'
manip_attr_name = 'manipulations'
manip_inst_size = 14  # bytes per awaiting-connection record: struct '!IIHHH' = 4+4+2+2+2
ftp_manipulation_port = 210  # local port this FTP proxy listens on
NO_MANIPULATION_PORT = 0  # placeholder when no proxy port applies
MANIPULATION_CMD_INST = 0  # kernel command: register a proxied control connection
MANIPULATION_CMD_FTP_DATA =1  # kernel command: expect an FTP data connection
# Helper method to turn an integer into an IPv4 address strings and vice versa
# Taken from: https://stackoverflow.com/questions/5619685/conversion-from-ip-string-to-integer-and-backward-in-python
def ip2int(addr):
    """Convert a dotted-quad IPv4 string to its big-endian integer value."""
    packed = socket.inet_aton(addr)
    return struct.unpack("!I", packed)[0]
def int2ip(addr):
    """Convert a big-endian integer back to a dotted-quad IPv4 string."""
    packed = struct.pack("!I", addr)
    return socket.inet_ntoa(packed)
# Read from the kernel's device which indicated the next
# awaiting connections info (IPs, Ports)
def get_awaiting_connection():
    """Read the next awaiting connection's endpoints from the kernel device.

    Returns:
        {'client': {'ip', 'port'}, 'server': {'ip', 'port'}} with the IPs as
        dotted-quad strings.
    """
    path = '/sys/class/{0}/{1}/{2}'.format(class_name, manip_device_name, manip_attr_name)
    # Bug fix: open in binary mode — struct.unpack needs bytes, and the
    # original text-mode read returned str (TypeError on Python 3).
    # 'with' also guarantees the handle is closed on error.
    with open(path, 'rb') as manip_dev:
        inst_raw = manip_dev.read(manip_inst_size)
    client_ip, server_ip, client_port, server_port, _ = struct.unpack('!IIHHH', inst_raw)
    return {'client': {'ip': int2ip(client_ip), 'port': client_port},
            'server': {'ip': int2ip(server_ip), 'port': server_port}}
# Inform the kernel module of the 5-tuple of a manipulated connection
def send_kernel_manipulation_command(cmd_type, manip_port, client_ip, client_port, server_ip, server_port):
    """Inform the kernel module of the 5-tuple of a manipulated connection.

    cmd_type: MANIPULATION_CMD_INST or MANIPULATION_CMD_FTP_DATA.
    IPs are passed as integers (see ip2int).
    """
    # Magic-number header expected by the kernel module.
    buf = b'\x56\x78'
    # Append command type, IPs and ports in network byte order.
    buf += struct.pack('!BIIHHH', cmd_type, client_ip, server_ip, client_port, server_port, manip_port)
    path = '/sys/class/{0}/{1}/{2}'.format(class_name, manip_device_name, manip_attr_name)
    # Bug fix: open in binary mode — buf is bytes, and writing bytes to a
    # text-mode handle raises TypeError on Python 3. 'with' closes on error.
    with open(path, 'wb') as manip_dev:
        manip_dev.write(buf)
class Single_user_handler(threading.Thread):
    """Per-client FTP control-channel proxy.

    Relays traffic between one client and the real server, watching for PORT
    commands so the kernel can be told to expect the resulting data connection.
    """

    def __init__(self, client, manipulation_inst):
        super(Single_user_handler, self).__init__()
        self.client = client
        self.inst = manipulation_inst
        # Prepare socket for the server connection.
        self.server = socket.socket()
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind before connecting so the source port is known in advance; the
        # kernel must be informed before the first (SYN) packet so it does not
        # drop our connection.
        self.server.bind(('0.0.0.0', 0))
        manip_port = self.server.getsockname()[1]
        # Write the proxied 5-tuple to the kernel.
        self.client_info = manipulation_inst['client']
        self.server_info = manipulation_inst['server']
        client_ip = self.client_info['ip']
        client_port = self.client_info['port']
        server_ip = self.server_info['ip']
        server_port = self.server_info['port']
        client_ip = ip2int(client_ip)
        server_ip = ip2int(server_ip)
        send_kernel_manipulation_command(MANIPULATION_CMD_INST, manip_port,
                                         client_ip, client_port,
                                         server_ip, server_port)
        # Actually connect to the server; may raise if refused / timed out.
        self.server.connect((self.server_info['ip'], server_port))

    def run(self):
        client_buf = b''
        while 1:
            # Check both sockets; whichever is readable gets processed.
            print('selecting')
            ready = select.select([self.client, self.server], [], [], 100)
            print('selected')
            if self.client in ready[0]:
                print('Client is Ready ->> Reading!')
                temp = self.client.recv(256)
                if temp == b'':
                    print('Client disconnected!')
                    break
                client_buf += temp
                # FTP commands are CRLF-terminated; wait for a complete one.
                # Bug fix: recv() yields bytes, so the delimiter and command
                # comparisons must use bytes literals — the original str-in-bytes
                # tests ('\x0d\x0a' in ..., 'PORT' in ...) raise TypeError on
                # Python 3.
                if b'\x0d\x0a' not in client_buf:
                    continue
                # Extract one complete command and drop it from the buffer.
                command_len = client_buf.index(b'\x0d\x0a') + 2
                command = client_buf[:command_len]
                client_buf = client_buf[command_len:]
                # The PORT command announces the client's data endpoint; tell
                # the kernel to expect that new TCP connection.
                if b'PORT' in command:
                    # Parse the ASCII h1,h2,h3,h4,p1,p2 parameters.
                    args = command[len(b'PORT '):].split(b',')
                    client_ip = (int(args[0]) << 24) | (int(args[1]) << 16) | (int(args[2]) << 8) | int(args[3])
                    client_port = (int(args[4]) << 8) | int(args[5])
                    server_ip = ip2int(self.server_info['ip'])
                    send_kernel_manipulation_command(MANIPULATION_CMD_FTP_DATA, NO_MANIPULATION_PORT,
                                                     client_ip, client_port, server_ip, 0)
                # Every command (PORT included) is forwarded unchanged.
                print('Client -> Server: \033[91m{0}\033[00m'.format(temp.rstrip()))
                self.server.send(command)
            elif self.server in ready[0]:
                # Server is ready: forward anything readable back to the client.
                temp = self.server.recv(256)
                if temp == b'':
                    print('Server disconnected!')
                    break
                print('Server -> Client: \033[96m{0}\033[00m'.format(temp.rstrip()))
                self.client.send(temp)
        self.client.close()
        self.server.close()
def run_server(address):
    """Accept FTP control connections forever, pairing each accepted client
    with the kernel's awaiting-connection record before proxying it."""
    listener = socket.socket()
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
    listener.bind(address)
    listener.listen(100)
    print('Listening (Address: {0})...'.format(address))
    while(1):
        try:
            client, c_address = listener.accept()
            print('New client found! Endpoint: {0} '.format(c_address))
            # Busy-poll the kernel device until its record matches this client.
            # NOTE(review): this assumes the kernel queues one record per
            # connection; out-of-order records would spin here — confirm.
            inst = get_awaiting_connection()
            while inst['client']['ip'] != c_address[0] or inst['client']['port'] != c_address[1] :
                inst = get_awaiting_connection()
        except Exception as e:
            print('ERROR IN ACCEPT: {0}'.format(e))
            raise
        try:
            # Possible errors are connection refuse from server or timeout
            handler = Single_user_handler(client,inst)
        except Exception as e:
            print(e)
            client.close()
            continue
        handler.start()
def main(argv):
    """Run the FTP manipulation proxy; returns 1 on a fatal error."""
    try:
        run_server(('', ftp_manipulation_port))
    except Exception as error:
        print('ERROR: {0}'.format(error))
        return 1
if __name__ == '__main__':
    import sys
    # Exit status propagates main()'s return: 1 on error, None (-> 0) otherwise.
    sys.exit(main(sys.argv))
# Loop through the candies array,
# add the extra candies at each index,
# check whether the total reaches the current maximum,
# and record True or False accordingly.
class Solution:
    def kidsWithCandies(self, candies: list[int], extraCandies: int) -> list[bool]:
        """Return, per kid, whether their candies plus the extras reach the max."""
        threshold = max(candies)
        return [count + extraCandies >= threshold for count in candies]
# Ad-hoc smoke test when the file is executed directly.
solution = Solution()
print(solution.kidsWithCandies([4,1,2,5],3) )
from pettingzoo.atari import basketball_pong_v2
from pettingzoo.atari import boxing_v1
from pettingzoo.atari import combat_plane_v1
from pettingzoo.atari import combat_tank_v1
from pettingzoo.atari import double_dunk_v2
from pettingzoo.atari import entombed_competitive_v2
from pettingzoo.atari import entombed_cooperative_v2
from pettingzoo.atari import flag_capture_v1
from pettingzoo.atari import foozpong_v2
from pettingzoo.atari import ice_hockey_v1
from pettingzoo.atari import joust_v2
from pettingzoo.atari import mario_bros_v2
from pettingzoo.atari import maze_craze_v2
from pettingzoo.atari import othello_v2
from pettingzoo.atari import pong_v2
from pettingzoo.atari import quadrapong_v3
from pettingzoo.atari import space_invaders_v1
from pettingzoo.atari import space_war_v1
from pettingzoo.atari import surround_v1
from pettingzoo.atari import tennis_v2
from pettingzoo.atari import video_checkers_v3
from pettingzoo.atari import volleyball_pong_v2
from pettingzoo.atari import wizard_of_wor_v2
from pettingzoo.atari import warlords_v2
from pettingzoo.classic import chess_v5
from pettingzoo.classic import checkers_v3
from pettingzoo.classic import rps_v2
from pettingzoo.classic import connect_four_v3
from pettingzoo.classic import tictactoe_v3
from pettingzoo.classic import leduc_holdem_v4
from pettingzoo.classic import mahjong_v4
from pettingzoo.classic import texas_holdem_v4
from pettingzoo.classic import texas_holdem_no_limit_v6
from pettingzoo.classic import uno_v4
from pettingzoo.classic import dou_dizhu_v4
from pettingzoo.classic import gin_rummy_v4
from pettingzoo.classic import go_v5
from pettingzoo.classic import hanabi_v4
from pettingzoo.classic import backgammon_v3
from pettingzoo.butterfly import knights_archers_zombies_v7
from pettingzoo.butterfly import pistonball_v5
from pettingzoo.butterfly import cooperative_pong_v5
from pettingzoo.butterfly import prison_v3
from pettingzoo.butterfly import prospector_v4
from pettingzoo.magent import battle_v3
from pettingzoo.magent import adversarial_pursuit_v3
from pettingzoo.magent import gather_v3
from pettingzoo.magent import combined_arms_v5
from pettingzoo.magent import tiger_deer_v3
from pettingzoo.magent import battlefield_v3
from pettingzoo.mpe import simple_adversary_v2
from pettingzoo.mpe import simple_crypto_v2
from pettingzoo.mpe import simple_push_v2
from pettingzoo.mpe import simple_reference_v2
from pettingzoo.mpe import simple_speaker_listener_v3
from pettingzoo.mpe import simple_spread_v2
from pettingzoo.mpe import simple_tag_v2
from pettingzoo.mpe import simple_world_comm_v2
from pettingzoo.mpe import simple_v2
from pettingzoo.sisl import pursuit_v4
from pettingzoo.sisl import waterworld_v3
from pettingzoo.sisl import multiwalker_v7
all_prefixes = ["atari", "classic", "butterfly", "magent", "mpe", "sisl"]
manual_environments = {
"butterfly/knights_archers_zombies",
"butterfly/pistonball",
"butterfly/cooperative_pong",
"butterfly/prison",
"butterfly/prospector",
"sisl/pursuit"
}
all_environments = {
"atari/basketball_pong_v2": basketball_pong_v2,
"atari/boxing_v1": boxing_v1,
"atari/combat_tank_v1": combat_tank_v1,
"atari/combat_plane_v1": combat_plane_v1,
"atari/double_dunk_v2": double_dunk_v2,
"atari/entombed_cooperative_v2": entombed_cooperative_v2,
"atari/entombed_competitive_v2": entombed_competitive_v2,
"atari/flag_capture_v1": flag_capture_v1,
"atari/foozpong_v2": foozpong_v2,
"atari/joust_v2": joust_v2,
"atari/ice_hockey_v1": ice_hockey_v1,
"atari/maze_craze_v2": maze_craze_v2,
"atari/mario_bros_v2": mario_bros_v2,
"atari/othello_v2": othello_v2,
"atari/pong_v2": pong_v2,
"atari/quadrapong_v3": quadrapong_v3,
"atari/space_invaders_v1": space_invaders_v1,
"atari/space_war_v1": space_war_v1,
"atari/surround_v1": surround_v1,
"atari/tennis_v2": tennis_v2,
"atari/video_checkers_v3": video_checkers_v3,
"atari/volleyball_pong_v2": volleyball_pong_v2,
"atari/wizard_of_wor_v2": wizard_of_wor_v2,
"atari/warlords_v2": warlords_v2,
"classic/chess_v5": chess_v5,
"classic/checkers_v3": checkers_v3,
"classic/rps_v2": rps_v2,
"classic/connect_four_v3": connect_four_v3,
"classic/tictactoe_v3": tictactoe_v3,
"classic/leduc_holdem_v4": leduc_holdem_v4,
"classic/mahjong_v4": mahjong_v4,
"classic/texas_holdem_v4": texas_holdem_v4,
"classic/texas_holdem_no_limit_v6": texas_holdem_no_limit_v6,
"classic/uno_v4": uno_v4,
"classic/dou_dizhu_v4": dou_dizhu_v4,
"classic/gin_rummy_v4": gin_rummy_v4,
"classic/go_v5": go_v5,
"classic/hanabi_v4": hanabi_v4,
"classic/backgammon_v3": backgammon_v3,
"butterfly/knights_archers_zombies_v7": knights_archers_zombies_v7,
"butterfly/pistonball_v5": pistonball_v5,
"butterfly/cooperative_pong_v5": cooperative_pong_v5,
"butterfly/prison_v3": prison_v3,
"butterfly/prospector_v4": prospector_v4,
"magent/adversarial_pursuit_v3": adversarial_pursuit_v3,
"magent/battle_v3": battle_v3,
"magent/battlefield_v3": battlefield_v3,
"magent/combined_arms_v5": combined_arms_v5,
"magent/gather_v3": gather_v3,
"magent/tiger_deer_v3": tiger_deer_v3,
"mpe/simple_adversary_v2": simple_adversary_v2,
"mpe/simple_crypto_v2": simple_crypto_v2,
"mpe/simple_push_v2": simple_push_v2,
"mpe/simple_reference_v2": simple_reference_v2,
"mpe/simple_speaker_listener_v3": simple_speaker_listener_v3,
"mpe/simple_spread_v2": simple_spread_v2,
"mpe/simple_tag_v2": simple_tag_v2,
"mpe/simple_world_comm_v2": simple_world_comm_v2,
"mpe/simple_v2": simple_v2,
"sisl/multiwalker_v7": multiwalker_v7,
"sisl/waterworld_v3": waterworld_v3,
"sisl/pursuit_v4": pursuit_v4,
}
|
from flask import (Flask, g, Response, current_app)
from flask_cachual import Cachual, cached
from mock import mock, MagicMock
import cachual, pytest
def get_app():
    """Create a fresh Flask app so each test gets isolated configuration."""
    return Flask(__name__)
@mock.patch('flask_cachual.Cachual.init_app')
def test_ctor_no_app(mock_init_app):
    """Constructing without an app must not call init_app or set .app."""
    ext = Cachual()
    # Bug fix: assert_has_calls([]) is vacuous — an empty expected-call list
    # always passes. assert_not_called() actually verifies the behavior.
    mock_init_app.assert_not_called()
    assert ext.app is None
@mock.patch('flask_cachual.Cachual.init_app')
def test_ctor_app(mock_init_app):
    """Passing an app to the constructor must forward it to init_app."""
    app = MagicMock()
    ext = Cachual(app)
    ext.init_app.assert_called_with(app)
    assert ext.app == app
def test_init_no_type():
    """init_app must fail when CACHUAL_TYPE is not configured."""
    app = get_app()
    ext = Cachual()
    with pytest.raises(Exception):
        ext.init_app(app)
@mock.patch('cachual.RedisCache')
def test_init_redis(mock_redis_cache):
    """A 'redis' CACHUAL_TYPE must build a RedisCache with CACHUAL_ARGS and
    attach it to the app as app.cachual_cache."""
    expected_cache = mock_redis_cache.return_value
    flask_app = get_app()
    flask_app.config["CACHUAL_TYPE"] = 'redis'
    flask_app.config["CACHUAL_ARGS"] = {'host': 'test', 'port': 1, 'db': 1}
    Cachual(flask_app)
    assert flask_app.cachual_cache == expected_cache
@mock.patch('cachual.MemcachedCache')
def test_init_memcached(mock_memcached_cache):
    """A 'memcached' CACHUAL_TYPE must build a MemcachedCache with CACHUAL_ARGS
    and attach it to the app as app.cachual_cache."""
    expected_cache = mock_memcached_cache.return_value
    flask_app = get_app()
    flask_app.config["CACHUAL_TYPE"] = 'memcached'
    flask_app.config["CACHUAL_ARGS"] = {'server': ('test', 1)}
    Cachual(flask_app)
    assert flask_app.cachual_cache == expected_cache
def test_init_invalid_type():
    """An unrecognized CACHUAL_TYPE must cause init_app to raise."""
    flask_app = get_app()
    flask_app.config["CACHUAL_TYPE"] = 'R4n3ign4wuih4'
    flask_app.config["CACHUAL_ARGS"] = {}
    ext = Cachual()
    with pytest.raises(Exception):
        ext.init_app(flask_app)
@mock.patch('cachual.RedisCache')
def test_cached(mock_redis_cache):
    """End-to-end check of the @cached decorator inside a request context.

    Verifies the decorator forwards its options to the app cache's own
    cached() factory, wraps the original function, and passes call
    arguments through to the cache-wrapped callable.
    """
    # The chain: cache.cached(ttl, ...) -> decorator -> decorated function.
    cache = mock_redis_cache.return_value
    cache.cached = MagicMock()
    cache_decorator = cache.cached.return_value
    cache_decorated = cache_decorator.return_value
    cache_args = {'host': 'test', 'port': 1, 'db': 1}
    test_args = ['arg1', 'arg2']
    test_kwargs = {"kwarg1": "val1", "kwarg2": "val2"}
    app = get_app()
    app.config["CACHUAL_TYPE"] = 'redis'
    app.config["CACHUAL_ARGS"] = cache_args
    cachual = Cachual(app)
    # Sentinel option objects: only identity matters for the assertions below.
    ttl = MagicMock()
    pack = MagicMock()
    unpack = MagicMock()
    use_class_for_self = MagicMock()
    @cached(ttl=ttl, pack=pack, unpack=unpack,
            use_class_for_self=use_class_for_self)
    def test_cache_func(*args, **kwargs):
        pass
    @app.route('/')
    def test_route():
        test_cache_func(*test_args, **test_kwargs)
        return 'ok'
    # Drive a real request so the decorator resolves the app's cache via the context.
    with app.test_client() as c:
        c.get('/')
    cache.cached.assert_called_with(ttl, pack, unpack, use_class_for_self)
    # The decorator must wrap the *original* function (exposed via __wrapped__).
    cache_decorator.assert_called_with(test_cache_func.__wrapped__)
    cache_decorated.assert_called_with(*test_args, **test_kwargs)
|
class Solution:
    def divisorSubstrings(self, num: int, k: int) -> int:
        """Return the k-beauty of *num*: how many length-k substrings of
        str(num), read as integers, are non-zero divisors of num.

        :param num: the number whose digit windows are examined.
        :param k: window length.
        :return: count of length-k windows that divide num.
        """
        digits = str(num)
        result = 0
        # Iterate only over valid window starts; the original looped over the
        # full range and broke out, which is the error-prone form of the same idea.
        for i in range(len(digits) - k + 1):
            window = int(digits[i:i + k])
            if window == 0:
                continue  # zero can never be a divisor
            if num % window == 0:
                result += 1
        return result
if __name__ == "__main__":
    # Quick manual check: both calls should print 2 (LeetCode 2269 examples).
    solution = Solution()
    print(solution.divisorSubstrings(240, 2))
    print(solution.divisorSubstrings(430043, 2))
|
# groceries.py
#from pprint import pprint
# Each product record has the shape:
#   {"id": int, "name": str, "department": str, "aisle": str, "price": float}
products = [
    {"id":1, "name": "Chocolate Sandwich Cookies", "department": "snacks", "aisle": "cookies cakes", "price": 3.50},
    {"id":2, "name": "All-Seasons Salt", "department": "pantry", "aisle": "spices seasonings", "price": 4.99},
    {"id":3, "name": "Robust Golden Unsweetened Oolong Tea", "department": "beverages", "aisle": "tea", "price": 2.49},
    {"id":4, "name": "Smart Ones Classic Favorites Mini Rigatoni With Vodka Cream Sauce", "department": "frozen", "aisle": "frozen meals", "price": 6.99},
    {"id":5, "name": "Green Chile Anytime Sauce", "department": "pantry", "aisle": "marinades meat preparation", "price": 7.99},
    {"id":6, "name": "Dry Nose Oil", "department": "personal care", "aisle": "cold flu allergy", "price": 21.99},
    {"id":7, "name": "Pure Coconut Water With Orange", "department": "beverages", "aisle": "juice nectars", "price": 3.50},
    {"id":8, "name": "Cut Russet Potatoes Steam N' Mash", "department": "frozen", "aisle": "frozen produce", "price": 4.25},
    {"id":9, "name": "Light Strawberry Blueberry Yogurt", "department": "dairy eggs", "aisle": "yogurt", "price": 6.50},
    {"id":10, "name": "Sparkling Orange Juice & Prickly Pear Beverage", "department": "beverages", "aisle": "water seltzer sparkling water", "price": 2.99},
    {"id":11, "name": "Peach Mango Juice", "department": "beverages", "aisle": "refrigerated", "price": 1.99},
    {"id":12, "name": "Chocolate Fudge Layer Cake", "department": "frozen", "aisle": "frozen dessert", "price": 18.50},
    {"id":13, "name": "Saline Nasal Mist", "department": "personal care", "aisle": "cold flu allergy", "price": 16.00},
    {"id":14, "name": "Fresh Scent Dishwasher Cleaner", "department": "household", "aisle": "dish detergents", "price": 4.99},
    {"id":15, "name": "Overnight Diapers Size 6", "department": "babies", "aisle": "diapers wipes", "price": 25.50},
    {"id":16, "name": "Mint Chocolate Flavored Syrup", "department": "snacks", "aisle": "ice cream toppings", "price": 4.50},
    {"id":17, "name": "Rendered Duck Fat", "department": "meat seafood", "aisle": "poultry counter", "price": 9.99},
    {"id":18, "name": "Pizza for One Suprema Frozen Pizza", "department": "frozen", "aisle": "frozen pizza", "price": 12.50},
    {"id":19, "name": "Gluten Free Quinoa Three Cheese & Mushroom Blend", "department": "dry goods pasta", "aisle": "grains rice dried goods", "price": 3.99},
    {"id":20, "name": "Pomegranate Cranberry & Aloe Vera Enrich Drink", "department": "beverages", "aisle": "juice nectars", "price": 4.25}
] # based on data from Instacart: https://www.instacart.com/datasets/grocery-shopping-2017
# print(products)
# print(len(products))
# print(products[0])
# print(products[0]['name'])
# for item in range(0, len(products)):
# print(products[item]['name'])
# alpha order
# names = []
# for item in range(0, len(products)):
# names.append(products[item]['name'])
# names.sort()
# for item in names:
# print(f"\n {item}")
#attributions ##########################################################
#lambda function courtesy of stackoverflow :)
#https://stackoverflow.com/questions/72899/how-do-i-sort-a-list-of-dictionaries-by-a-value-of-the-dictionary
#set fix courtesy of stackoverflow :)
#https://stackoverflow.com/questions/7961363/removing-duplicates-in-python-lists
# format applier courtesy of Dan Gode's class financial statement analytics
#attributions ##########################################################
def alpha_sorted_products(inventory):
    """Print a header plus every product name and price, sorted A-Z by name.

    :param inventory: list of product dicts with at least 'name' and 'price' keys.
    """
    sorted_inventory = sorted(inventory, key=lambda product: product['name'])
    print('________________________________')
    print('There are', len(sorted_inventory), 'products')
    print('________________________________')
    # Iterate items directly instead of indexing via range(len(...)).
    for product in sorted_inventory:
        print('+', product['name'], "(${:,.2f})".format(product['price']))
def alpha_sorted_departments(inventory):
    """Print a header plus every department (A-Z) with its product count.

    Output format matches the original exactly, including print()'s default
    spacing around the count and the products/product pluralization.

    :param inventory: list of product dicts with at least a 'department' key.
    """
    from collections import Counter  # local import: this file has no import section

    # One O(n) pass replaces the original O(departments x products) recount loop.
    counts = Counter(product['department'] for product in inventory)
    print('________________________________')
    print('There are', len(counts), 'departments')
    print('________________________________')
    for department in sorted(counts):
        item_count = counts[department]
        if item_count > 1:
            print('+', department.capitalize(), "(", item_count, 'products)')
        else:
            print('+', department.capitalize(), "(", item_count, 'product)')
def combined(inventory):
    """Run both reports (products first, then departments) over *inventory*."""
    alpha_sorted_products(inventory)
    alpha_sorted_departments(inventory)
# Script behavior: the full report runs when the module is executed/imported.
combined(products)
from moonleap import u0
from moonleap.utils.inflect import plural
from moonleap.utils.magic_replace import magic_replace
from titan.api_pkg.pkg.ml_name import ml_type_spec_from_item_name
from titan.react_pkg.pkg.ts_var import ts_type, ts_type_import_path
from titan.react_state_pkg.itemview.props import get_item_view_route_params
from titan.react_view_pkg.router import RouterConfig
def create_router_configs(self):
    # Single catch-all route (url="") for the component.
    # NOTE(review): `self` suggests this is attached to a component class
    # elsewhere (moonleap-style mixin) — confirm against callers.
    return [RouterConfig(component=self, url="")]
# Template for the generated TypeScript ArgsT type. "yellowTulip" is a magic
# placeholder replaced with the real argument list via magic_replace() in
# Sections.effect_args() below.
effect_args_template = """
type ArgsT = {
yellowTulip
};
"""
def get_context(select_item_effect):
    """Build the template context (sections + helper values) for rendering a
    select-item effect.

    Returns a dict with `sections` (an object whose methods each render one
    code section) and `_` (the shared value namespace the sections close over).
    """
    # Poor-man's namespace: a lambda used as a mutable attribute container.
    _ = lambda: None
    _.route_params = get_item_view_route_params(select_item_effect.item_list.item_name)
    _.item_name = select_item_effect.item_list.item_name
    _.items_name = plural(_.item_name)
    _.item_ts_type = ts_type(select_item_effect.item_list.item)
    _.item_ts_type_import_path = ts_type_import_path(select_item_effect.item_list.item)
    _.type_spec = ml_type_spec_from_item_name(_.item_name)

    class Sections:
        # Each method renders one template section; all close over `_` above.
        def effect_args(self):
            # TypeScript arg declarations, e.g. "fooId: string, barSlug: string".
            args = ", ".join(
                [f"{route_param}: string" for route_param in _.route_params]
            )
            return magic_replace(
                effect_args_template,
                [
                    ("yellowTulip", args),
                ],
            )

        def declare_params(self):
            # Destructuring pattern, e.g. "{ fooId, barSlug }".
            return "{ " + ", ".join(_.route_params) + " }"

        def extract_params(self):
            # Object body pulling each param off `params`.
            return ", ".join(
                [
                    f"{route_param}: params.{route_param}"
                    for route_param in _.route_params
                ]
            )

        def get_item_id(self):
            # JS predicate matching an item on every query_item_by field.
            search_function = "(x) => " + " && ".join(
                [
                    f"x.{param} === {_.item_name + u0(param)}"
                    for param in _.type_spec.query_item_by or []
                ]
            )
            # If the route already carries <item>Id directly, no lookup needed.
            return (
                ""
                if _.route_params == [f"{_.item_name}Id"]
                else f"const {_.item_name}Id = "
                + f"R.find({search_function})(props.{_.items_name})?.id"
            )

    return dict(sections=Sections(), _=_)
|
# NOTE(review): fragment of a larger method — `self`, `ticker`, `stock` and
# `index` are bound by the enclosing scope, which is not visible here.
try:
    prediction = stock["predictions"]["basic"][index]
    current_action = prediction["action"]["current"]
    previouse_action = prediction["action"]["previouse"]
    # action_flags is a bit list; joined and parsed base-2 into a lookup key.
    action_flags = prediction["action_flags"]
    key = int("".join(str(x) for x in action_flags), 2)
    send_mail = False
    # Maps the flag bit-pattern to the previously recorded action state.
    map_action = {
        0: "none",
        1: "none",
        2: "unknown",
        3: "sell",
        4: "none",
        5: "none",
        6: "buy",
        7: "none"
    }
    if "sell" in current_action:
        if "sell" in map_action[key]:
            # Flags already indicate sell: clear the buy bit, no mail.
            # (presumably index 0=sell, 1=hold, 2=buy — TODO confirm)
            action_flags[2] = 0
            # Send mail changed to sell
            send_mail = True
        action_flags[0] = 1
    elif "hold" in current_action:
        action_flags[1] = 1
        # FEATURE: Were from you arrived (buy or sell)
    elif "buy" in current_action:
        if "buy" in map_action[key]:
            action_flags[0] = 0
            # Send mail changed to buy
            send_mail = True
        action_flags[2] = 1
    else:
        pass
    if send_mail is True:
        self.Node.LogMSG("({classname})# [StockSimplePredictionChangeEvent] Send Mail ({0} {1} {2} {3})".format(ticker, stock["price"], previouse_action, current_action, classname=self.ClassName),5)
        # HTML body of the notification mail; {0}-{3} filled below.
        html = '''
            <table>
                <tr>
                    <td><span>Ticker</span></td>
                    <td><span>{0}</span></td>
                </tr>
                <tr>
                    <td><span>Price</span></td>
                    <td><span>{1}</span></td>
                </tr>
                <tr>
                    <td><span>Previouse Prediction</span></td>
                    <td><span>{2}</span></td>
                </tr>
                <tr>
                    <td><span>Current Prediction</span></td>
                    <td><span>{3}</span></td>
                </tr>
            </table>
        '''.format(ticker, stock["price"], previouse_action, current_action)
        self.Node.SendMail("yevgeniy.kiveisha@gmail.com", "Stock Monitor Prediction Change", html)
except Exception as e:
    self.Node.LogMSG("({classname})# [EXCEPTION] (StockSimplePredictionChangeEvent) {0} {1}".format(ticker,str(e),classname=self.ClassName),5)
import argparse
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import pynab.releases
from pynab.db import db_session, Pre, Release
from pynab import log
import config
def rename_pre_releases():
    """Rename releases whose name/search_name disagree with their linked Pre.

    Copies name/searchname from the Pre row, re-categorizes the release from
    its new search name, and commits everything in one transaction. Logs each
    rename and a final count.
    """
    # Local import: the file only imports pynab.releases at the top, so the
    # pynab.categories.determine_category() call below would otherwise fail.
    import pynab.categories
    count = 0
    with db_session() as db:
        # Releases that have a pre_id but whose names no longer match the pre.
        query = db.query(Release).filter(Release.pre_id != None)
        query = query.outerjoin(Pre, Pre.id == Release.pre_id).filter(
            (Release.name != Pre.name) | (Release.search_name != Pre.searchname)
        )
        for release in query.all():
            # Capture the old name before overwriting it — the original logged
            # search_name *after* assignment, so it printed new -> new.
            old_search_name = release.search_name
            release.name = release.pre.name
            release.search_name = release.pre.searchname
            release.category_id = pynab.categories.determine_category(
                release.search_name, release.group.name
            )
            db.merge(release)
            count += 1
            log.info('rename: [{}] -> [{}]'.format(old_search_name, release.pre.searchname))
        db.commit()
    log.info('rename: successfully renamed {} releases'.format(count))
if __name__ == '__main__':
    # Interactive entry point: warn the operator before touching the database.
    print('''
Rename Releases with mismatched pre-IDs.
Compares release names with pre-ID names and updates upon mismatch.
''')
    input('To continue, press enter. To exit, press ctrl-c.')
    rename_pre_releases()
|
# Loss functions taken/inspired from Akensert's kaggle kernel
import numpy as np
import tensorflow as tf
from tensorflow import keras as K
def weighted_log_loss(y_true, y_pred):
    """Per-class weighted binary cross-entropy over the last axis.

    Usable directly as the loss function in model.compile().
    """
    # class_weights = np.array([1., 1., 1., 1., 1., 2.])
    class_weights = np.array([1.])
    epsilon = K.backend.epsilon()
    # Clip to avoid log(0).
    clipped = K.backend.clip(y_pred, epsilon, 1.0 - epsilon)
    positive_term = y_true * K.backend.log(clipped) * class_weights
    negative_term = (1.0 - y_true) * K.backend.log(1.0 - clipped) * class_weights
    return K.backend.mean(-(positive_term + negative_term), axis=-1)
def _normalized_weighted_average(arr, weights=None):
    """Keras re-implementation of numpy.average() over axis 1.

    With *weights* (1-D tensor, one weight per column of *arr*) returns the
    weighted mean of each row, normalized by the weight sum; otherwise a
    plain per-row mean.
    """
    if weights is not None:
        scl = K.backend.sum(weights)
        # Column vector so dot() produces one weighted sum per row.
        weights = K.backend.expand_dims(weights, axis=1)
        return K.backend.sum(K.backend.dot(arr, weights), axis=1) / scl
    return K.backend.mean(arr, axis=1)
def weighted_loss(y_true, y_pred):
    """
    Will be used as the metric in model.compile()
    ---------------------------------------------
    Similar to the custom loss function 'weighted_log_loss()' above
    but with normalized weights, which should be very similar
    to the official competition metric:
    https://www.kaggle.com/kambarakun/lb-probe-weights-n-of-positives-scoring
    and hence:
    sklearn.metrics.log_loss with sample weights
    """
    # class_weights = K.backend.variable([1., 1., 1., 1., 1. ,2.])
    class_weights = K.backend.variable([1.])
    eps = K.backend.epsilon()
    # Clip to avoid log(0).
    y_pred = K.backend.clip(y_pred, eps, 1.0-eps)
    loss = -( y_true * K.backend.log( y_pred)
            + (1.0 - y_true) * K.backend.log(1.0 - y_pred))
    # Normalized weighted average over classes, then mean over the batch.
    loss_samples = _normalized_weighted_average(loss, class_weights)
    return K.backend.mean(loss_samples)
def weighted_log_loss_metric(trues, preds):
    """NumPy version of the weighted log loss.

    Used to score the validation set in PredictionCheckpoint().
    """
    # class_weights = [1., 1., 1., 1., 1., 2.]
    class_weights = [1.]
    eps = 1e-7
    clipped = np.clip(preds, eps, 1 - eps)
    per_element = trues * np.log(clipped) + (1 - trues) * np.log(1 - clipped)
    per_sample = np.average(per_element, axis=1, weights=class_weights)
    return -per_sample.mean()
#!/usr/bin/env python
from setuptools import setup
# Packaging metadata for the pystran3 distribution (single package, three
# scientific-stack runtime dependencies).
setup(name='pystran3',
      version='0.3',
      description='Set of diagnostic tools for model structure analysis',
      url='https://github.com/stijnvanhoey/pystran',
      author='Stijn Van Hoey',
      author_email='stijnvanhoey@gmail.com',
      packages=['pystran3'],
      license='BSD 3-clause New or Revised License',
      install_requires=['matplotlib', 'numpy', 'scipy'],
      keywords='modelling, sensitivity analysis, optimization, metric calculation',)
|
# src:
# https://www.geeksforgeeks.org/practice-questions-time-complexity-analysis/
"""
Name Big O
Constant O(c)
Linear O(n)
Quadratic O(n^2)
Cubic O(n^3)
Exponential O(2^n)
Logarithmic O(log(n))
Log Linear O(nlog(n))
"""
"""
1. What is the time, space complexity of following code:
int a = 0, b = 0;
for (i = 0; i < N; i++) {
a = a + rand();
}
for (j = 0; j < M; j++) {
b = b + rand();
}
O(N+M) time;
O(1) space;
"""
"""
2. What is the time complexity of following code:
int a = 0;
for (i = 0; i < N; i++) {
for (j = N; j > i; j--) {
a = a + i + j;
}
}
O(N*N) time
"""
# N = 10
# for i in range(10):
# j = N
# while j > i:
# print(i, j)
# j -= 1
"""
3. What is the time complexity of following code:
int i, j, k = 0;
for (i = n / 2; i <= n; i++) {
for (j = 2; j <= n; j = j * 2) {
k = k + n / 2;
}
}
O(n * logn)
j keeps doubling as long as it is less than or equal to n.
The number of times we can double a number until it reaches n is log(n).
"""
"""
5. What is the time complexity of following code:
int a = 0, i = N;
while (i > 0) {
a += i;
i /= 2;
}
O(logn)
"""
|
"""
General Resources for all API resources modules
"""
from flask_restful import Resource
class BaseResource(Resource):
    """Base class for API resources: injects the shared data repo and selector.

    flask_restful forwards `resource_class_kwargs` into __init__, so both
    'data' and 'selector' keys are required at registration time.
    """
    def __init__(self, **kwargs):
        self.repo = kwargs['data']  # repo: DataInterface
        self.selector = kwargs['selector']  # selector: SelectInterface
|
# Derive from the COCO PointRend R50 (caffe) 3x multi-scale baseline config.
_base_ = '../point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py'
# learning policy
runner = dict(type='EpochBasedRunner', max_epochs=200)
# Override all heads for a single foreground class ('nucleus').
model = dict(
    type='PointRend',
    roi_head=dict(
        type='PointRendRoIHead',
        mask_head=dict(
            _delete_=True,  # drop the inherited mask head entirely and rebuild it
            type='CoarseMaskHead',
            num_classes=1,
        ),
        point_head=dict(
            type='MaskPointHead',
            num_classes=1,
            coarse_pred_each_layer=True
        ),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            num_classes=1,
            reg_class_agnostic=False
        )))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # Consistency: reuse img_norm_cfg instead of repeating the same numbers
    # inline (103.530 == 103.53 etc., so behavior is unchanged).
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img']),
        ])
]
classes = ('nucleus',)
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(
        img_prefix='/work/zchin31415/nucleus_data/all_train',
        classes=classes,
        ann_file='/work/zchin31415/nucleus_data/annotations/instance_all_train.json',
        pipeline=train_pipeline),
    val=dict(
        img_prefix='/work/zchin31415/nucleus_data/all_train',
        classes=classes,
        ann_file='/work/zchin31415/nucleus_data/annotations/instance_all_train.json',
        pipeline=test_pipeline),
    test=dict(
        img_prefix='/work/zchin31415/nucleus_data/test',
        classes=classes,
        ann_file='/work/zchin31415/nucleus_data/annotations/instance_test.json',
        # BUG FIX: pipeline was misplaced outside the test dict (a closing
        # paren too early made it a stray top-level key of `data`), leaving
        # the test dataset without a pipeline.
        pipeline=test_pipeline))
load_from = '/home/zchin31415/mmdet-nucleus-instance-segmentation/mmdetection/checkpoints/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth'
|
# coding=utf-8
#
# Copyright © 2015 VMware, Inc. All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = 'yfauser'
from tests.config import *
from nsxramlclient.client import NsxClient
import time
# Module-level NSX session shared by all tests below; debug=True echoes payloads.
client_session = NsxClient(nsxraml_file, nsxmanager, nsx_username, nsx_password, debug=True)
def test_segment_pools():
    """Exercise the full segment-ID-pool CRUD cycle against a live NSX manager."""
    ### Test Segment ID Pool Operations
    # Get all configured Segment Pools
    get_segment_resp = client_session.read('vdnSegmentPools')
    client_session.view_response(get_segment_resp)
    # Add a Segment Pool
    segments_create_body = client_session.extract_resource_body_example('vdnSegmentPools', 'create')
    client_session.view_body_dict(segments_create_body)
    segments_create_body['segmentRange']['begin'] = '11002'
    segments_create_body['segmentRange']['end'] = '11003'
    segments_create_body['segmentRange']['name'] = 'legacy'
    create_response = client_session.create('vdnSegmentPools', request_body_dict=segments_create_body)
    client_session.view_response(create_response)
    # Give NSX time to materialize the new pool before mutating it.
    time.sleep(5)
    # Update the new Segment Pool:
    update_segment_body = client_session.extract_resource_body_example('vdnSegmentPool', 'update')
    update_segment_body['segmentRange']['name'] = 'PythonTest'
    update_segment_body['segmentRange']['end'] = '11005'
    client_session.update('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']},
                          request_body_dict=update_segment_body)
    time.sleep(5)
    # Display a specific Segment pool (the new one)
    specific_segement_resp = client_session.read('vdnSegmentPool', uri_parameters={'segmentPoolId':
                                                 create_response['objectId']})
    client_session.view_response(specific_segement_resp)
    time.sleep(5)
    # Delete new Segment Pool (cleanup)
    client_session.delete('vdnSegmentPool', uri_parameters={'segmentPoolId': create_response['objectId']})
def test_mcast_pools():
    """Exercise the full multicast-pool CRUD cycle against a live NSX manager.

    NOTE(review): the triple-s 'multicastAddresssRangeId' URI parameter
    spelling presumably matches the RAML spec — do not "fix" it here.
    """
    ### Test Multicast Pool Operations
    # Add a multicast Pool
    mcastpool_create_body = client_session.extract_resource_body_example('vdnMulticastPools', 'create')
    client_session.view_body_dict(mcastpool_create_body)
    mcastpool_create_body['multicastRange']['desc'] = 'Test'
    mcastpool_create_body['multicastRange']['begin'] = '235.0.0.0'
    mcastpool_create_body['multicastRange']['end'] = '235.1.1.1'
    mcastpool_create_body['multicastRange']['name'] = 'legacy'
    create_response = client_session.create('vdnMulticastPools', request_body_dict=mcastpool_create_body)
    client_session.view_response(create_response)
    # Get all configured Multicast Pools
    get_mcast_pools = client_session.read('vdnMulticastPools')
    client_session.view_response(get_mcast_pools)
    time.sleep(5)
    # Update the newly created mcast pool
    mcastpool_update_body = client_session.extract_resource_body_example('vdnMulticastPool', 'update')
    mcastpool_update_body['multicastRange']['end'] = '235.3.1.1'
    mcastpool_update_body['multicastRange']['name'] = 'Python'
    update_response = client_session.update('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                            create_response['objectId']},
                                            request_body_dict=mcastpool_update_body)
    client_session.view_response(update_response)
    # display a specific Multicast Pool
    get_mcast_pool = client_session.read('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId':
                                         create_response['objectId']})
    client_session.view_response(get_mcast_pool)
    # Delete new mcast pool (cleanup)
    client_session.delete('vdnMulticastPool', uri_parameters={'multicastAddresssRangeId': create_response['objectId']})
#test_segment_pools()
#test_mcast_pools()
|
##################### README ###################################################
# This file executes the classification algorithm over input testing images.
# Winner neurons inhibit other neurons by a phenomenon called Lateral inhibition
# Spike for each output neuron at each time stamp is monitored.
################################################################################
import numpy as np
from neuron import neuron
import random
from recep_field import rf
import imageio
from spike_train import *
from weight_initialization import learned_weights
# NOTE(review): this file is Python 2 (`print spike_count` below is a py2
# print statement); do not run it under Python 3 without porting.
#Parameters
global time, T, dt, t_back, t_fore, w_min
T = 200
time = np.arange(1, T+1, 1)
t_back = -20
t_fore = 20
Pth = 150 #Should be Pth = 6 for deterministic spike train
m = 784 #Number of neurons in first layer
n = 8 #Number of neurons in second layer
epoch = 1
num_of_images = 6
w_max = 0.5
w_min = -0.5
layer2 = []
# creating the hidden layer of neurons
for i in range(n):
    a = neuron()
    layer2.append(a)
#synapse matrix (one weight row per output neuron)
synapse = np.zeros((n,m))
#learned weights for the first num_of_images output neurons
weight_matrix = learned_weights()
for i in range (num_of_images):
    synapse[i] = weight_matrix[i]
#random initialization for rest of the synapses
for i in range(num_of_images,n):
    for j in range(m):
        synapse[i][j] = random.uniform(w_min,w_max)
for k in range(epoch):
    for i in range(1,7):
        spike_count = np.zeros((n,1))
        #read the image to be classified
        img = imageio.imread("training_images/" + str(i) + ".png")
        #initialize the potentials of output neurons
        for x in layer2:
            x.initial()
        #calculate the membrane potentials of input neurons
        pot = rf(img)
        #generate spike trains. Select between deterministic and stochastic
        # train = np.array(encode_deterministic(pot))
        train = np.array(encode_stochastic(img))
        #flag for lateral inhibition (only one winner per image)
        f_spike = 0
        active_pot = np.zeros((n,1))
        for t in time:
            for j, x in enumerate(layer2):
                active = []
                #update potential if not in refractory period
                if(x.t_rest<t):
                    x.P = x.P + np.dot(synapse[j], train[:,t])
                    if(x.P>x.Prest):
                        x.P -= x.D  # leak towards rest
                    active_pot[j] = x.P
            # Lateral Inhibition: first neuron to cross Pth suppresses the rest
            if(f_spike==0):
                high_pot = max(active_pot)
                if(high_pot>Pth):
                    f_spike = 1
                    winner = np.argmax(active_pot)
                    for s in range(n):
                        if(s!=winner):
                            layer2[s].P = layer2[s].Pmin
            #Check for spikes and enter refractory period on firing
            for j,x in enumerate(layer2):
                s = x.check()
                if(s==1):
                    spike_count[j] += 1
                    x.t_rest = t + x.t_ref
        print spike_count
import time
def convert_to_underscore(record, timestamp_key):
    """Return a copy of *record* with '-' replaced by '_' in every key.

    :param record: dict of field name -> value.
    :param timestamp_key: unused here; kept so every converter in this module
        shares the same (record, timestamp_key) signature.
    :return: new dict with underscored keys.
    """
    # Dict comprehension replaces the manual build-and-assign loop.
    return {key.replace("-", "_"): value for key, value in record.items()}
def convert_epoch_sec_to_iso8601(record, timestamp_key):
    """Convert the epoch-seconds value at *timestamp_key* to ISO 8601 (UTC), in place."""
    epoch_sec = int(record[timestamp_key])
    record[timestamp_key] = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(epoch_sec))
    return record
def convert_epoch_ms_to_iso8601(record, timestamp_key):
    """Convert the epoch-milliseconds value at *timestamp_key* to ISO 8601 (UTC) with a .mmm suffix, in place."""
    seconds, millis = divmod(int(record[timestamp_key]), 1000)
    stamp = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(seconds))
    record[timestamp_key] = "%s.%03d" % (stamp, millis)
    return record
def extract_date_from_iso8601(record, timestamp_key):
    """Return the date portion (text before 'T') of the ISO 8601 value at *timestamp_key*."""
    timestamp = record[timestamp_key]
    date_part, _, _ = timestamp.partition("T")
    return date_part
|
import pygeoip,os
def ipLocation(ip):
    """Look up the ISO country code for *ip* in the GeoIP.dat file shipped
    next to this module."""
    db_path = os.path.join(os.path.dirname(__file__), "GeoIP.dat")
    geo_db = pygeoip.GeoIP(db_path)
    return geo_db.country_code_by_addr(ip)
|
# Package metadata for the right_click_save distribution; consumed by the
# build tooling and available for runtime introspection.
__author__ = "ecaz.eth"
__author_email__ = "me@ecaz.xyz"
__description__ = "NFT metadata retrieval tool."
__license__ = "Apache 2.0"
__title__ = "right_click_save"
__url__ = "https://github.com/ecaz-eth/right-click-save"
__version__ = "0.1.0"
|
from thinkindicator.indicator import ThinkIndicator
# Script entry point: the indicator is constructed and run at import time.
ThinkIndicator().run()
|
import asyncio
import json
import beeprint
from loguru import logger
import websockets
from AsyncWebsocketStreamInterface import AsyncWebsocketStreamInterface
from HuobiAsyncWebsocket.UrlParamsBuilder import create_signature, UrlParamsBuilder
class HuobiAsyncWs(AsyncWebsocketStreamInterface):
    """Huobi private websocket client.

    Handles authentication on connect, answers server pings to keep the
    connection alive, and replays all recorded subscriptions whenever a new
    underlying websocket is created.
    """
    ws_baseurl = 'wss://api-aws.huobi.pro/ws/v2'

    def __init__(self, apikey, secret):
        super(HuobiAsyncWs, self).__init__()
        self._apikey = apikey
        self._secret = secret
        # JSON-string payloads of every subscription made so far; replayed on reconnect.
        self._subs = set()

    async def _parse_raw_data(self, raw_data):
        # Raw frames are JSON text.
        return json.loads(raw_data)

    async def _create_ws(self):
        '''
        Create a websockets connection and return it only after the server
        has confirmed authentication.
        :return: websockets ws instance just created.
        '''
        # Open a new websocket connection.
        ws = await websockets.connect(self.ws_baseurl)
        # Kick off authentication in the background.
        asyncio.create_task(self._authenticate(ws))
        authentication_stream = self.stream_filter([{
            'action': 'req',
            'code': 200,
            'ch': 'auth'
        }])
        try:
            # Drain frames until the auth-success message arrives.
            async for msg in ws:
                msg = json.loads(msg)
                print(f'msg={msg}')
                if isinstance(msg, dict) and msg.get('action') == 'req' and \
                        msg.get('code') == 200 and msg.get('ch') == 'auth':
                    break
        finally:
            asyncio.create_task(authentication_stream.close())
        return ws

    async def _authenticate(self, ws):
        '''The server sends no heartbeat until the connection authenticates.'''
        builder = UrlParamsBuilder()
        create_signature(api_key=self._apikey,
                         secret_key=self._secret,
                         method='GET',
                         url=type(self).ws_baseurl,
                         builder=builder)
        auth_request = {
            "action": "req",
            "ch": "auth",
            "params": {
                "authType": "api",
                "accessKey": self._apikey,
                "signatureMethod": "HmacSHA256",
                "signatureVersion": "2.1",
                "timestamp": builder.param_map['timestamp'],
                "signature": builder.param_map['signature']
            }
        }
        await ws.send(json.dumps(auth_request))

    async def _when2create_new_ws(self):
        '''
        One-shot watchdog: returns when it is time to replace the current ws
        (i.e. the server heartbeat stopped for 30s).
        :return:
        '''
        # A fresh ws just became usable: replay every recorded subscription.
        [await task for task in [asyncio.create_task(self.send(sub)) for sub in self._subs]]
        # Heartbeat watch. Server pings look like:
        # {
        #     'action': 'ping',
        #     'data': {
        #         'ts': 1597729470150,
        #     },
        # }
        ping_aiter = self.stream_filter([{'action': 'ping'}])
        while True:
            try:
                # Wait at most 30s for the next ping; otherwise consider the ws dead.
                ping = await asyncio.wait_for(ping_aiter.__anext__(), 30)
            except asyncio.TimeoutError:  # ping wait timed out
                logger.debug('Ping timeout.')
                asyncio.create_task(ping_aiter.close())
                break
            else:
                # Echo the ping timestamp back as a pong.
                pong = json.dumps({
                    "action": "pong",
                    "data": {
                        "ts": ping['data']['ts']
                    }
                })
                await self.send(pong)
                logger.debug('\n' + beeprint.pp({
                    "action": "pong",
                    "data": {
                        "ts": ping['data']['ts']
                    }
                }, output=False, string_break_enable=False, sort_keys=False))

    async def add_subscription(self, new_sub: dict):
        # Record the subscription so it survives reconnects, then send it.
        b_new_sub = json.dumps(new_sub)
        self._subs.add(b_new_sub)
        # Busy-wait (yielding) until a live ws exists, then subscribe.
        while not self.present_ws:
            await asyncio.sleep(0)
        asyncio.create_task(self.send(b_new_sub))

    def all_order_stream(self):
        '''
        Filter the ws order data stream and push the filtered data to the async generator which is returned by the method.
        Remember to explicitly call the close method of the async generator to close the stream.
        stream=huobiws.order_stream()
        #handle message in one coroutine:
        async for news in stream:
            ...
        #close the stream in another:
        close_task=asyncio.create_task(stream.close())
        ...
        await close_task
        :return:
        '''
        all_orders_sub = {
            "action": "sub",
            "ch": "orders#*"
        }
        asyncio.create_task(self.add_subscription(all_orders_sub))
        return self.stream_filter([{'action': 'push',
                                    'ch': 'orders#*'}])
|
import numpy as np
import re
import json
import os
def read_obj(model_path, flags=('v',)):
    """Parse a Wavefront .obj file and collect the records named in *flags*.

    :param model_path: path to the .obj file.
    :param flags: record prefixes to collect (e.g. 'v', 'vt', 'vn', 'f').
        Note: the original default ('v') was a plain string, not a tuple;
        ('v',) behaves identically for the default case.
    :return: dict mapping each flag to its rows; 'v'/'vt'/'vn' entries are
        converted to float ndarrays.
    """
    data = {head: [] for head in flags}
    # `with` guarantees the file handle is closed even on parse errors.
    with open(model_path, 'r', encoding="utf-8") as fid:
        for line in fid:
            line = line.strip()
            if not line:
                continue
            # Raw string avoids the py3.12 invalid-escape warning for '\s'.
            parts = re.split(r'\s+', line)
            if parts[0] in flags:
                data[parts[0]].append(parts[1:])
    # BUG FIX: np.float was removed in NumPy 1.24 — use the builtin float.
    if 'v' in data.keys():
        data['v'] = np.array(data['v']).astype(float)
    if 'vt' in data.keys():
        data['vt'] = np.array(data['vt']).astype(float)
    if 'vn' in data.keys():
        data['vn'] = np.array(data['vn']).astype(float)
    return data
def read_txt(txt_file_list):
    '''
    Load one or several whitespace-delimited text files into a single array.
    :param txt_file_list: a single path or a list of paths.
    :return: np.ndarray stacking the np.loadtxt result of each file.
    '''
    # Normalize a single path to a one-element list.
    if isinstance(txt_file_list, str):
        txt_file_list = [txt_file_list]
    return np.array([np.loadtxt(path) for path in txt_file_list])
def write_obj(objfile, data):
    '''
    Write obj-style records into a file, one line per row.
    :param objfile (str): destination file path.
    :param data (dict): maps a record prefix (e.g. 'v') to a list of rows.
    :return:
    '''
    with open(objfile, 'w+') as out:
        for prefix, rows in data.items():
            for row in rows:
                line = prefix + ' %s' * len(row) % tuple(row)
                out.write(line + '\n')
def read_json(file):
    '''
    Load and return the parsed JSON content of *file*.
    :param file: file path.
    :return:
    '''
    with open(file, 'r') as handle:
        return json.load(handle)
def write_json(file, data):
    '''
    Serialize *data* as JSON into *file*.
    :param file: file path (its parent directory must already exist).
    :param data: dict content.
    :return:
    '''
    assert os.path.exists(os.path.dirname(file))
    with open(file, 'w') as handle:
        json.dump(data, handle)
"""Factory Boy factory classes for ``svdbs``."""
import binning
import factory
from ..models import (
DgvGoldStandardSvs,
DgvSvs,
ExacCnv,
ThousandGenomesSv,
DbVarSv,
GnomAdSv,
EXAC_POP_CHOICES,
)
class DgvGoldStandardSvsFactory(factory.django.DjangoModelFactory):
    """Factory for DGV gold-standard SV records.

    Sequences generate non-overlapping outer/inner coordinate quadruples per
    instance; `bin` is derived from the outermost span via the binning scheme.
    """
    class Meta:
        model = DgvGoldStandardSvs

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    # Outer/inner start-end pairs around the n-th 100bp anchor.
    start_outer = factory.Sequence(lambda n: (n + 1) * 100 - 10)
    start_inner = factory.Sequence(lambda n: (n + 1) * 100 + 10)
    end_inner = factory.Sequence(lambda n: (n + 1) * 100 + 90)
    end_outer = factory.Sequence(lambda n: (n + 1) * 100 + 110)
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100 - 11, (n + 1) * 100 + 110))
    accession = factory.Sequence(lambda n: "DGV-GS-%d" % n)
    sv_type = "DEL"
    sv_sub_type = "DEL"
    num_studies = 1
    studies = factory.Sequence(lambda n: ["DGV-GS-STUDY-%d" % n])
    num_platforms = 1
    platforms = factory.Sequence(lambda n: ["DGV-GS-PLATFORM-%d" % n])
    num_algorithms = 1
    algorithms = factory.Sequence(lambda n: ["DGV-GS-ALGO-%d" % n])
    num_variants = 1
    num_carriers = 1
    num_unique_samples = 1
    # Per-population carrier counts; only middle_east is non-zero here.
    num_carriers_african = 0
    num_carriers_asian = 0
    num_carriers_european = 0
    num_carriers_mexican = 0
    num_carriers_middle_east = 1
    num_carriers_native_american = 0
    num_carriers_north_american = 0
    num_carriers_oceania = 0
    num_carriers_south_american = 0
    num_carriers_admixed = 0
    num_carriers_unknown = 0
class DgvSvsFactory(factory.django.DjangoModelFactory):
    """Factory for ``DgvSvs`` (Database of Genomic Variants SV) test records."""

    class Meta:
        model = DgvSvs

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    # UCSC-style interval bin computed from the generated coordinates.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    accession = factory.Sequence(lambda n: "DGV-%d" % n)
    sv_type = "DEL"
    sv_sub_type = "DEL"
    study = factory.Sequence(lambda n: "DGV-STUDY-%d" % n)
    # platform is generated as a one-element list; the list is built inside
    # the Sequence so the counter is interpolated into the element.
    platform = factory.Sequence(lambda n: ["DGV-PLATFORM-%d" % n])
    num_samples = 1
    observed_gains = 0
    observed_losses = 1
class ExacCnvFactory(factory.django.DjangoModelFactory):
    """Factory for ``ExacCnv`` (ExAC copy-number variant) test records."""

    class Meta:
        model = ExacCnv

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    # UCSC-style interval bin computed from the generated coordinates.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    sv_type = "DEL"
    # Cycle through every population code declared for ExAC.
    population = factory.Iterator([x[0] for x in EXAC_POP_CHOICES])
    phred_score = factory.Iterator(list(range(30)))
class ThousandGenomesSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``ThousandGenomesSv`` (1000 Genomes Project SV) test records."""

    class Meta:
        model = ThousandGenomesSv

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    # UCSC-style interval bin computed from the generated coordinates.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    # Confidence intervals around the start/end breakpoints.
    start_ci_left = -100
    start_ci_right = 100
    end_ci_left = -100
    end_ci_right = 100
    sv_type = "DEL"
    source_call_set = "DEL_delly"
    mobile_element_info = []
    num_samples = 1
    # Overall allele counts: one variant allele out of two.
    num_alleles = 2
    num_var_alleles = 1
    # Per-population allele counts: only AFR carries the variant allele in
    # the default record; all other populations are empty.
    num_alleles_afr = 2
    num_var_alleles_afr = 1
    num_alleles_amr = 0
    num_var_alleles_amr = 0
    num_alleles_eas = 0
    num_var_alleles_eas = 0
    num_alleles_eur = 0
    num_var_alleles_eur = 0
    num_alleles_sas = 0
    num_var_alleles_sas = 0
class DbVarSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``DbVarSv`` (NCBI dbVar SV) test records."""

    class Meta:
        model = DbVarSv

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    # UCSC-style interval bin computed from the generated coordinates.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    num_carriers = 1
    sv_type = "DEL"
    method = "Sequencing"
    analysis = "Read_depth"
    platform = factory.Sequence(lambda n: "DBVAR-PLATFORM-%d" % n)
    study = factory.Sequence(lambda n: "DBVAR-STUDY-%d" % n)
    # No clinical annotations on the default record.
    clinical_assertions = []
    clinvar_accessions = []
    bin_size = "large"
    # Insertion length bounds do not apply to the default deletion record.
    min_ins_length = None
    max_ins_length = None
class GnomAdSvFactory(factory.django.DjangoModelFactory):
    """Factory for ``GnomAdSv`` (gnomAD structural variant) test records."""

    class Meta:
        model = GnomAdSv

    release = "GRCh37"
    chromosome = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    start = factory.Sequence(lambda n: (n + 1) * 100)
    end = factory.Sequence(lambda n: (n + 1) * 100 + 100)
    # UCSC-style interval bin computed from the generated coordinates.
    bin = factory.Sequence(lambda n: binning.assign_bin((n + 1) * 100, (n + 1) * 100 + 100))
    ref = "N"
    alt = ["<DUP>"]
    # BUG FIX: this was ``[factory.Sequence(...)]`` -- factory_boy does not
    # evaluate declarations nested inside a plain list, so the model field
    # received a list containing the Sequence object itself instead of a
    # string. Build the list inside the Sequence, as the other factories do
    # for their list-valued fields (e.g. ``studies``/``platforms``).
    name = factory.Sequence(lambda n: ["DBVAR-SV-%d" % n])
    svtype = "DEL"
    svlen = 100
    filter = ["PASS"]
    evidence = ["BAF", "RD"]
    algorithms = ["depth"]
    chr2 = factory.Iterator(list(map(str, range(1, 23))) + ["X", "Y"])
    # Fields for complex/multi-part events; unset for the default deletion.
    cpx_type = None
    cpx_intervals = []
    source = None
    strands = None
    unresolved_type = None
    pcrplus_depleted = False
    pesr_gt_overdispersion = False
    # Protein-coding effect annotations; all empty for the default record.
    protein_coding_lof = []
    protein_coding_dup_lof = []
    protein_coding_copy_gain = []
    protein_coding_dup_partial = []
    protein_coding_msv_exon_ovr = []
    protein_coding_intronic = []
    protein_coding_inv_span = []
    protein_coding_utr = []
    protein_coding_nearest_tss = []
    protein_coding_intergenic = False
    protein_coding_promoter = []
    # Overall counts/frequencies: one heterozygous carrier out of one
    # bi-allelic genotype.
    an = 2
    ac = [1]
    af = [0.5]
    n_bi_genos = 1
    n_homref = 0
    n_het = 1
    n_homalt = 0
    freq_homref = 0.5
    freq_het = 0.5
    freq_homalt = 0.0
    popmax_af = 0.5
    # Per-population counts/frequencies; only AFR has any alleles counted.
    afr_an = 1
    afr_ac = [1]
    afr_af = [0.5]
    afr_n_bi_genos = 0
    afr_n_homref = 0
    afr_n_het = 0
    afr_n_homalt = 0
    afr_freq_homref = 0.0
    afr_freq_het = 0.0
    afr_freq_homalt = 0.0
    amr_an = 0
    amr_ac = [0]
    amr_af = [0.0]
    amr_n_bi_genos = 0
    amr_n_homref = 0
    amr_n_het = 0
    amr_n_homalt = 0
    amr_freq_homref = 0.0
    amr_freq_het = 0.0
    amr_freq_homalt = 0.0
    eas_an = 0
    eas_ac = [0]
    eas_af = [0.0]
    eas_n_bi_genos = 0
    eas_n_homref = 0
    eas_n_het = 0
    eas_n_homalt = 0
    eas_freq_homref = 0.0
    eas_freq_het = 0.0
    eas_freq_homalt = 0.0
    eur_an = 0
    eur_ac = [0]
    eur_af = [0.0]
    eur_n_bi_genos = 0
    eur_n_homref = 0
    eur_n_het = 0
    eur_n_homalt = 0
    eur_freq_homref = 0.0
    eur_freq_het = 0.0
    eur_freq_homalt = 0.0
    oth_an = 0
    oth_ac = [0]
    oth_af = [0.0]
    oth_n_bi_genos = 0
    oth_n_homref = 0
    oth_n_het = 0
    oth_n_homalt = 0
    oth_freq_homref = 0.0
    oth_freq_het = 0.0
    oth_freq_homalt = 0.0
|
from typing import Dict
from typing import List
from typing import Optional
class MediastackApiError(Exception):
    """Error reported by the Mediastack API.

    :param code: API error code.
    :param message: human-readable error description.
    :param context: optional mapping of field names to lists of error
        details; defaults to an empty dict.
    """

    def __init__(self, code: str, message: str, context: Optional[Dict[str, List[str]]] = None) -> None:
        self.code = code
        self.message = message
        # A mutable default argument (``context={}``) would be shared by
        # every instance created without an explicit context; create a
        # fresh dict per instance instead.
        self.context = {} if context is None else context

    def __str__(self) -> str:
        return f"code: {self.code}; message: {self.message}, context: {self.context}"
|
import random
class Spell:
    """A castable spell with a name, mana cost, base damage and element type."""

    def __init__(self, name, cost, dmg, type):
        # NOTE(review): the ``type`` parameter shadows the builtin, but
        # renaming it would break callers passing it as a keyword argument.
        self.name = name
        self.cost = cost
        self.dmg = dmg
        self.type = type

    def generate_spell_damage(self):
        """Return a random damage roll in [dmg - 15, dmg + 14]."""
        low, high = self.dmg - 15, self.dmg + 15
        # randrange excludes the upper bound, so the maximum roll is high - 1.
        return random.randrange(low, high)
#!/usr/bin/env python3
"""Get the Zappi history and import into InfluxDB.
This script will query the first zappi found to obtain a number of days history at the hourly level
It will then import a summary of this information into an influxdb database.
"""
import os
import argparse
import logging
import logging.handlers
from datetime import datetime, timedelta
import dotenv
import influxdb
import myenergi
def get_logger():
    """Log messages to the syslog.

    Configures the root logger with a syslog handler (daemon facility,
    INFO level) and returns it.
    """
    log_format = ('python[%(process)d]: [%(levelname)s] '
                  '%(filename)s:%(funcName)s:%(lineno)d "%(message)s"')
    syslog_handler = logging.handlers.SysLogHandler(
        address='/dev/log',
        facility=logging.handlers.SysLogHandler.LOG_DAEMON)
    syslog_handler.setFormatter(logging.Formatter(fmt=log_format))
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)
    return root_logger
def get_options():
    """Get the required options using argparse or from a dotenv file.

    Credentials found in ``~/.env`` take precedence; any credential missing
    from the env file becomes a required command-line option.

    :return: argparse namespace with serial, password, start and end set.
    """
    env = dotenv.dotenv_values(os.path.expanduser("~/.env"))
    parser = argparse.ArgumentParser(description='Gets history from the Zappi and imports to influxdb.')
    # BUG FIX: '-s' was previously registered for both --serial (below) and
    # --start, which made argparse raise ArgumentError at parser construction
    # whenever myenergi_serial was absent from the env file. '-s' stays with
    # the always-registered --start; the serial is long-option only.
    if "myenergi_serial" not in env:
        parser.add_argument('--serial', required=True, help='myenergi hub serial number')
    if "myenergi_password" not in env:
        parser.add_argument('-p', '--password', required=True, help='myenergi password')
    parser.add_argument('-s', '--start', required=False, type=int, default=1, help='starting number of days ago')
    parser.add_argument('-e', '--end', required=False, type=int, default=4, help='ending number of days ago')
    args = parser.parse_args()
    # Values from the env file win; the CLI flag was not registered for them.
    if "myenergi_serial" in env:
        args.serial = env['myenergi_serial']
    if "myenergi_password" in env:
        args.password = env['myenergi_password']
    return args
def main():
    """Get the Zappi history and import into InfluxDB.

    For each day in the configured range, fetches the hourly history of the
    first Zappi on the hub, sums each energy counter for the day, and writes
    one daily summary point to the local ``myenergi`` InfluxDB database.
    """
    args = get_options()
    # Set the logging level for the myenergi api client
    logging.getLogger('myenergi.api').setLevel(logging.INFO)
    # Setup the local logger
    logger = get_logger()
    my_influxdb = influxdb.InfluxDBClient(host='localhost', port=8086)
    # influxdb.drop_database('myenergi')
    # influxdb.create_database('myenergi')
    my_influxdb.switch_database('myenergi')
    # Counter keys summed per day. NOTE(review): judging by the influx_fields
    # mapping below, these appear to be imp/exp = grid import/export,
    # gep/gen = solar generated/used, h1d/h1b = zappi diverted/imported,
    # hom = derived home usage -- confirm against the myenergi API docs.
    typelist = ['imp', 'exp', 'gen', 'gep', 'h1d', 'h1b', 'hom']
    total = {}
    logger.info("Adding Myenergi information to influxdb")
    with myenergi.API(args.serial, args.password) as mye:
        # Only the first Zappi found on the hub is queried.
        zappiserial = mye.get_serials("ZAPPI")[0]
        for daysago in range(args.start, args.end):
            thetime = datetime.now()
            # Roll the timestamp back to 01:00 on the target day so the
            # query covers that whole day of hourly buckets.
            querytime = thetime - timedelta(days=daysago,
                                            hours=thetime.hour - 1,
                                            minutes=thetime.minute,
                                            seconds=thetime.second)
            datestring = querytime.strftime("%Y-%m-%d")
            datetimestring = querytime.strftime("%Y-%m-%dT%H:%M")
            result = mye.get_zappi_history(zappiserial, "Hour", datestring)
            # Reset the per-day accumulators.
            for key in typelist:
                total[key] = 0
            for entry in result:
                for key in typelist:
                    if key in entry.keys():
                        # Convert to kWh; assumes the raw counters are in
                        # joules (/ 3600 s/h / 1000 W/kW) -- TODO confirm.
                        entry[key] = round(entry[key] / 3600 / 1000, 2)
                        total[key] = round(total[key] + entry[key], 2)
                    else:
                        entry[key] = 0
            # Home usage is not reported directly; derive it from the other
            # daily totals.
            total['hom'] = round(total['gep'] + total['imp']
                                 - total['exp'] - total['h1d'] - total['gen'] - total['h1b'], 2)
            print(datestring, total)
            # Create data for influxdb and write to database
            influx_tags = {
                'serial_number': zappiserial
            }
            influx_fields = {
                'zappi_diverted': float(total['h1d']),
                'zappi_imported': float(total['h1b']),
                'home_used': float(total['hom']),
                'solar_generated': float(total['gep']),
                'solar_used': float(total['gen']),
                'grid_imported': float(total['imp']),
                'grid_exported': float(total['exp']),
                'month': querytime.strftime("%b %Y"),
            }
            influx_data = [
                {
                    'measurement': "zappi_daily_energy",
                    'time': datetimestring,
                    'tags': influx_tags,
                    'fields': influx_fields,
                }
            ]
            my_influxdb.write_points(influx_data)
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.