# -*- coding: utf-8 -*-
def main():
n = int(input())
a = [int(input()) for _ in range(n)]
count = 0
years = 0
for ai in a:
years += ai
if years <= 2018:
count += 1
print(count)
if __name__ == '__main__':
main()
|
import time
import re
import socket
import sys
import traceback
import paho.mqtt.client as mqtt
from threading import Thread
class TemperatureLogger:
config = None
mqtt_client = None
mqtt_connected = False
worker = None
# removed as not one of my requirements
#temperatures = {}
def __init__(self, config):
self.config = config
self.wait_update = self.config.get('wait_update', float(60))
self.wait_process = self.config.get('wait_process', float(5))
def verbose(self, message):
if self.config and 'verbose' in self.config and self.config['verbose'] == 'true':
sys.stdout.write('VERBOSE: ' + message + '\n')
sys.stdout.flush()
def error(self, message):
sys.stderr.write('ERROR: ' + message + '\n')
sys.stderr.flush()
def mqtt_connect(self):
if self.mqtt_broker_reachable():
self.verbose('Connecting to ' + self.config['mqtt_host'] + ':' + self.config['mqtt_port'])
self.mqtt_client = mqtt.Client(self.config['mqtt_client_id'])
if 'mqtt_user' in self.config and 'mqtt_password' in self.config:
self.mqtt_client.username_pw_set(self.config['mqtt_user'], self.config['mqtt_password'])
self.mqtt_client.on_connect = self.mqtt_on_connect
self.mqtt_client.on_disconnect = self.mqtt_on_disconnect
try:
self.mqtt_client.connect(self.config['mqtt_host'], int(self.config['mqtt_port']), 60)
self.mqtt_client.loop_forever()
except:
self.error(traceback.format_exc())
self.mqtt_client = None
else:
self.error(self.config['mqtt_host'] + ':' + self.config['mqtt_port'] + ' not reachable!')
def mqtt_on_connect(self, mqtt_client, userdata, flags, rc):
self.mqtt_connected = True
self.verbose('...mqtt_connected!')
def mqtt_on_disconnect(self, mqtt_client, userdata, rc):
self.mqtt_connected = False
self.verbose('Disconnected! Will reconnect ...')
if rc == 0:
self.mqtt_connect()
else:
time.sleep(5)
while not self.mqtt_broker_reachable():
time.sleep(10)
self.mqtt_client.reconnect()
def mqtt_broker_reachable(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect((self.config['mqtt_host'], int(self.config['mqtt_port'])))
s.close()
return True
except socket.error:
return False
def update(self):
while True:
for source in self.config['sources']:
serial = source['serial']
topic = source['topic']
# added ijm
dev = source['device']
device = open('/sys/bus/w1/devices/' + serial + '/w1_slave')
raw = device.read()
device.close()
match = re.search(r't=(-?\d+)', raw)  # allow negative readings
if match:
temperature_raw = match.group(1)
temperature = round(float(temperature_raw)/1000, 2)
if 'offset' in source:
temperature += float(source['offset'])
self.publish_temperature(topic, temperature, dev)
'''
# This block meant that only temperature changes were published; my requirement
# is to publish regardless. Many may want the old behaviour, so I will look at
# making it an option.
if serial not in self.temperatures or self.temperatures[serial] != temperature:
self.temperatures[serial] = temperature
self.publish_temperature(topic, temperature, dev)'''
self.verbose('Entering wait_process delay of: ' + str(self.wait_process) + ' Seconds')
time.sleep(self.wait_process)
self.verbose('Entering wait_update delay of: ' + str(self.wait_update) + ' Seconds')
time.sleep(self.wait_update)
def publish_temperature(self, topic, temperature, dev):
if self.mqtt_connected:
dev = '{{ "{0}": {1} }}'.format(dev, str(temperature))
self.verbose('Publishing: ' + str(temperature))
self.mqtt_client.publish(topic, dev, 0, True)
def start(self):
self.worker = Thread(target=self.update)
self.worker.daemon = True
self.worker.start()
self.mqtt_connect()
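# Example usage (a minimal sketch; the keys below mirror the config values read
# above, but every value is a placeholder and would normally come from a config
# file supplied by the caller):
if __name__ == '__main__':
    example_config = {
        'verbose': 'true',
        'mqtt_host': 'localhost',
        'mqtt_port': '1883',
        'mqtt_client_id': 'temperature-logger',
        'sources': [
            # serial is the 1-Wire device ID; topic is the MQTT topic to publish to
            {'serial': '28-000000000000', 'topic': 'home/temperature', 'device': 'living_room'},
        ],
    }
    TemperatureLogger(example_config).start()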
|
import unittest
from calibre.ebooks.metadata.sources.test import (test_identify_plugin,
title_test, authors_test,
series_test)
from source import Comicvine
class TestFileList(unittest.TestCase):
def test_list(self):
f = open("test/passing_titles.txt")
lines = f.readlines()
f.close()
lines = [line.replace("\n", "") for line in lines]
lines = [line.split('|') for line in lines]
tuples = [({'title': line[1]}, [comicvine_id_test(line[0])])
for line in lines]
test_identify_plugin(Comicvine.name, tuples)
class TestFailingFileList(unittest.TestCase):
def test_list(self):
f = open("test/failing_titles.txt")
lines = f.readlines()
f.close()
lines = [line.replace("\n", "") for line in lines]
lines = [line.split('|') for line in lines]
tuples = [({'title': line[1]}, [comicvine_id_test(line[0])])
for line in lines]
test_identify_plugin(Comicvine.name, tuples)
class TestPlugin(unittest.TestCase):
def test_comicvine_id_match(self):
test_identify_plugin(Comicvine.name, [(
{
'title': '',
'identifiers': {'comicvine': '105747'},
},
[
title_test(
'Preacher Special: The Story of You-Know-Who #1: '
'The Story of You-Know-Who',
exact=True
),
authors_test(
['Garth Ennis', 'Richard Case',
'Matt Hollingsworth',
'Clem Robins', 'Glenn Fabry',
'Julie Rottenberg']),
series_test(
'Preacher Special: The Story of You-Know-Who',
1.0),
comicvine_id_test('105747'),
comicvine_volume_id_test('18059'),
]
)])
def test_comicvine_volume_id_match(self):
test_identify_plugin(Comicvine.name, [(
{
'title': 'Preacher',
'identifiers': {
'comicvine-volume': '18059'},
},
[
title_test(
'Preacher Special: The Story of You-Know-Who #1: '
'The Story of You-Know-Who',
exact=True
),
authors_test(
['Garth Ennis', 'Richard Case',
'Matt Hollingsworth',
'Clem Robins', 'Glenn Fabry',
'Julie Rottenberg']),
series_test(
'Preacher Special: The Story of You-Know-Who',
1.0),
comicvine_id_test('105747'),
comicvine_volume_id_test('18059'),
]
)])
def comicvine_id_test(expected):
"""Build a test function to assert comicvine ID."""
def test(metadata):
"""Ensure that the metadata instance contains the expected data."""
if metadata.identifiers:
result = metadata.identifiers.get('comicvine')
else:
result = None
return result and (expected == result)
return test
def comicvine_volume_id_test(expected):
"""Build a test function to assert comicvine volume ID."""
def test(metadata):
"""Ensure that the metadata instance contains the expected data."""
if metadata.identifiers:
result = metadata.identifiers.get('comicvine-volume')
else:
result = None
return result and (expected == result)
return test
|
# -*- coding: utf-8 -*-
"""
Functions for performing statistical preprocessing and analyses
"""
import warnings
import numpy as np
from tqdm import tqdm
from itertools import combinations
from scipy import optimize, spatial, special, stats as sstats
from scipy.stats.stats import _chk2_asarray
from sklearn.utils.validation import check_random_state
from sklearn.linear_model import LinearRegression
from . import utils
def residualize(X, Y, Xc=None, Yc=None, normalize=True, add_intercept=True):
"""
Returns residuals of regression equation from `Y ~ X`
Parameters
----------
X : (N[, R]) array_like
Coefficient matrix of `R` variables for `N` subjects
Y : (N[, F]) array_like
Dependent variable matrix of `F` variables for `N` subjects
Xc : (M[, R]) array_like, optional
Coefficient matrix of `R` variables for `M` subjects. If not specified
then `X` is used to estimate betas. Default: None
Yc : (M[, F]) array_like, optional
Dependent variable matrix of `F` variables for `M` subjects. If not
specified then `Y` is used to estimate betas. Default: None
normalize : bool, optional
Whether to normalize (i.e., z-score) residuals. Will use residuals from
`Yc ~ Xc` for generating mean and variance. Default: True
add_intercept : bool, optional
Whether to add intercept to `X` (and `Xc`, if provided). The intercept
will not be removed, just used in beta estimation. Default: True
Returns
-------
Yr : (N, F) numpy.ndarray
Residuals of `Y ~ X`
Notes
-----
If both `Xc` and `Yc` are provided, these are used to calculate betas which
are then applied to `X` and `Y`.
"""
if ((Yc is None and Xc is not None) or (Yc is not None and Xc is None)):
raise ValueError('If processing against a comparative group, you must '
'provide both `Xc` and `Yc`.')
X, Y = np.asarray(X), np.asarray(Y)
if Yc is None:
Xc, Yc = X.copy(), Y.copy()
else:
Xc, Yc = np.asarray(Xc), np.asarray(Yc)
# add intercept to regressors if requested and calculate fit
if add_intercept:
X, Xc = utils.add_constant(X), utils.add_constant(Xc)
betas, *rest = np.linalg.lstsq(Xc, Yc, rcond=None)
# remove intercept from regressors and betas for calculation of residuals
if add_intercept:
betas = betas[:-1]
X, Xc = X[:, :-1], Xc[:, :-1]
# calculate residuals
Yr = Y - (X @ betas)
Ycr = Yc - (Xc @ betas)
if normalize:
Yr = sstats.zmap(Yr, compare=Ycr)
return Yr
def get_mad_outliers(data, thresh=3.5):
"""
Determines which samples in `data` are outliers
Uses the Median Absolute Deviation for determining whether datapoints are
outliers
Parameters
----------
data : (N, M) array_like
Data array where `N` is samples and `M` is features
thresh : float, optional
Modified z-score. Observations with a modified z-score (based on the
median absolute deviation) greater than this value will be classified
as outliers. Default: 3.5
Returns
-------
outliers : (N,) numpy.ndarray
Boolean array where True indicates an outlier
Notes
-----
Taken directly from https://stackoverflow.com/a/22357811
References
----------
Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to Detect and
Handle Outliers", The ASQC Basic References in Quality Control: Statistical
Techniques, Edward F. Mykytka, Ph.D., Editor.
Examples
--------
>>> from netneurotools import stats
Create array with three samples of four features each:
>>> X = np.array([[0, 5, 10, 15], [1, 4, 11, 16], [100, 100, 100, 100]])
>>> X
array([[ 0, 5, 10, 15],
[ 1, 4, 11, 16],
[100, 100, 100, 100]])
Determine which sample(s) is outlier:
>>> outliers = stats.get_mad_outliers(X)
>>> outliers
array([False, False, True])
"""
data = np.asarray(data)
if data.ndim == 1:
data = data.reshape(-1, 1)  # treat 1D input as N samples of one feature
if data.ndim > 2:
data = data.reshape(len(data), -1)
median = np.nanmedian(data, axis=0)
diff = np.nansum((data - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
modified_z_score = 0.6745 * diff / med_abs_deviation
return modified_z_score > thresh
def permtest_1samp(a, popmean, axis=0, n_perm=1000, seed=0):
"""
Non-parametric equivalent of :py:func:`scipy.stats.ttest_1samp`
Generates two-tailed p-value for hypothesis of whether `a` differs from
`popmean` using permutation tests
Parameters
----------
a : array_like
Sample observations
popmean : float or array_like
Expected value in null hypothesis. If array_like then it must have the
same shape as `a` excluding the `axis` dimension
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole array
of `a`. Default: 0
n_perm : int, optional
Number of permutations to assess. Unless `a` is very small along `axis`
this will approximate a randomization test via Monte Carlo simulations.
Default: 1000
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Set to None for "randomness".
Default: 0
Returns
-------
stat : float or numpy.ndarray
Difference from `popmean`
pvalue : float or numpy.ndarray
Non-parametric p-value
Notes
-----
Providing multiple values to `popmean` to run *independent* tests in
parallel is not currently supported.
The lowest p-value that can be returned by this function is equal to 1 /
(`n_perm` + 1).
Examples
--------
>>> from netneurotools import stats
>>> np.random.seed(7654567) # set random seed for reproducible results
>>> rvs = np.random.normal(loc=5, scale=10, size=(50, 2))
Test if mean of random sample is equal to true mean, and different mean. We
reject the null hypothesis in the second case and don't reject it in the
first case.
>>> stats.permtest_1samp(rvs, 5.0)
(array([-0.985602 , -0.05204969]), array([0.48551449, 0.95904096]))
>>> stats.permtest_1samp(rvs, 0.0)
(array([4.014398 , 4.94795031]), array([0.00699301, 0.000999 ]))
Example using axis and non-scalar dimension for population mean
>>> stats.permtest_1samp(rvs, [5.0, 0.0])
(array([-0.985602 , 4.94795031]), array([0.48551449, 0.000999 ]))
>>> stats.permtest_1samp(rvs.T, [5.0, 0.0], axis=1)
(array([-0.985602 , 4.94795031]), array([0.51548452, 0.000999 ]))
"""
a, popmean, axis = _chk2_asarray(a, popmean, axis)
rs = check_random_state(seed)
if a.size == 0:
return np.nan, np.nan
# ensure popmean will broadcast to `a` correctly
if popmean.ndim != a.ndim:
popmean = np.expand_dims(popmean, axis=axis)
# center `a` around `popmean` and calculate original mean
zeroed = a - popmean
true_mean = zeroed.mean(axis=axis) / 1
abs_mean = np.abs(true_mean)
# this for loop is not _the fastest_ but is memory efficient
# the broadcasting alt. would mean storing zeroed.size * n_perm in memory
permutations = np.ones(true_mean.shape)
for perm in range(n_perm):
flipped = zeroed * rs.choice([-1, 1], size=zeroed.shape) # sign flip
permutations += np.abs(flipped.mean(axis=axis)) >= abs_mean
pvals = permutations / (n_perm + 1) # + 1 in denom accounts for true_mean
return true_mean, pvals
def permtest_rel(a, b, axis=0, n_perm=1000, seed=0):
"""
Non-parametric equivalent of :py:func:`scipy.stats.ttest_rel`
Generates two-tailed p-value for hypothesis of whether related samples `a`
and `b` differ using permutation tests
Parameters
----------
a, b : array_like
Sample observations. These arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over whole arrays
of `a` and `b`. Default: 0
n_perm : int, optional
Number of permutations to assess. Unless `a` and `b` are very small
along `axis` this will approximate a randomization test via Monte
Carlo simulations. Default: 1000
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Set to None for "randomness".
Default: 0
Returns
-------
stat : float or numpy.ndarray
Average difference between `a` and `b`
pvalue : float or numpy.ndarray
Non-parametric p-value
Notes
-----
The lowest p-value that can be returned by this function is equal to 1 /
(`n_perm` + 1).
Examples
--------
>>> from netneurotools import stats
>>> np.random.seed(12345678) # set random seed for reproducible results
>>> rvs1 = np.random.normal(loc=5, scale=10, size=500)
>>> rvs2 = (np.random.normal(loc=5, scale=10, size=500)
... + np.random.normal(scale=0.2, size=500))
>>> stats.permtest_rel(rvs1, rvs2) # doctest: +SKIP
(-0.16506275161572695, 0.8021978021978022)
>>> rvs3 = (np.random.normal(loc=8, scale=10, size=500)
... + np.random.normal(scale=0.2, size=500))
>>> stats.permtest_rel(rvs1, rvs3) # doctest: +SKIP
(2.40533726097883, 0.000999000999000999)
"""
a, b, axis = _chk2_asarray(a, b, axis)
rs = check_random_state(seed)
if a.shape[axis] != b.shape[axis]:
raise ValueError('Provided arrays do not have same length along axis')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
# calculate original difference in means
ab = np.stack([a, b], axis=0)
if ab.ndim < 3:
ab = np.expand_dims(ab, axis=-1)
true_diff = np.squeeze(np.diff(ab, axis=0)).mean(axis=axis) / 1
abs_true = np.abs(true_diff)
# idx array
reidx = np.meshgrid(*[range(f) for f in ab.shape], indexing='ij')
permutations = np.ones(true_diff.shape)
for perm in range(n_perm):
# use this to re-index (i.e., swap along) the first axis of `ab`
swap = rs.random_sample(ab.shape[:-1]).argsort(axis=axis)
reidx[0] = np.repeat(swap[..., np.newaxis], ab.shape[-1], axis=-1)
# recompute difference between `a` and `b` (i.e., first axis of `ab`)
pdiff = np.squeeze(np.diff(ab[tuple(reidx)], axis=0)).mean(axis=axis)
permutations += np.abs(pdiff) >= abs_true
pvals = permutations / (n_perm + 1) # + 1 in denom accounts for true_diff
return true_diff, pvals
def permtest_pearsonr(a, b, axis=0, n_perm=1000, resamples=None, seed=0):
"""
Non-parametric equivalent of :py:func:`scipy.stats.pearsonr`
Generates two-tailed p-value for hypothesis of whether samples `a` and `b`
are correlated using permutation tests
Parameters
----------
a,b : (N[, M]) array_like
Sample observations. These arrays must have the same length and either
an equivalent number of columns or be broadcastable
axis : int or None, optional
Axis along which to compute test. If None, compute over whole arrays
of `a` and `b`. Default: 0
n_perm : int, optional
Number of permutations to assess. Unless `a` and `b` are very small
along `axis` this will approximate a randomization test via Monte
Carlo simulations. Default: 1000
resamples : (N, P) array_like, optional
Resampling array used to shuffle `a` when generating null distribution
of correlations. This array must have the same length as `a` and `b`
and should have at least the same number of columns as `n_perm` (if it
has more, then only the first `n_perm` columns will be used). When not
specified, a standard permutation is used to shuffle `a`. Default: None
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Set to None for "randomness".
Default: 0
Returns
-------
corr : float or numpy.ndarray
Correlations
pvalue : float or numpy.ndarray
Non-parametric p-value
Notes
-----
The lowest p-value that can be returned by this function is equal to 1 /
(`n_perm` + 1).
Examples
--------
>>> from netneurotools import datasets, stats
>>> np.random.seed(12345678) # set random seed for reproducible results
>>> x, y = datasets.make_correlated_xy(corr=0.1, size=100)
>>> stats.permtest_pearsonr(x, y) # doctest: +SKIP
(0.10032564626876286, 0.3046953046953047)
>>> x, y = datasets.make_correlated_xy(corr=0.5, size=100)
>>> stats.permtest_pearsonr(x, y) # doctest: +SKIP
(0.500040365781984, 0.000999000999000999)
Also works with multiple columns by either broadcasting the smaller array
to the larger:
>>> z = x + np.random.normal(loc=1, size=100)
>>> stats.permtest_pearsonr(x, np.column_stack([y, z]))
(array([0.50004037, 0.25843187]), array([0.000999 , 0.01098901]))
or by using matching columns in the two arrays (e.g., `x` and `y` vs
`a` and `b`):
>>> a, b = datasets.make_correlated_xy(corr=0.9, size=100)
>>> stats.permtest_pearsonr(np.column_stack([x, a]), np.column_stack([y, b]))
(array([0.50004037, 0.89927523]), array([0.000999, 0.000999]))
""" # noqa
a, b, axis = _chk2_asarray(a, b, axis)
rs = check_random_state(seed)
if len(a) != len(b):
raise ValueError('Provided arrays do not have same length')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
if resamples is not None:
if n_perm > resamples.shape[-1]:
raise ValueError('Number of permutations requested exceeds size '
'of resampling array.')
# divide by one forces coercion to float if ndim = 0
true_corr = efficient_pearsonr(a, b)[0] / 1
abs_true = np.abs(true_corr)
permutations = np.ones(true_corr.shape)
for perm in range(n_perm):
# permute `a` and determine whether correlations exceed original
if resamples is None:
ap = a[rs.permutation(len(a))]
else:
ap = a[resamples[:, perm]]
permutations += np.abs(efficient_pearsonr(ap, b)[0]) >= abs_true
pvals = permutations / (n_perm + 1) # + 1 in denom accounts for true_corr
return true_corr, pvals
def efficient_pearsonr(a, b, ddof=1, nan_policy='propagate'):
"""
Computes correlation of matching columns in `a` and `b`
Parameters
----------
a,b : array_like
Sample observations. These arrays must have the same length and either
an equivalent number of columns or be broadcastable
ddof : int, optional
Degrees of freedom correction in the calculation of the standard
deviation. Default: 1
nan_policy : str, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default: 'propagate'
Returns
-------
corr : float or numpy.ndarray
Pearson's correlation coefficient between matching columns of inputs
pval : float or numpy.ndarray
Two-tailed p-values
Notes
-----
If either input contains nan and nan_policy is set to 'omit', both arrays
will be masked to omit the nan entries.
Examples
--------
>>> from netneurotools import datasets, stats
Generate some not-very-correlated and some highly-correlated data:
>>> np.random.seed(12345678) # set random seed for reproducible results
>>> x1, y1 = datasets.make_correlated_xy(corr=0.1, size=100)
>>> x2, y2 = datasets.make_correlated_xy(corr=0.8, size=100)
Calculate both correlations simultaneously:
>>> stats.efficient_pearsonr(np.c_[x1, x2], np.c_[y1, y2])
(array([0.10032565, 0.79961189]), array([3.20636135e-01, 1.97429944e-23]))
"""
a, b, axis = _chk2_asarray(a, b, 0)
if len(a) != len(b):
raise ValueError('Provided arrays do not have same length')
if a.size == 0 or b.size == 0:
return np.nan, np.nan
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError(f'Value for nan_policy "{nan_policy}" not allowed')
a, b = a.reshape(len(a), -1), b.reshape(len(b), -1)
if (a.shape[1] != b.shape[1]):
a, b = np.broadcast_arrays(a, b)
mask = np.logical_or(np.isnan(a), np.isnan(b))
if nan_policy == 'raise' and np.any(mask):
raise ValueError('Input cannot contain NaN when nan_policy is "raise"')
elif nan_policy == 'omit':
# avoid making copies of the data, if possible
a = np.ma.masked_array(a, mask, copy=False, fill_value=np.nan)
b = np.ma.masked_array(b, mask, copy=False, fill_value=np.nan)
with np.errstate(invalid='ignore'):
corr = (sstats.zscore(a, ddof=ddof, nan_policy=nan_policy)
* sstats.zscore(b, ddof=ddof, nan_policy=nan_policy))
sumfunc, n_obs = np.sum, len(a)
if nan_policy == 'omit':
corr = corr.filled(np.nan)
sumfunc = np.nansum
n_obs = np.squeeze(np.sum(np.logical_not(np.isnan(corr)), axis=0))
corr = sumfunc(corr, axis=0) / (n_obs - 1)
corr = np.squeeze(np.clip(corr, -1, 1)) / 1
# taken from scipy.stats
ab = (n_obs / 2) - 1
prob = 2 * special.btdtr(ab, ab, 0.5 * (1 - np.abs(corr)))
return corr, prob
def _gen_rotation(seed=None):
"""
Generates random matrix for rotating spherical coordinates
Parameters
----------
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation
Returns
-------
rotate_{l,r} : (3, 3) numpy.ndarray
Rotations for left and right hemisphere coordinates, respectively
"""
rs = check_random_state(seed)
# for reflecting across Y-Z plane
reflect = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
# generate rotation for left
rotate_l, temp = np.linalg.qr(rs.normal(size=(3, 3)))
rotate_l = rotate_l @ np.diag(np.sign(np.diag(temp)))
if np.linalg.det(rotate_l) < 0:
rotate_l[:, 0] = -rotate_l[:, 0]
# reflect the left rotation across Y-Z plane
rotate_r = reflect @ rotate_l @ reflect
return rotate_l, rotate_r
def gen_spinsamples(coords, hemiid, n_rotate=1000, check_duplicates=True,
method='original', exact=False, seed=None, verbose=False,
return_cost=False):
"""
Returns a resampling array for `coords` obtained from rotations / spins
Using the method initially proposed in [ST1]_ (and later modified + updated
based on findings in [ST2]_ and [ST3]_), this function applies random
rotations to the user-supplied `coords` in order to generate a resampling
array that preserves its spatial embedding. Rotations are generated for one
hemisphere and mirrored for the other (see `hemiid` for more information).
Due to irregular sampling of `coords` and the randomness of the rotations
it is possible that some "rotations" may resample with replacement (i.e.,
will not be a true permutation). The likelihood of this can be reduced by
either increasing the sampling density of `coords` or changing the
``method`` parameter (see Notes for more information on the latter).
Parameters
----------
coords : (N, 3) array_like
X, Y, Z coordinates of `N` nodes/parcels/regions/vertices defined on a
sphere
hemiid : (N,) array_like
Array denoting hemisphere designation of coordinates in `coords`, where
values should be {0, 1} denoting the different hemispheres. Rotations
are generated for one hemisphere and mirrored across the y-axis for the
other hemisphere.
n_rotate : int, optional
Number of rotations to generate. Default: 1000
check_duplicates : bool, optional
Whether to check for and attempt to avoid duplicate resamplings. A
warning will be raised if duplicates cannot be avoided. Setting to
True may increase the runtime of this function! Default: True
method : {'original', 'vasa', 'hungarian'}, optional
Method by which to match non- and rotated coordinates. Specifying
'original' will use the method described in [ST1]_. Specifying 'vasa'
will use the method described in [ST4]_. Specifying 'hungarian' will use
the Hungarian algorithm to minimize the global cost of reassignment
(will dramatically increase runtime). Default: 'original'
seed : {int, np.random.RandomState instance, None}, optional
Seed for random number generation. Default: None
verbose : bool, optional
Whether to print occasional status messages. Default: False
return_cost : bool, optional
Whether to return cost array (specified as Euclidean distance) for each
coordinate for each rotation. Default: False
Returns
-------
spinsamples : (N, `n_rotate`) numpy.ndarray
Resampling matrix to use in permuting data based on supplied `coords`.
cost : (N, `n_rotate`,) numpy.ndarray
Cost (specified as Euclidean distance) of re-assigning each coordinate
for every rotation in `spinsamples`. Only provided if `return_cost` is
True.
Notes
-----
By default, this function uses the minimum Euclidean distance between the
original coordinates and the new, rotated coordinates to generate a
resampling array after each spin. Unfortunately, this can (with some
frequency) lead to multiple coordinates being re-assigned the same value:
>>> from netneurotools import stats as nnstats
>>> coords = [[0, 0, 1], [1, 0, 0], [0, 0, 1], [1, 0, 0]]
>>> hemi = [0, 0, 1, 1]
>>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
... method='original', check_duplicates=False)
array([[0],
[0],
[2],
[3]])
While this is reasonable in most circumstances, if you feel incredibly
strongly about having a perfect "permutation" (i.e., all indices appear
once and exactly once in the resampling), you can set the ``method``
parameter to either 'vasa' or 'hungarian':
>>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
... method='vasa', check_duplicates=False)
array([[1],
[0],
[2],
[3]])
>>> nnstats.gen_spinsamples(coords, hemi, n_rotate=1, seed=1,
... method='hungarian', check_duplicates=False)
array([[0],
[1],
[2],
[3]])
Note that setting this parameter may increase the runtime of the function
(especially for `method='hungarian'`). Refer to [ST1]_ for information on
why the default (i.e., ``exact`` set to False) suffices in most cases.
For the original MATLAB implementation of this function refer to [ST5]_.
References
----------
.. [ST1] Alexander-Bloch, A., Shou, H., Liu, S., Satterthwaite, T. D.,
Glahn, D. C., Shinohara, R. T., Vandekar, S. N., & Raznahan, A. (2018).
On testing for spatial correspondence between maps of human brain
structure and function. NeuroImage, 178, 540-51.
.. [ST2] Blaser, R., & Fryzlewicz, P. (2016). Random Rotation Ensembles.
Journal of Machine Learning Research, 17(4), 1–26.
.. [ST3] Lefèvre, J., Pepe, A., Muscato, J., De Guio, F., Girard, N.,
Auzias, G., & Germanaud, D. (2018). SPANOL (SPectral ANalysis of Lobes):
A Spectral Clustering Framework for Individual and Group Parcellation of
Cortical Surfaces in Lobes. Frontiers in Neuroscience, 12, 354.
.. [ST4] Váša, F., Seidlitz, J., Romero-Garcia, R., Whitaker, K. J.,
Rosenthal, G., Vértes, P. E., ... & Jones, P. B. (2018). Adolescent
tuning of association cortex in human structural brain networks.
Cerebral Cortex, 28(1), 281-294.
.. [ST5] https://github.com/spin-test/spin-test
"""
methods = ['original', 'vasa', 'hungarian']
if method not in methods:
raise ValueError('Provided method "{}" invalid. Must be one of {}.'
.format(method, methods))
if exact:
warnings.warn('The `exact` parameter will no longer be supported in '
'an upcoming release. Please use the `method` parameter '
'instead.', DeprecationWarning, stacklevel=3)
if exact == 'vasa' and method == 'original':
method = 'vasa'
elif exact and method == 'original':
method = 'hungarian'
seed = check_random_state(seed)
coords = np.asanyarray(coords)
hemiid = np.squeeze(np.asanyarray(hemiid, dtype='int8'))
# check supplied coordinate shape
if coords.shape[-1] != 3 or coords.squeeze().ndim != 2:
raise ValueError('Provided `coords` must be of shape (N, 3), not {}'
.format(coords.shape))
# ensure hemisphere designation array is correct
if hemiid.ndim != 1:
raise ValueError('Provided `hemiid` array must be one-dimensional.')
if len(coords) != len(hemiid):
raise ValueError('Provided `coords` and `hemiid` must have the same '
'length. Provided lengths: coords = {}, hemiid = {}'
.format(len(coords), len(hemiid)))
if np.max(hemiid) > 1 or np.min(hemiid) < 0:
raise ValueError('Hemiid must have values in {0, 1} denoting left and '
'right hemisphere coordinates, respectively. '
+ 'Provided array contains values: {}'
.format(np.unique(hemiid)))
# empty array to store resampling indices
spinsamples = np.zeros((len(coords), n_rotate), dtype=int)
cost = np.zeros((len(coords), n_rotate))
inds = np.arange(len(coords), dtype=int)
# generate rotations and resampling array!
msg, warned = '', False
for n in range(n_rotate):
count, duplicated = 0, True
if verbose:
msg = 'Generating spin {:>5} of {:>5}'.format(n, n_rotate)
print(msg, end='\r', flush=True)
while duplicated and count < 500:
count, duplicated = count + 1, False
resampled = np.zeros(len(coords), dtype='int32')
# rotate each hemisphere separately
for h, rot in enumerate(_gen_rotation(seed=seed)):
hinds = (hemiid == h)
coor = coords[hinds]
if len(coor) == 0:
continue
# if we need an "exact" mapping (i.e., each node needs to be
# assigned EXACTLY once) then we have to calculate the full
# distance matrix which is a nightmare with respect to memory
# for anything that isn't parcellated data.
# that is, don't do this with vertex coordinates!
if method == 'vasa':
dist = spatial.distance_matrix(coor, coor @ rot)
# min of max a la Vasa et al., 2018
col = np.zeros(len(coor), dtype='int32')
for r in range(len(dist)):
# find parcel whose closest neighbor is farthest away
# overall; assign to that
row = dist.min(axis=1).argmax()
col[row] = dist[row].argmin()
cost[inds[hinds][row], n] = dist[row, col[row]]
# set to -inf and inf so they can't be assigned again
dist[row] = -np.inf
dist[:, col[row]] = np.inf
# optimization of total cost using Hungarian algorithm. this
# may result in certain parcels having higher cost than with
# `method='vasa'` but should always result in the total cost
# being lower #tradeoffs
elif method == 'hungarian':
dist = spatial.distance_matrix(coor, coor @ rot)
row, col = optimize.linear_sum_assignment(dist)
cost[hinds, n] = dist[row, col]
# if nodes can be assigned multiple targets, we can simply use
# the absolute minimum of the distances (no optimization
# required) which is _much_ lighter on memory
# huge thanks to https://stackoverflow.com/a/47779290 for this
# memory-efficient method
elif method == 'original':
dist, col = spatial.cKDTree(coor @ rot).query(coor, 1)
cost[hinds, n] = dist
resampled[hinds] = inds[hinds][col]
# if we want to check for duplicates ensure that we don't have any
if check_duplicates:
if np.any(np.all(resampled[:, None] == spinsamples[:, :n], 0)):
duplicated = True
# if our "spin" is identical to the input then that's no good
elif np.all(resampled == inds):
duplicated = True
# if we broke out because we tried 500 rotations and couldn't generate
# a new one, warn that we're using duplicate rotations and give up.
# this should only be triggered if check_duplicates is set to True
if count == 500 and not warned:
warnings.warn('Duplicate rotations used. Check resampling array '
'to determine real number of unique permutations.')
warned = True
spinsamples[:, n] = resampled
if verbose:
print(' ' * len(msg) + '\b' * len(msg), end='', flush=True)
if return_cost:
return spinsamples, cost
return spinsamples
def get_dominance_stats(X, y, use_adjusted_r_sq=True, verbose=False):
"""
Returns the dominance analysis statistics for multilinear regression.
This is a rewritten & simplified version of [DA1]_. It is briefly
tested against the original package, but still in early stages.
Please feel free to report any bugs.
Warning: Still work-in-progress. Parameters might change!
Parameters
----------
X : (N, M) array_like
Input data
y : (N,) array_like
Target values
use_adjusted_r_sq : bool, optional
Whether to use adjusted r squares. Default: True
verbose : bool, optional
Whether to print debug messages. Default: False
Returns
-------
model_metrics : dict
The dominance metrics, currently containing `individual_dominance`,
`partial_dominance`, `total_dominance`, and `full_r_sq`.
model_r_sq : dict
Contains all model r squares
Notes
-----
Example usage
.. code:: python
from netneurotools.stats import get_dominance_stats
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y=True)
model_metrics, model_r_sq = get_dominance_stats(X, y)
To compare with [DA1]_, use `use_adjusted_r_sq=False`
.. code:: python
from dominance_analysis import Dominance_Datasets
from dominance_analysis import Dominance
boston_dataset=Dominance_Datasets.get_boston()
dominance_regression=Dominance(data=boston_dataset,
target='House_Price',objective=1)
incr_variable_rsquare=dominance_regression.incremental_rsquare()
dominance_regression.dominance_stats()
References
----------
.. [DA1] https://github.com/dominance-analysis/dominance-analysis
"""
# this helps to remove one element from a tuple
def remove_ret(tpl, elem):
lst = list(tpl)
lst.remove(elem)
return tuple(lst)
# sklearn linear regression wrapper
def get_reg_r_sq(X, y):
lin_reg = LinearRegression()
lin_reg.fit(X, y)
yhat = lin_reg.predict(X)
SS_Residual = sum((y - yhat) ** 2)
SS_Total = sum((y - np.mean(y)) ** 2)
r_squared = 1 - (float(SS_Residual)) / SS_Total
adjusted_r_squared = 1 - (1 - r_squared) * \
(len(y) - 1) / (len(y) - X.shape[1] - 1)
if use_adjusted_r_sq:
return adjusted_r_squared
else:
return r_squared
# generate all predictor combinations in list (num of predictors) of lists
n_predictor = X.shape[-1]
# n_comb_len_group = n_predictor - 1
predictor_combs = [list(combinations(range(n_predictor), i))
for i in range(1, n_predictor + 1)]
if verbose:
print(f"[Dominance analysis] Generated \
{len([v for i in predictor_combs for v in i])} combinations")
# get all r_sq's
model_r_sq = dict()
for len_group in tqdm(predictor_combs, desc='num-of-predictor loop',
disable=not verbose):
for idx_tuple in tqdm(len_group, desc='insider loop',
disable=not verbose):
r_sq = get_reg_r_sq(X[:, idx_tuple], y)
model_r_sq[idx_tuple] = r_sq
if verbose:
print(f"[Dominance analysis] Acquired {len(model_r_sq)} r^2's")
# getting all model metrics
model_metrics = dict([])
# individual dominance
individual_dominance = []
for i_pred in range(n_predictor):
individual_dominance.append(model_r_sq[(i_pred,)])
individual_dominance = np.array(individual_dominance).reshape(1, -1)
model_metrics["individual_dominance"] = individual_dominance
# partial dominance
partial_dominance = [[] for _ in range(n_predictor - 1)]
for i_len in range(n_predictor - 1):
i_len_combs = list(combinations(range(n_predictor), i_len + 2))
for j_node in range(n_predictor):
j_node_sel = [v for v in i_len_combs if j_node in v]
reduced_list = [remove_ret(comb, j_node) for comb in j_node_sel]
diff_values = [
model_r_sq[j_node_sel[i]] - model_r_sq[reduced_list[i]]
for i in range(len(reduced_list))]
partial_dominance[i_len].append(np.mean(diff_values))
# save partial dominance
partial_dominance = np.array(partial_dominance)
model_metrics["partial_dominance"] = partial_dominance
# get total dominance
total_dominance = np.mean(
np.r_[individual_dominance, partial_dominance], axis=0)
# test and save total dominance
assert np.allclose(total_dominance.sum(),
model_r_sq[tuple(range(n_predictor))]), \
"Sum of total dominance is not equal to full r square!"
model_metrics["total_dominance"] = total_dominance
# save full r^2
model_metrics["full_r_sq"] = model_r_sq[tuple(range(n_predictor))]
return model_metrics, model_r_sq
|
import sys
import pathlib
# sys.path.append(pathlib.Path(__file__).resolve().parents[1].as_posix())
# sys.path.append((pathlib.Path(__file__).resolve().parents[1] / 'submodule/FightingICE/python').as_posix())
import numpy as np
# from nike.contest_modified.utils import Environment, Agent
from utils import Agent
class PartialPolicy:
def __init__(self, state):
self._orig_state = state
def policy(self, state):
raise NotImplementedError()
class PartialPolicyManager:
def __init__(self, config):
self._config = config
self._curr = config['init'](None)
self._transit_map = self._make_transit_map(config['transit'])
print(self._transit_map)
def _make_transit_map(self, transit):
def make_one(xs):
dests = [x['dest'] for x in xs]
weights = np.array([x['weight'] for x in xs])
assert abs(1.0 - np.sum(weights)) < 1e-5, f"{np.sum(weights)} should be equal to 1.0"
return {
'dests': dests,
'weights': weights,
}
return dict((k, make_one(v)) for (k, v) in transit.items())
def get_policy_and_update(self, state):
res, transit = self._curr.policy(state)
print(res, transit)
if transit:
info = self._transit_map[self._curr.__class__]
i = np.random.choice(len(info['weights']), p=info['weights'])
self._curr = info['dests'][i](state)
return res
class PPOneAction(PartialPolicy):
def policy(self, state):
return self.ACTION, True
class PPInit(PPOneAction):
ACTION = None
class PPGuard(PPOneAction):
ACTION = '↓'
class PPPunch(PPOneAction):
ACTION = 'A'
class PPKick(PPOneAction):
ACTION = 'B'
class PPC(PPOneAction):
ACTION = 'C'
# actions = ["↖", "↑", "↗", "←", "→", "↙", "↓", "↘", "A", "B", "C", "_"]
class PPHadoken(PPOneAction):
ACTION = '↓↘→A'
class PPContinuousAction(PartialPolicy):
ACTION = None
N = None
def __init__(self, state):
self._n = self.N
def policy(self, state):
self._n -= 1
if self._n < 0:
return self.ACTION, True
else:
return self.ACTION, False
class PPMoveRight10(PPContinuousAction):
ACTION = '→'
N = 10
class PPMoveLeft10(PPContinuousAction):
ACTION = '←'
N = 10
def _random(p, pps):
q = p / len(pps)
return [{'weight': q, 'dest': pp} for pp in pps]
class TeamD4Agent(Agent):
def __init__(self, environment):
super().__init__(environment)
self._pp_manager = PartialPolicyManager({
'init': PPInit,
'transit': {
PPInit: [
{'weight': 1.0, 'dest': PPGuard},
],
PPGuard: _random(1.0, [PPPunch, PPKick, PPC, PPHadoken, PPMoveLeft10, PPMoveRight10]),
PPPunch: [
{'weight': 1.0, 'dest': PPGuard},
],
PPKick: [
{'weight': 1.0, 'dest': PPGuard},
],
PPC: [
{'weight': 1.0, 'dest': PPGuard},
],
PPHadoken: [
{'weight': 1.0, 'dest': PPGuard},
],
PPMoveLeft10: [
{'weight': 1.0, 'dest': PPGuard},
],
PPMoveRight10: [
{'weight': 1.0, 'dest': PPGuard},
],
}
})
def policy(self, state):
try:
res = self._pp_manager.get_policy_and_update(state)
except Exception as e:
print(e)
raise
return res
def roundEnd(self, x, y, z):
print(x)
print(y)
print(z)
# def main():
# class DummyEnv:
# _gateway = None
# agent = TeamD4Agent(DummyEnv())
# state = {}
# for _ in range(100):
# agent.policy(state)
# # import sys; sys.exit('========== END HERE ==========')
# print('start')
# env = Environment()
# env.play(TeamD4Agent, TeamD4Agent)
# if __name__ == '__main__':
# main()
|
class RemovedInWagtailMenus29Warning(DeprecationWarning):
pass
removed_in_next_version_warning = RemovedInWagtailMenus29Warning
class RemovedInWagtailMenus210Warning(PendingDeprecationWarning):
pass
class RemovedInWagtailMenus211Warning(PendingDeprecationWarning):
pass
|
# settings.py #
# ============================================================================
# DEFINE THE SETTINGS CLASS FOR THE GAME TO SET UP A SPECIFIED ENVIRONMENT.
#
# !!!WARNING: THE GAME MIGHT NOT BEHAVE PROPERLY IF YOU CHANGE THE VALUES
# IN THIS FILE. BACK UP THIS FILE IF YOU WISH TO EDIT ANYTHING.
#
class Settings:
"""Define all in-game specifications."""
def __init__(self, debug=False):
self._debug_mode = debug
self._window_size = (675, 900)
self._inert_bg_color = (255, 255, 255)
self._action_bg_color = (253, 237, 236)
self._frame_rate = 60
self._playership_speed = 3
@property
def debug_mode(self):
"""Currently support force game-over; upgrade/downgrade the
player ship; set player ship invincible/normal; spawn single enemy or
upgrade pack; enter next stage;"""
return self._debug_mode
@property
def window_size(self):
return self._window_size
@property
def inert_bg_color(self):
"""This is for testing only."""
return self._inert_bg_color
@property
def action_bg_color(self):
"""This is for testing only."""
return self._action_bg_color
@property
def frame_rate(self):
return self._frame_rate
@property
def playership_speed(self):
return self._playership_speed
|
from .anchor_utils import *
from .hyperparams import *
from .image_resizer import *
|
import threading
from common.pygamescreen import PyGameScreen
from webserver.server import setup_server_runner, run_http_server, run_socket_server
from webserver.video import VideoSource
from webserver.websocket import WebSocketManager
class WebControlManager:
def __init__(self, enable_http, enable_socket, pygame_screen: PyGameScreen):
self.pygame_screen = pygame_screen
self.is_socket_enabled = enable_socket
self.is_http_enabled = enable_http
self.video_source = VideoSource()
self.socket = WebSocketManager()
def start_http(self):
if self.is_http_enabled:
server_runner = setup_server_runner(self.video_source, self.pygame_screen)
td = threading.Thread(target=run_http_server, args=(server_runner,))
td.start()
def start_socket(self):
if self.is_socket_enabled:
td2 = threading.Thread(target=run_socket_server, args=(self.socket,))
td2.start()
def send_frame(self, frame):
self.video_source.update(frame)
def send_msg(self, msg):
self.socket.msg = msg
|
from flask import Flask
from flask import jsonify
from flask_cors import CORS
from bs4 import BeautifulSoup
import threading
from queue import Queue
import requests
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
request_lock = threading.Lock()
q = Queue()
data_dvds = []
@app.route('/api')
def api():
base_url = "http://nccardinal.org"
library_number = 132
# Get the search page.
search_url = "/eg/opac/results?bool=and&qtype=keyword&contains=contains&query=&bool=and&qtype=title&contains=contains&query=&bool=and&qtype=author&contains=contains&query=&_adv=1&detail_record_view=0&fi%3Aitem_type=g&fi%3Avr_format=v&locg=" + str(library_number) + "&pubdate=is&date1=&date2=&sort=pubdate.descending"
res = requests.get(base_url + search_url)
# Find the .record_title.search_link elements.
soup = BeautifulSoup(res.text, 'html.parser')
titles = soup.find_all('a', class_='record_title search_link')
titles = [title.string.split('[videorecording]')[0].strip()
for title in titles]
# Use worker threads to get additional info for each title.
for x in range(4):
t = threading.Thread(target=threader)
t.daemon = True
t.start()
# Reset results from any previous request, queue every title, then wait for
# the workers to drain the queue before responding.
data_dvds.clear()
for title in titles:
q.put(title)
q.join()
return jsonify(data_dvds)
def threader():
while True:
title = q.get()
hits = search_wiki(title)
with request_lock:
data_dvds.append(hits)
q.task_done()
def search_wiki(title):
print('async search_wiki title:', title)
url = 'https://en.wikipedia.org/w/api.php?action=opensearch&format=json&search=' + title + ' film'
res = requests.get(url)
data = res.json()
hits = []
for idx, hit in enumerate(data[1]):
hits.append({
'title': hit,
'description': data[2][idx],
'wiki_url': data[3][idx],
'image': get_image(hit)
})
return hits
def get_image(title):
url = 'https://en.wikipedia.org/w/api.php?action=query&prop=pageprops&format=json&titles=' + title
res = requests.get(url)
data = res.json()
page_id = [key for key in data['query']['pages']][0]
image_url = ''
try:
image_name = data['query']['pages'][page_id]['pageprops']['page_image']
image_page_url = 'https://en.wikipedia.org/w/api.php?action=query&prop=imageinfo&iiprop=url&format=json&titles=Image:' + image_name
image_res = requests.get(image_page_url)
image_data = image_res.json()
image_page_id = [key for key in image_data['query']['pages']][0]
image_url = image_data['query']['pages'][image_page_id]['imageinfo'][0]['url']
except KeyError:
pass
return image_url
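# Local entry point (an assumption for development use; in production this app
# would typically be served by a WSGI server instead).
if __name__ == '__main__':
    app.run(debug=True)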
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-04-15 11:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20170321_2006'),
]
operations = [
migrations.AddField(
model_name='product',
name='recommended',
field=models.BooleanField(default=False),
),
]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Exceptions related to IBM Quantum experiments."""
from ..exceptions import IBMError
class IBMExperimentError(IBMError):
"""Base class for errors raised by the experiment service modules."""
pass
class IBMExperimentEntryNotFound(IBMExperimentError):
"""Errors raised when an experiment entry cannot be found."""
class IBMExperimentEntryExists(IBMExperimentError):
"""Errors raised when an experiment entry already exists."""
|
"""
Given the array nums consisting of 2n elements in the form [x1,x2,...,xn,y1,y2,...,yn].
Return the array in the form [x1,y1,x2,y2,...,xn,yn].
Example 1:
Input: nums = [2,5,1,3,4,7], n = 3
Output: [2,3,5,4,1,7]
Explanation: Since x1=2, x2=5, x3=1, y1=3, y2=4, y3=7 then the answer is [2,3,5,4,1,7].
Example 2:
Input: nums = [1,2,3,4,4,3,2,1], n = 4
Output: [1,4,2,3,3,2,4,1]
Example 3:
Input: nums = [1,1,2,2], n = 2
Output: [1,2,1,2]
Solution:
Single Pass
"""
# Single Pass
# Time: O(n), n is the length of nums
# Space: O(n) for the output list (O(1) auxiliary space)
from typing import List
class Solution:
def shuffle(self, nums: List[int], n: int) -> List[int]:
res = []
for i in range(n):
res.append(nums[i])
res.append(nums[i+n])
return res
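# Quick check using the examples from the problem statement above.
if __name__ == "__main__":
    assert Solution().shuffle([2, 5, 1, 3, 4, 7], 3) == [2, 3, 5, 4, 1, 7]
    assert Solution().shuffle([1, 2, 3, 4, 4, 3, 2, 1], 4) == [1, 4, 2, 3, 3, 2, 4, 1]
    assert Solution().shuffle([1, 1, 2, 2], 2) == [1, 2, 1, 2]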
|
from lbry.cryptoutils import get_lbry_hash_obj
MAX_BLOB_SIZE = 2 * 2 ** 20
# digest_size is in bytes, and blob hashes are hex encoded
blobhash_length = get_lbry_hash_obj().digest_size * 2
|
import uuid
import logging
from sqlalchemy import create_engine
from dependency_injector import containers, providers
from dependency_injector.wiring import inject # noqa
from modules.catalog.module import CatalogModule
from modules.catalog.infrastructure.listing_repository import (
PostgresJsonListingRepository,
)
from modules.iam.module import IdentityAndAccessModule
from modules.iam.application.services import AuthenticationService
from modules.iam.infrastructure.user_repository import PostgresJsonUserRepository
from seedwork.infrastructure.request_context import RequestContext
def _default(val):
if isinstance(val, uuid.UUID):
return str(val)
raise TypeError()
def dumps(d):
import json
return json.dumps(d, default=_default)
class DummyService:
def __init__(self, config) -> None:
self.config = config
def serve(self):
return f"serving with config {self.config}"
def create_request_context(engine):
from seedwork.infrastructure.request_context import request_context
request_context.setup(engine)
return request_context
def create_engine_once(config):
engine = create_engine(
config["DATABASE_URL"], echo=config["DEBUG"], json_serializer=dumps
)
from seedwork.infrastructure.database import Base
# TODO: it seems like a hack, but it works...
Base.metadata.bind = engine
return engine
class Container(containers.DeclarativeContainer):
"""Dependency Injection Container
see https://github.com/ets-labs/python-dependency-injector for more details
"""
__self__ = providers.Self()
config = providers.Configuration()
engine = providers.Singleton(create_engine_once, config)
dummy_service = providers.Factory(DummyService, config)
dummy_singleton = providers.Singleton(DummyService, config)
request_context: RequestContext = providers.Factory(
create_request_context, engine=engine
)
correlation_id = providers.Factory(
lambda request_context: request_context.correlation_id.get(), request_context
)
# catalog module and its dependencies
listing_repository = providers.Factory(
PostgresJsonListingRepository, db_session=request_context.provided.db_session
)
catalog_module = providers.Factory(
CatalogModule, listing_repository=listing_repository
)
# iam module and its dependencies
user_repository = providers.Factory(
PostgresJsonUserRepository, db_session=request_context.provided.db_session
)
authentication_service = providers.Factory(
AuthenticationService, user_repository=user_repository
)
iam_module = providers.Factory(
IdentityAndAccessModule, authentication_service=authentication_service
)
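# Example wiring (a sketch only; the configuration values are placeholders and
# real settings would come from the application's environment):
#
# container = Container()
# container.config.from_dict({"DATABASE_URL": "sqlite://", "DEBUG": True})
# catalog = container.catalog_module()
# iam = container.iam_module()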
|
"""
Signal Filtering and Generation of Synthetic Time-Series.
Copyright (c) 2020 Gabriele Gilardi
"""
import numpy as np
def synthetic_wave(P, A=None, phi=None, num=1000):
"""
Generates a multi-sine wave given periods, amplitudes, and phases.
P (n, ) Periods
A (n, ) Amplitudes
phi (n, ) Phases (rad)
t (num, ) Time
f (num, ) Multi-sine wave
The default value for the amplitudes is 1 and for the phases is zero. The
time goes from zero to the largest period.
"""
n_waves = len(P) # Number of waves
P = np.asarray(P)
# Amplitudes
if (A is None):
A = np.ones(n_waves) # Default is 1
else:
A = np.asarray(A)
# Phases
if (phi is None):
phi = np.zeros(n_waves) # Default is 0
else:
phi = np.asarray(phi)
# Time
t = np.linspace(0.0, np.amax(P), num=num)
# Add up all the sine waves
f = np.zeros(len(t))
for i in range(n_waves):
f = f + A[i] * np.sin(2.0 * np.pi * t / P[i] + phi[i])
return t, f
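# Example: two superimposed sine waves with periods 10 and 20 (illustrative
# values only), using the default unit amplitudes and zero phases:
# t, f = synthetic_wave([10.0, 20.0])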
def synthetic_sampling(X, n_reps=1, replace=True):
"""
Generates surrogates of the time-series X using randomized-sampling
(bootstrap) with or without replacement. Input X must be a 1D array.
X (n, ) Original time-series
idx (n_reps, n) Random index of X
X_synt (n_reps, n) Synthetic time-series
"""
X = X.flatten() # Reshape to (n, )
n = len(X)
# Sampling with replacement
if (replace):
idx = np.random.randint(0, n, size=(n_reps, n))
# Sampling without replacement
else:
idx = np.argsort(np.random.rand(n_reps, n), axis=1)
# Synthetic time-series
X_synt = X[idx]
return X_synt
def synthetic_FFT(X, n_reps=1):
"""
Generates surrogates of the time-series X using the phase-randomized
Fourier-transform algorithm. Input X must be a 1D array.
X (n, ) Original time-series
X_fft (n, ) FFT of the original time-series
X_synt_fft (n_reps, n) FFT of the synthetic time-series
X_synt (n_reps, n) Synthetic time-series
"""
X = X.flatten() # Reshape to (n, )
n = len(X)
# The number of samples must be odd
if ((n % 2) == 0):
print("Warning: data reduced by one (even number of samples)")
n = n - 1
X = X[0:n]
# FFT of the original time-series
X_fft = np.fft.fft(X)
# Parameters
half_len = (n - 1) // 2
idx1 = np.arange(1, half_len+1, dtype=int) # 1st half
idx2 = np.arange(half_len+1, n, dtype=int) # 2nd half
# Generate the random phases
phases = np.random.rand(n_reps, half_len)
phases1 = np.exp(2.0 * np.pi * 1j * phases)
phases2 = np.conj(np.flipud(phases1))
# FFT of the synthetic time-series (1st sample is unchanged)
X_synt_fft = np.zeros((n_reps, n), dtype=complex)
X_synt_fft[:, 0] = X_fft[0]
X_synt_fft[:, idx1] = X_fft[idx1] * phases1 # 1st half
X_synt_fft[:, idx2] = X_fft[idx2] * phases2 # 2nd half
# Synthetic time-series
X_synt = np.real(np.fft.ifft(X_synt_fft, axis=1))
return X_synt
def synthetic_MEboot(X, n_reps=1, alpha=0.1, bounds=False, scale=False):
"""
Generates surrogates of the time-series X using the maximum entropy
bootstrap algorithm. Input X must be a 1D array.
X (n, ) Original time-series
idx (n, ) Original order of X
y (n, ) Sorted original time-series
z (n+1, ) Intermediate points
mt (n, ) Interval means
t_w (n_reps, n) Random new points
w_int (n_reps, n) Interpolated new points
w_corr (n_reps, n) Interpolated new points with corrections for first
and last interval
X_synt (n_reps, n) Synthetic time-series
"""
X = X.flatten() # Reshape to (n, )
n = len(X)
# Sort the time series keeping track of the original order
idx = np.argsort(X)
y = X[idx]
# Trimmed mean
g = int(np.floor(n * alpha))
r = n * alpha - g
m_trm = ((1.0 - r) * (y[g] + y[n-g-1]) + y[g+1:n-g-1].sum()) \
/ (n * (1.0 - 2.0 * alpha))
# Intermediate points
z = np.zeros(n+1)
z[0] = y[0] - m_trm
z[1:-1] = (y[0:-1] + y[1:]) / 2.0
z[n] = y[n-1] + m_trm
# Interval means
mt = np.zeros(n)
mt[0] = 0.75 * y[0] + 0.25 * y[1]
mt[1:n-1] = 0.25 * y[0:n-2] + 0.5 * y[1:n-1] + 0.25 * y[2:n]
mt[n-1] = 0.25 * y[n-2] + 0.75 * y[n-1]
# Generate randomly new points and sort them
t_w = np.random.rand(n_reps, n)
t_w = np.sort(t_w, axis=1)
# Interpolate the new points
t = np.linspace(0.0, 1.0, num=n+1)
w_int = np.interp(t_w, t, z)
# Correct the new points in the first and last interval to satisfy
# the mass constraint
idw = (np.floor_divide(t_w, 1.0 / n)).astype(int)
corr = np.where(idw == 0, mt[idw] - (z[idw] + z[idw+1]) / 2.0, 0.0)
w_corr = w_int + corr
if (n > 1):
corr = np.where(idw == n-1, mt[idw] - (z[idw] + z[idw+1]) / 2.0, 0.0)
w_corr += corr
# Enforce limits (if requested)
if (bounds):
w_corr = np.fmin(np.fmax(w_corr, z[0]), z[n])
# Recover the time-dependency of the original time-series
X_synt = np.zeros((n_reps, n))
X_synt[:, idx] = w_corr
# Scale to force equal variance (if requested)
if (scale):
var_z = np.diff(z) ** 2.0 / 12.0
X_mean = X.mean(axis=0)
var_ME = (((mt - X_mean) ** 2).sum() + var_z.sum()) / n
std_X = X.std(ddof=1)
std_ME = np.sqrt(var_ME)
k_scale = std_X / std_ME - 1.0
X_synt = X_synt + k_scale * (X_synt - X_mean)
return X_synt
def normalize_data(X, param=(), ddof=0):
"""
If mu and sigma are not defined, returns a column-normalized version of
X with zero mean and standard deviation equal to one. If mu and sigma are
defined returns a column-normalized version of X using mu and sigma.
X Input dataset
Xn Column-normalized input dataset
param Tuple with mu and sigma
mu Mean
sigma Standard deviation
ddof Delta degrees of freedom (if ddof = 0 then divide by m, if
ddof = 1 then divide by m-1, with m the number of data in X)
"""
# Column-normalize using mu and sigma
if (len(param) > 0):
Xn = (X - param[0]) / param[1]
return Xn
# Compute mu and sigma from X, then column-normalize to zero mean and unit std
else:
mu = X.mean(axis=0)
sigma = X.std(axis=0, ddof=ddof)
Xn = (X - mu) / sigma
param = (mu, sigma)
return Xn, param
def scale_data(X, param=()):
"""
If X_min and X_max are not defined, returns a column-scaled version of
X in the interval (-1,+1). If X_min and X_max are defined returns a
column-scaled version of X using X_min and X_max.
X Input dataset
Xs Column-scaled input dataset
param Tuple with X_min and X_max
X_min Min. value along the columns (features) of the input dataset
X_max Max. value along the columns (features) of the input dataset
"""
# Column-scale using X_min and X_max
if (len(param) > 0):
Xs = -1.0 + 2.0 * (X - param[0]) / (param[1] - param[0])
return Xs
# Compute X_min and X_max from X, then column-scale to the interval (-1, +1)
else:
X_min = np.amin(X, axis=0)
X_max = np.amax(X, axis=0)
Xs = -1.0 + 2.0 * (X - X_min) / (X_max - X_min)
param = (X_min, X_max)
return Xs, param
def value2diff(X, percent=True):
"""
Returns the 1st discrete difference of array X.
X (n, ) Input dataset
dX (n-1, ) 1st discrete differences
Notes:
- the discrete difference can be calculated in percent or in value.
- dX is one element shorter than X.
- X must be a 1D array.
"""
X = X.flatten() # Reshape to (n, )
# Discrete difference in percent
if (percent):
dX = X[1:] / X[:-1] - 1.0
# Discrete difference in value
else:
dX = X[1:] - X[:-1]
return dX
def diff2value(dX, X0, percent=True):
"""
Rebuilds array X from the 1st discrete difference using X0 as initial value.
dX (n, ) Discrete differences
X0 scalar Initial value
X (n+1, ) Output dataset
Notes:
- the discrete difference can be in percent or in value.
- X is one element longer than dX.
- dX must be a 1D array.
If the discrete difference is in percent:
X[0] = X0
X[1] = X[0] * (1 + dX[0])
X[2] = X[1] * (1 + dX[1]) = X[0] * (1 + dX[0]) * (1 + dX[1])
....
If the discrete difference is in value:
X[0] = X0
X[1] = X[0] + dX[0]
X[2] = X[1] + dX[1] = X[0] + dX[0] + dX[1]
....
"""
dX = dX.flatten() # Reshape to (n, )
X = np.zeros(len(dX) + 1)
X[0] = X0 # Initial value
# Discrete difference in percent
if (percent):
X[1:] = X0 * np.cumprod(1.0 + dX)
# Discrete difference in value
else:
X[1:] = X0 + np.cumsum(dX)
return X
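# Small self-check (a sketch; the data below is synthetic and only illustrates
# the round trip between value2diff and diff2value described above):
if __name__ == '__main__':
    X = np.array([100.0, 102.0, 101.0, 105.0])
    dX = value2diff(X, percent=True)
    X_rebuilt = diff2value(dX, X[0], percent=True)
    assert np.allclose(X, X_rebuilt)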
|
from typing import BinaryIO
from pycubexr.classes import MetricValues, Metric
from pycubexr.parsers.data_parser import parse_data
from pycubexr.parsers.index_parser import parse_index
def extract_metric_values(
*,
metric: Metric,
index_file: BinaryIO,
data_file: BinaryIO
) -> MetricValues:
index = parse_index(index_file=index_file)
values = parse_data(
data_file=data_file,
data_type=metric.data_type,
endianness_format_char=index.endianness_format
)
return MetricValues(
metric=metric,
cnode_indices=index.cnode_indices,
values=values
)
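# Hypothetical usage sketch (the file names and the `metric` object are assumptions;
# in practice both come from an opened .cubex archive):
def _example_extract(metric: Metric) -> MetricValues:
    with open('profile.index', 'rb') as index_file, open('profile.data', 'rb') as data_file:
        return extract_metric_values(metric=metric, index_file=index_file, data_file=data_file)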
|
import csv
from functools import partial
import gzip
from pathlib import Path
from typing import Collection, Dict, Optional
from tqdm import tqdm
from molpal.objectives.base import Objective
class LookupObjective(Objective):
"""A LookupObjective calculates the objective function by looking the
value up in an input file.
Useful for retrospective studies.
Attributes
----------
    self.data : Dict[str, float]
        a dictionary mapping an input SMILES string to its objective
        function value
    Parameters
    ----------
    lookup_path : str
        the path of the file containing the lookup data
    lookup_sep : str (Default = ',')
        the column separator used in the lookup file
    lookup_title_line : bool (Default = True)
        is there a title line in the lookup file?
    lookup_smiles_col : int (Default = 0)
        the column containing the SMILES string in the lookup file
    lookup_data_col : int (Default = 1)
        the column containing the desired data in the lookup file
    **kwargs
        unused and additional keyword arguments
"""
def __init__(self, lookup_path: str,
lookup_sep: str = ',', lookup_title_line: bool = True,
lookup_smiles_col: int = 0, lookup_data_col: int = 1,
**kwargs):
if Path(lookup_path).suffix == '.gz':
open_ = partial(gzip.open, mode='rt')
else:
open_ = open
self.data = {}
with open_(lookup_path) as fid:
reader = csv.reader(fid, delimiter=lookup_sep)
if lookup_title_line:
next(fid)
for row in tqdm(reader, desc='Building oracle'):
# assume all data is a float value right now
key = row[lookup_smiles_col]
val = row[lookup_data_col]
try:
self.data[key] = float(val)
except ValueError:
pass
super().__init__(**kwargs)
def calc(self, smis: Collection[str],
*args, **kwargs) -> Dict[str, Optional[float]]:
return {
smi: self.c * self.data[smi] if smi in self.data else None
for smi in smis
}
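# Standalone parsing sketch (illustrative data only; bypasses the Objective base class):
if __name__ == '__main__':
    import io
    _demo_csv = io.StringIO("smiles,score\nCCO,-7.2\nc1ccccc1,-6.8\n")
    _reader = csv.reader(_demo_csv, delimiter=',')
    next(_reader)                                    # skip the title line (lookup_title_line=True)
    _data = {row[0]: float(row[1]) for row in _reader}
    print(_data)                                     # {'CCO': -7.2, 'c1ccccc1': -6.8}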
|
# Copyright 2017, Inderpreet Singh, All rights reserved.
import unittest
import json
from datetime import datetime
from pytz import timezone
from .test_serialize import parse_stream
from web.serialize import SerializeModel
from model import ModelFile
class TestSerializeModel(unittest.TestCase):
def test_event_names(self):
serialize = SerializeModel()
out = parse_stream(serialize.model([]))
self.assertEqual("model-init", out["event"])
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.ADDED, None, None
))
)
self.assertEqual("model-added", out["event"])
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.UPDATED, None, None
))
)
self.assertEqual("model-updated", out["event"])
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.REMOVED, None, None
))
)
self.assertEqual("model-removed", out["event"])
def test_model_is_a_list(self):
serialize = SerializeModel()
files = []
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(list, type(data))
files = [ModelFile("a", True), ModelFile("b", False)]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(list, type(data))
self.assertEqual(2, len(data))
def test_update_event_is_a_dict(self):
serialize = SerializeModel()
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.UPDATED, None, None
))
)
data = json.loads(out["data"])
self.assertEqual(dict, type(data))
self.assertEqual(None, data["old_file"])
self.assertEqual(None, data["new_file"])
def test_update_event_files(self):
serialize = SerializeModel()
a1 = ModelFile("a", False)
a1.local_size = 100
a2 = ModelFile("a", False)
a2.local_size = 200
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.UPDATED, a1, a2
))
)
data = json.loads(out["data"])
self.assertEqual("a", data["old_file"]["name"])
self.assertEqual(100, data["old_file"]["local_size"])
self.assertEqual("a", data["new_file"]["name"])
self.assertEqual(200, data["new_file"]["local_size"])
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.ADDED, None, a1
))
)
data = json.loads(out["data"])
self.assertEqual(None, data["old_file"])
self.assertEqual("a", data["new_file"]["name"])
self.assertEqual(100, data["new_file"]["local_size"])
out = parse_stream(
serialize.update_event(SerializeModel.UpdateEvent(
SerializeModel.UpdateEvent.Change.ADDED, a2, None
))
)
data = json.loads(out["data"])
self.assertEqual("a", data["old_file"]["name"])
self.assertEqual(200, data["old_file"]["local_size"])
self.assertEqual(None, data["new_file"])
def test_file_name(self):
serialize = SerializeModel()
files = [ModelFile("a", True), ModelFile("b", False)]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual("a", data[0]["name"])
self.assertEqual("b", data[1]["name"])
def test_file_is_dir(self):
serialize = SerializeModel()
files = [ModelFile("a", True), ModelFile("b", False)]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(True, data[0]["is_dir"])
self.assertEqual(False, data[1]["is_dir"])
def test_file_state(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.state = ModelFile.State.DEFAULT
b = ModelFile("b", False)
b.state = ModelFile.State.DOWNLOADING
c = ModelFile("c", True)
c.state = ModelFile.State.QUEUED
d = ModelFile("d", True)
d.state = ModelFile.State.DOWNLOADED
e = ModelFile("e", False)
e.state = ModelFile.State.DELETED
f = ModelFile("f", False)
f.state = ModelFile.State.EXTRACTING
g = ModelFile("g", False)
g.state = ModelFile.State.EXTRACTED
files = [a, b, c, d, e, f, g]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(7, len(data))
self.assertEqual("default", data[0]["state"])
self.assertEqual("downloading", data[1]["state"])
self.assertEqual("queued", data[2]["state"])
self.assertEqual("downloaded", data[3]["state"])
self.assertEqual("deleted", data[4]["state"])
self.assertEqual("extracting", data[5]["state"])
self.assertEqual("extracted", data[6]["state"])
def test_remote_size(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.remote_size = None
b = ModelFile("b", False)
b.remote_size = 0
c = ModelFile("c", True)
c.remote_size = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual(None, data[0]["remote_size"])
self.assertEqual(0, data[1]["remote_size"])
self.assertEqual(100, data[2]["remote_size"])
def test_local_size(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.local_size = None
b = ModelFile("b", False)
b.local_size = 0
c = ModelFile("c", True)
c.local_size = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual(None, data[0]["local_size"])
self.assertEqual(0, data[1]["local_size"])
self.assertEqual(100, data[2]["local_size"])
def test_downloading_speed(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.downloading_speed = None
b = ModelFile("b", False)
b.downloading_speed = 0
c = ModelFile("c", True)
c.downloading_speed = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual(None, data[0]["downloading_speed"])
self.assertEqual(0, data[1]["downloading_speed"])
self.assertEqual(100, data[2]["downloading_speed"])
def test_eta(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.eta = None
b = ModelFile("b", False)
b.eta = 0
c = ModelFile("c", True)
c.eta = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual(None, data[0]["eta"])
self.assertEqual(0, data[1]["eta"])
self.assertEqual(100, data[2]["eta"])
def test_file_is_extractable(self):
serialize = SerializeModel()
a = ModelFile("a", True)
a.is_extractable = False
b = ModelFile("b", False)
b.is_extractable = True
files = [a, b]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(False, data[0]["is_extractable"])
self.assertEqual(True, data[1]["is_extractable"])
def test_local_created_timestamp(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", False)
b.local_created_timestamp = datetime(2018, 11, 9, 21, 40, 18, tzinfo=timezone('UTC'))
files = [a, b]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(None, data[0]["local_created_timestamp"])
self.assertEqual(str(1541799618.0), data[1]["local_created_timestamp"])
def test_local_modified_timestamp(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", False)
b.local_modified_timestamp = datetime(2018, 11, 9, 21, 40, 18, tzinfo=timezone('UTC'))
files = [a, b]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(None, data[0]["local_modified_timestamp"])
self.assertEqual(str(1541799618.0), data[1]["local_modified_timestamp"])
def test_remote_created_timestamp(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", False)
b.remote_created_timestamp = datetime(2018, 11, 9, 21, 40, 18, tzinfo=timezone('UTC'))
files = [a, b]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(None, data[0]["remote_created_timestamp"])
self.assertEqual(str(1541799618.0), data[1]["remote_created_timestamp"])
def test_remote_modified_timestamp(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", False)
b.remote_modified_timestamp = datetime(2018, 11, 9, 21, 40, 18, tzinfo=timezone('UTC'))
files = [a, b]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(2, len(data))
self.assertEqual(None, data[0]["remote_modified_timestamp"])
self.assertEqual(str(1541799618.0), data[1]["remote_modified_timestamp"])
def test_children(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", True)
b.add_child(ModelFile("ba", False))
b.add_child(ModelFile("bb", True))
c = ModelFile("c", True)
ca = ModelFile("ca", True)
ca.add_child(ModelFile("caa", False))
ca.add_child(ModelFile("cab", False))
c.add_child(ca)
cb = ModelFile("cb", False)
c.add_child(cb)
c.eta = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual(list, type(data[0]["children"]))
self.assertEqual(0, len(data[0]["children"]))
self.assertEqual(list, type(data[1]["children"]))
self.assertEqual(2, len(data[1]["children"]))
self.assertEqual("ba", data[1]["children"][0]["name"])
self.assertEqual(0, len(data[1]["children"][0]["children"]))
self.assertEqual("bb", data[1]["children"][1]["name"])
self.assertEqual(0, len(data[1]["children"][1]["children"]))
self.assertEqual(list, type(data[2]["children"]))
self.assertEqual(2, len(data[2]["children"]))
self.assertEqual("ca", data[2]["children"][0]["name"])
self.assertEqual(2, len(data[2]["children"][0]["children"]))
self.assertEqual("caa", data[2]["children"][0]["children"][0]["name"])
self.assertEqual(0, len(data[2]["children"][0]["children"][0]["children"]))
self.assertEqual("cab", data[2]["children"][0]["children"][1]["name"])
self.assertEqual(0, len(data[2]["children"][0]["children"][1]["children"]))
self.assertEqual("cb", data[2]["children"][1]["name"])
self.assertEqual(0, len(data[2]["children"][1]["children"]))
def test_full_path(self):
serialize = SerializeModel()
a = ModelFile("a", True)
b = ModelFile("b", True)
b.add_child(ModelFile("ba", False))
b.add_child(ModelFile("bb", True))
c = ModelFile("c", True)
ca = ModelFile("ca", True)
ca.add_child(ModelFile("caa", False))
ca.add_child(ModelFile("cab", False))
c.add_child(ca)
cb = ModelFile("cb", False)
c.add_child(cb)
c.eta = 100
files = [a, b, c]
out = parse_stream(serialize.model(files))
data = json.loads(out["data"])
self.assertEqual(3, len(data))
self.assertEqual("a", data[0]["full_path"])
self.assertEqual("b", data[1]["full_path"])
self.assertEqual("b/ba", data[1]["children"][0]["full_path"])
self.assertEqual("b/bb", data[1]["children"][1]["full_path"])
self.assertEqual("c", data[2]["full_path"])
self.assertEqual("c/ca", data[2]["children"][0]["full_path"])
self.assertEqual("c/ca/caa", data[2]["children"][0]["children"][0]["full_path"])
self.assertEqual("c/ca/cab", data[2]["children"][0]["children"][1]["full_path"])
self.assertEqual("c/cb", data[2]["children"][1]["full_path"])
|
"""
Makes a figure providing an overview of our dataset with a focus on lineages
laid out as follows:
a - Patient metadata
b - Donut plot of our lineage distributions vs the world
c - Timeline of patient sampling vs lineages identified
d - Choropleth of lineages by region
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from typing import Dict
import logging
import matplotlib
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid1.inset_locator import (inset_axes, InsetPosition,
                                                    mark_inset)
from covid_bronx import lineage_colors_dict, lineage_colors_dict_rgb
from covid_bronx.quality import fasta_files, sam_files, variant_files
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
savefile_a = "figures_final/figure1a"
savefile_b = "figures_final/figure1b"
months = {
1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec',
}
from covid_bronx.metadata import preprocess_metadata
from matplotlib.colors import colorConverter
# a) Timeline of lineages
logger.info("Plotting 1a")
timeline = pd.read_csv("data/external/global_lineages.csv")
from covid_bronx.metadata import get_metadata
metadata = get_metadata()
index = pd.date_range(metadata['collection_date'].min(), metadata['collection_date'].max())
metadata.index = metadata['name']
df = pd.read_csv("data/external/pangolin2.csv")
df.index = df['Sequence name'].apply(lambda x: x.split(" ")[0])
df.index = df.index.map(lambda x: "AECOM-" + str(int(x.split("-")[1])))
metadata[df.columns] = df
lineages_df = pd.read_csv("data/external/Lineages_updated.csv", index_col=0)
lineages = lineages_df['lineage'].dropna()
lineages.index = lineages.index.map(lambda x: x.replace("_", "-"))
metadata['Lineage'] = lineages
metadata = pd.concat([metadata,metadata.loc[['AECOM-126','AECOM-127','AECOM-128','AECOM-129','AECOM-130']]]).drop_duplicates(keep=False)
ddf = pd.DataFrame([ # Incremental Values
{
l: (metadata[metadata['collection_date'] == d]['Lineage']==l).sum()
for l in lineages
}
for d in index
],
index=index
)
ddf.index = ddf.index.map(lambda x: months[x.month])
ddmf = pd.DataFrame({k: v.sum(0) for k,v in ddf.groupby(ddf.index)})
cdf = pd.DataFrame([ # Cumulative Values
{
l: (metadata[metadata['collection_date'] <= d]['Lineage']==l).sum()
for l in lineages
}
for d in index
],
index=index
)
dd = pd.read_csv("data/external/data-by-day.csv", index_col=0)
dd.index = pd.to_datetime(dd.index)
dd['month'] = dd.index.map(lambda x: months[x.month])
bronx_sampling = ddmf.sum(0)
sampling = pd.read_csv("data/external/sampling.csv", index_col=0) # TODO: Verify this
sampling['date'] = pd.to_datetime(sampling['date'])
sampling['month'] = sampling['date'].apply(lambda x: months[x.month])
# deathsdmf = pd.Series({k:v['Deaths'].sum() for k,v in sampling.groupby('month')})
# casesdmf = pd.Series({k:v['Cases'].sum() for k,v in sampling.groupby('month')})
# hospitalizationdmf = pd.Series({k:v['Hospitalizations'].sum() for k,v in sampling.groupby('month')})
deathsdmf = pd.Series({k:v['DEATH_COUNT'].sum() for k,v in dd.groupby('month')})
casesdmf = pd.Series({k:v['CASE_COUNT'].sum() for k,v in dd.groupby('month')})
hospitalizationdmf = pd.Series({k:v['HOSPITALIZED_COUNT'].sum() for k,v in dd.groupby('month')})
sampling_df = pd.DataFrame({"Sampling": bronx_sampling, "Cases": casesdmf, "Deaths": deathsdmf, "Hospitalizations": hospitalizationdmf}).fillna(0.)
##########################################################
# Start Plotting
matplotlib.rcParams.update({'font.size': 16})
plt.clf()
plt.close()
fig1a = plt.figure(figsize=(24,24))
from covid_bronx.geography import gen_points_in_gdf_polys, blank_background_choropleth, get_zip_codes_metadata_geo
import geopandas as gpd
metadata = preprocess_metadata()
coverage_levels = pd.read_csv("data/processed/sequencing/coverage.csv", index_col=0)['0']
passed = coverage_levels[coverage_levels>=.95].index.intersection(sam_files.keys()).map(lambda x: x.replace("_","-").replace("-0", "-").replace("-0", "-"))
num_samples = len(passed)
from covid_bronx.metadata import preprocess_metadata
from matplotlib import colors
def colorizer(df: pd.DataFrame, color_dict: Dict) -> pd.DataFrame:
    """
    Given a dataframe where the rows are zip codes and columns are lineages,
    along with a dict mapping each lineage to its RGB color value, returns a
    dataframe of blended RGB colors with one column per zip code.
    """
    weights = (df.T / df.sum(1))  # per-zip lineage fractions
    color_series = pd.DataFrame(
        [sum(weights[z][c] * v for c, v in color_dict.items()) for z in weights.columns],
        index=weights.columns, columns=['r', 'g', 'b'])
    return color_series.T
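# Minimal illustration (toy data, not part of the original analysis): colorizer blends
# each zip code's lineage mix into a single RGB triple.
_demo_counts = pd.DataFrame({'B.1': [3, 0], 'B.1.1': [1, 2]}, index=['10453', '10457'])
_demo_colors = {'B.1': np.array([255, 0, 0]), 'B.1.1': np.array([0, 0, 255])}
_demo_rgb = colorizer(_demo_counts, _demo_colors)  # columns: zip codes; rows: r, g, b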
df = pd.read_csv("data/external/lineages_final.csv", index_col=0)
df.index = df['taxon'].apply(lambda x: x.split(" ")[0])
metadata[df.columns] = df
zips = metadata.loc[metadata.index.intersection(passed)]['zip_code'].to_numpy()
zips = np.array(sorted(zips)[2:])
# Get a listing of coordinates by zip code
bronx_zip_codes = [10453, 10457, 10460, 10458, 10467, 10468,10451, 10452, 10456,10454, 10455, 10459, 10474, 10463, 10471,10466, 10469, 10470, 10475,10461, 10462,10464, 10465, 10472, 10473]
gdf = gpd.read_file("data/external/ZIP_CODE_040114/ZIP_CODE_040114.geojson")
gdf.index = gdf['ZIPCODE']
gdf = gdf.loc[list(map(str, bronx_zip_codes))]
# Remove extraneous zip codes
latlons = gpd.GeoDataFrame({"ZIPCODE": gdf['ZIPCODE'], 'geometry': gdf['geometry'].centroid}).set_index("ZIPCODE")
zdf, bzip = get_zip_codes_metadata_geo()
zdf.index = zdf['zip_code'].map(lambda x: str(int(float(x))))
gdf[zdf.columns] = zdf
gdf = gdf.fillna(0.)
geocolor_dict = {k: lineage_colors_dict_rgb[k] for k in ['B.1', 'B.1.3', 'B.1.1']} # {'B.1': np.array([1,0,0]), 'B.1.3': np.array([0,1,0]), 'B.1.1': np.array([0,0,1])}
lineage_colors = colorizer(zdf[['B.1', 'B.1.3', 'B.1.1']], geocolor_dict).to_numpy()
lineage_colors = np.nan_to_num(lineage_colors, nan=0.0)
gdf['lineage_colors'] = pd.Series([colors.to_rgba(lineage_colors[:,i]/256) for i in range(len(lineage_colors.T))], index=zdf.index)
gdf['lineage_colors'] = gdf['lineage_colors'].fillna('#000000')
fig, ax = plt.subplots()
gdf.fillna(0.).plot(column='count', cmap='Purples',ax=ax, legend=True, legend_kwds={'shrink': 0.3})
gdf.boundary.plot(color='black', ax=ax)
ax.set_axis_off()
# Plot hospital locations
from shapely.geometry import Point
hospitals = [Point(-73.846184,40.849010)]
hospitals_df = gpd.GeoDataFrame(geometry=hospitals)
# hospitals_df.plot(ax=ax, markersize=500, color='black', marker='.', label="Collection Site") # We decided not to do this
plt.tight_layout(pad=.3)
plt.savefig(savefile_a + '.pdf')
plt.savefig(savefile_a + '.svg')
plt.clf()
# Plot lineage colored distribution
geocolor_dict = {k: lineage_colors_dict_rgb[k] for k in ['B.1', 'B.1.3', 'B.1.1']} # {'B.1': np.array([1,0,0]), 'B.1.3': np.array([0,1,0]), 'B.1.1': np.array([0,0,1])}
lineage_colors = colorizer(zdf[['B.1', 'B.1.3', 'B.1.1']], geocolor_dict).to_numpy()
lineage_colors = np.nan_to_num(lineage_colors, nan=0.0)
gdf['lineage_colors'] = pd.Series([colors.to_rgba(lineage_colors[:,i]/256) for i in range(len(lineage_colors.T))], index=zdf.index)
gdf['lineage_colors'] = gdf['lineage_colors'].fillna('#000000')
fig, ax = plt.subplots()
gdf.plot(ax=ax, color=gdf['lineage_colors'])
gdf.boundary.plot(color='black', ax=ax)
ax.set_axis_off()
plt.savefig("figures_final/figure1a_lineage.pdf")
plt.savefig("figures_final/figure1a_lineage.svg")
plt.show()
plt.clf()
# Figure 1b. Sampling Density
fig, ax = plt.subplots(figsize=(15,10))
ax_2 = ax.twinx()
sampling_df[['Cases', 'Hospitalizations', 'Deaths']].loc[['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep', 'Oct']].plot(ax=ax, label=True, color=['yellowgreen','orange','red'], linewidth=6)
ax.grid(linestyle='--', linewidth=1)
ax.set_ylim([0,115000])
ax_2.set_ylim([0,80])
ax.set_ylabel("Count of Cases / Hospitalizations / Deaths")
ax.legend()
ax_2.set_ylabel("Count of Genomes Sequenced")
ax.set_xlabel("Month")
ax.set_xticklabels(['Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct'])
sampling_df['Sampling'][['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct']].plot.bar(ax=ax_2, alpha=.5)
ax_2.grid(linestyle='--', color='blue', alpha=.5, linewidth=1)
ax_2.spines['right'].set_color('blue')
ax_2.yaxis.label.set_color('blue')
ax_2.tick_params(axis='y', colors='blue')
plt.savefig(savefile_b + '.pdf')
plt.savefig(savefile_b + '.svg')
plt.show()
plt.clf()
# Figure 2b. Lineages Over Time
fig, ax = plt.subplots(figsize=(30,15))
cdf_colors = [lineage_colors_dict[k] for k in ['B.1.26', 'B.1', 'B.2', 'B.2.1', 'A.1', 'B.1.3', 'B.1.1.1', 'B.1.1']]
cdf[['A.1', 'B.1', 'B.1.1', 'B.1.1.1', 'B.1.26', 'B.1.3', 'B.2', 'B.2.1',]].plot.line(legend=True, color=cdf_colors, ax=ax, linewidth=6)
ax.set_ylabel("Cumulative Sample Counts by Lineage")
plt.savefig('figures_final/figure2a' + '.pdf')
plt.savefig('figures_final/figure2a' + '.svg')
plt.clf()
# b) Donut Plot showing lineage distributions in world, US, NYS, and Bronx
# ax_q = fig1.add_subplot(gs[0:7, 13:])
import matplotlib
matplotlib.rcParams.update({'font.size':24})
fig, ax_q = plt.subplots(figsize=(30,30))
facecolor = colorConverter.to_rgba('white', alpha=0)
circulo = lambda r: plt.Circle((0,0), r, ec='white', fc=facecolor, lw=2)
logger.info("Plotting 1b")
donut = pd.read_csv("data/external/Donut_churro_plot.csv", index_col=0)
donut_colors = [lineage_colors_dict[k] for k in donut.index]
artist = donut['world'].plot.pie(radius=1, ax=ax_q, colors=donut_colors)
circle_1 = circulo(.8)
ax_q.add_artist(circle_1)
donut['USA'].plot.pie(radius=.8, ax=ax_q, labels=None, colors=donut_colors)
circle_1a = circulo(.6)
ax_q.add_artist(circle_1a)
donut['NYS'].plot.pie(radius=.6, ax=ax_q, labels=None, colors=donut_colors)
circle_2 = circulo(.4)
ax_q.add_artist(circle_2)
donut['Bronx'].plot.pie(radius=.4, ax=ax_q, labels=None, colors=donut_colors)
circle_3 = circulo(.2)
circle_4 = plt.Circle((0,0), .2, color='white')
ax_q.add_artist(circle_3)
ax_q.add_artist(circle_4)
ax_q.set_ylabel('')
plt.savefig("figures_final/figure2b.pdf")
plt.savefig("figures_final/figure2b.svg")
plt.show()
# Plot a triangular legend
fig, ax = plt.subplots()
x = np.array([-1,0])
y = np.array([1,0])
z = np.array([0,1])
x_c = geocolor_dict['B.1']/256
y_c = geocolor_dict['B.1.3']/256
z_c = geocolor_dict['B.1.1']/256
# Do convex combinations of everything
coordinates = []
k = 100
for lambd in np.linspace(0,1,k):
for mu in np.linspace(0, 1-lambd, int(k*(1-lambd))):
for w in np.linspace(0, 1-lambd-mu, int(k*(1-mu))):
combo = lambd*x + mu*y + w*z
color = colors.to_hex(max(lambd,0)*x_c + max(mu,0)*y_c + max(w,0)*z_c)
coordinates.append([combo[0], combo[1], color])
coordinates = np.array(coordinates)
xy = coordinates[:, 0:2].astype(float)
ax.scatter(xy[:,0],xy[:,1], c=coordinates[:,2])
ax.text(-1.4,-.1, 'B.1')
ax.text(1.05,-.1, 'B.1.3')
ax.text(-.25,1.1, 'B.1.1')
ax.set_axis_off()
plt.savefig("figures_final/figure1a_lineage_legend.pdf")
plt.savefig("figures_final/figure1a_lineage_legend.svg")
plt.show()
plt.clf()
|
"""recipient use composite primary key
Revision ID: 2693c4ff6368
Revises: 39df5dce6493
Create Date: 2022-05-06 20:39:56.757617+00:00
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
# revision identifiers, used by Alembic.
revision = "2693c4ff6368"
down_revision = "39df5dce6493"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"recipients", "message_id", existing_type=sa.INTEGER(), nullable=False
)
op.drop_column("recipients", "id")
op.create_primary_key(None, "recipients", ["message_id", "group"])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint("recipients_pkey", "recipients")
op.add_column(
"recipients", sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False)
)
op.alter_column(
"recipients", "message_id", existing_type=sa.INTEGER(), nullable=True
)
# ### end Alembic commands ###
|
"""Instalador para el paquete pyqt_password"""
from setuptools import setup
long_description = (
open("README.org").read()
+ '\n' +
open("LICENSE").read()
+ '\n')
setup(
name="pyqt_password",
version='1.0',
description='Aplicación para gestión de contraseñas',
long_description=long_description,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Gui',
'Intended Audience :: Any',
'License :: Apache',
'Programming Language :: Python',
        'Programming Language :: Python :: 3.10',
        'Operating System :: OS Independent',
],
keywords='Gestor de contraseñas pyqt5',
author='Francisco Muñoz Sánchez',
author_email='pacomun.gm@gmail.com',
url='https://github.com/pacomun/pyqt-password',
download_url='https://github.com/pacomun/pyqt-password',
license='Apache',
platforms='Unix, Windows',
packages=['pyqt_password', 'pyqt_password/helpGit'],
include_package_data=True,
)
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Experimental application for getting appointment statistics out of
your Google Calendar.
Inspired by the Google examples here:
https://developers.google.com/api-client-library/python/platforms/google_app_engine
"""
__author__ = 'allan@chartbeat.com (Allan Beaufour)'
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from handlers.analyze import AnalyzeHandler
from handlers.choose_cal import ChooseCalendarHandler
from handlers.env import decorator
from handlers.index import IndexHandler
def main():
application = webapp.WSGIApplication(
[
('/', IndexHandler),
('/choose_cal', ChooseCalendarHandler),
('/analyze', AnalyzeHandler),
(decorator.callback_path, decorator.callback_handler()),
],
debug=True)
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
#!/home/toby/anaconda/bin/python
import matplotlib.pyplot as plt
import numpy as np
def log_10_product(x, pos):
if (x < 1.0):
return '%3.1f' % (x)
else:
return '%i' % (x)
cc = '0.10'
font = {'family' : 'DejaVu Serif',
'weight' : 'normal',
'size' : 20,
}
tfont = {
'family' : 'DejaVu Serif',
'weight' : 'normal',
'size' : 14}
sfont = {
'family' : 'DejaVu Serif',
'weight' : 'bold',
'style': 'italic',
'size' : 10}
plt.rc('font', **tfont)
plt.rc("axes", linewidth=2.0,edgecolor=cc)
fig, ax = plt.subplots()
ax = plt.subplot(111, axisbg='0.90', axisbelow=True)
ax.grid(b=True, which='major', color='#ffffff', linewidth=2, linestyle='-')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.xaxis.set_tick_params(width=2,length=4)
ax.yaxis.set_tick_params('minor',width=2,length=0)
ax.yaxis.set_tick_params('major',width=2,length=4)
ax.yaxis.label.set_color(cc)
ax.xaxis.label.set_color(cc)
ax.tick_params(axis='x', colors=cc)
ax.tick_params(axis='y', colors=cc)
# End Defaults
eln,ab,cc = np.loadtxt("10022_REE.dat",usecols=(1,2,3), unpack=True)
eln15,ab15,cc15 = np.loadtxt("15415REE.dat",usecols=(1,4,3), unpack=True)
el = np.array(['x','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu'])
ratio = ab/cc
ratio15 = ab15/cc15
ax.set_yscale('log')
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.xaxis.set_major_formatter(plt.FixedFormatter(el))
formatter = plt.FuncFormatter(log_10_product)
ax.yaxis.set_major_formatter(formatter)
plt.plot(eln,ratio,linewidth=2,color='b')
plt.plot(eln,ratio,'o',markersize=15,color='r')
plt.plot(eln15,ratio15,linewidth=2,color='g')
plt.plot(eln15,ratio15,'o',markersize=15,color='yellow')
plt.xlim(56.5,71.5)
plt.ylim(0.05,500)
plt.xlabel('\nElement', fontdict=font)
plt.ylabel('Sample / Chondrite', fontdict=font)
plt.text(67,175, '10022', fontdict=font)
plt.text(67,0.3, '15415', fontdict=font)
plt.subplots_adjust(bottom=0.15, left=.15, right=.99, top=.99)
plt.savefig('test.png',dpi=300)
#plt.show()
|
import config
from page_objects.common import Common
from utilities.data_factory import DataRead
from utilities.locator_strategy import LocatorStrategy
class Contact(Common):
def __init__(self, driver):
super().__init__(driver)
self.driver = driver
self.data = DataRead.json_read('data.json')
contact_button = LocatorStrategy.locator_by_id("contact-link")
text_message = LocatorStrategy.locator_by_id("message")
subject_heading = LocatorStrategy.locator_by_id("id_contact")
email_address = LocatorStrategy.locator_by_id("email")
send_button = LocatorStrategy.locator_by_id("email")
upload_file = LocatorStrategy.locator_by_id("fileUpload")
def contact_us_form(self):
self.click(Contact.contact_button)
self.enter_text(Contact.text_message, text="This is a test.")
select = self.select_option_from_drop_down(Contact.subject_heading)
select.select_by_index(1)
self.clear_text(Contact.email_address)
self.enter_text(Contact.email_address,text=self.data['contact_email'])
self.enter_text(Contact.upload_file,text=config.file_path + self.data['upload_file'])
self.click(Contact.send_button)
|
from nnf import Var
from lib204 import Encoding
from tree import buildTree, findLeaves, findAllParents
givenBoardCondition = ["Wo_11 >> (Wh_21 || Sh_22)", "Wh_21 >> Br_31", "Wh_21 >> Wo_32", "Sh_22 >> Wo_31"]
S = ["Wh", "Wo", "Br"] #set of nodes required for a winning branch
k = 3 #maximum amount of steps required to find a winning branch
'''
This function creates Var-type variables from an input board condition and stores them in a variable dictionary.
Parameters: givenBoardCondition, a list of strings describing the board's n nodes via implications and/or logical operators; and k, an integer giving the number of steps allowed for a path to be found.
Returns: a dictionary of resource variables of Var type, excluding any variables beyond row k+1.
'''
def createVariables(givenBoardCondition,k):
#creating dictionaries for each resource to hold their resources
whVariables = {}
brVariables = {}
woVariables = {}
shVariables = {}
variableDictionary = {}
#Running through each list value in the given board condition and splitting them based on the operators to just leave variable names ie. W_31
#Then creating a dictionary key value pair where the key is the variable name and the value is a Var with the variable name used as the
# constructor value
for node in givenBoardCondition:
parts=node.replace(">>"," ").replace("&"," ").replace("|"," ").replace("("," ").replace(")"," ").split()
for variable in parts:
            variable = variable.strip()
#if variables are greater than k, they are not included in the final dictionary
if(int(variable[3]) <= k+1):
if "wh" in variable.lower():
whVariables[variable] = Var(variable)
if "wo" in variable.lower():
woVariables[variable] = Var(variable)
if "br" in variable.lower():
brVariables[variable] = Var(variable)
if "sh" in variable.lower():
shVariables[variable] = Var(variable)
#Merging variable dictionaries into 1 master dictionary containing all variables
variableDictionary = mergeDictionaries(variableDictionary,whVariables,woVariables,shVariables,brVariables)
return variableDictionary
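# Example: for the givenBoardCondition above with k = 3, the returned dictionary contains
# the keys 'Wo_11', 'Wh_21', 'Sh_22', 'Br_31', 'Wo_32' and 'Wo_31' (any variable on a row
# beyond k+1 would be dropped).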
'''
Helper method that merges the per-resource dictionaries into one master dictionary, skipping any dictionary that is empty.
Parameters: the overall variable dictionary plus the wheat, wood, sheep and brick dictionaries of the tree.
Returns: merged dictionary of all variables
'''
def mergeDictionaries(variableDictionary,whVariables,woVariables,shVariables,brVariables):
    #Checks whether the variable dictionary is empty; if so, copies the contents of the resource dictionary to avoid a None-type error when calling update. Otherwise merges the two dictionaries.
if not variableDictionary and whVariables:
variableDictionary = whVariables.copy()
if not variableDictionary and woVariables:
variableDictionary = woVariables.copy()
elif variableDictionary and woVariables:
variableDictionary.update(woVariables)
if not variableDictionary and brVariables:
variableDictionary = brVariables.copy()
elif variableDictionary and brVariables:
variableDictionary.update(brVariables)
if not variableDictionary and shVariables:
variableDictionary = shVariables.copy()
elif variableDictionary and shVariables:
variableDictionary.update(shVariables)
return variableDictionary
def createBoardConstraints(givenBoardCondition, variables, E, k):
"""Adds implication constraints to the logical encoding
Args:
givenBoardCondition: a list of strings representing the game board configuration
variables: a dictionary of Var type variables, where the key is a string of the variable name and the value is the matching variable
E: the logical encoding
k: an integer representing the maximum number of steps a solution may have
Returns:
E: the logical encoding with the implication constraints added
"""
for node in givenBoardCondition:
parts=node.replace(">>"," ").replace("&"," ").replace("|"," ").replace("("," ").replace(")"," ").split()
top = variables[parts[0]]
connectedNodes = []
for part in parts:
if part != parts[0]:
if int(part[3]) <= k+1:
connectedNodes.append(variables[part])
for node in connectedNodes:
E.add_constraint(node.negate() | top)
return E
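# Example: for "Wo_11 >> (Wh_21 || Sh_22)" this adds the clauses (~Wh_21 | Wo_11) and
# (~Sh_22 | Wo_11), i.e. choosing a child node forces its parent onto the path.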
def createImplicationList(givenBoardCondition,k):
"""Returns all implications in the logical model
Args:
givenBoardCondition: a list of strings representing the game board configuration
k: an integer representing the maximum number of steps a solution may have
Returns:
groups: a list of lists of dictionaries, each representing a node in the tree, with the node in the first index of a list being implied by the node in the second index of the list, i.e the node at the first index is "below" and "connected to" the node at the second index.
"""
groups = []
for node in givenBoardCondition:
parts = []
parts=node.replace(">>"," ").replace("&"," ").replace("|"," ").replace("("," ").replace(")"," ").split()
top = parts[0]
connectedNodes = []
for part in parts:
if part != parts[0]:
if int(part[3]) <= k+1:
connectedNodes.append(part)
for node in connectedNodes:
nodeData = {"res": node[0:2], "row": int(node[3]), "col": int(node[4])}
topData = {"res": top[0:2], "row": int(top[3]), "col": int(top[4])}
groups.append([nodeData,topData])
return groups
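# Example: "Wo_11 >> (Wh_21 || Sh_22)" contributes the [child, parent] pairs
# [{'res': 'Wh', 'row': 2, 'col': 1}, {'res': 'Wo', 'row': 1, 'col': 1}] and
# [{'res': 'Sh', 'row': 2, 'col': 2}, {'res': 'Wo', 'row': 1, 'col': 1}] to groups.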
#creates lists containing each of the common variables, based on node type, in the dictionary
def createVariableLists(variables):
"""Returns lists containing each of the common variables based on node type
Args:
variables: a dictionary of Var type variables
Returns:
wood, wheat, sheep, brick: lists of strings each representing nodes that are of the list's node type
"""
wood, wheat, brick, sheep = [], [], [], []
for key in variables:
if "Wh" in key:
wheat.append(key)
elif "Wo" in key:
wood.append(key)
elif "Br" in key:
brick.append(key)
elif "Sh" in key:
sheep.append(key)
return wood, wheat, sheep, brick
'''
The following three functions take each of the leaves and create constraints based on the idea that if one leaf is chosen, that leaf implies its parent node, which in turn implies its own parent all the way to the root (i.e., every leaf defines one path in the tree). However, only one of these paths can be true at a time. The function creates n constraints, where n is the length of leaf_list, of the form leaf1 >> !leaf2 & !leaf3 ... !leafn, leaf2 >> !leaf1 ... etc.
Parameters: list of leaf dictionaries, the encoding E, and the variables dictionary.
Returns: Encoding E with added constraints
'''
def leaf_constraints(leaf_list,E,variables):
for leaf in leaf_list:
other_leaves = [] #array to contain the leaves that are not the current leaf
for next_leaf in leaf_list:
if next_leaf != leaf:
other_leaves.append(next_leaf)
#adding constraint to course library E for the current leaf
E.add_constraint(dict_to_var(leaf, variables).negate() |( set_leaf_constraint(other_leaves,variables)))
return E
'''
Helper method that returns the conjunction !leaf2 & !leaf3 ... over all other leaves, i.e. the right-hand side of the constraint leaf >> !leaf2 & !leaf3 ...
Parameters: other_leaves as list of leaf dictionaries minus the current leaf, and the variables dictionary
Returns: a new constraint to be added to E
'''
def set_leaf_constraint(other_leaves,variables):
for other in other_leaves:
var = dict_to_var(other,variables)
#if not at the last node add the current leaf Var negated to the constraint with &. Otherwise, add the leaf constraint normally to the current constraint.
if other == other_leaves[0]:
new_constraint = var.negate()
else:
new_constraint &= var.negate()
return new_constraint
'''
Helper method that converts a leaf dictionary to its corresponding Var so it can be added to a constraint.
Parameters: some leaf dictionary and the variables dictionary
Returns: Associated Var Node that matches the leaf
'''
def dict_to_var(leaf,variables):
var = ""
var = leaf["res"] + "_" + str(leaf["row"]) + str(leaf["col"])
return variables[var]
'''
A function that creates the required constraints for overall variables Wh, Sh, Br, Wo. Each node of a certain type implies the corresponding overall type and if all nodes of a type are false, the overall variable is false.
Parameters: All 4 overall variables, variable dictionary, variable node type lists, E
Returns: E after constraints have been added
'''
def setOverallVariablesTrueOrFalse(Wh, Wo, Br, Sh, variables, wood, wheat, sheep, brick,E, S):
if "Wo" in S:
new_constraint = False
for node in wood:
E.add_constraint(variables[node].negate() | Wo)
if (node == wood[0]):
new_constraint = variables[node].negate()
else:
new_constraint &= variables[node].negate()
E.add_constraint(new_constraint.negate() | Wo.negate())
if "Wh" in S:
new_constraint = False
for node in wheat:
E.add_constraint(variables[node].negate() | Wh)
if (node == wheat[0]):
new_constraint = variables[node].negate()
else:
new_constraint &= variables[node].negate()
E.add_constraint(new_constraint.negate() | Wh.negate())
if "Sh" in S:
new_constraint = False
for node in sheep:
E.add_constraint(variables[node].negate() | Sh)
if (node == sheep[0]):
new_constraint = variables[node].negate()
else:
new_constraint &= variables[node].negate()
E.add_constraint(new_constraint.negate() | Sh.negate())
if "Br" in S:
new_constraint = False
for node in brick:
E.add_constraint(variables[node].negate() | Br)
if (node == brick[0]):
new_constraint = variables[node].negate()
else:
new_constraint &= variables[node].negate()
E.add_constraint(new_constraint.negate() | Br.negate())
return E
'''
Checks the set of required nodes S and, for any resource type that is not required, forces its overall variable to be true so that it does not block the winning condition.
Parameters: the required nodes S, the encoding E, and the overall variables
Returns: E
'''
def implementRequiredNodes(S,E,Wo,Wh,Sh,Br):
if "Wo" not in S:
E.add_constraint(Wo)
if "Wh" not in S:
E.add_constraint(Wh)
if "Sh" not in S:
E.add_constraint(Sh)
if "Br" not in S:
E.add_constraint(Br)
return E
"""
Takes in all the variables and returns a 2D list of variables in the same row
Variables in Row 1 go in the first nested list, Row 2 in the second nested list, etc.
"""
def variablesToRows(variables):
counter = 0
for key in variables:
if int(key[3]) > counter:
counter = int(key[3])
rowVariables = []
for x in range(counter+1):
rowVariables.append([])
for key in variables:
row = int(key[3])
rowVariables[row-1].append(variables[key])
print(rowVariables)
return rowVariables
""" Takes in the rowVariables and E and creates constraints so that only node
from each row can be chosen. Returns E after the constraints have been added"""
def rowVariablesToConstraints(rowVariables,E):
for row in rowVariables:
for variable in row:
for other in row:
if variable != other:
E.add_constraint(variable.negate() | other.negate())
return E
#
# Build an example full theory for your setting and return it.
#
# There should be at least 10 variables, and a sufficiently large formula to describe it (>50 operators).
# This restriction is fairly minimal, and if there is any concern, reach out to the teaching staff to clarify
# what the expectations are.
def example_theory():
variables=createVariables(givenBoardCondition,k)
#overall variables for overall node types and winning condition
Wh = Var("Wh")
Wo = Var("Wo")
Sh = Var("Sh")
Br = Var("Br")
W = Var("W")
#create variable arrays
wood, wheat, sheep, brick = createVariableLists(variables)
tree = buildTree(variables, createImplicationList(givenBoardCondition,k))
rowVariables = variablesToRows(variables)
E = Encoding()
#Making Constraints based on board condition
E = createBoardConstraints(givenBoardCondition,variables,E,k)
#Adding constraint for winning condition
    E.add_constraint(W.negate() | (Wh & Wo & Sh & Br))
#Setting W to always true so that the solver tries to find a winning model
E.add_constraint(W)
E = setOverallVariablesTrueOrFalse(Wh, Wo, Br, Sh, variables, wood, wheat, sheep, brick,E,S)
E = leaf_constraints(findLeaves(tree,findAllParents(tree,createImplicationList(givenBoardCondition,k))), E, variables)
E = implementRequiredNodes(S,E,Wo,Wh,Sh,Br)
E = rowVariablesToConstraints(rowVariables,E)
#print(findLeaves(tree,findAllParents(tree,createImplicationList(givenBoardCondition,k))))
print(E.constraints)
return E
if __name__ == "__main__":
T = example_theory()
print("\nSatisfiable: %s" % T.is_satisfiable())
print("# Solutions: %d" % T.count_solutions())
print(" Solution: %s" % T.solve())
'''
print("\nVariable likelihoods:")
for v,vn in zip([a,b,c,x,y,z], 'abcxyz'):
print(" %s: %.2f" % (vn, T.likelihood(v)))
print()
'''
|
# Generated by Django 3.1.1 on 2020-09-08 10:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sciencehistory', '0008_auto_20200908_1901'),
]
operations = [
migrations.AddField(
model_name='referringflow',
name='significance',
field=models.IntegerField(choices=[(1, 'Lowest'), (2, 'Low'), (3, 'Moderate'), (4, 'High'), (5, 'Highest')], default=3),
),
]
|
from ioUtils import getFile
from fsUtils import setDir, mkDir, isDir, moveDir
class myMusicName:
def __init__(self, debug=False):
        self.debug = debug
self.abrv = {}
self.abrv["AllMusic"] = "AM"
self.abrv["MusicBrainz"] = "MC"
self.abrv["Discogs"] = "DC"
self.abrv["AceBootlegs"] = "AB"
self.abrv["RateYourMusic"] = "RM"
self.abrv["LastFM"] = "LM"
self.abrv["DatPiff"] = "DP"
self.abrv["RockCorner"] = "RC"
self.abrv["CDandLP"] = "CL"
self.abrv["MusicStack"] = "MS"
self.abrv["MetalStorm"] = "MT"
self.moveFilename = "myMusicAlbumMatch.yaml"
def discConv(self, x):
if x is None:
return ""
x = x.replace("/", "-")
x = x.replace("¡", "")
while x.startswith(".") and len(x) > 1:
x = x[1:]
x = x.strip()
return x
def formatAlbum(self, albumName, albumType):
if albumType == 3:
retval = albumName.replace("(Single)", "")
retval = retval.replace("(EP)", "")
retval = retval.strip()
return retval
return albumName
def getMatchedDirName(self, albumName, albumID, db):
if self.abrv.get(db) is None:
raise ValueError("Could not find DB {0} in MyMusicName".format(db))
dbAbrv = self.abrv[db]
albumConvName = self.discConv(albumName)
matchedDirName = " :: ".join([albumConvName, "[{0}-{1}]".format(dbAbrv, albumID)])
return matchedDirName
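    # Example (illustrative): getMatchedDirName("Abbey Road", 12345, "Discogs")
    # returns "Abbey Road :: [DC-12345]".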
def getUnMatchedDirName(self, matchedDirName, mediaDirType):
vals = matchedDirName.split(" :: ")
if len(vals) == 2:
albumName = vals[0]
albumIDval = vals[1]
try:
albumID = int(albumIDval[(albumIDval.find("[")+3):albumIDval.rfind("]")])
except:
raise ValueError("Could not extract album ID from {0}".format(albumIDval))
if sum([x in mediaDirType for x in ["Single", "EP"]]) > 0:
albumName = "{0} (Single)".format(albumName)
if sum([x in mediaDirType for x in ["Mix", "MixTape"]]) > 0:
albumName = "{0} (MixTape)".format(albumName)
return albumName
else:
raise ValueError("Could not extract album name from {0}".format(matchedDirName))
def moveMyMatchedMusicAlbums(self, show=False):
rename = True
albumsToMove = getFile(ifile=self.moveFilename)
print("Found {0} music <-> discogs albums maps".format(len(albumsToMove)))
for db, dbValues in albumsToMove.items():
if dbValues is None:
continue
for artistName, artistAlbums in dbValues.items():
print("==>",artistName)
for myAlbumName,albumVals in artistAlbums.items():
dirval = albumVals["Dir"]
albumVal = albumVals["Album"]
ratio = albumVals["Ratio"]
dbAlbumName = albumVal["Name"]
dbAlbumCode = albumVal["Code"]
mediaType = albumVal["MediaType"]
matchedDir = setDir(dirval, "Match")
mkDir(matchedDir)
srcName = myAlbumName
srcDir = setDir(dirval, srcName)
if not isDir(srcDir):
print("{0} does not exist".format(srcDir))
continue
mediaDir = setDir(matchedDir, self.discConv(mediaType))
mkDir(mediaDir)
if rename is True:
dstName = self.getMatchedDirName(self.discConv(dbAlbumName), dbAlbumCode, db)
else:
dstName = self.getMatchedDirName(myAlbumName, dbAlbumCode, db)
if show is True:
print('\t{0}'.format(mediaDir))
print("\t\t[{0}]".format(srcName))
print("\t\t[{0}]".format(dstName))
continue
dstDir = setDir(mediaDir, dstName)
if isDir(dstDir):
print("{0} already exists".format(dstDir))
continue
print("\tMoving {0} ---> {1}".format(srcDir, dstDir))
moveDir(srcDir, dstDir, debug=True)
|
"""
Package for components related to video/image processing,
excluding camera/display interface related components.
"""
|
# coding=utf-8
# Copyright 2022 The Deeplab2 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers and conversions for third party pycocotools.
This is derived from code in the Tensorflow Object Detection API:
https://github.com/tensorflow/models/tree/master/research/object_detection
Huang et al. "Speed/accuracy trade-offs for modern convolutional object
detectors" CVPR 2017.
"""
from typing import Any, Collection, Dict, List, Optional, Union
import numpy as np
from pycocotools import mask
COCO_METRIC_NAMES_AND_INDEX = (
('Precision/mAP', 0),
('Precision/mAP@.50IOU', 1),
('Precision/mAP@.75IOU', 2),
('Precision/mAP (small)', 3),
('Precision/mAP (medium)', 4),
('Precision/mAP (large)', 5),
('Recall/AR@1', 6),
('Recall/AR@10', 7),
('Recall/AR@100', 8),
('Recall/AR@100 (small)', 9),
('Recall/AR@100 (medium)', 10),
('Recall/AR@100 (large)', 11)
)
def _ConvertBoxToCOCOFormat(box: np.ndarray) -> List[float]:
"""Converts a box in [ymin, xmin, ymax, xmax] format to COCO format.
This is a utility function for converting from our internal
[ymin, xmin, ymax, xmax] convention to the convention used by the COCO API
i.e., [xmin, ymin, width, height].
Args:
box: a [ymin, xmin, ymax, xmax] numpy array
Returns:
a list of floats representing [xmin, ymin, width, height]
"""
return [float(box[1]), float(box[0]), float(box[3] - box[1]),
float(box[2] - box[0])]
def ExportSingleImageGroundtruthToCoco(
image_id: Union[int, str],
next_annotation_id: int,
category_id_set: Collection[int],
groundtruth_boxes: np.ndarray,
groundtruth_classes: np.ndarray,
groundtruth_masks: np.ndarray,
groundtruth_is_crowd: Optional[np.ndarray] = None) -> List[Dict[str, Any]]:
"""Exports groundtruth of a single image to COCO format.
This function converts groundtruth detection annotations represented as numpy
arrays to dictionaries that can be ingested by the COCO evaluation API. Note
that the image_ids provided here must match the ones given to
ExportSingleImageDetectionsToCoco. We assume that boxes and classes are in
correspondence - that is: groundtruth_boxes[i, :], and
groundtruth_classes[i] are associated with the same groundtruth annotation.
  In the exported result, "area" fields are always set to the foreground area of
the mask.
Args:
image_id: a unique image identifier either of type integer or string.
next_annotation_id: integer specifying the first id to use for the
groundtruth annotations. All annotations are assigned a continuous integer
id starting from this value.
category_id_set: A set of valid class ids. Groundtruth with classes not in
category_id_set are dropped.
groundtruth_boxes: numpy array (float32) with shape [num_gt_boxes, 4]
groundtruth_classes: numpy array (int) with shape [num_gt_boxes]
groundtruth_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
groundtruth_is_crowd: optional numpy array (int) with shape [num_gt_boxes]
indicating whether groundtruth boxes are crowd.
Returns:
a list of groundtruth annotations for a single image in the COCO format.
Raises:
ValueError: if (1) groundtruth_boxes and groundtruth_classes do not have the
right lengths or (2) if each of the elements inside these lists do not
have the correct shapes or (3) if image_ids are not integers
"""
if len(groundtruth_classes.shape) != 1:
raise ValueError('groundtruth_classes is '
'expected to be of rank 1.')
if len(groundtruth_boxes.shape) != 2:
raise ValueError('groundtruth_boxes is expected to be of '
'rank 2.')
if groundtruth_boxes.shape[1] != 4:
raise ValueError('groundtruth_boxes should have '
'shape[1] == 4.')
num_boxes = groundtruth_classes.shape[0]
if num_boxes != groundtruth_boxes.shape[0]:
raise ValueError('Corresponding entries in groundtruth_classes, '
'and groundtruth_boxes should have '
'compatible shapes (i.e., agree on the 0th dimension).'
'Classes shape: %d. Boxes shape: %d. Image ID: %s' % (
groundtruth_classes.shape[0],
groundtruth_boxes.shape[0], image_id))
has_is_crowd = groundtruth_is_crowd is not None
if has_is_crowd and len(groundtruth_is_crowd.shape) != 1:
raise ValueError('groundtruth_is_crowd is expected to be of rank 1.')
groundtruth_list = []
for i in range(num_boxes):
if groundtruth_classes[i] in category_id_set:
iscrowd = groundtruth_is_crowd[i] if has_is_crowd else 0
segment = mask.encode(np.asfortranarray(groundtruth_masks[i]))
area = mask.area(segment)
export_dict = {
'id': next_annotation_id + i,
'image_id': image_id,
'category_id': int(groundtruth_classes[i]),
'bbox': list(_ConvertBoxToCOCOFormat(groundtruth_boxes[i, :])),
'segmentation': segment,
'area': area,
'iscrowd': iscrowd
}
groundtruth_list.append(export_dict)
return groundtruth_list
def ExportSingleImageDetectionMasksToCoco(
image_id: Union[int, str], category_id_set: Collection[int],
detection_masks: np.ndarray, detection_scores: np.ndarray,
detection_classes: np.ndarray) -> List[Dict[str, Any]]:
"""Exports detection masks of a single image to COCO format.
This function converts detections represented as numpy arrays to dictionaries
that can be ingested by the COCO evaluation API. We assume that
detection_masks, detection_scores, and detection_classes are in correspondence
- that is: detection_masks[i, :], detection_classes[i] and detection_scores[i]
are associated with the same annotation.
Args:
image_id: unique image identifier either of type integer or string.
category_id_set: A set of valid class ids. Detections with classes not in
category_id_set are dropped.
detection_masks: uint8 numpy array of shape [num_detections, image_height,
image_width] containing detection_masks.
detection_scores: float numpy array of shape [num_detections] containing
scores for detection masks.
detection_classes: integer numpy array of shape [num_detections] containing
the classes for detection masks.
Returns:
a list of detection mask annotations for a single image in the COCO format.
Raises:
ValueError: if (1) detection_masks, detection_scores and detection_classes
do not have the right lengths or (2) if each of the elements inside these
lists do not have the correct shapes or (3) if image_ids are not integers.
"""
if len(detection_classes.shape) != 1 or len(detection_scores.shape) != 1:
    raise ValueError('All entries in detection_classes and detection_scores '
                     'are expected to be of rank 1.')
num_boxes = detection_classes.shape[0]
if not num_boxes == len(detection_masks) == detection_scores.shape[0]:
raise ValueError('Corresponding entries in detection_classes, '
'detection_scores and detection_masks should have '
'compatible lengths and shapes '
'Classes length: %d. Masks length: %d. '
'Scores length: %d' % (
detection_classes.shape[0], len(detection_masks),
detection_scores.shape[0]
))
detections_list = []
for i in range(num_boxes):
if detection_classes[i] in category_id_set:
detections_list.append({
'image_id': image_id,
'category_id': int(detection_classes[i]),
'segmentation': mask.encode(np.asfortranarray(detection_masks[i])),
'score': float(detection_scores[i])
})
return detections_list
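# Minimal usage sketch (toy data; assumes pycocotools and numpy are available):
if __name__ == '__main__':
  _boxes = np.array([[0., 0., 2., 2.]], dtype=np.float32)   # [ymin, xmin, ymax, xmax]
  _classes = np.array([1], dtype=np.int32)
  _masks = np.zeros((1, 4, 4), dtype=np.uint8)
  _masks[0, :2, :2] = 1                                     # 4 foreground pixels
  _gt = ExportSingleImageGroundtruthToCoco(
      image_id=1, next_annotation_id=1, category_id_set={1},
      groundtruth_boxes=_boxes, groundtruth_classes=_classes,
      groundtruth_masks=_masks)
  print(_gt[0]['bbox'], _gt[0]['area'])                     # [0.0, 0.0, 2.0, 2.0] 4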
|
n = int(input())
# mem[p] stores the smallest and largest values seen so far with remainder p mod 80 (0 = unseen)
mem = [[0, 0] for i in range(80)]
# m1, m2: best pair found so far (same remainder mod 80, maximal absolute difference)
m1 = m2 = 0
m = 80
for i in range(n):
x = int(input())
p = x % m
y_min = mem[p][0]
y_max = mem[p][1]
if y_max and abs(x - y_max) > abs(m1 - m2):
m1 = x
m2 = y_max
if y_min and abs(x - y_min) > abs(m1 - m2):
m1 = x
m2 = y_min
if x < y_min or not y_min:
mem[p][0] = x
if x > y_max:
mem[p][1] = x
print(m1, m2)
|
from .catboost_ranker import CatBoostRanker
|
import numpy as np
# Answer for Part 2a)
def MLE_transition_parameters(train_dir = "data/ES/train"):
    ''' Calculates the transition parameters as count(yi-1, yi) / count(yi-1)
    :param train_dir: path to the training file (ES or RU)
    :type train_dir: str
    :return: count_y_dict, Count(yi-1): keys are labels (plus 'START'/'STOP'), values are counts
    :rtype: dict
    :return: count_y_to_y_dict, Count(yi-1, yi): keys are (previous label, label) tuples, values are counts
    :rtype: dict
    :return: transition_dict, Count(yi-1, yi)/Count(yi-1): keys are (previous label, label) tuples, values are log MLE transition probabilities
    :rtype: dict
'''
count_y_dict = {}
count_y_to_y_dict = {}
transition_dict = {}
prev_label = ""
with open(train_dir, "r", encoding="utf8") as f:
for line in f:
# Parse each line
if len(line.split(" ")) == 2:
word, label = line.replace("\n","").split(" ")
else:
label = ''
if label == '' and prev_label != '':
count_y_dict["STOP"] = count_y_dict.get("STOP") + 1 if count_y_dict.get("STOP") else 1
elif label !='':
if prev_label == '':
count_y_dict["START"] = count_y_dict.get("START") + 1 if count_y_dict.get("START") else 1
if label in count_y_dict:
count_y_dict[label] = count_y_dict.get(label)+1
else:
count_y_dict[label] = 1
if prev_label == '' and label != '':
if ("START", label) in count_y_to_y_dict:
count_y_to_y_dict[("START", label)] = count_y_to_y_dict.get(("START", label)) + 1
else:
count_y_to_y_dict[("START", label)] = 1
elif label == '' and prev_label != '':
if (prev_label, "STOP") in count_y_to_y_dict:
count_y_to_y_dict[(prev_label, "STOP")] = count_y_to_y_dict.get((prev_label, "STOP")) + 1
else:
count_y_to_y_dict[(prev_label, "STOP")] = 1
elif label != '' and prev_label != '':
if (prev_label, label) in count_y_to_y_dict:
count_y_to_y_dict[(prev_label, label)] = count_y_to_y_dict.get((prev_label, label)) + 1
else:
count_y_to_y_dict[(prev_label, label)] = 1
prev_label = label
# print("count(y): \n", count_y_dict, "\n")
# print("count(y->x): \n",list(count_y_to_y_dict.items()), len(count_y_to_y_dict), "\n")
# Calculate our transition
for key, value in count_y_to_y_dict.items(): # Default is iterate keys()
prev_label = key[0]
label = key[1]
prob = value / count_y_dict.get(prev_label)
transition_dict[key] = np.where(prob != 0, np.log(prob), float("-inf"))
# print("MLE: \n",list(transition_dict.items()), len(transition_dict) ,"\n")
return count_y_dict, count_y_to_y_dict, transition_dict
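# Hypothetical usage (the path and the label name 'O' are assumptions; each non-blank
# line of the training file is "<word> <label>", with blank lines separating sentences):
if __name__ == '__main__':
    count_y, count_y_to_y, transitions = MLE_transition_parameters("data/ES/train")
    print(transitions.get(("START", "O")))   # log MLE probability of START -> O, if present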
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def add_mask_transformer(self, temperature=.66, hard_sigmoid=(-.1, 1.1)):
"""
hard_sigmoid:
False: use sigmoid only
True: hard thresholding
(a, b): hard thresholding on rescaled sigmoid
"""
self.temperature = temperature
self.hard_sigmoid = hard_sigmoid
if hard_sigmoid is False:
self.transform = lambda x: torch.sigmoid(x / temperature)
elif hard_sigmoid is True:
self.transform = lambda x: F.hardtanh(
x / temperature, 0, 1)
else:
a, b = hard_sigmoid
self.transform = lambda x: F.hardtanh(
torch.sigmoid(x / temperature) * (b - a) + a, 0, 1)
def dconv_bn_relu(in_dim, out_dim):
return nn.Sequential(
nn.ConvTranspose2d(in_dim, out_dim, 5, 2,
padding=2, output_padding=1, bias=False),
nn.BatchNorm2d(out_dim),
nn.ReLU())
# Must sub-class ConvGenerator to provide transform()
class ConvGenerator(nn.Module):
def __init__(self, latent_size=128):
super().__init__()
dim = 64
self.l1 = nn.Sequential(
nn.Linear(latent_size, dim * 8 * 4 * 4, bias=False),
nn.BatchNorm1d(dim * 8 * 4 * 4),
nn.ReLU())
self.l2_5 = nn.Sequential(
dconv_bn_relu(dim * 8, dim * 4),
dconv_bn_relu(dim * 4, dim * 2),
dconv_bn_relu(dim * 2, dim),
nn.ConvTranspose2d(dim, self.out_channels, 5, 2,
padding=2, output_padding=1))
def forward(self, input):
net = self.l1(input)
net = net.view(net.shape[0], -1, 4, 4)
net = self.l2_5(net)
return self.transform(net)
class ConvDataGenerator(ConvGenerator):
def __init__(self, latent_size=128):
self.out_channels = 3
super().__init__(latent_size=latent_size)
self.transform = lambda x: torch.sigmoid(x)
class ConvMaskGenerator(ConvGenerator):
def __init__(self, latent_size=128, temperature=.66,
hard_sigmoid=(-.1, 1.1)):
self.out_channels = 1
super().__init__(latent_size=latent_size)
add_mask_transformer(self, temperature, hard_sigmoid)
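# A minimal sketch exercising the generators above (illustrative only): sample a
# batch of latent codes and produce 64x64 "data" images and soft masks. eval()
# is used so BatchNorm relies on its running statistics regardless of batch size.
if __name__ == "__main__":
    z = torch.randn(4, 128)
    data_gen = ConvDataGenerator().eval()
    mask_gen = ConvMaskGenerator().eval()
    with torch.no_grad():
        images = data_gen(z)  # (4, 3, 64, 64), values in (0, 1)
        masks = mask_gen(z)   # (4, 1, 64, 64), values in [0, 1]
    print(images.shape, masks.shape)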
|
import time
from typing import Any
class Cache:
''' Inner cache for registered callbacks '''
def __init__(self):
self.data = {}
def add(self, seq, webhook_url: str, pin: str, retention_sec: int, rand: str, context: Any) -> None:
        '''
        Saves an entry in the internal dictionary, keyed by seq.
        :param seq: random id used as the cache key
        :param webhook_url: webhook URL to call back
        :param pin: PIN associated with this entry
        :param retention_sec: time period (in seconds) to keep the entry
        :param rand: random string stored alongside the entry
        :param context: serializable context, returned later by clean_obsolete()
        '''
self.data[seq] = {
'webhook_url': webhook_url,
'pin': pin,
'context': context,
'rand': rand,
'timestamp': time.time(),
'retention_sec': retention_sec
}
def clean_obsolete(self):
obsolete_contexts = []
for k in list(self.data):
v = self.data[k]
if v['timestamp'] + v['retention_sec'] < time.time():
obsolete_contexts.append(v['context'])
del self.data[k]
return obsolete_contexts
def get(self, key):
return self.data.get(key)
def remove(self, key):
return self.data.pop(key)
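# A minimal usage sketch for the cache above (illustrative values only):
# entries older than their retention window are swept out by clean_obsolete().
if __name__ == "__main__":
    cache = Cache()
    cache.add(seq="abc123", webhook_url="https://example.com/hook", pin="0000",
              retention_sec=1, rand="r4nd", context={"user": 42})
    print(cache.get("abc123")["webhook_url"])  # https://example.com/hook
    time.sleep(1.1)
    print(cache.clean_obsolete())  # [{'user': 42}]
    print(cache.get("abc123"))     # None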
|
from dataclasses import dataclass
from datetime import timedelta
from functools import lru_cache
from typing import Optional, Union, List
from bson import ObjectId
from extutils.dt import localtime
from extutils.linesticker import LineStickerUtils
from JellyBot import systemconfig
from flags import AutoReplyContentType, ModelValidityCheckResult
from models import OID_KEY
from models.exceptions import FieldKeyNotExistError
from models.utils import AutoReplyValidator
from extutils.utils import enumerate_ranking
from ._base import Model
from .field import (
ObjectIDField, TextField, AutoReplyContentTypeField, ModelField, ModelArrayField,
BooleanField, IntegerField, ArrayField, DateTimeField, ColorField, ModelDefaultValueExt
)
__all__ = ["AutoReplyContentModel", "AutoReplyModuleModel", "AutoReplyModuleExecodeModel", "AutoReplyModuleTagModel",
"AutoReplyTagPopularityScore", "UniqueKeywordCountEntry", "UniqueKeywordCountResult"]
@lru_cache(maxsize=1000)
def _content_to_str(content_type, content):
if content_type == AutoReplyContentType.TEXT:
return content
return f"({content_type.key} / {content})"
@lru_cache(maxsize=1000)
def _content_to_html(content_type, content):
if content_type == AutoReplyContentType.TEXT:
return content.replace("\n", "<br>").replace(" ", " ")
if content_type == AutoReplyContentType.IMAGE:
return f'<img src="{content}"/>'
if content_type == AutoReplyContentType.LINE_STICKER:
return f'<img src="{LineStickerUtils.get_sticker_url(content)}"/>'
return content
class AutoReplyContentModel(Model):
WITH_OID = False
Content = TextField(
"c", default=ModelDefaultValueExt.Required, maxlen=systemconfig.AutoReply.MaxContentLength,
allow_none=False, must_have_content=True)
ContentType = AutoReplyContentTypeField("t")
@property
def content_html(self):
return _content_to_html(self.content_type, self.content)
# noinspection PyAttributeOutsideInit
def perform_validity_check(self) -> ModelValidityCheckResult:
if self.content_type is None:
self.content_type = AutoReplyContentType.default()
if self.is_field_none("Content"):
return ModelValidityCheckResult.X_AR_CONTENT_EMPTY
valid = AutoReplyValidator.is_valid_content(self.content_type, self.content, online_check=False)
if not valid:
if self.content_type == AutoReplyContentType.IMAGE:
return ModelValidityCheckResult.X_AR_CONTENT_NOT_IMAGE
elif self.content_type == AutoReplyContentType.LINE_STICKER:
return ModelValidityCheckResult.X_AR_CONTENT_NOT_LINE_STICKER
return ModelValidityCheckResult.O_OK
def __str__(self):
return _content_to_str(self.content_type, self.content)
class AutoReplyModuleModel(Model):
key_kw = "kw"
# Main
Keyword = ModelField(key_kw, AutoReplyContentModel, default=ModelDefaultValueExt.Required)
Responses = ModelArrayField("rp", AutoReplyContentModel, default=ModelDefaultValueExt.Required,
max_len=systemconfig.AutoReply.MaxResponses)
KEY_KW_CONTENT = f"{key_kw}.{AutoReplyContentModel.Content.key}"
KEY_KW_TYPE = f"{key_kw}.{AutoReplyContentModel.ContentType.key}"
ChannelOid = ObjectIDField("ch", default=ModelDefaultValueExt.Required)
Active = BooleanField("at", default=True)
# Type
ReferTo = ObjectIDField("rid", allow_none=True, default=ModelDefaultValueExt.Optional)
# Record
CreatorOid = ObjectIDField("cr", stores_uid=True, default=ModelDefaultValueExt.Required)
RemoverOid = ObjectIDField("rmv", stores_uid=True, default=ModelDefaultValueExt.Optional)
# Property
Pinned = BooleanField("p")
Private = BooleanField("pr")
CooldownSec = IntegerField("cd", positive_only=True)
ExcludedOids = ArrayField("e", ObjectId, stores_uid=True)
TagIds = ArrayField("t", ObjectId)
# Stats
CalledCount = IntegerField("c")
LastUsed = DateTimeField("l", allow_none=True)
RemovedAt = DateTimeField("rm", allow_none=True)
@property
def refer_oid(self) -> Optional[ObjectId]:
try:
if self.is_reference:
return self.refer_to
else:
return None
except (KeyError, FieldKeyNotExistError, AttributeError):
return None
@property
def is_reference(self) -> bool:
try:
return not self.is_field_none("ReferTo")
except (KeyError, FieldKeyNotExistError, AttributeError):
return False
@property
def keyword_repr(self) -> str:
return f"{str(self.keyword)}"
@property
def created_at_expr(self) -> str:
"""
Expression of the module creation timestamp.
        Used when displaying module info on the website.
"""
return localtime(self.id.generation_time).strftime("%Y-%m-%d %H:%M:%S")
@property
def last_used_expr(self) -> Optional[str]:
"""
Expression of the module last used timestamp.
        Used when displaying module info on the website.
"""
if self.last_used:
return localtime(self.last_used).strftime("%Y-%m-%d %H:%M:%S")
else:
return None
@property
def removed_at_expr(self) -> Optional[str]:
"""
Expression of the module removal timestamp.
        Used when displaying module info on the website.
"""
if self.removed_at:
return localtime(self.removed_at).strftime("%Y-%m-%d %H:%M:%S")
else:
return None
def can_be_used(self, current_time):
if self.last_used:
return current_time - self.last_used > timedelta(seconds=self.cooldown_sec)
else:
return True
class AutoReplyModuleExecodeModel(Model):
WITH_OID = False
Keyword = ModelField(AutoReplyModuleModel.key_kw, AutoReplyContentModel,
default=ModelDefaultValueExt.Required)
Responses = ModelArrayField("rp", AutoReplyContentModel, default=ModelDefaultValueExt.Required,
max_len=systemconfig.AutoReply.MaxResponses)
Pinned = BooleanField("p", readonly=True)
Private = BooleanField("pr", readonly=True)
CooldownSec = IntegerField("cd", readonly=True)
TagIds = ArrayField("t", ObjectId)
class AutoReplyModuleTagModel(Model):
Name = TextField("n", must_have_content=True, default=ModelDefaultValueExt.Required)
Color = ColorField("c")
@dataclass
class AutoReplyTagPopularityScore:
KEY_W_AVG_TIME_DIFF = "w_atd"
KEY_W_APPEARANCE = "w_app"
KEY_APPEARANCE = "app"
SCORE = "sc"
tag_id: ObjectId
score: float
appearances: int
weighted_avg_time_diff: float
weighted_appearances: float
@staticmethod
def parse(d: dict):
return AutoReplyTagPopularityScore(
tag_id=d[OID_KEY],
score=d[AutoReplyTagPopularityScore.SCORE],
appearances=d[AutoReplyTagPopularityScore.KEY_APPEARANCE],
weighted_avg_time_diff=d[AutoReplyTagPopularityScore.KEY_W_AVG_TIME_DIFF],
weighted_appearances=d[AutoReplyTagPopularityScore.KEY_W_APPEARANCE],
)
@dataclass
class UniqueKeywordCountEntry:
word: str
word_type: Union[int, AutoReplyContentType]
count_usage: int
count_module: int
rank: str
def __post_init__(self):
self.word_type = AutoReplyContentType.cast(self.word_type)
@property
def word_str(self):
return _content_to_str(self.word_type, self.word)
@property
def word_html(self):
"""
Get the keyword representation in HTML.
:return: keyword representation in HTML
"""
return _content_to_html(self.word_type, self.word)
class UniqueKeywordCountResult:
KEY_WORD = "w"
KEY_WORD_TYPE = "wt"
KEY_COUNT_USAGE = "cu"
KEY_COUNT_MODULE = "cm"
def __init__(self, crs, limit: Optional[int] = None):
self.data: List[UniqueKeywordCountEntry] = []
usage_key = UniqueKeywordCountResult.KEY_COUNT_USAGE
for rank, d in enumerate_ranking(crs, is_tie=lambda cur, prv: cur[usage_key] == prv[usage_key]):
self.data.append(
UniqueKeywordCountEntry(
word=d[OID_KEY][UniqueKeywordCountResult.KEY_WORD],
word_type=d[OID_KEY][UniqueKeywordCountResult.KEY_WORD_TYPE],
count_usage=d[UniqueKeywordCountResult.KEY_COUNT_USAGE],
count_module=d[UniqueKeywordCountResult.KEY_COUNT_MODULE],
rank=rank
)
)
self.limit = limit
|
#! ./venv/bin/python
from GitHubBatchRunner import GitHubBatchRunner
if __name__ == '__main__':
batch_file = "repo_names.json"
output_folder = r'./Output/all_issues_c_repositories'
github_user_name = 'user_name'
github_password = 'password'
log_flag = True
error_log_file_name = r'error_log.txt'
amend_result = True
temp_folder = None
waiting_between_request = 0
waiting_after_many_request = (-1, 600)
waiting_after_exception = 300
core_api_threshold = 100
github_batch_runner = GitHubBatchRunner(batch_file=batch_file, output_folder=output_folder,
github_user_name=github_user_name, github_password=github_password,
log_flag=log_flag, error_log_file_name=error_log_file_name,
amend_result=amend_result, temp_folder=temp_folder,
waiting_between_request=waiting_between_request,
waiting_after_many_request=waiting_after_many_request,
waiting_after_exception=waiting_after_exception,
core_api_threshold=core_api_threshold)
# github_batch_runner.run_batch()
# github_batch_runner.check_repos()
amend_batch_file = 'java_issue_amendment.json'
github_batch_runner.amend_issue(batch_file_address=amend_batch_file)
|
__all__ = ['vk']
from .vk import residual_screen, residual_screen_sphere
|
from flask_restful.reqparse import RequestParser
class aggregateValidate(object):
def validate(self):
valid = RequestParser(bundle_errors=True)
valid.add_argument("pipeline", required=True)
valid.add_argument("entity", required=True)
return valid.parse_args()
|
from bindings import Cloudevent
import bindings as b
import wasmtime
from cloudevents.http import CloudEvent, to_binary
def run(cloudevent: CloudEvent) -> None:
store = wasmtime.Store()
module = wasmtime.Module.from_file(store.engine, "crates/ce/target/wasm32-wasi/release/ce.wasm")
linker = wasmtime.Linker(store.engine)
linker.define_wasi()
wasi = wasmtime.WasiConfig()
wasi.inherit_stdout()
wasi.inherit_stderr()
store.set_wasi(wasi)
headers, body = to_binary(cloudevent)
wasm = b.WasiCe(store, linker, module)
event = Cloudevent.create(store, wasm)
event.set_id(store, headers["ce-id"])
event.set_source(store, headers["ce-source"])
event.set_type(store, headers["ce-type"])
event.set_specversion(store, headers["ce-specversion"])
event.set_time(store, headers["ce-time"])
event.set_data(store, body)
res = wasm.ce_handler(store, event)
assert event.get_id(store) == headers["ce-id"]
assert event.get_source(store) == headers["ce-source"]
assert event.get_type(store) == headers["ce-type"]
assert event.get_specversion(store) == headers["ce-specversion"]
assert event.get_time(store) == headers["ce-time"]
assert event.get_data(store) == body
event.drop(store)
res.value.drop(store)
if __name__ == "__main__":
# Create a CloudEvent
# - The CloudEvent "id" is generated if omitted. "specversion" defaults to "1.0".
attributes = {
"type": "com.microsoft.steelthread.wasm",
"source": "https://example.com/event-producer",
}
data = {"message": "Hello World!"}
event = CloudEvent(attributes, data)
run(event)
|
import numpy as np
from dipy.tracking.distances import (bundles_distances_mam,
bundles_distances_mdf)
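# NOTE: experiment() is assumed to be defined or imported elsewhere in the full
# script; only the parameter-sweep driver is shown in this fragment.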
if __name__ == '__main__':
np.random.seed(42)
filename_idxs = [0, 1]
embeddings = ['DR', 'FLIP']
ks = [5, 20, 40, 100]
nbs_points = [20, 64]
distance_thresholds = [20.0, 200.0]
distance_functions = [bundles_distances_mam, bundles_distances_mdf]
for filename_idx in filename_idxs:
for embedding in embeddings:
for k in ks:
for nb_points in nbs_points:
for distance_threshold in distance_thresholds:
for distance_function in distance_functions:
print("EXPERIMENT BEGINS")
experiment(filename_idx, embedding, k, distance_function, nb_points, distance_threshold)
print("EXPERIMENT ENDS")
print("")
print("")
print("")
print("")
|
#flask testing
from flask import Flask, flash, render_template, request, redirect, url_for, send_file, send_from_directory
from Tkinter import * #Needed for the GUI portion
import Tkinter as tk
import os #Needed for the GUI portion
import shlex, subprocess #Needed to call on the C program
from email.mime.multipart import MIMEMultipart #Needed to email user
from email.mime.text import MIMEText #Needed to email user
from werkzeug.utils import secure_filename
import smtplib
from string import Template
MY_ADDRESS = 'example@gmail.com' #need to setup an email account for this
PASSWORD = '####'
output_file=""
UPLOAD_FOLDER = '/Users/Team_HoLab/Desktop/RSC/Documents/Coding/Zhunt/uploads'
ALLOWED_EXTENSIONS = {'txt', 'fasta'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
#f=open("./uploads/"+filename)
email=request.form.get("user_email")
command_line = "zhunt 12 6 12 ./uploads/" + filename
args = shlex.split(command_line)
p = subprocess.Popen(args)
tmp=subprocess.call("./a.out")
# message_template = "Your Zhunt run is complete"
#
# # set up the SMTP server
# s = smtplib.SMTP(host='smtp.gmail.com', port=587)
# s.starttls()
# s.login(MY_ADDRESS, PASSWORD)
#
# msg = MIMEMultipart() # create a message
#
# # setup the parameters of the message
# msg['From']=MY_ADDRESS
# msg['To']=request.form.get("user_email")
# msg['Subject']="Zhunt Run Complete"
#
# # add in the message body
# msg.attach(MIMEText(message_template, 'plain'))
# message = 'Subject: {}\n\n{}'.format("Zhunt Run Complete", message_template)
# # send the message via the server set up earlier.
# s.sendmail(MY_ADDRESS,request.form.get("user_email"),message)
# del msg
#
# # Terminate the SMTP session and close the connection
# s.quit()
output_file="/uploads/"+filename+".Z-SCORE"
return render_template("downloads.html",output_file=output_file)
return '''
<!doctype html>
<title>Zhunt</title>
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css" integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<style>
body{
padding:15px;
}
</style>
<h1>Zhunt</h1>
<body>
<p>
<br>Welcome to Zhunt.<br>
<br>
Please upload the .fasta file you would like to analyze and your email address to get an email after run completion.<br>
</p>
<form method=post enctype=multipart/form-data>
FASTA File: <input type=file name=file></br>
Email: <input type="text" name="user_email" /></br></br>
<input type=submit value=Submit>
</form>
</body>
'''
@app.route('/return-file/', methods=["POST"])
def downloadFile ():
#For windows you need to use drive name [ex: F:/Example.pdf]
filename = request.form['output_file']
print(filename)
path = "/Users/Team_HoLab/Desktop/RSC/Documents/Coding/Zhunt" + filename
return send_file(path, as_attachment=True)
|
# -*- coding: utf-8 -*-
"""
Testing class for record progress endpoints of the Castor EDC API Wrapper.
Link: https://data.castoredc.com/api#/record-progress
@author: R.C.A. van Linschoten
https://orcid.org/0000-0003-3052-596X
"""
import pytest
from castoredc_api.tests.test_api_endpoints.data_models import (
record_progress_model,
steps_model,
)
class TestRecordProgress:
record_progress_keys = record_progress_model.keys()
steps_keys = steps_model.keys()
@pytest.fixture(scope="class")
def progress_report(self, client):
"""Get all progress reports from the study."""
progress_report = client.record_progress()
return progress_report
def test_record_progress(self, progress_report, client):
"""Tests if all progress reports are properly retrieved."""
        # Tests if progress reports are retrieved for all non-archived records
assert len(progress_report) == len(client.all_records(archived=0))
for record in progress_report:
api_record_keys = record.keys()
# Tests if the models are of the same length
assert len(api_record_keys) == len(self.record_progress_keys)
            # Tests if the same keys and types of values are retrieved for the record.
for key in api_record_keys:
assert key in self.record_progress_keys
assert type(record[key]) in record_progress_model[key]
            # Tests if the same keys and types of values are retrieved for the steps within a record.
for step in record["steps"]:
api_step_keys = step.keys()
assert len(api_step_keys) == len(self.steps_keys)
for step_key in api_step_keys:
assert step_key in self.steps_keys
assert type(step[step_key]) in steps_model[step_key]
|
# An implementation of Memory game
# (c) gengwg [at] gmail com
try:
import simplegui
except ImportError:
import SimpleGUICS2Pygame.simpleguics2pygame as simplegui
import random
# cards
deck = []
# exposed lists. if True, expose card. Else Green rectangle.
exposed = []
# define event handlers
def new_game():
"""reinitiate game global variables"""
global state, deck, exposed, click1, click2, turn
state = 0
# two global variables to store the index of each of the two cards
# that were clicked in the previous turn.
click1 = 0
click2 = 0
turn = 0
    deck = 2 * list(range(8))  # list() so this also works under Python 3
random.shuffle(deck)
exposed = [False for _ in deck]
label.set_text("Number of Tries: " + str(turn))
def mouseclick(pos):
"""handler of mouse click"""
global state, click_index, click1, click2, turn
    click_index = pos[0] // 50  # integer division so the index is an int under Python 3
if not exposed[click_index]:
if state == 0:
state = 1
click1 = click_index
exposed[click_index] = True
elif state == 1:
state = 2
exposed[click_index] = True
            click2 = click_index
else:
state = 1
turn += 1
exposed[click_index] = True
if deck[click1] != deck[click2]:
exposed[click1] = False
exposed[click2] = False
            click1 = click_index
label.set_text("Number of Tries: " + str(turn))
def draw(canvas):
"""draw handler"""
for i, num in enumerate(deck):
if exposed[i]:
canvas.draw_text(str(num), [50 * i + 25, 62], 48, "White")
else:
canvas.draw_polygon([(50 * i, 0), (50 * (i + 1), 0),
(50 * (i + 1), 100), (50 * i, 100)],
2, 'White', 'Green')
# create frame and add a button and labels
frame = simplegui.create_frame("Memory", 800, 100)
frame.add_button("Restart", new_game, 200)
# register event handlers
frame.set_draw_handler(draw)
frame.set_mouseclick_handler(mouseclick)
label = frame.add_label("", 200)
# get things rolling
new_game()
frame.start()
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2007 Alec Thomas <alec@swapoff.org>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
"""CLY and readline, together at last.
This module uses readline's line editing and tab completion along with CLY's
grammar parser to provide an interactive command line environment.
It includes support for application specific history files, dynamic prompt,
customisable completion key, interactive help and more.
Press ``?`` at any location to get contextual help.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import input
from builtins import object
import os
import sys
import readline
import cly.rlext
import cly.console as console
from cly.exceptions import Error, ParseError
from cly.builder import Grammar
from cly.parser import Parser
__all__ = ['Interact', 'interact']
__docformat__ = 'restructuredtext en'
class Interact(object):
"""CLY interaction through readline. Due to readline limitations, only one
Interact object can be active within an application.
Constructor arguments:
``parser``: ``Parser`` or ``Grammar`` object
The parser/grammar to use for interaction.
``application='cly'``: string
The application name. Used to construct the history file name and
prompt, if not provided.
``prompt=None``: string
The prompt.
``user_context=None``: `anything`
A user-specified object to pass to the parser. The parser builds each
parse ``Context`` with this object, which in turn will deliver this
object on to terminal nodes that have set ``with_context=True``.
``with_context=False``: `boolean`
Force ``user_context`` to be passed to all action nodes, unless they
explicitly set the member variable ``with_context=False``.
``history_file=None``: `string`
Defaults to ``~/.<application>_history``.
``history_length=500``: `integer`
Lines of history to keep.
``completion_key='tab'``: `string`
Key to use for completion, per the readline documentation.
``completion_delimiters=' \t'``: `string`
Characters that terminate completion.
``help_key='?'``: `key`
        Key to use for interactive help.
"""
_cli_inject_text = ''
_completion_candidates = []
_parser = None
prompt = None
user_context = None
history_file = None
application = None
def __init__(self, grammar_or_parser, application='cly', prompt=None,
user_context=None, with_context=None, history_file=None,
history_length=500, completion_key='tab',
completion_delimiters=' \t',
help_key='?', inhibit_exceptions=False,
with_backtrace=False):
if prompt is None:
prompt = application + '> '
if history_file is None:
history_file = os.path.expanduser('~/.%s_history' % application)
if isinstance(grammar_or_parser, Grammar):
parser = Parser(grammar_or_parser)
else:
parser = grammar_or_parser
if with_context is not None:
parser.with_context = with_context
if user_context is not None:
parser.user_context = user_context
Interact._parser = parser
Interact.prompt = prompt
Interact.application = application
Interact.user_context = user_context
Interact.history_file = history_file
Interact.history_length = history_length
Interact.completion_delimiters = completion_delimiters
Interact.completion_key = completion_key
try:
readline.set_history_length(history_length)
readline.read_history_file(history_file)
except:
pass
readline.parse_and_bind("%s: complete" % completion_key)
readline.set_completer_delims(self.completion_delimiters)
readline.set_completer(Interact._cli_completion)
readline.set_startup_hook(Interact._cli_injector)
# Use custom readline extensions
cly.rlext.bind_key(ord(help_key), Interact._cli_help)
def once(self, default_text='', callback=None):
"""Input one command from the user and return the result of the
executed command. `callback` is called with the Interact object before
each line is displayed."""
Interact._cli_inject_text = default_text
while True:
command = ''
            try:
                if callback is not None:
                    callback(self)
                command = input(self.prompt)
except KeyboardInterrupt:
print()
continue
except EOFError:
print()
return None
try:
context = Interact._parser.parse(command, user_context=self.user_context)
context.execute()
except ParseError as e:
self.print_error(context, e)
return context
def loop(self, inhibit_exceptions=False, with_backtrace=False):
"""Repeatedly read and execute commands from the user.
Arguments:
        ``inhibit_exceptions=False``: `boolean`
            Normally, ``loop`` will pass exceptions back to the caller for
handling. Setting this to ``True`` will cause an error message to
be printed, but interaction will continue.
``with_backtrace=False``: `boolean`
Whether to print a full backtrace when ``inhibit_exceptions=True``.
"""
try:
while True:
try:
if not self.once():
break
except Exception as e:
if inhibit_exceptions:
if with_backtrace:
import traceback
console.cerror(traceback.format_exc())
else:
console.cerror('error: %s' % e)
else:
raise
finally:
self.write_history()
def print_error(self, context, e):
"""Called by `once()` to print a ParseError."""
candidates = [help[1] for help in context.help()]
if len(candidates) > 1:
message = '%s (candidates are %s)'
else:
message = '%s (expected %s)'
message = message % (str(e), ', '.join(candidates))
self.error_at_cursor(context, message)
def error_at_cursor(self, context, text):
"""Attempt to intelligently print an error at the current cursor
offset."""
text = str(text)
term_width = console.termwidth()
indent = ' ' * (context.cursor % term_width
+ len(Interact.prompt))
if len(indent + text) > term_width:
console.cerror(indent + '^')
console.cerror(text)
else:
console.cerror(indent + '^ ' + text)
def write_history(self):
""" Write command line history out. """
try:
readline.write_history_file(self.history_file)
except:
pass
@staticmethod
def _dump_traceback(exception):
import traceback
from io import StringIO
out = StringIO()
traceback.print_exc(file=out)
print(str(exception), file=sys.stderr)
print(out.getvalue(), file=sys.stderr)
@staticmethod
def _cli_injector():
readline.insert_text(Interact._cli_inject_text)
Interact._cli_inject_text = ''
@staticmethod
def _cli_completion(text, state):
line = readline.get_line_buffer()[0:readline.get_begidx()]
ctx = None
try:
result = Interact._parser.parse(line)
if not state:
Interact._completion_candidates = list(result.candidates(text))
if Interact._completion_candidates:
return Interact._completion_candidates.pop()
return None
except cly.Error:
return None
except Exception as e:
Interact._dump_traceback(e)
cly.rlext.force_redisplay()
raise
@staticmethod
def _cli_help(key, count):
try:
command = readline.get_line_buffer()[:cly.rlext.cursor()]
context = Interact._parser.parse(command)
if context.remaining.strip():
print()
candidates = [help[1] for help in context.help()]
text = '%s^ invalid token (candidates are %s)' % \
(' ' * (context.cursor + len(Interact.prompt)),
', '.join(candidates))
console.cerror(text)
cly.rlext.force_redisplay()
return
help = context.help()
print()
help.format(sys.stdout)
cly.rlext.force_redisplay()
return 0
except Exception as e:
Interact._dump_traceback(e)
cly.rlext.force_redisplay()
return 0
def interact(grammar_or_parser, inhibit_exceptions=False, with_backtrace=False,
*args, **kwargs):
"""Start an interactive session with the given grammar or parser object."""
interact = Interact(grammar_or_parser, *args, **kwargs)
interact.loop(inhibit_exceptions=inhibit_exceptions,
with_backtrace=with_backtrace)
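# A minimal, illustrative sketch of wiring this module up (not part of the
# original file). It assumes `build_grammar()` returns a cly.builder.Grammar
# constructed elsewhere in the application; the grammar-building API itself is
# not shown in this module.
#
#     from myapp.grammar import build_grammar   # hypothetical helper
#     interact(build_grammar(), application='myapp', prompt='myapp> ',
#              inhibit_exceptions=True, with_backtrace=False)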
|
# -*- coding: utf-8 -*-
"""Language ISO codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
"""
from os import path
import re
__all__ = [
'CODES_FILE',
'iso_codes'
]
# default location of the ISO codes file
CODES_FILE = path.join(path.dirname(__file__), 'data', 'lang_iso_codes.csv')
with open(CODES_FILE, 'r') as f:
data = f.readlines()
iso_codes = dict()
for row in data:
    row = re.sub(r'\s+', '', row)  # strip all whitespace from the line
cols = row.split(',')
iso_codes[cols[1]] = cols[0]
if __name__ == '__main__':
print(iso_codes)
|
import torch
from torch.fx.graph import Node
from .pattern_utils import (
register_fusion_pattern,
)
from .utils import _parent_name
from .quantization_types import QuantizerCls
from ..fuser_method_mappings import get_fuser_method
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict
# ---------------------
# Fusion Pattern Registrations
# ---------------------
# Base Pattern Handler
class FuseHandler(ABC):
""" Base handler class for the fusion patterns
"""
def __init__(self, quantizer: QuantizerCls, node: Node):
pass
@abstractmethod
def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
fuse_custom_config_dict: Dict[str, Any]) -> Node:
pass
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Conv1d))
@register_fusion_pattern((torch.nn.BatchNorm2d, torch.nn.Conv2d))
@register_fusion_pattern((torch.nn.BatchNorm3d, torch.nn.Conv3d))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.ReLU, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm1d, torch.nn.Conv1d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm2d, torch.nn.Conv2d)))
@register_fusion_pattern((torch.nn.functional.relu, (torch.nn.BatchNorm3d, torch.nn.Conv3d)))
@register_fusion_pattern((torch.nn.BatchNorm1d, torch.nn.Linear))
class ConvOrLinearBNReLUFusion(FuseHandler):
def __init__(self, quantizer: QuantizerCls, node: Node):
super().__init__(quantizer, node)
self.relu_node = None
self.bn_node = None
if (node.op == 'call_function' and node.target is torch.nn.functional.relu) or \
(node.op == 'call_module' and type(quantizer.modules[node.target]) == torch.nn.ReLU):
self.relu_node = node
assert isinstance(node.args[0], Node)
node = node.args[0]
assert node.op == 'call_module'
if type(quantizer.modules[node.target]) in [torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d]:
self.bn_node = node
self.bn = quantizer.modules[self.bn_node.target]
assert isinstance(node.args[0], Node)
node = node.args[0]
assert node.op == 'call_module'
self.conv_or_linear_node = node
self.conv_or_linear = quantizer.modules[self.conv_or_linear_node.target]
def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
fuse_custom_config_dict: Dict[str, Any]) -> Node:
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
op_list = []
if self.relu_node is not None:
# since relu can be used multiple times, we'll need to create a relu module for each match
if self.relu_node.op == 'call_module':
relu = torch.nn.ReLU(quantizer.modules[self.relu_node.target].inplace)
else:
# TODO: get inplace argument from functional
relu = torch.nn.ReLU()
op_list.append(relu)
relu.training = self.conv_or_linear.training
if self.bn_node is not None:
op_list.append(self.bn)
op_list.append(self.conv_or_linear)
else:
assert self.bn_node is not None
op_list.append(self.bn)
op_list.append(self.conv_or_linear)
# the modules are added in order of relu - bn - conv_or_linear
# so we need to correct it
op_list.reverse()
op_type_list = tuple(type(m) for m in op_list)
conv_or_linear_parent_name, conv_or_linear_name = _parent_name(self.conv_or_linear_node.target)
fuser_method = get_fuser_method(op_type_list, additional_fuser_method_mapping)
if fuser_method is None:
raise NotImplementedError("Cannot fuse modules: {}".format(op_type_list))
fused = fuser_method(*op_list)
setattr(quantizer.modules[conv_or_linear_parent_name], conv_or_linear_name, fused)
# TODO: do we need to make sure bn is only used once?
if self.bn_node is not None:
parent_name, name = _parent_name(self.bn_node.target)
setattr(quantizer.modules[parent_name], name, torch.nn.Identity())
# relu may be used multiple times, so we don't set relu to identity
return quantizer.fused_graph.node_copy(self.conv_or_linear_node, load_arg)
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.Linear))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.Linear))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm2d))
@register_fusion_pattern((torch.nn.functional.relu, torch.nn.BatchNorm3d))
@register_fusion_pattern((torch.nn.ReLU, torch.nn.BatchNorm3d))
class ModuleReLUFusion(FuseHandler):
def __init__(self, quantizer: QuantizerCls, node: Node):
super().__init__(quantizer, node)
self.relu_node = node
assert isinstance(node.args[0], Node)
node = node.args[0]
assert node.op == 'call_module'
self.module_node = node
self.module = quantizer.modules[self.module_node.target]
def fuse(self, quantizer: QuantizerCls, load_arg: Callable,
fuse_custom_config_dict: Dict[str, Any]) -> Node:
additional_fuser_method_mapping = fuse_custom_config_dict.get("additional_fuser_method_mapping", {})
op_list = []
# since relu can be used multiple times, we'll need to create a relu module for each match
if self.relu_node.op == 'call_module':
relu = torch.nn.ReLU(quantizer.modules[self.relu_node.target].inplace)
else:
# TODO: get inplace argument from functional
relu = torch.nn.ReLU()
relu.training = self.module.training
op_list.append(relu)
op_list.append(self.module)
op_list.reverse()
op_type_list = tuple(type(m) for m in op_list)
module_parent_name, module_name = _parent_name(self.module_node.target)
fuser_method = get_fuser_method(op_type_list, additional_fuser_method_mapping)
setattr(quantizer.modules[module_parent_name], module_name, fuser_method(*op_list))
return quantizer.fused_graph.node_copy(self.module_node, load_arg)
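# For reference (an aside, not the FX handler implemented above): the same
# conv + bn + relu fusion can be obtained on an eager-mode model with
# torch.quantization.fuse_modules. A minimal sketch:
class _SmallConvBnReLU(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, 3)
        self.bn = torch.nn.BatchNorm2d(8)
        self.relu = torch.nn.ReLU()
    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))
def _eager_fusion_demo():
    import torch.quantization  # ensure the eager-mode fusion API is loaded
    m = _SmallConvBnReLU().eval()  # conv+bn folding requires eval mode
    fused = torch.quantization.fuse_modules(m, [["conv", "bn", "relu"]])
    # "conv" now holds the fused module; "bn" and "relu" are replaced by nn.Identity
    return fused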
|
# -*- coding: utf-8 -*-
"""
Created on Nov 07, 2014
@author: Tyranic-Moron
"""
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
from string import maketrans
class Flip(CommandInterface):
triggers = ['flip']
help = 'flip <text> - flips the text given to it'
def onLoad(self):
table = {
u'a': u'ɐ', u'A': u'∀',
u'b': u'q', u'B': u'ᗺ',
u'c': u'ɔ', u'C': u'Ↄ',
u'd': u'p', u'D': u'◖',
u'e': u'ǝ', u'E': u'Ǝ',
u'f': u'ɟ', u'F': u'Ⅎ',
u'g': u'ƃ', u'G': u'⅁',
u'h': u'ɥ', u'H': u'H',
u'i': u'ı', u'I': u'I',
u'j': u'ɾ', u'J': u'ſ',
u'k': u'ʞ', u'K': u'⋊',
u'l': u'ʃ', u'L': u'⅂',
u'm': u'ɯ', u'M': u'W',
u'n': u'u', u'N': u'ᴎ',
u'o': u'o', u'O': u'O',
u'p': u'd', u'P': u'Ԁ',
u'q': u'b', u'Q': u'Ό',
u'r': u'ɹ', u'R': u'ᴚ',
u's': u's', u'S': u'S',
u't': u'ʇ', u'T': u'⊥',
u'u': u'n', u'U': u'∩',
u'v': u'ʌ', u'V': u'ᴧ',
u'w': u'ʍ', u'W': u'M',
u'x': u'x', u'X': u'X',
u'y': u'ʎ', u'Y': u'⅄',
u'z': u'z', u'Z': u'Z',
u'0': u'0',
u'1': u'⇂',
u'2': u'ᘔ',
u'3': u'Ɛ',
u'4': u'ᔭ',
u'5': u'5',
u'6': u'9',
u'7': u'Ɫ',
u'8': u'8',
u'9': u'6',
u'.': u'˙',
u',': u"'",
u"'": u',',
u'"': u'„',
u'!': u'¡',
u'?': u'¿',
u'<': u'>',
u'(': u')',
u'[': u']',
u'{': u'}',
u'_': u'‾',
u'^': u'∨',
u';': u'؛',
u'&': u'⅋',
u'⁅': u'⁆',
u'∴': u'∵',
u'‿': u'⁀',
}
# Create and append the inverse dictionary
table.update({v: k for k,v in table.iteritems()})
self.translation = {ord(k): v for k,v in table.iteritems()}
def execute(self, message):
"""
@type message: IRCMessage
"""
if len(message.ParameterList) > 0:
translated = message.Parameters.translate(self.translation)
reversed = translated[::-1]
return IRCResponse(ResponseType.Say, reversed, message.ReplyTo)
else:
return IRCResponse(ResponseType.Say, 'Flip what?', message.ReplyTo)
|
import sys
from django.core import serializers
from django.core.management.base import NoArgsCommand
from django.db.models import get_apps, get_models
class Command(NoArgsCommand):
help = 'Dump a common serialized version of the database to stdout.'
def handle_noargs(self, **options):
models = []
for app in get_apps():
models.extend(get_models(app))
OBJECT_LIMIT = 150
serializer = serializers.get_serializer("json")()
totalobjs = 0
for model in models:
totalobjs += model.objects.count()
prev_pct = -1
i = 0
sys.stderr.write("Dump the database. This may take a while...\n")
print "# dbdump v1 - %s objects" % totalobjs
for model in models:
count = model.objects.count()
j = 0
while j < count:
for obj in model.objects.all()[j:j+OBJECT_LIMIT].iterator():
value = serializer.serialize([obj])
if value != "[]":
print value[1:-1] # Skip the "[" and "]"
i += 1
pct = i * 100 / totalobjs
if pct != prev_pct:
sys.stderr.write(" [%s%%]\r" % pct)
sys.stderr.flush()
prev_pct = pct
j += OBJECT_LIMIT
sys.stderr.write("\nDone.\n")
|
# 80 ms ; faster than 85.96 %
from typing import List  # needed so the List annotations resolve outside the LeetCode environment
class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
i = 0
j = len(nums)
left = -1
right = -1
while i<j:
mid = (i+j)//2
if nums[mid] == target:
if mid == 0 or (mid>0 and nums[mid-1] != target):
left = mid
break
else:
j = mid
elif nums[mid] < target:
i = mid+1
else:
j = mid
if left == -1:
return [-1,-1]
i = 0
j = len(nums)
while i<j:
mid = (i+j)//2
if nums[mid] == target:
if mid == len(nums)-1 or (mid < len(nums)-1 and nums[mid+1] != target):
right = mid
break
else:
i = mid+1
elif nums[mid] < target:
i = mid+1
else:
j = mid
return [left,right]
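# A quick sanity check for the solution above (not part of the original
# submission): target 8 occupies indices 3..4 of the sorted input.
if __name__ == "__main__":
    assert Solution().searchRange([5, 7, 7, 8, 8, 10], 8) == [3, 4]
    assert Solution().searchRange([5, 7, 7, 8, 8, 10], 6) == [-1, -1]
    assert Solution().searchRange([], 0) == [-1, -1]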
|
"""
Use secrets in a task
----------------------
This example explains how a secret can be accessed in a Flyte Task. Flyte provides different types of Secrets, as part of
SecurityContext. But, for users writing python tasks, you can only access ``secure secrets`` either as environment variable
or injected into a file.
"""
import os
import flytekit
# %%
# Flytekit exposes a type/class called Secrets. It can be imported as follows.
from flytekit import Secret, task, workflow
# %%
# Secrets consist of a name and an enum that indicates how the secret will be accessed. If the mount requirement is
# not specified, the secret will be injected as an environment variable where possible. Ideally, you need not worry
# about the mount requirement; just specify a ``Secret.name`` that matches a ``secret`` declared in the Flyte backend.
#
# Let us declare a secret named user_secret in a secret group ``user-info``. A secret group can have multiple secrets
# associated with it. Optionally it may also have a group_version; the version helps in rotating secrets. If not
# specified, the task will always retrieve the latest version. Though not recommended, some users may want the task
# version to be bound to a specific secret version.
SECRET_NAME = "user_secret"
SECRET_GROUP = "user-info"
# %%
# Now declare the secret in the requests. The secret can be accessed using the :py:class:`flytekit.ExecutionParameters`,
# through the global flytekit context as shown below
#
@task(secret_requests=[Secret(group=SECRET_GROUP, key=SECRET_NAME)])
def secret_task() -> str:
secret_val = flytekit.current_context().secrets.get(SECRET_GROUP, SECRET_NAME)
# Please do not print the secret value, we are doing so just as a demonstration
print(secret_val)
return secret_val
# %%
# .. note::
#
# - In case of failure to access the secret (it is not found at execution time) an error is raised.
# - Secrets group and key are required parameters during declaration and usage. Failure to specify will cause a
# :py:class:`ValueError`
#
# In some cases you may have multiple secrets and sometimes, they maybe grouped as one secret in the SecretStore.
# For example, In Kubernetes secrets, it is possible to nest multiple keys under the same secret.
# Thus in this case the name would be the actual name of the nested secret, and the group would be the identifier for
# the kubernetes secret.
#
# As an example, let us define 2 secrets username and password, defined in the group user_info
USERNAME_SECRET = "username"
PASSWORD_SECRET = "password"
# %%
# The Secret structure allows passing two fields, matching the key and the group, as previously described:
@task(
secret_requests=[Secret(key=USERNAME_SECRET, group=SECRET_GROUP), Secret(key=PASSWORD_SECRET, group=SECRET_GROUP)])
def user_info_task() -> (str, str):
secret_username = flytekit.current_context().secrets.get(SECRET_GROUP, USERNAME_SECRET)
secret_pwd = flytekit.current_context().secrets.get(SECRET_GROUP, PASSWORD_SECRET)
# Please do not print the secret value, this is just a demonstration.
print(f"{secret_username}={secret_pwd}")
return secret_username, secret_pwd
# %%
# It is also possible to make Flyte mount the secret as a file or an environment variable.
# The file type is useful for large secrets that do not fit in environment variables - typically asymmetric
# keys (certs etc.). Another reason may be that a dependent library requires the secret to be available as a file.
# In these scenarios you can specify the mount_requirement. In the following example we force the mounting to be
# an environment variable.
@task(secret_requests=[Secret(group=SECRET_GROUP, key=SECRET_NAME, mount_requirement=Secret.MountType.ENV_VAR)])
def secret_file_task() -> (str, str):
# SM here is a handle to the secrets manager
sm = flytekit.current_context().secrets
f = sm.get_secrets_file(SECRET_GROUP, SECRET_NAME)
secret_val = sm.get(SECRET_GROUP, SECRET_NAME)
# returning the filename and the secret_val
return f, secret_val
# %%
# You can use these tasks in your workflow as usual
@workflow
def my_secret_workflow() -> (str, str, str, str, str):
x = secret_task()
y, z = user_info_task()
f, s = secret_file_task()
return x, y, z, f, s
# %%
# The simplest way to test secret accessibility is to export the secret as an environment variable. There are some
# helper methods available to do so.
from flytekit.testing import SecretsManager
if __name__ == "__main__":
sec = SecretsManager()
os.environ[sec.get_secrets_env_var(SECRET_GROUP, SECRET_NAME)] = "value"
os.environ[sec.get_secrets_env_var(SECRET_GROUP, USERNAME_SECRET)] = "username_value"
os.environ[sec.get_secrets_env_var(SECRET_GROUP, PASSWORD_SECRET)] = "password_value"
x, y, z, f, s = my_secret_workflow()
assert x == "value"
assert y == "username_value"
assert z == "password_value"
assert f == sec.get_secrets_file(SECRET_GROUP, SECRET_NAME)
assert s == "value"
|
import os
from typing import Generator
import asyncpg
import pytest
from ddtrace import Pin
from ddtrace import tracer
from ddtrace.contrib.asyncpg import patch
from ddtrace.contrib.asyncpg import unpatch
from ddtrace.contrib.trace_utils import iswrapped
from tests.contrib.config import POSTGRES_CONFIG
@pytest.fixture(autouse=True)
def patch_asyncpg():
# type: () -> Generator[None, None, None]
patch()
yield
unpatch()
@pytest.fixture
async def patched_conn():
# type: () -> Generator[asyncpg.Connection, None, None]
conn = await asyncpg.connect(
host=POSTGRES_CONFIG["host"],
port=POSTGRES_CONFIG["port"],
user=POSTGRES_CONFIG["user"],
database=POSTGRES_CONFIG["dbname"],
password=POSTGRES_CONFIG["password"],
)
yield conn
await conn.close()
@pytest.mark.asyncio
async def test_connect(snapshot_context):
with snapshot_context():
conn = await asyncpg.connect(
host=POSTGRES_CONFIG["host"],
port=POSTGRES_CONFIG["port"],
user=POSTGRES_CONFIG["user"],
database=POSTGRES_CONFIG["dbname"],
password=POSTGRES_CONFIG["password"],
)
await conn.close()
# Using dsn should result in the same trace
with snapshot_context():
conn = await asyncpg.connect(
dsn="postgresql://%s:%s@%s:%s/%s"
% (
POSTGRES_CONFIG["user"],
POSTGRES_CONFIG["password"],
POSTGRES_CONFIG["host"],
POSTGRES_CONFIG["port"],
POSTGRES_CONFIG["dbname"],
)
)
await conn.close()
@pytest.mark.asyncio
@pytest.mark.snapshot(
ignores=["meta.error.stack", "meta.error.msg", "meta.error.type"]
) # stack is noisy between releases
async def test_bad_connect():
with pytest.raises(OSError):
await asyncpg.connect(
host="localhost",
port=POSTGRES_CONFIG["port"] + 1,
)
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_connection_methods(patched_conn):
status = await patched_conn.execute(
"""
CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);
"""
)
assert status == "CREATE TABLE"
status = await patched_conn.executemany(
"""
INSERT INTO test (name) VALUES ($1), ($2), ($3);
""",
[["val1", "val2", "val3"]],
)
assert status is None
records = await patched_conn.fetch("SELECT * FROM test;")
assert len(records) == 3
val = await patched_conn.fetchval("SELECT * FROM test LIMIT 1;", column=1)
assert val == "val1"
row = await patched_conn.fetchrow("SELECT * FROM test LIMIT 1;")
assert len(row) == 2
assert row["name"] == "val1"
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_select(patched_conn):
ret = await patched_conn.fetchval("SELECT 1")
assert ret == 1
@pytest.mark.asyncio
@pytest.mark.snapshot(ignores=["meta.error.stack"]) # stack is noisy between releases
async def test_bad_query(patched_conn):
with pytest.raises(asyncpg.exceptions.PostgresSyntaxError):
await patched_conn.execute("malformed; query;dfaskjfd")
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_cursor(patched_conn):
await patched_conn.execute(
"""
CREATE TEMP TABLE test (id serial PRIMARY KEY, name varchar(12) NOT NULL UNIQUE);
"""
)
await patched_conn.execute(
"""
INSERT INTO test (name) VALUES ($1), ($2);
""",
"value1",
"value2",
)
records = []
async with patched_conn.transaction():
async for r in patched_conn.cursor("SELECT * FROM test;"):
records.append(r["name"])
assert records == ["value1", "value2"]
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_cursor_manual(patched_conn):
async with patched_conn.transaction():
cur = await patched_conn.cursor("SELECT generate_series(0, 100)")
await cur.forward(10)
await cur.fetchrow()
await cur.fetch(5)
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_service_override_pin(patched_conn):
Pin.override(patched_conn, service="custom-svc")
await patched_conn.execute("SELECT 1")
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_parenting(patched_conn):
with tracer.trace("parent"):
await patched_conn.execute("SELECT 1")
with tracer.trace("parent2"):
c = patched_conn.execute("SELECT 1")
await c
@pytest.mark.snapshot(async_mode=False)
def test_configure_service_name_env(ddtrace_run_python_code_in_subprocess):
code = """
import asyncio
import sys
import asyncpg
from tests.contrib.config import POSTGRES_CONFIG
async def test():
conn = await asyncpg.connect(
host=POSTGRES_CONFIG["host"],
port=POSTGRES_CONFIG["port"],
user=POSTGRES_CONFIG["user"],
database=POSTGRES_CONFIG["dbname"],
password=POSTGRES_CONFIG["password"],
)
await conn.execute("SELECT 1")
await conn.close()
if sys.version_info >= (3, 7, 0):
asyncio.run(test())
else:
asyncio.get_event_loop().run_until_complete(test())
"""
env = os.environ.copy()
env["DD_ASYNCPG_SERVICE"] = "global-service-name"
out, err, status, pid = ddtrace_run_python_code_in_subprocess(code, env=env)
assert status == 0, err
assert err == b""
def test_patch_unpatch_asyncpg():
assert iswrapped(asyncpg.connect)
assert iswrapped(asyncpg.protocol.Protocol.execute)
assert iswrapped(asyncpg.protocol.Protocol.bind_execute)
assert iswrapped(asyncpg.protocol.Protocol.query)
assert iswrapped(asyncpg.protocol.Protocol.bind_execute_many)
unpatch()
assert not iswrapped(asyncpg.connect)
assert not iswrapped(asyncpg.protocol.Protocol.execute)
assert not iswrapped(asyncpg.protocol.Protocol.bind_execute)
assert not iswrapped(asyncpg.protocol.Protocol.query)
assert not iswrapped(asyncpg.protocol.Protocol.bind_execute_many)
|
import pandas as pd
import numpy as np
import os
from MagGeoFunctions import ST_IDW_Process
from MagGeoFunctions import CHAOS_ground_values
TotalSwarmRes_A = pd.read_csv(r'./temp_data/TotalSwarmRes_A.csv',low_memory=False, index_col='epoch')
TotalSwarmRes_A['timestamp'] = pd.to_datetime(TotalSwarmRes_A['timestamp'])
TotalSwarmRes_B = pd.read_csv(r'./temp_data/TotalSwarmRes_B.csv',low_memory=False, index_col='epoch')
TotalSwarmRes_B['timestamp'] = pd.to_datetime(TotalSwarmRes_B['timestamp'])
TotalSwarmRes_C = pd.read_csv(r'./temp_data/TotalSwarmRes_C.csv',low_memory=False, index_col='epoch')
TotalSwarmRes_C['timestamp'] = pd.to_datetime(TotalSwarmRes_C['timestamp'])
def row_handler (GPSData):
dn = [] ## List used to add all the GPS points with the annotated MAG Data. See the last bullet point of this process
for index, row in GPSData.iterrows():
GPSLat = row['gpsLat']
GPSLong = row['gpsLong']
GPSDateTime = row['gpsDateTime']
GPSTime = row['epoch']
GPSAltitude = row['gpsAltitude']
print("Process for:", index,"Date&Time:",GPSDateTime, "Epoch", GPSTime)
try:
result=ST_IDW_Process(GPSLat,GPSLong,GPSAltitude, GPSDateTime,GPSTime, TotalSwarmRes_A, TotalSwarmRes_B, TotalSwarmRes_C)
dn.append(result)
except:
print("Ups!.That was a bad Swarm Point, let's keep working with the next point")
result_badPoint= {'Latitude': GPSLat, 'Longitude': GPSLong, 'Altitude':GPSAltitude, 'DateTime': GPSDateTime, 'N_res': np.nan, 'E_res': np.nan, 'C_res':np.nan, 'TotalPoints':0, 'Minimum_Distance':np.nan, 'Average_Distance':np.nan}
dn.append(result_badPoint)
continue
GPS_ResInt = pd.DataFrame(dn)
GPS_ResInt.to_csv (r'./temp_data/GPS_ResInt.csv', header=True)
X_obs, Y_obs, Z_obs =CHAOS_ground_values(GPS_ResInt)
GPS_ResInt['N'] =pd.Series(X_obs)
GPS_ResInt['E'] =pd.Series(Y_obs)
GPS_ResInt['C'] =pd.Series(Z_obs)
GPS_ResInt.drop(columns=['N_res', 'E_res','C_res'], inplace=True)
return GPS_ResInt
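# A minimal usage sketch (illustrative, not part of the original script):
# row_handler expects a DataFrame with the columns referenced above
# (gpsLat, gpsLong, gpsDateTime, epoch, gpsAltitude). The input path below is
# hypothetical.
if __name__ == "__main__":
    GPSData = pd.read_csv(r'./temp_data/GPSData.csv', low_memory=False)
    GPSData['gpsDateTime'] = pd.to_datetime(GPSData['gpsDateTime'])
    annotated_gps = row_handler(GPSData)
    print(annotated_gps.head())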
|
## ====================================================================================================
## The packages.
import feature
import library
## The root of project.
## Initialize the cache object for save the miscellany.
root = library.os.getcwd()
table = library.cache()
table.folder = library.os.path.join(library.os.getcwd(), 'resource/kaggle(sample)/csv')
## ====================================================================================================
## Load <article> data.
table.article = library.pandas.read_csv(library.os.path.join(table.folder, "articles.csv"), dtype=str)
## Handle missing value.
table.article["detail_desc"] = table.article["detail_desc"].fillna("<NA>")
## Label encoding for category variables.
key = 'article_id_code'
head = 10
table.article[key] = feature.label.encode(table.article['article_id']) + head
loop = [
'product_code', 'prod_name', "product_type_no", 'product_type_name',
"product_group_name", "graphical_appearance_no", "graphical_appearance_name",
"colour_group_code", 'colour_group_name', 'perceived_colour_value_id',
'perceived_colour_value_name', 'perceived_colour_master_id',
'perceived_colour_master_name', 'department_no', 'department_name',
'index_code', 'index_name', 'index_group_no', 'index_group_name',
'section_no', 'section_name', 'garment_group_no',
'garment_group_name', 'detail_desc'
]
for key in loop:
table.article[key] = feature.label.encode(table.article[key]) + head
value = table.article[key].nunique()
print("{}:{}".format(key, value))
pass
## ====================================================================================================
## Load <customer> table.
table.customer = library.pandas.read_csv(library.os.path.join(table.folder, "customers.csv"), dtype=str)
## Handle missing value.
table.customer['FN'] = table.customer['FN'].fillna(0.0)
table.customer['Active'] = table.customer['Active'].fillna(0.0)
table.customer['club_member_status'] = table.customer['club_member_status'].fillna("<NA>")
table.customer['fashion_news_frequency'] = table.customer['fashion_news_frequency'].fillna("<NA>")
table.customer['age'] = table.customer['age'].fillna(-1)
## Label encoding for category variables.
loop = ['club_member_status', 'fashion_news_frequency', 'postal_code']
head = 10
for key in loop:
table.customer[key] = feature.label.encode(table.customer[key]) + head
value = table.customer[key].nunique()
print("{}:{}".format(key, value))
pass
## ====================================================================================================
## Load <transaction> table.
table.transaction = library.pandas.read_csv(library.os.path.join(table.folder, "transactions_train.csv"), dtype=str, nrows=100000)
table.transaction['t_dat'] = library.pandas.to_datetime(table.transaction['t_dat'])
## Label encoding for category variables.
## Transform numeric variable.
head = 10
table.transaction['t_dat_code'] = feature.label.encode(table.transaction['t_dat']) + head
table.transaction['sales_channel_id'] = feature.label.encode(table.transaction['sales_channel_id']) + head
table.transaction['price'] = [head + float(i) for i in table.transaction['price']]
# ## ====================================================================================================
# ## Save the checkpoint.
# storage = library.os.path.join(root, 'resource/{}/csv/'.format("clean"))
# library.os.makedirs(storage, exist_ok=True)
# table.article.to_csv(library.os.path.join(storage, 'article.csv'), index=False)
# table.customer.to_csv(library.os.path.join(storage, 'customer.csv'), index=False)
# table.transaction.to_csv(library.os.path.join(storage, 'transaction.csv'), index=False)
## ====================================================================================================
## Preprocess the tables to sequence by user.
table.sequence = dict()
loop = ['price', 'sales_channel_id', "t_dat_code"]
for variable in loop:
table.sequence[variable] = feature.sequence.flatten(table=table.transaction.astype(str), key='customer_id', variable=variable, group=['customer_id', 't_dat'])
pass
loop = [
'product_code', 'prod_name', "product_type_no", 'product_type_name',
"product_group_name", "graphical_appearance_no", "graphical_appearance_name",
"colour_group_code", 'colour_group_name', 'perceived_colour_value_id',
'perceived_colour_value_name', 'perceived_colour_master_id',
'perceived_colour_master_name', 'department_no', 'department_name',
'index_code', 'index_name', 'index_group_no', 'index_group_name',
'section_no', 'section_name', 'garment_group_no',
'garment_group_name', 'detail_desc', 'article_id_code'
]
for variable in library.tqdm.tqdm(loop, total=len(loop)):
selection = table.transaction[['t_dat', "customer_id", "article_id"]].copy()
selection = library.pandas.merge(selection, table.article[["article_id", variable]], on="article_id", how='inner')
table.sequence[variable] = feature.sequence.flatten(table=selection.astype(str), key='customer_id', group=['customer_id', 't_dat'], variable=variable)
pass
merge = lambda x,y: library.pandas.merge(left=x, right=y, on='customer_id', how='inner')
table.sequence = library.functools.reduce(merge, table.sequence.values())
## ====================================================================================================
## Mix together and save the checkpoint.
storage = library.os.path.join(root, 'resource/{}/csv/'.format("preprocess"))
library.os.makedirs(storage, exist_ok=True)
table.group = library.pandas.merge(left=table.customer, right=table.sequence, on='customer_id', how='outer')
table.group.dropna().to_csv(library.os.path.join(storage, "group(train).csv"), index=False)
table.group.fillna("").to_csv(library.os.path.join(storage, "group(all).csv"), index=False)
# ## ====================================================================================================
# ## Generate the <edge> table between [article_id_code] and [article_id_code].
# loop = " ".join(table.group['article_id_code'].dropna()).split()
# edge = ["1-{}".format(i) for i in set(loop)]
# total = len(loop)-1
# for a, b in library.tqdm.tqdm(zip(loop[:-1], loop[1:]), total=total):
# edge = edge + ['-'.join([a,b])]
# pass
# edge = library.pandas.DataFrame({"pair": edge})
# edge = edge.drop_duplicates()
# head = 10
# edge['pair_code'] = feature.label.encode(edge['pair']) + head
# table.edge = edge
# ## Save the checkpoint.
# storage = library.os.path.join(root, 'resource/{}/csv/'.format("preprocess"))
# library.os.makedirs(storage, exist_ok=True)
# table.edge.to_csv(library.os.path.join(storage, "edge.csv"), index=False)
# table.edge.nunique()
# ## Update to <group> table.
# for _, item in table.group.dropna().iterrows():
# line = item['article_id_code'].split()
# if(len(line)>1):
# track = []
# for a, b in zip(line[:-1], line[1:]):
# code = table.edge.loc[table.edge['pair']=="-".join([a,b])]['pair_code'].item()
# track += [str(code)]
# pass
# track = " ".join(track)
# pass
# if(len(line)==1):
# code = table.edge.loc[table.edge['pair']=='1-{}'.format(line[-1])]['pair_code'].item()
# track = [str(code)]
# track = " ".join(track)
# pass
# break
|
from .source_income_serializer import *
|
"""
Task 11. Create a TOY class with methods that print product information to the screen
and determine whether a toy matches a search criterion.
Create child classes CUBE (color, price, material, edge length),
BALL (color, price, material, diameter),
CAR (color, price, name, manufacturer), each with its own methods for printing
information and for checking whether it matches a given color.
Create a list of n toys, print the full information from the database to the screen,
and implement a search for toys of a given color.
"""
from random import randint
from faker import Faker
from random_color import random_color
class ToyClass:
"""
    Base toy class
"""
def __init__(self, color, price, material, size):
self.color = color
self.price = price
self.material = material
self.size = size
def toy_info(self):
return (
"[Базовый класс игрушка]\nЦвет: "
+ self.color
+ "\nЦена: "
+ str(self.price)
+ " руб.\nМатериал: "
+ self.material
+ "\nРазмер: "
+ str(self.size)
+ " см."
)
def color_detector(self, color_input):
if color_input == self.color:
return True
return False
class CubeClass(ToyClass):
"""
    Cube class
"""
def __init__(self, color, price, material, size):
super().__init__(color, price, material, size)
def toy_info(self):
return (
"[Кубик]\nЦвет: "
+ self.color
+ "\nЦена: "
+ str(self.price)
+ " руб.\nМатериал: "
+ self.material
+ "\nРазмер ребра: "
+ str(self.size)
+ " см."
)
class BallClass(ToyClass):
"""
    Ball class
"""
def __init__(self, color, price, material, size):
super().__init__(color, price, material, size)
def toy_info(self):
return (
"[Мяч]\nЦвет: "
+ self.color
+ "\nЦена: "
+ str(self.price)
+ " руб.\nМатериал: "
+ self.material
+ "\nДиаметр: "
+ str(self.size)
+ " см."
)
class CarClass(ToyClass):
"""
    Car class
"""
def __init__(self, color, price, name, manufacturer):
self.color = color
self.price = price
self.name = name
self.manufacturer = manufacturer
def toy_info(self):
return (
"[Машинка]\nЦвет: "
+ self.color
+ "\nЦена: "
+ str(self.price)
+ " руб.\nНазвание: "
+ self.name
+ "\nПроизводитель: "
+ self.manufacturer
)
def main():
"""
    Create a list of n toys, print the full information from the database to the screen,
    and implement a search for toys of a given color.
"""
try:
n = int(input("Введите количество игрушек -> "))
except ValueError:
print("Некорректный ввод данных")
return
d = {
1: ToyClass,
2: CubeClass,
3: BallClass,
4: CarClass,
}
fake = Faker(["ru_RU"])
toy_list = []
for _ in range(n):
r_int = randint(1, 4)
d_args = {
1: (random_color(), randint(1, 1000), "пластик", randint(1, 20)),
2: (random_color(), randint(1, 1000), "пластик", randint(1, 20)),
3: (random_color(), randint(1, 1000), "пластик", randint(1, 20)),
4: (
random_color(),
randint(1, 1000),
fake.word(),
fake.word() + " " + fake.word(),
),
}
toy_list.append(d[r_int](*d_args[r_int]))
for toy in toy_list:
print(toy.toy_info() + "\n")
    # search for toys of the given color
try:
color_input = input("Введите цвет для поиска игрушек -> ")
except ValueError:
print("Некорректный ввод данных")
return
search_flag = False
print("Игрушки c фильтрацией по цвету")
for toy in toy_list:
if toy.color_detector(color_input):
print(toy.toy_info() + "\n")
search_flag = True
if not search_flag:
print("Игрушки не найдены")
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import traceback
from django.test import TestCase, override_settings
from des.models import DynamicEmailConfiguration
from des.backends import ConfiguredEmailBackend
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
class ConfiguredEmailBackendDynamicSettingsTestCase(TestCase):
def setUp(self):
self.configuration = DynamicEmailConfiguration()
def test_backend_does_not_permit_mutex_tls_and_ssl(self):
try:
ConfiguredEmailBackend(
use_tls = True,
use_ssl = True
)
self.fail("No exception thrown. Expected ValueError")
except ValueError:
pass # Test succeeded
except Exception:
self.fail("Incorrect exception thrown: {}".format(
traceback.format_exc()
))
def test_backend_honors_configured_host(self):
host = 'testhost.mysite.com'
self.configuration.host = host
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.host, host)
def test_backend_honors_configured_port(self):
port = 123
self.configuration.port = port
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.port, port)
def test_backend_honors_configured_username(self):
username = 'awesomeuser'
self.configuration.username = username
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.username, username)
def test_backend_honors_configured_password(self):
password = 'secret'
self.configuration.password = password
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.password, password)
def test_backend_honors_configured_use_tls_true(self):
use_tls = True
self.configuration.use_tls = use_tls
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.use_tls, use_tls)
def test_backend_honors_configured_use_ssl_true(self):
use_ssl = True
self.configuration.use_ssl = use_ssl
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.use_ssl, use_ssl)
def test_backend_honors_configured_fail_silently_true(self):
fail_silently = True
self.configuration.fail_silently = fail_silently
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.fail_silently, fail_silently)
def test_backend_honors_configured_use_tls_false(self):
use_tls = False
self.configuration.use_tls = use_tls
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.use_tls, use_tls)
def test_backend_honors_configured_use_ssl_false(self):
use_ssl = False
self.configuration.use_ssl = use_ssl
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.use_ssl, use_ssl)
def test_backend_honors_configured_fail_silently_false(self):
fail_silently = False
self.configuration.fail_silently = fail_silently
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.fail_silently, fail_silently)
def test_backend_honors_configured_timeout(self):
timeout = 12345
self.configuration.timeout = timeout
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.timeout, timeout)
class ConfiguredEmailBackendSettingsFallbackTestCase(TestCase):
def setUp(self):
self.configuration = DynamicEmailConfiguration()
def test_backend_does_not_permit_mutex_tls_and_ssl(self):
try:
ConfiguredEmailBackend(
use_tls = True,
use_ssl = True
)
self.fail("No exception thrown. Expected ValueError")
except ValueError:
pass # Test succeeded
except Exception:
self.fail("Incorrect exception thrown: {}".format(
traceback.format_exc()
))
@override_settings(EMAIL_HOST = 'testhost.mysite.com')
def test_backend_honors_fallback_host(self):
host = 'testhost.mysite.com'
self.configuration.host = None
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.host, host)
@override_settings(EMAIL_PORT = 123)
def test_backend_honors_fallback_port(self):
port = 123
self.configuration.port = None
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.port, port)
@override_settings(EMAIL_HOST_USER = 'awesomeuser')
def test_backend_honors_fallback_username(self):
username = 'awesomeuser'
self.configuration.username = None
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.username, username)
@override_settings(EMAIL_HOST_PASSWORD = 'secret')
def test_backend_honors_fallback_password(self):
password = 'secret'
self.configuration.password = None
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.password, password)
@override_settings(EMAIL_TIMEOUT = 12345)
def test_backend_honors_fallback_timeout(self):
timeout = 12345
self.configuration.timeout = None
self.configuration.save()
backend = ConfiguredEmailBackend()
self.assertEqual(backend.timeout, timeout)
class ConfiguredEmailBackendExplicitSettingsTestCase(TestCase):
def setUp(self):
self.configuration = DynamicEmailConfiguration()
def test_backend_does_not_permit_mutex_tls_and_ssl(self):
try:
ConfiguredEmailBackend(
use_tls = True,
use_ssl = True
)
self.fail("No exception thrown. Expected ValueError")
except ValueError:
pass # Test succeeded
except Exception:
self.fail("Incorrect exception thrown: {}".format(
traceback.format_exc()
))
def test_backend_honors_explicit_host(self):
host = 'testhost.mysite.com'
explicit_host = 'anotherhost.mysite.com'
self.configuration.host = host
self.configuration.save()
backend = ConfiguredEmailBackend(
host = explicit_host
)
self.assertEqual(backend.host, explicit_host)
def test_backend_honors_explicit_port(self):
port = 123
explicit_port = 321
self.configuration.port = port
self.configuration.save()
backend = ConfiguredEmailBackend(
port = explicit_port
)
self.assertEqual(backend.port, explicit_port)
def test_backend_honors_explicit_username(self):
username = 'awesomeuser'
explicit_username = 'anotheruser'
self.configuration.username = username
self.configuration.save()
backend = ConfiguredEmailBackend(
username = explicit_username
)
self.assertEqual(backend.username, explicit_username)
def test_backend_honors_explicit_password(self):
password = 'secret'
explicit_password = 'anothersecret'
self.configuration.password = password
self.configuration.save()
backend = ConfiguredEmailBackend(
password = explicit_password
)
self.assertEqual(backend.password, explicit_password)
def test_backend_honors_explicit_use_tls_true(self):
use_tls = True
explicit_use_tls = False
self.configuration.use_tls = use_tls
self.configuration.save()
backend = ConfiguredEmailBackend(
use_tls = explicit_use_tls
)
self.assertEqual(backend.use_tls, explicit_use_tls)
def test_backend_honors_explicit_use_ssl_true(self):
use_ssl = True
explicit_use_ssl = False
self.configuration.use_ssl = use_ssl
self.configuration.save()
backend = ConfiguredEmailBackend(
use_ssl = explicit_use_ssl
)
self.assertEqual(backend.use_ssl, explicit_use_ssl)
def test_backend_honors_explicit_fail_silently_true(self):
fail_silently = True
explicit_fail_silently = False
self.configuration.fail_silently = fail_silently
self.configuration.save()
backend = ConfiguredEmailBackend(
fail_silently = explicit_fail_silently
)
self.assertEqual(backend.fail_silently, explicit_fail_silently)
def test_backend_honors_explicit_use_tls_false(self):
use_tls = False
explicit_use_tls = True
self.configuration.use_tls = use_tls
self.configuration.save()
backend = ConfiguredEmailBackend(
use_tls = explicit_use_tls
)
self.assertEqual(backend.use_tls, explicit_use_tls)
def test_backend_honors_explicit_use_ssl_false(self):
use_ssl = False
explicit_use_ssl = True
self.configuration.use_ssl = use_ssl
self.configuration.save()
backend = ConfiguredEmailBackend(
use_ssl = explicit_use_ssl
)
self.assertEqual(backend.use_ssl, explicit_use_ssl)
def test_backend_honors_explicit_fail_silently_false(self):
fail_silently = False
explicit_fail_silently = True
self.configuration.fail_silently = fail_silently
self.configuration.save()
backend = ConfiguredEmailBackend(
fail_silently = explicit_fail_silently
)
self.assertEqual(backend.fail_silently, explicit_fail_silently)
def test_backend_honors_explicit_timeout(self):
timeout = 12345
explicit_timeout = 54321
self.configuration.timeout = timeout
self.configuration.save()
backend = ConfiguredEmailBackend(
timeout = explicit_timeout
)
self.assertEqual(backend.timeout, explicit_timeout)
|
import logging
import sys
LOGGER_NAME = 'jira-bot'
_logger = logging.getLogger(LOGGER_NAME)
_logger.setLevel(logging.INFO)
# create logger formatter
_formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
# log to stdout
_stdout_handler = logging.StreamHandler(sys.stdout)
_stdout_handler.setFormatter(_formatter)
_logger.addHandler(_stdout_handler)
# log the same to a file
_file_handler = logging.FileHandler('log.txt')
_file_handler.setFormatter(_formatter)
_logger.addHandler(_file_handler)
def info(message, *args, **kwargs):
_logger.info(message, *args, **kwargs)
def warning(message, *args, **kwargs):
_logger.warning(message, *args, **kwargs)
|
times=int(input())
for i in range(times):
a,b=input().split()
if len(a)!=len(b):
print(f'{a}, {b} have different lengths')
else:
length=len(a)
pa=[]
for e in range(length):
for c in range(e+1,length):
if a[e]==a[c]:
pa.append('+'+str(c-e))
break
if len(pa)!=e+1:
pa.append(str(0))
pb=[]
for e in range(length):
for c in range(e+1,length):
if b[e]==b[c]:
pb.append('+'+str(c-e))
break
if len(pb)!=e+1:
pb.append(str(0))
        if pa!=pb:  # compare the repetition patterns position-wise; a set comparison would ignore order and repeat counts
print(f'{a}, {b} are not isomorphs')
else:
print(f"{a}, {b} are isomorphs with repetition pattern {' '.join(pa)}")
|
import pprint
import os
import sys
import json
def get_all_json(path):
l=[]
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith('.json') and not file.startswith('.'):
l.append(os.path.join(root, file))
return l
class Visitor:
def __init__(self,mode,lang):
self.mode=mode
self.lang=lang
self.target_fields=['name','description','title','text','title2','pages']
self.visit0=getattr(self,mode)
def parse(self,lang_key,base,index):
self.lang[lang_key]=base[index]
def render(self,lang_key,base,index):
if lang_key in self.lang:
base[index]=self.lang[lang_key]
def visit(self,node,prefix):
if type(node)==dict:
for key in node:
if key in self.target_fields:
val=node[key]
lang_key="%s.%s"%(prefix,key)
if type(val)==str:
self.visit0(lang_key,node,key)
elif type(val)==list:
self.visit(val,lang_key)
elif type(node)==list:
for i in range(len(node)):
self.visit(node[i],"%s.%d"%(prefix,i))
if __name__=="__main__":
mode=sys.argv[1]
assert mode=='parse' or mode=='render'
#path=assets/.../patchouli_books/.../en_us/
path=sys.argv[2]
l=get_all_json(path)
json_file=sys.argv[3]
lang={}
if mode=='render':
with open(json_file) as f:
lang=json.load(f)
visitor=Visitor(mode,lang)
for filepath in l:
with open(filepath) as f:
root=json.load(f)
prefix=os.path.relpath(filepath,path).replace('/','.')[:-5]
visitor.visit(root,prefix)
        # Note: this writes back to the original file location in place, so be careful when using it
if mode=='render':
with open(filepath,'w') as f:
f.write(json.dumps(root,indent=4))
if mode=='parse':
with open(json_file,'w') as f:
f.write(json.dumps(lang,indent=4))
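# Usage sketch (argument order inferred from the sys.argv handling above; the paths
# below are placeholders, not real assets):
#   python this_script.py parse <path_to_en_us_book_dir> lang.json
#   python this_script.py render <path_to_en_us_book_dir> lang.json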
|
#!/usr/bin/python
import pandas as pd
import sys
def main():
# fle_path = '/home/alex/workspace/ReportGen/python/BigBenchTimes.csv'
file_path = sys.argv[1]
df = pd.read_csv(file_path, sep=';')
df.to_excel("BigBenchTimes.xlsx")
    print(df.to_string())
if __name__ == '__main__':
main()
|
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Q, Search
from distant_supervision.ds_utils import CorpusGenUtils
class ElasticClient(object):
# Example document from the elasticsearch index
# {
# "_index":"enwiki",
# "_type":"sentence",
# "_id":"AXCbniQUQZzz3GiznsR6",
# "_score":21.83448,
# "_source":{
# "pageid":22177894,
# "position":3,
# "text":"Madrid, Spain: Assembly of Madrid.",
# "pagetitle":"List of Presidents of the Assembly of Madrid",
# "after":"Retrieved 30 January 2019.",
# "before":"\"Relación de Presidentes\" (in Spanish)."
# }
# }
def __init__(self,
host: str = "localhost",
port: int = 9200,
index_name: str = "enwiki",
field_names=["title", "text"]):
self.client = Elasticsearch([host], port=port, timeout=45)
self.fields = field_names
self.index_name = index_name
def query_sentences(self, page_title: str, term_a: str, term_b: str, size: int = 20):
term_a, term_b = term_a.lower(), term_b.lower()
s = Search(using=self.client, index=self.index_name)[0:size]
s.query = Q('bool', must=[Q('match', pageid=page_title), Q('match_phrase', text=term_a),
Q('match_phrase', text=term_b)])
s.execute()
results = list()
for hit in s:
sent_text = hit.text.lower()
if term_a in sent_text and term_b in sent_text:
results.append((hit.position, hit.text, hit.pagetitle, '{}_{}'.format(hit.pageid, hit.position)))
filtered = list()
for result in results:
if result[2].lower() == page_title.lower():
filtered.append(result)
if filtered:
results = filtered
return sorted(results, key=lambda result: result[0])
def query_flexible(self, page_id: int, search_terms: list, size: int = 500):
s = Search(using=self.client, index=self.index_name)[0:size]
should_list = list()
for term in search_terms:
should_list.append(Q('match_phrase', text=term))
s.query = Q('bool',
must=[Q('match', pageid=page_id)],
should=should_list,
minimum_should_match=1)
results = list()
try:
s.execute()
except Exception as ex:
print("Elasticsearch error: {}".format(str(ex)))
return results
for hit in s:
suface_form = None
text = hit.text.lower()
# we are trying to find the surface form of the most relevant term in the ranked list of search terms
for term in search_terms:
if term.lower() in text:
suface_form = term
break
if suface_form:
results.append((hit.position, suface_form, hit.text, hit.pagetitle, '{}_{}'.format(hit.pageid, hit.position)))
return sorted(results, key=lambda result: result[0])
@classmethod
def get_best_matching_setence(cls, subj_sentences, obj_sentences, sub_terms, obj_terms, count: int = 3):
final_sentences = list()
subj_indices = {subj_sent[0]:subj_sent for subj_sent in subj_sentences}
# obj_indices = {obj_sent[0]:obj_sent for obj_sent in obj_sentences}
subj_term_sent_id = dict()
obj_term_sent_id = dict()
for sent in subj_sentences:
term = sent[1]
sent_ids = subj_term_sent_id.get(term, set())
sent_ids.add(sent[0])
subj_term_sent_id[term] = sent_ids
for sent in obj_sentences:
term = sent[1]
sent_ids = obj_term_sent_id.get(term, set())
sent_ids.add(sent[0])
obj_term_sent_id[term] = sent_ids
for obj_term in obj_terms:
if obj_term in obj_term_sent_id:
obj_sent_ids = sorted(obj_term_sent_id[obj_term])
subj_sent_ids = list()
for sent_id in obj_sent_ids:
if sent_id in subj_indices:
subj_sent = subj_indices[sent_id]
subj_term = subj_sent[1]
subj_term_index = sub_terms.index(subj_term)
subj_sent_ids.append([subj_sent, subj_term_index])
if subj_sent_ids:
subj_sent_ids = sorted(subj_sent_ids, key=lambda x: x[1])
selected_sent_id = subj_sent_ids[0][0][0]
selected_sentence = subj_indices[selected_sent_id]
sub_term_list = list()
for sub_term in sub_terms:
if sub_term in selected_sentence[2].lower():
sub_term_list.append(sub_term)
obj_term_list = list()
for obj_term in obj_terms:
if obj_term.lower() in selected_sentence[2].lower():
obj_term_list.append(obj_term)
modified_sentence = [selected_sentence[0], sub_term_list, obj_term_list, selected_sentence[2],
selected_sentence[3], selected_sentence[4]]
final_sentences.append(modified_sentence)
if len(final_sentences) > count:
return final_sentences
return final_sentences
if __name__ == '__main__':
es_client = ElasticClient()
labels = CorpusGenUtils.get_link_text(sparql_endpoint=CorpusGenUtils.dbpedia_201610,
dbpedia_uri='http://dbpedia.org/resource/Barack_Obama')
sorted_labels = CorpusGenUtils.sort_by_similarity("Barack Obama", labels)
print(sorted_labels)
sub_terms = [term[0] for term in sorted_labels]
subj_sentences = es_client.query_flexible(534366, sub_terms)
date_variants = CorpusGenUtils.get_all_date_variants('1961-08-04')
print(date_variants)
obj_sentences = es_client.query_flexible(534366, date_variants)
final_sentences = ElasticClient.get_best_matching_setence(subj_sentences, obj_sentences, sub_terms, date_variants)
for sent in final_sentences:
print(sent)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pytest
@pytest.hookspec(firstresult=True)
def pytest_send_upload_request(upload_url: str, files: list, config: dict):
""" send upload request """
|
import numpy as np
import pandas as pd
from sparklines import sparklines
def sparklines_str(col, bins=10):
bins = np.histogram(col[col.notnull()], bins=bins)[0]
return "".join(sparklines(bins))
def df_types_and_stats(df: pd.DataFrame) -> pd.DataFrame:
missing = df.isnull().sum().sort_index()
missing_pc = (missing / len(df) * 100).round(2)
types_and_missing = pd.DataFrame(
{
'type': df.sort_index().dtypes,
'#nas': missing,
'%nas': missing_pc,
'card': df.agg('nunique').sort_index(),
},
index=df.columns.sort_values()
)
dist = df.agg([sparklines_str]).T.sort_index()
desc = df.describe(include='all').T.sort_index()
return pd.concat([dist, types_and_missing, desc], axis=1, sort=True)
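# Example usage (a minimal sketch; the DataFrame below is made up for illustration):
#
# >>> df = pd.DataFrame({"age": [21, 35, None, 50], "height": [1.80, 1.70, 1.65, None]})
# >>> print(df_types_and_stats(df))
#
# The result has one row per column, combining a sparkline of the value distribution
# with the dtype, missing-value counts, cardinality and describe() statistics.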
|
import logging
from pathlib import Path
from newton import Newton
import plot
import matplotlib.pyplot as plt
import numpy as np
def obj_func(x : np.array) -> float:
# Six hump function
# http://scipy-lectures.org/intro/scipy/auto_examples/plot_2d_minimization.html
return ((4 - 2.1*x[0]**2 + x[0]**4 / 3.) * x[0]**2 + x[0] * x[1] + (-4 + 4*x[1]**2) * x[1] **2)
def gradient(x : np.array) -> np.array:
return np.array([
8*x[0] - 4 * 2.1 * x[0]**3 + 2 * x[0]**5 + x[1],
x[0] - 8 * x[1] + 16 * x[1]**3
])
def hessian(x : np.array) -> np.array:
return np.array([
[
2 * (4 - 6 * 2.1 * x[0]**2 + 5 * x[0]**4),
1
],
[
1,
-8 + 48 * x[1]**2
]])
def is_pos_def(x : np.array) -> bool:
return np.all(np.linalg.eigvals(x) > 0)
def reg_inv_hessian(x : np.array) -> np.array:
# Check pos def
hes = hessian(x)
if is_pos_def(hes):
return np.linalg.inv(hes)
else:
# Regularize
identity = np.eye(len(x))
eps = 1e-8
hes_reg = hes + eps * identity
eps_max = 100.0
while not is_pos_def(hes_reg) and eps <= eps_max:
eps *= 10.0
hes_reg = hes + eps * identity
if eps > eps_max:
print(hes_reg)
print(is_pos_def(hes_reg))
raise ValueError("Failed to regularize Hessian!")
return np.linalg.inv(hes_reg)
def get_random_uniform_in_range(x_rng : np.array, y_rng : np.array) -> np.array:
p = np.random.rand(2)
p[0] *= x_rng[1] - x_rng[0]
p[0] += x_rng[0]
p[1] *= y_rng[1] - y_rng[0]
p[1] += y_rng[0]
return p
if __name__ == "__main__":
opt = Newton(
obj_func=obj_func,
gradient_func=gradient,
reg_inv_hessian=reg_inv_hessian
)
opt.log.setLevel(logging.INFO)
x_rng = [-2,2]
y_rng = [-1,1]
fig_dir = "figures"
Path(fig_dir).mkdir(parents=True, exist_ok=True)
# params_init = get_random_uniform_in_range(x_rng, y_rng)
# params_init = np.array([-1.55994695, -0.31833122])
params_init = np.array([-1.1,-0.5])
converged, no_opt_steps, final_update, traj, line_search_factors = opt.run(
no_steps=10,
params_init=params_init,
tol=1e-8,
store_traj=True
)
print("Converged: %s" % converged)
print(traj)
print(line_search_factors)
trajs = {}
trajs[0] = traj
endpoints, counts = plot.get_endpoints_and_counts(trajs)
for i in range(0,len(endpoints)):
print(endpoints[i], " : ", counts[i])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# plot.plot_3d_endpoint_lines(ax, trajs)
plot.plot_3d(ax, x_rng,y_rng,obj_func)
plt.savefig(fig_dir+"/3d.png", dpi=200)
plt.figure()
plot.plot_obj_func(x_rng, y_rng, obj_func)
plot.plot_trajs(x_rng, y_rng, trajs, [0])
plt.title("Trajs")
plt.savefig(fig_dir+"/trajs.png", dpi=200)
plt.figure()
plot.plot_obj_func(x_rng, y_rng, obj_func)
# plot.plot_quiver(x_rng, y_rng, trajs)
plot.plot_endpoint_counts(trajs)
plt.title("Endpoints")
plt.savefig(fig_dir+"/endpoints.png", dpi=200)
plt.figure()
plot.plot_histogram(x_rng,y_rng,trajs,50)
plot.plot_endpoint_counts(trajs)
plt.title("Endpoints")
plt.savefig(fig_dir+"/histogram.png", dpi=200)
# plt.show()
plt.close('all')
# Line search
plt.figure()
plot.plot_line_search(line_search_factors, traj, obj_func)
plt.title("Line search")
plt.savefig(fig_dir+"/line_search.png", dpi=200)
|
#!/usr/bin/env python3
# Run OSSF Scorecard (https://github.com/ossf/scorecard) against Envoy dependencies.
#
# Usage:
#
# tools/dependency/ossf_scorecard.sh <path to repository_locations.bzl> \
# <path to scorecard binary> \
# <output CSV path>
#
# You will need to checkout and build the OSSF scorecard binary independently and supply it as a CLI
# argument.
#
# You will need to set a GitHub access token in the GITHUB_AUTH_TOKEN environment variable. You can
# generate personal access tokens under developer settings on GitHub. You should restrict the scope
# of the token to "repo: public_repo".
#
# The output is CSV suitable for import into Google Sheets.
from collections import namedtuple
import csv
import json
import os
import subprocess as sp
import sys
import utils
Scorecard = namedtuple('Scorecard', [
'name',
'contributors',
'active',
'ci_tests',
'pull_requests',
'code_review',
'fuzzing',
'security_policy',
'releases',
])
# Thrown on errors during scorecard processing.
class OssfScorecardError(Exception):
pass
# We skip build, test, etc.
def IsScoredUseCategory(use_category):
return len(
set(use_category).intersection([
'dataplane_core', 'dataplane_ext', 'controlplane', 'observability_core',
'observability_ext'
])) > 0
def Score(scorecard_path, repository_locations):
results = {}
for dep, metadata in sorted(repository_locations.items()):
if not IsScoredUseCategory(metadata['use_category']):
continue
results_key = metadata['project_name']
formatted_name = '=HYPERLINK("%s", "%s")' % (metadata['project_url'], results_key)
github_project_url = utils.GetGitHubProjectUrl(metadata['urls'])
if not github_project_url:
na = 'Not Scorecard compatible'
results[results_key] = Scorecard(name=formatted_name,
contributors=na,
active=na,
ci_tests=na,
pull_requests=na,
code_review=na,
fuzzing=na,
security_policy=na,
releases=na)
continue
raw_scorecard = json.loads(
sp.check_output(
[scorecard_path, f'--repo={github_project_url}', '--show-details', '--format=json']))
checks = {c['CheckName']: c for c in raw_scorecard['Checks']}
# Generic check format.
def Format(key):
score = checks[key]
status = score['Pass']
confidence = score['Confidence']
return f'{status} ({confidence})'
# Releases need to be extracted from Signed-Releases.
def ReleaseFormat():
score = checks['Signed-Releases']
if score['Pass']:
return Format('Signed-Releases')
details = score['Details']
release_found = details is not None and any('release found:' in d for d in details)
if release_found:
return 'True (10)'
else:
return 'False (10)'
results[results_key] = Scorecard(name=formatted_name,
contributors=Format('Contributors'),
active=Format('Active'),
ci_tests=Format('CI-Tests'),
pull_requests=Format('Pull-Requests'),
code_review=Format('Code-Review'),
fuzzing=Format('Fuzzing'),
security_policy=Format('Security-Policy'),
releases=ReleaseFormat())
print(raw_scorecard)
print(results[results_key])
return results
def PrintCsvResults(csv_output_path, results):
headers = Scorecard._fields
with open(csv_output_path, 'w') as f:
writer = csv.writer(f)
writer.writerow(headers)
for name in sorted(results):
writer.writerow(getattr(results[name], h) for h in headers)
if __name__ == '__main__':
if len(sys.argv) != 4:
print(
'Usage: %s <path to repository_locations.bzl> <path to scorecard binary> <output CSV path>'
% sys.argv[0])
sys.exit(1)
access_token = os.getenv('GITHUB_AUTH_TOKEN')
if not access_token:
print('Missing GITHUB_AUTH_TOKEN')
sys.exit(1)
path = sys.argv[1]
scorecard_path = sys.argv[2]
csv_output_path = sys.argv[3]
spec_loader = utils.repository_locations_utils.load_repository_locations_spec
path_module = utils.LoadModule('repository_locations', path)
try:
results = Score(scorecard_path, spec_loader(path_module.REPOSITORY_LOCATIONS_SPEC))
PrintCsvResults(csv_output_path, results)
except OssfScorecardError as e:
print(f'An error occurred while processing {path}, please verify the correctness of the '
f'metadata: {e}')
|
import time
import os
from talon.voice import Context, Key, press
from talon import ctrl, clip, applescript
from . import browser
DOWNLOAD_PATH = "~/Music"
def youtube_download_audio(m):
youtube_download(video=False)
def youtube_download_video(m):
youtube_download(video=True)
def youtube_download(video=True):
press("escape")
press("cmd-l")
press("cmd-c")
time.sleep(0.1)
url = clip.get()
print(f"url: {url}")
press("escape")
command = f"youtube-dl "
if not video:
command += "--extract-audio "
command += "{url}"
print(f"command: {command}")
return applescript.run(
f"""
tell application "Terminal"
do script "cd {os.path.expanduser(DOWNLOAD_PATH)}; {command}; exit"
end tell
"""
)
context = Context(
"youtube", func=browser.url_matches_func("https://youtube.com/.*")
)
context.keymap(
{
"download audio": youtube_download_audio,
"download video": youtube_download_video,
"speed up": browser.send_to_page(">"),
"speed down": browser.send_to_page("<"),
"full screen": browser.send_to_page("f"),
}
)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 7 14:58:47 2019
@author: bdgecyt
"""
import sys
import time
import datetime
import argparse
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
import wrapper
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--video_file", type=str, default="C:\\Users\\bdgecyt\\Desktop\\constructionimages\\Ch2_20190301124233.mp4", help="path to dataset")
# parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
# parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
# parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
# parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
# parser.add_argument("--nms_thres", type=float, default=0.4, help="iou thresshold for non-maximum suppression")
# parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
# parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
# parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
# parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
opt = parser.parse_args()
print(opt)
# Bounding-box colors
cmap = plt.get_cmap("tab20b")
colors = [cmap(i) for i in np.linspace(0, 1, 20)]
print("\nPerforming drop line estimation:")
prev_time = time.time()
videofile = opt.video_file
cap = cv2.VideoCapture(videofile)
assert cap.isOpened(), 'Cannot capture source'
vp = (800,900)
ob1 = 1000
ob2 = 500
frames = 0
start = time.time()
while cap.isOpened():
ret, frame = cap.read()
# print(frame.shape[:,:])
print(ret)
if ret:
vps = wrapper.dealAImage(frame,"data/result/",False,False,False)
# for line in lines:
# cv2.line(frame, line[0], line[1], (0, 0, 255), 2)
for vp in vps:
cv2.circle(frame, (int(vp[0]), int(vp[1])), 20, (0,255,0), 3)
cv2.circle(frame, (ob1,ob2), 20, (0,255,0), 3)
cv2.line(frame, (int(vp[0]), int(vp[1])), (ob1,ob2), (0,255,0), 2)
frames += 1
ob1 -= 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
cv2.imshow("frame", frame)
key = cv2.waitKey(1)
if key == 27:
break
continue
else:
frames += 1
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start)))
cv2.imshow("frame", frame)
key = cv2.waitKey(1)
if key == 27:
break
continue
cv2.destroyAllWindows()
cap.release()
|
"""pip dependencies collector."""
from base_collectors import JSONFileSourceCollector
from model import Entities, Entity, SourceResponses
class PipDependencies(JSONFileSourceCollector):
"""pip collector for dependencies."""
async def _parse_entities(self, responses: SourceResponses) -> Entities:
"""Override to parse the dependencies from the JSON."""
installed_dependencies: list[dict[str, str]] = []
for response in responses:
installed_dependencies.extend(await response.json(content_type=None))
return Entities(
Entity(
key=f'{dependency["name"]}@{dependency.get("version", "?")}',
name=dependency["name"],
version=dependency.get("version", "unknown"),
latest=dependency.get("latest_version", "unknown"),
)
for dependency in installed_dependencies
)
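# For reference, a minimal sketch of the JSON this collector consumes (the shape of
# `pip list --outdated --format=json` output; only the keys accessed above are shown,
# other keys may be present):
#
# [
#     {"name": "django", "version": "2.2.24", "latest_version": "3.2.5"}
# ]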
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
WilliamWallaceDialog
A QGIS plugin
This plugin do a supervised classification
-------------------
begin : 2016-05-17
git sha : $Format:%H$
copyright : (C) 2016 by Gillian
email : gillian.milani@geo.uzh.ch
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os
from PyQt4 import QtGui, uic, QtCore, QtSql
s = QtCore.QSettings()
FORM_CLASS, _ = uic.loadUiType(os.path.join(
os.path.dirname(__file__), 'choose_db_dialog_base.ui'))
class ChooseDbDialog(QtGui.QDialog, FORM_CLASS):
def __init__(self, parent = None):
"""Constructor."""
super(ChooseDbDialog, self).__init__(parent)
self.setupUi(self)
listOfConnections = self.getPostgisConnections()
self.fillComboBox(listOfConnections)
currentConnection = s.value('WallacePlugins/connectionName')
if currentConnection is not None:
index = self.comboBox.findData(currentConnection)
self.comboBox.setCurrentIndex(index)
def fillComboBox(self, list):
self.comboBox.addItem('', None)
for name in list:
self.comboBox.addItem(name, name)
def getPostgisConnections(self):
keyList = []
for key in s.allKeys():
if key.startswith('PostgreSQL/connections'):
if key.endswith('database'):
connectionName = key.split('/')[2]
keyList.append(connectionName)
return keyList
|
import sys
html_template = open('charts-template.html', 'r').read()
open('charts.html', 'w').write(html_template.replace('__CHART_DATA_GOES_HERE__', sys.stdin.read()))
|
# get_data.py
import requests
import json
print("REQUESTING SOME DATA FROM THE INTERNET...")
print("")
request_url = "https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/products.json"
response = requests.get(request_url)
print(type(response))
print("")
print(response.status_code)
print("")
parsed_response = json.loads(response.text)
print(parsed_response)
print("")
for d in parsed_response:
print(d["name"])
print(d["id"])
print("")
print( parsed_response[0]["name"])
print("DONE WITH THE PRODUCTS DATA\n")
print("NOW MOVING TO GRADES DATA\n")
request_url1 = "https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/gradebook.json"
response1 = requests.get(request_url1)
print(response1.status_code)
print("")
parsed_response1 = json.loads(response1.text)
#local variables
grades_list = []
subtotal = 0.0
average = 0.0
grades_list = parsed_response1["students"]
for d in grades_list:
subtotal += d["finalGrade"]
# list comprehension alternative:
# grades_list = [student["finalGrade"] for student in parsed_response1["students"]]
average = subtotal / len(grades_list)
average = str(round(average, 2))
print("The average grade is:", f"{average}%")
print("")
|
import json
import nibabel as nib
import numpy as np
from PIL import Image
from pathlib import Path
from sklearn.model_selection import train_test_split
from typing import Tuple
def load_raw_volume(path: Path) -> Tuple[np.ndarray, np.ndarray]:
"""
Loads volume of skull's scan
:param path: path to scan of skull
:return raw_data: raw volume data of skull scan
"return data.affine: affine transformation from skull scan
"""
data: nib.Nifti1Image = nib.load(str(path))
data = nib.as_closest_canonical(data)
raw_data = data.get_fdata(caching='unchanged', dtype=np.float32)
return raw_data, data.affine
def load_labels_volume(path: Path) -> np.ndarray:
"""
Loads label volume from given path
:param path: path to labeled scan of skull
:return: raw data of label's volume
"""
label_raw_data = load_raw_volume(path)[0].astype(np.uint8)
return label_raw_data
def save_labels(data: np.ndarray, affine: np.ndarray, path: Path):
"""
Saves labels in nibabel format
:param data: 3D array with label
:param affine: affine transformation from input nibabel image
:param path: path to save label
:return:
"""
nib.save(nib.Nifti1Image(data, affine), str(path))
def split_first_dataset(train_set_path: Path):
"""
Splits train set from 1st dataset into train and validation set
:param train_set_path: path to folder containing train scans where all labels and scans are in the same directory
:return train_scans: list of tuples containing path for train scan and path for corresponding mask
:return val_scans: list of tuples containing path for validation scan and path for corresponding mask
"""
scan_list = [] # List of tuples containing scans and their masks
file_extension = ".nii.gz"
mask_partial_filename = "_mask"
for scan_full_path in train_set_path.iterdir():
scan_filename = scan_full_path.name[:-len(file_extension)] # Deleting .nii.gz from file name
if mask_partial_filename not in scan_filename:
scan_mask_filename = scan_filename + mask_partial_filename + file_extension
scan_mask_full_path = scan_full_path.parent / Path(scan_mask_filename)
scan_list.append((scan_full_path, scan_mask_full_path))
train_scans, val_scans = train_test_split(scan_list, random_state=42, train_size=0.8)
print(f"Number of train scans from first dataset: {len(train_scans)}")
print(f"Number of validation scans from first dataset: {len(val_scans)}")
return train_scans, val_scans
def split_second_dataset(train_set_path: Path):
"""
Splits train set from 2nd dataset into train and validation set
:param train_set_path: path to folder where each train scan has separate folder containing scan and label
:return train_scans: list of tuples containing path for train scan and path for corresponding mask
:return val_scans: list of tuples containing path for validation scan and path for corresponding mask
"""
scan_filename = "T1w.nii.gz"
mask_filename = "mask.nii.gz"
scan_list = [] # List of tuples containing scans and their masks
for scan_folder_path in train_set_path.iterdir():
scan_full_path = scan_folder_path / scan_filename
mask_full_path = scan_folder_path / mask_filename
scan_list.append((scan_full_path, mask_full_path))
train_scans, val_scans = train_test_split(scan_list, random_state=42, train_size=0.8)
print(f"Number of train scans from second dataset: {len(train_scans)}")
print(f"Number of validation scans from second dataset: {len(val_scans)}")
return train_scans, val_scans
def save_scan_to_xyz_slices(scan: Tuple, save_path: Path, scan_number: int, axis="x"):
"""
Splits scan and scan's label to separate x, y, z slices and then saves it to separate files.
:param axis: axis to split
:param scan_number: scan number to store
:param scan: Tuple containing path to scan and path to corresponding label
:param save_path: Path to folder where slices will be stored. The folder should have following structure:
main folder/
affine/
x/
labels/images/
scans/images/
y/
labels/images/
scans/images/
z/
labels/images/
scans/images/
:return:
"""
file_extension = ".nii.gz"
scan_name = scan[0].name[:-len(file_extension)] + "_" + str(scan_number)
raw_volume, affine = load_raw_volume(scan[0])
mask_volume = load_labels_volume(scan[1])
x_scans = get_axes_slices_from_volume(raw_volume=raw_volume, axis=axis)
x_scans = normalize_slice_values(x_scans)
x_labels = get_axes_slices_from_volume(raw_volume=mask_volume, axis=axis)
print(f"\r Saving scan: {scan_name}")
for scan_number, (scan, label) in enumerate(zip(x_scans, x_labels)):
path = save_path / Path(axis) / Path("scans/images") / Path(scan_name + str(scan_number) + ".png")
save_image_to_png(scan, path)
path = save_path / Path(axis) / Path("labels/images") / Path(scan_name + str(scan_number) + ".png")
save_image_to_png(label, path)
path = save_path / Path("affine") / Path(scan_name)
np.save(path, affine)
def save_test_scan_to_xyz_slices(scan: Path, base_test_save_path: Path, axis="x", dataset_number=1):
"""
Splits test scan to separate x, y, z slices and then saves it to separate files.
:param axis: axis to split
:param dataset_number: Number of dataset. Either first or second
:param scan: path to scan
:param base_test_save_path: Path to folder where slices will be stored. The folder should have following structure:
main folder/
FirstDataset/
SecondDataset/
:return:
"""
if dataset_number == 1:
file_extension = ".nii.gz"
scan_name = scan.name[:-len(file_extension)]
raw_volume, affine = load_raw_volume(scan)
shape = {"x": raw_volume.shape[0],
"y": raw_volume.shape[1],
"z": raw_volume.shape[2]}
x_scans = get_axes_slices_from_volume(raw_volume=raw_volume, axis=axis)
x_scans = normalize_slice_values(x_scans)
print(f"\r Saving scan: {scan_name}")
for scan_number, scan in enumerate(x_scans):
path = base_test_save_path / Path("FirstDataset") / scan_name / Path("images") / Path(scan_name + str("{0:03}".format(scan_number)) + ".png")
save_image_to_png(scan, path)
path = base_test_save_path / Path("FirstDataset") / scan_name / Path(scan_name)
np.save(path, affine)
with open(str(path) + '.json', "w") as write_file:
json.dump(shape, write_file)
elif dataset_number == 2:
scan_name = scan.name
raw_volume, affine = load_raw_volume(scan / Path("T1w.nii.gz"))
shape = {"x": raw_volume.shape[0],
"y": raw_volume.shape[1],
"z": raw_volume.shape[2]}
x_scans = get_axes_slices_from_volume(raw_volume=raw_volume, axis=axis)
x_scans = normalize_slice_values(x_scans)
print(f"\r Saving scan: {scan_name}")
for scan_number, scan in enumerate(x_scans):
path = base_test_save_path / Path("SecondDataset") / scan_name / Path("images") / Path(scan_name + str("{0:03}".format(scan_number)) + ".png")
save_image_to_png(scan, path)
path = base_test_save_path / Path("SecondDataset") / scan_name / Path(scan_name)
np.save(path, affine)
with open(str(path) + '.json', "w") as write_file:
json.dump(shape, write_file)
def get_axes_slices_from_volume(raw_volume: np.ndarray, axis="x"):
"""
Returns slices from given axis
:param axis: axis from which return slice
:param raw_volume: Scan volume
:return: list of slices from given axis
"""
if axis == "x":
x_slices = []
for current_slice in range(raw_volume.shape[0]):
x_slices.append(raw_volume[current_slice])
return x_slices
elif axis == "y":
y_slices = []
for current_slice in range(raw_volume.shape[1]):
y_slices.append(raw_volume[:, current_slice])
return y_slices
elif axis == "z":
z_slices = []
for current_slice in range(raw_volume.shape[2]):
z_slices.append(raw_volume[:, :, current_slice])
return z_slices
else:
raise ValueError("Only x, y, or z axis is available")
def normalize_slice_values(scan_slices: list):
"""
Normalizes slice image values for each axis to range 0-255 and dtype np.uint8
:param scan_slices: list of slices for each axis
:return: normalized list of slices for each axis
"""
xyz_scan_slices = []
for ax_scan_slice in scan_slices:
data = ((ax_scan_slice.astype(np.float32)) / np.max(ax_scan_slice))
data = data * 255
data = data.astype(np.uint8)
xyz_scan_slices.append(data)
return xyz_scan_slices
def save_image_to_png(image: np.ndarray, save_path: Path):
"""
Saves image to .png format on given path
:param image: image to save
:param save_path: path where image will be saved
:return:
"""
try:
img = Image.fromarray(image)
img.save(save_path, format="PNG", optimize=False, compress_level=0)
except FileNotFoundError as e:
save_path.parent.mkdir(parents=True, exist_ok=True)
img = Image.fromarray(image)
img.save(save_path, format="PNG", optimize=False, compress_level=0)
def load_affine(affine_path: Path):
"""
Load affine matrix from specific path
:param affine_path: path to file with affine matrix
:return: affine matrix
"""
affine = np.load(str(affine_path))
return affine
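# Typical usage (a minimal sketch; the directory names below are placeholders, not part
# of this module):
#
# if __name__ == "__main__":
#     train_scans, val_scans = split_first_dataset(Path("data/first_dataset/train"))
#     for i, scan_pair in enumerate(train_scans):
#         save_scan_to_xyz_slices(scan_pair, Path("data/slices"), scan_number=i, axis="x")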
|
# Copyright 2018 Takahiro Ishikawa. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
from clispy.type.sequence import *
from clispy.type.array import *
class UnitTestCase(unittest.TestCase):
def testArrayObjectRegistry(self):
na = np.array([1, 2, 3])
nb = np.array([1, 2, 3])
a = Array(na)
b = Array(na)
c = Array(nb)
self.assertTrue(a is b)
self.assertFalse(a is c)
def testArray(self):
na = np.array([1, 2, 3])
a = Array(na)
self.assertIsInstance(a, T)
self.assertIsInstance(a, Array)
self.assertEqual(str(a), '#(1 2 3)')
self.assertTrue(a.value is na)
def testArrayTowDimensional(self):
nb = np.array([[1, 2, 3], [4, 5, 6]])
b = Array(nb)
self.assertIsInstance(b, T)
self.assertIsInstance(b, Array)
self.assertEqual(str(b), '#2A((1 2 3) (4 5 6))')
self.assertTrue(b.value is nb)
def testArrayMultiDimensional(self):
nc = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
c = Array(nc)
self.assertIsInstance(c, T)
self.assertIsInstance(c, Array)
self.assertEqual(str(c), '#3A(((1 2) (3 4)) ((5 6) (7 8)))')
self.assertTrue(c.value is nc)
def testArrayTypeOf(self):
na = np.array([1, 2, 3])
a_t = Array(na).type_of()
self.assertIsInstance(a_t, Symbol)
self.assertEqual(a_t.value, 'ARRAY')
def testArrayClassOf(self):
na = np.array([1, 2, 3])
a_c = Array(na).class_of()
self.assertIsInstance(a_c, BuiltInClass)
self.assertIsInstance(a_c.type_of(), Symbol)
def testVectorObjectRegistry(self):
va = np.array([1, 2, 3])
vb = np.array([1, 2, 3])
a = Vector(va)
b = Vector(va)
c = Vector(vb)
self.assertTrue(a is b)
self.assertFalse(a is c)
def testVector(self):
va = np.array([1, 2, 3])
v = Vector(va)
self.assertIsInstance(v, T)
self.assertIsInstance(v, Array)
self.assertIsInstance(v, Sequence)
self.assertIsInstance(v, Vector)
self.assertEqual(str(v), '#(1 2 3)')
self.assertTrue(v.value is va)
def testVectorTypeOf(self):
va = np.array([1, 2, 3])
v_t = Vector(va).type_of()
self.assertIsInstance(v_t, Symbol)
self.assertEqual(v_t.value, 'VECTOR')
def testVectorClassOf(self):
va = np.array([1, 2, 3])
v_c = Vector(va).class_of()
self.assertIsInstance(v_c, BuiltInClass)
self.assertIsInstance(v_c.type_of(), Symbol)
def testStringObjectRegistry(self):
a = String("string_a")
b = String("string_a")
c = String("string_b")
self.assertTrue(a is b)
self.assertFalse(a is c)
def testString(self):
s = String('string')
self.assertIsInstance(s, T)
self.assertIsInstance(s, Array)
self.assertIsInstance(s, Sequence)
self.assertIsInstance(s, Vector)
self.assertIsInstance(s, String)
self.assertEqual(str(s), '"string"')
self.assertEqual(s.value, 'string')
|
from .custom_library import CustomLibrary
from .feature_library import ConcatLibrary
from .fourier_library import FourierLibrary
from .identity_library import IdentityLibrary
from .polynomial_library import PolynomialLibrary
__all__ = [
"ConcatLibrary",
"CustomLibrary",
"FourierLibrary",
"IdentityLibrary",
"PolynomialLibrary",
]
|
from fastapi import FastAPI
from fastapi import status
from dynaconf import settings
from src import db
from src import schemas
API_URL = "/api/v1"
app = FastAPI(
description="example of API based on FastAPI and SqlAlchemy frameworks",
docs_url=f"{API_URL}/docs/",
openapi_url=f"{API_URL}/openapi.json",
redoc_url=f"{API_URL}/redoc/",
title="TMS API",
version="1.0.0",
)
@app.post(f"{API_URL}/blog/post/", status_code=status.HTTP_201_CREATED)
async def new_post(payload: schemas.PostApiSchema) -> schemas.PostApiSchema:
new_post = payload.data
obj = db.create_post(new_post)
(obj, nr_likes) = db.get_single_post(obj.id)
post = schemas.PostSchema(
id=obj.id,
author_id=obj.author_id,
content=obj.content,
nr_likes=str(nr_likes),
)
response = schemas.PostApiSchema(data=post)
return response
@app.get(f"{API_URL}/blog/post/")
async def all_posts() -> schemas.PostListApiSchema:
objects = db.get_all_posts()
posts = [
schemas.PostSchema(
id=post.id,
author_id=post.author_id,
content=post.content,
nr_likes=nr_likes,
)
for (post, nr_likes) in objects
]
response = schemas.PostListApiSchema(data=posts)
return response
@app.get(f"{API_URL}/blog/post/{{post_id}}")
async def single_post(post_id: int) -> schemas.PostApiSchema:
response_kwargs = {}
(obj, nr_likes) = db.get_single_post(post_id)
if obj:
response_kwargs["data"] = schemas.PostSchema(
id=obj.id,
author_id=obj.author_id,
content=obj.content,
nr_likes=nr_likes,
)
else:
response_kwargs["errors"] = [f"post with id={post_id} does not exist"]
response = schemas.PostApiSchema(**response_kwargs)
return response
@app.get(f"{API_URL}/user/")
async def all_users() -> schemas.UserListApiSchema:
objects = db.get_all_users()
users = [
schemas.UserSchema(
id=user.id,
username=user.username,
email=user.email,
)
for user in objects
]
response = schemas.UserListApiSchema(data=users)
return response
@app.get(f"{API_URL}/user/{{user_id}}")
async def single_user(user_id: int) -> schemas.UserApiSchema:
response_kwargs = {}
obj = db.get_single_user(user_id)
if obj:
response_kwargs["data"] = schemas.UserSchema(
id=obj.id,
username=obj.username,
email=obj.email,
)
else:
response_kwargs["errors"] = [f"user with id={user_id} does not exist"]
response = schemas.UserApiSchema(**response_kwargs)
return response
"""async def like(post_id: int) -> MyApiResponseSchema:
resp = MyApiResponseSchema()
try:
with closing(Session()) as session:
# post = session.query(Post).filter(Post.id == post_id).first()
if 1:
# post.nr_likes += 1
# session.add(post)
# session.commit()
like = LikeSchema(post_id=post_id, nr_likes=1)
resp.ok = True
resp.data = {"like": like}
else:
resp.errors = [f"post with id={post_id} was not found"]
except Exception as err:
resp.errors = [str(err), f"unhandled exception: {traceback.format_exc()}"]
raise
return resp"""
if __name__ == "__main__" and settings.MODE_DEBUG:
import uvicorn
uvicorn.run(app, host="127.0.0.1", port=8888)
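# Quick smoke test of the endpoints above (a sketch; assumes the app is running locally
# on port 8888 as started in the __main__ block above, and the POST body shape is
# inferred from the PostSchema fields used in the handlers):
#
#   curl http://127.0.0.1:8888/api/v1/blog/post/
#   curl http://127.0.0.1:8888/api/v1/user/1
#   curl -X POST http://127.0.0.1:8888/api/v1/blog/post/ \
#        -H "Content-Type: application/json" \
#        -d '{"data": {"id": 0, "author_id": 1, "content": "hello", "nr_likes": "0"}}'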
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Transient Scanning Technique implementation
# Sebastien Lemaire: <sebastien.lemaire@soton.ac.uk>
import numpy as np
import multiprocessing as mp
from matplotlib import pyplot
class pyTST:
"""
Class performing and processing the Transient Scanning Technique
To get a TST one can either:
* Load signal from a file
>>> tst.load_data_file(filename, signal_col=1)
>>> tst.compute_TST()
* Load signal from an array
>>> tst.load_data_array(signal_array)
>>> tst.compute_TST()
* Load directly from a txt file (previously exported with tst.export_to_txt)
>>> tst.import_from_txt(filename)
Once the TST is computed or loaded, one can either plot it or export it to file
>>> tst.plot()
>>> tst.export_to_txt(filename)
"""
def load_data_array(self, signal_array, time_array=None, tstep=1):
"""
Load time signal data from python array
Parameters
----------
signal_array : array of float
signal to use for the TST
time_array : array of float, optional
            time stamps associated with signal_array
tstep : float, optional
time step used when time_array is not provided
"""
self.signal_array = signal_array
if time_array is None:
self.time_array = (np.array(range(len(self.signal_array)))+1)*tstep
else:
self.time_array = time_array
def load_data_file(self, filename, signal_column, time_column=None, tstep=1, **kwargs):
"""
Load time signal data text file
Parameters
----------
filename : str
filename where the data is located
signal_column : int
index of the column where the signal is located
time_column : int, optional
index of the time column in the file
tstep : float, optional
multiplier for the time_column, useful to convert a counter column to real time steps,
or timestep used when time_column is not provided
**kwargs
any other parameter is passed directly to numpy.loadtxt
"""
if time_column is None:
usecols = (signal_column, )
else:
usecols = (signal_column, time_column)
timedata = np.loadtxt(filename, usecols=usecols, **kwargs)
if time_column is None:
self.signal_array = timedata
self.time_array = (np.array(range(len(self.signal_array)))+1)*tstep
else:
self.signal_array = timedata[:, 0]
self.time_array = timedata[:, 1]*tstep
def compute_TST(self, step_size=10, analyse_end=False, nproc=None):
"""
Actual computation of the Transient Scanning Technique
Parameters
----------
step_size : int, optional
size of the steps for the TST, data length/step_size computations will be performed
analyse_end : bool, optional
            analyse the end of the signal instead of the beginning (TST-B instead of TST-A)
nproc : int, optional
            number of processes to use for the parallel computation;
            if not provided, the maximum available will be used
"""
if analyse_end:
data = self.signal_array[::-1]
time = self.time_array[::-1]
else:
data = self.signal_array
time = self.time_array
if nproc is None:
nproc = mp.cpu_count()
if step_size is None:
step_size = 10
nb_slices = int(np.ceil(len(data)/step_size))
print("{} slices to compute".format(nb_slices))
pool = mp.Pool(processes=nproc)
result = pool.map(variance_stats,
[ data[step_size*istart:] for istart in range(nb_slices) ],
chunksize=max(2, int(nb_slices/nproc/10)))
pool.close()
pool.join()
self.mean_array = [ row[0] for row in result ]
self.u95_array = [ row[1] for row in result ]
self.step_time_array = np.array([ time[step_size*istart] for istart in range(nb_slices) ])
if analyse_end:
self.step_time_array = self.step_time_array[::-1]
def export_to_txt(self, filename):
"""
Export computed data to text file, can be loaded with import_from_txt
Parameters
----------
filename : str
filename of the file to save
"""
export_array = np.column_stack((self.step_time_array, self.u95_array, self.mean_array),)
np.savetxt(filename, export_array,
header="t, u95, mean")
def import_from_txt(self, filename):
"""
Import data that was exported with export_to_txt
Parameters
----------
filename : str
filename of the file to import
"""
timedata = np.loadtxt(filename)
self.step_time_array = timedata[:, 0]
self.u95_array = timedata[:, 1]
self.mean_array = timedata[:, 2]
def plot(self, filename=None, interactive=True):
"""
Plot the TST results previously computed
Parameters
----------
filename : str, optional
if provided, the plot will be exported to filename
interactive : bool, optional
            True if the signal is also plotted and the discarded time is highlighted in orange;
            double-clicking on the plot will move the cursor and update the signal plot
"""
if interactive:
fig, (ax1, ax2) = pyplot.subplots(2,1)
else:
fig, ax2 = pyplot.subplots()
# Display the grid (t, 1/t)
grid_t = np.array([self.step_time_array[0]/2, self.step_time_array[-1]*2])
for i in range(-20,20):
factor = 10**(i/2)
ax2.loglog(grid_t,
factor/grid_t,
color='grey', alpha=0.5, linewidth=0.5)
if interactive:
def update_cursor(index):
min_u95 = self.u95_array[-index-1]
discard_time = self.step_time_array[-1] - self.step_time_array[index]
hline.set_ydata(min_u95)
vline.set_xdata(self.step_time_array[index])
text.set_text('u95={:2e}\nt={}'.format(min_u95, discard_time))
print("t={}, mean={:e} ± {:e}".format(discard_time, self.mean_array[-index-1], min_u95))
split_index = np.searchsorted(self.time_array, discard_time)
ax1_vertline.set_xdata(self.time_array[split_index])
ax1_startup_signal.set_xdata(self.time_array[0:split_index])
ax1_startup_signal.set_ydata(self.signal_array[0:split_index])
ax1_rest_signal.set_xdata(self.time_array[split_index:])
ax1_rest_signal.set_ydata(self.signal_array[split_index:])
ax2_startup_signal.set_xdata(self.step_time_array[index:])
ax2_startup_signal.set_ydata(self.u95_array[(self.step_time_array.size-index-1)::-1])
ax2_rest_signal.set_xdata(self.step_time_array[:index])
ax2_rest_signal.set_ydata(self.u95_array[:(self.step_time_array.size-index-1):-1])
def onclick(event):
# only act on double click
if not event.dblclick:
return
if event.inaxes == ax2:
index = min(np.searchsorted(self.step_time_array, event.xdata), len(self.step_time_array) - 1)
elif event.inaxes == ax1:
index = min(np.searchsorted(self.step_time_array, self.step_time_array[-1] - event.xdata), len(self.step_time_array) - 1)
else:
return
update_cursor(index)
pyplot.draw()
# Signal input
ax1_vertline = ax1.axvline(0, color='k', lw=0.8, ls='--', alpha=0.6)
ax1_startup_signal = ax1.plot([], [], color='C1', alpha=0.8)[0]
ax1_rest_signal = ax1.plot([], [], color='C0')[0]
# TST plot
hline = ax2.axhline(color='k', lw=0.8, ls='--', alpha=0.6)
vline = ax2.axvline(color='k', lw=0.8, ls='--', alpha=0.6)
ax2_startup_signal = ax2.loglog([], [], color='C1', alpha=0.8)[0]
ax2_rest_signal = ax2.loglog([], [], color='C0')[0]
text = ax1.text(0.05, 1.1, '', transform=ax1.transAxes)
update_cursor(np.argmin(self.u95_array[::-1]))
cid = fig.canvas.mpl_connect('button_press_event', onclick)
ax2.set_ylim(top=np.max(self.u95_array)*2,
bottom= np.min(self.u95_array)/2)
ax2.set_xlim(right=self.step_time_array[-1]*2,
left=self.step_time_array[0]/2)
ax2.set_xlabel("t")
ax2.set_ylabel("95% uncertainty (u95)")
pyplot.tight_layout()
if interactive:
ax1.set_xlabel("t")
ax1.set_ylabel("signal")
ax1.set_xlim(right=self.time_array[-1],
left=self.time_array[0])
ax1.set_ylim(top=max(self.signal_array),
bottom=min(self.signal_array))
if filename is None:
pyplot.show()
else:
print("Figure exported to {}".format(filename))
pyplot.savefig(filename)
if interactive:
return fig, (ax1, ax2)
else:
return fig, ax2
def variance_stats(data):
"""
Calculate variance statistics based on:
Brouwer, J., Tukker, J., & van Rijsbergen, M. (2013). Uncertainty Analysis of Finite Length Measurement Signals. The 3rd International Conference on Advanced Model Measurement Technology for the EU Maritime Industry, February.
Brouwer, J., Tukker, J., & van Rijsbergen, M. (2015). Uncertainty Analysis and Stationarity Test of Finite Length Time Series Signals. 4th International Conference on Advanced Model Measurement Technology for the Maritime Industry.
Brouwer, J., Tukker, J., Klinkenberg, Y., & van Rijsbergen, M. (2019). Random uncertainty of statistical moments in testing: Mean. Ocean Engineering, 182(April), 563–576. https://doi.org/10.1016/j.oceaneng.2019.04.068
Parameters
----------
data : array of float
time signal to get the variance uncertainty from
Returns
----------
mean: float
mean of the signal
u95: float
95% confidence bound (1.96* expected standard deviation of the mean)
"""
N = len(data)
mean = np.mean(data)
# Estimate autocovariance
Sxx = (np.abs(np.fft.fft(data - mean, N*2))**2)/N # autospectral density function
Cxx = np.fft.ifft(Sxx) # autocovariance function (Wiener-Khinchine)
Cxx = np.real(Cxx[0:N]) # crop till Nyquist point
# Variance estimators
iArr = np.abs(range(1-N,N)) # indexing for -T to T integral
var_x_av = 0.5/N*np.sum((1.0 - iArr*1.0/N)*Cxx[iArr]) # variance estimate for mean value (including factor 0.5 for bias correction)
# expanded uncertainty factor for normal distribution with 95% confidence
u95 = 1.96*np.sqrt(var_x_av)
return mean, u95
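# A minimal usage sketch (an illustration, not part of the original module, assuming
# numpy is imported as np here): variance_stats applied to a smoothed white-noise
# signal; the mean should be near zero and u95 shrinks as the signal gets longer.
if __name__ == '__main__':
    np.random.seed(0)
    white = np.random.normal(size=10000)
    demo_signal = np.convolve(white, np.ones(50) / 50, mode='same')  # correlated noise
    demo_mean, demo_u95 = variance_stats(demo_signal)
    print("mean = {:.4f}, u95 = {:.4f}".format(demo_mean, demo_u95))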
|
import json
carroJson = '{"marca": "honda", "modelo": "HRV", "cor": "prata"}' #this is a JSON string
print(carroJson)
carros = json.loads(carroJson) #and here it converts from JSON to a dictionary
print(carros)
print(carros['marca'])
print(carros['modelo'])
for x, y in carros.items():
print(f'{x} = {y}')
print()
print()
print()
carrosDictionary = { #this is a dictionary
"marca": "honda",
"modelo": "HRV",
"cor": "prata"
}
carrosJson = json.dumps(carrosDictionary) #and here it converts from a dictionary to JSON
print(carrosJson)
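# Optional extension (not in the original snippet): json.dumps can pretty-print the
# dictionary with indentation, which is handy when inspecting larger structures.
print(json.dumps(carrosDictionary, indent=4, ensure_ascii=False))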
|
import turtle as t
zel = float(input("What is your Zel: "))
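# draw a square with side length zel: move forward, then turn left 90 degrees, four times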
for i in range(4):
t.fd(zel)
t.lt(90)
t.done()
|
# Copyright (c) 2015 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from collections import OrderedDict
from django.urls import reverse
from silver.tests.api.specs.plan import spec_plan
from silver.tests.api.utils.path import absolute_url
def spec_subscription(subscription, detail=False):
return OrderedDict([
("id", subscription.id),
("url", absolute_url(reverse("subscription-detail", args=[subscription.customer.id,
subscription.id]))),
("plan", (spec_plan(subscription.plan) if detail else
absolute_url(reverse("plan-detail", args=[subscription.plan.id])))),
("customer", absolute_url(reverse("customer-detail", args=[subscription.customer.id]))),
("trial_end", str(subscription.trial_end) if subscription.trial_end else None),
("start_date", str(subscription.start_date) if subscription.start_date else None),
("cancel_date", str(subscription.cancel_date) if subscription.cancel_date else None),
("ended_at", str(subscription.ended_at) if subscription.ended_at else None),
("state", subscription.state),
("reference", subscription.reference),
("updateable_buckets", subscription.updateable_buckets()),
("meta", subscription.meta),
("description", subscription.description)
])
|
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
import sklearn
from sklearn.model_selection import train_test_split
# from sklearn import svm
# from sklearn.linear_model import LinearRegression
import seaborn as sns
# TODO:Loading dataset
df = pd.read_csv('data/boston/housing.data',
                 sep=r'\s+',
                 header=None)
# Setting columns to dataset
lab_CRIM = 'Per capita crime rate by town'
lab_ZN = 'Proportion of residential land zoned for lots over 25,000 sq.ft.'
lab_INDUS = 'Proportion of non-retail business acres per town'
lab_CHAS = 'Charles River dummy variable (1 if tract bounds river; 0 otherwise)'
lab_NOX = ' nitric oxides concentration (parts per 10 million)'
lab_RM = 'Average number of rooms per dwelling'
lab_AGE = 'Proportion of owner-occupied units built prior to 1940'
lab_DIS = 'Weighted distances to five Boston employment centres'
lab_RAD = 'Index of accessibility to radial highways'
lab_TAX = 'Full-value property-tax rate per $10,000'
lab_PTRATIO = 'Pupil-teacher ratio by town'
lab_B = '1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town'
lab_LSTAT = ' % lower status of the population'
lab_MEDV = 'Median value of owner-occupied homes in $1000 '
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
# TODO: Prepare features and target
features = df.drop('MEDV', axis=1)   # explanatory variables
prices = df['MEDV']                  # target: median home value
print(f'Dataset X shape: {features.shape}')
print(f'Dataset y shape: {prices.shape}')
# splitting the dataset into : train and test
x_train, x_test, y_train, y_test = train_test_split(features, prices, test_size=0.4, random_state=0)
#TODO 0.40 means 40% of the dataset
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
# TODO: Print the shapes of the split dataset
print(f"x_train.shape: {x_train.shape}, y_train.shape: {y_train.shape}")
print(f"x_test.shape: {x_test.shape}, y_test.shape: {y_test.shape}")
|
class AudioFile:
def __init__(self, filename):
if not filename.endswith(self._ext):
raise Exception("Invalid file format")
self._filename = filename
def play(self):
raise NotImplementedError("Not implemented")
class MP3File(AudioFile):
    _ext = "mp3"
    def play(self):
        print("Playing {} as mp3".format(self._filename))
class WavFile(AudioFile):
    _ext = "wav"
    def play(self):
        print("Playing {} as wav".format(self._filename))
class OggFile(AudioFile):
    _ext = "ogg"
    def play(self):
        print("Playing {} as ogg".format(self._filename))
|
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract
# DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC, the U.S. Government
# retains certain rights in this software.
import RemoteComputationInterface
import subprocess
import os
import configparser
import threading
import time
class RemoteSlurmComputation(RemoteComputationInterface):
def connect(host, username=None, password=None, token=None):
pass
def get_session(sid):
pass
def disconnect(sid):
pass
def run_command(command):
results = {}
command = command.split(' ')
        p = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True)
        results["output"], results["errors"] = p.communicate()
return results
def generate_job_file(params):
        with open(params["job_file_path"], "w") as job_file:
job_file.write("#!/bin/bash\n\n")
job_file.write("#SBATCH --account=%s\n" % params["account_id"])
job_file.write("#SBATCH --job-name=%s\n" % params["job_name"])
job_file.write("#SBATCH --partition=%s\n\n" % params["partition"])
job_file.write("#SBATCH --nodes=%s\n" % params["number_of_nodes"])
job_file.write("#SBATCH --ntasks-per-node=%s\n" % params["number_of_tasks_per_node"])
job_file.write("#SBATCH --time=%s:%s:%s\n" % (params["time_hours"], params["time_minutes"], params["time_seconds"]))
for c in params["commands"]:
job_file.write("%s\n" % c)
def submit_job(job_file_path):
results = run_command("sbatch %s" % job_file_path)
jid = [int(s) for s in results["output"].split() if s.isdigit()][0]
return jid
def check_job(jid):
results = run_command("checkjob %s" % jid)
status = "UNKNOWN"
        for line in results["output"].splitlines():
if "State" in line:
try:
status = line.split(':')[1].strip().upper()
except Exception as e:
status = "UNKNOWN"
break
return status
def check_job_thread(interval, jid, success, fail, logger, stop_event):
        retry_counter = 5
while True:
try:
status = check_job(jid)
except Exception as e:
logger("Something went wrong while checking on job %s status, trying again..." % jid)
retry_counter = retry_counter - 1
if retry_counter == 0:
fail("Something went wrong while checking on job %s status: check for the generated files when the job completes" % jid)
stop_event.set()
break
status = "ERROR"
time.sleep(60)
pass
logger("Job %s returned with status %s" % (jid, status))
if status == "RUNNING":
retry_counter = 5
if status == "CANCELLED" or status == "REMOVED":
fail("Job %s was cancelled" % jid)
stop_event.set()
break
if status == "VACATED":
fail("Job %s was vacated due to system failure" % jid)
stop_event.set()
break
if status == "REMOVED":
fail("Job %s was removed by the scheduler due to exceeding walltime or violating another policy" % jid)
stop_event.set()
break
if status == "COMPLETED":
success()
stop_event.set()
break
if status == "FAILED" or status == "UNKNOWN" or status == "NOTQUEUED":
retry_counter = retry_counter - 1
if retry_counter == 0:
fail("Job %s has failed" % jid)
stop_event.set()
break
# in case something went wrong and still willing to try, wait for 30
# seconds and try another check
time.sleep(30)
# interval between each of the checks
time.sleep(interval)
def check_job_loop(interval, jid, success, fail, logger):
stop_event = threading.Event()
t = threading.Thread(target=check_job_thread, args=(interval, jid, success, fail, logger, stop_event))
t.start()
def cancel_job(jid):
results = run_command("scancel %s" % jid)
# TODO check results["errors"] for actual errors, if any return False
# instead
return True
def pause_job(jid):
results = run_command("scontrol suspend %s" % jid)
# TODO check results["errors"] for actual errors, if any return False
# instead
return True
def resume_job(jid):
results = run_command("scontrol resume %s" % jid)
# TODO check results["errors"] for actual errors, if any return False
# instead
return True
def get_job_output(path, jid):
f = path + "slurm-%s.out" % jid
if os.path.isFile(f):
results = run_command("cat %s" % f)
else:
return "The file %s does not exist." % f
return results["output"]
def set_slycatrc(config):
rc = os.path.expanduser('~') + ("/.slycatrc")
rc_file = open(rc, "w+")
parser = configparser.RawConfigParser()
for section_key in config:
if not parser.has_section(section_key):
parser.add_section(section_key)
section = config[section_key]
for option_key in section:
if not str(section[option_key]) == "":
parser.set(section_key, option_key, "\"%s\"" % section[option_key])
parser.write(rc_file)
rc_file.close()
# TODO if anything goes wrong return false instead
return True
def get_slycatrc():
results = {}
rc = os.path.expanduser('~') + ("/.slycatrc")
if os.path.isfile(rc):
try:
parser = configparser.RawConfigParser()
parser.read(rc)
config = { section: { key: eval(value) for key, value in parser.items(section) } for section in parser.sections() }
results["ok"] = True
results["config"] = config
except Exception as e:
results["ok"] = False
results["errors"] = "%s" % e
else:
results["ok"] = False
results["errors"] = "The user does not have a .slycatrc file under their home directory"
return results
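# Example of the params dict expected by generate_job_file/submit_job above
# (a sketch with made-up values; the keys mirror the ones read in generate_job_file):
#
#   params = {
#       "job_file_path": "/home/user/job.sh",
#       "account_id": "my-account",
#       "job_name": "demo-job",
#       "partition": "batch",
#       "number_of_nodes": 1,
#       "number_of_tasks_per_node": 4,
#       "time_hours": "01", "time_minutes": "00", "time_seconds": "00",
#       "commands": ["module load python", "python run_analysis.py"],
#   }
#
# generate_job_file(params) writes the SLURM batch script to job_file_path, and
# submit_job(params["job_file_path"]) submits it with sbatch and returns the parsed job id.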
|
import os
import shutil
import json
import logging
from django.conf import settings
from django.core.mail import send_mail, EmailMessage
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from mptt.models import MPTTModel, TreeForeignKey
from mptt.utils import tree_item_iterator
from . import toolkit as utk
from . import global_setting as gs
__author__ = 'jbui'
log = logging.getLogger(__name__)
def user_file_path(instance, filename):
"""
User File Path
:param instance:
:param filename:
:return:
"""
d = timezone.now()
return '{0}/{1}/{2}/{3}/{4}'.format(
instance.user.id,
d.year,
d.month,
d.day,
filename)
class TreeFolder(MPTTModel):
"""
    Tree folder is used to build a file tree structure that replicates what is stored on
    the servers. The user can create (or remove) folders and the tree is kept in sync afterwards.
:type name: folder name
:type parent: parent key
:type user: user model
:type is_locked: If folder/files locked from changes.
:type created: created date
:type modified: modified date
"""
name = models.CharField(max_length=255)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', default=0)
user = models.ForeignKey(settings.AUTH_USER_MODEL)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, null=False, blank=True)
modified = models.DateTimeField(auto_now_add=True, null=False, blank=True)
def __str__(self):
"""
:return:
"""
return 'Folder: %s' % self.name
def save(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
self.modified = timezone.now()
super(TreeFolder, self).save(*args, **kwargs)
class MPTTMeta:
"""
That MPTTMeta class adds some tweaks to django-mptt - in this case, just order_insertion_by.
This indicates the natural ordering of the data in the tree.
"""
order_insertion_by = ['name']
def get_file_type(self):
"""
Return the folder file type.
:return:
"""
if hasattr(self, 'projectfolder'):
return self.projectfolder.get_file_type()
else:
return 'folder'
def is_valid(self, error, **kwargs):
"""
Is valid for the user.
:param error:
:param kwargs:
:return:
"""
valid = True
# Need to make sure that the parent folder key is the same user as the current folder key.
if self.parent:
if self.parent.user != self.user:
valid = False
error['user'] = 'Folder does not belong to user.'
if kwargs.get('path'):
parent = TreeProfile.get_tree_folder(self.user, kwargs.get('path'))
if not parent:
valid = False
error['path'] = '%s is not valid' % kwargs.get('path')
else:
self.parent = parent
name = kwargs.get('name')
if parent and name:
# Path already exists.
for folder in parent.get_children():
if folder.name == name:
                        error['name'] = 'Path already exists: %s%s%s' % (parent.virtual_folder, os.sep, name)
                        valid = False
return valid
def get_path(self):
"""
Get the path of the folder including the home folder.
:return:
"""
path = self.name
new_folder = self.parent
while new_folder:
path = os.path.join(new_folder.name, path)
new_folder = new_folder.parent
return path
@property
def virtual_folder(self):
"""
Return the virtual folder of the path.
:return:
"""
folders = [self.name]
new_folder = self.parent
while new_folder:
folders.append(new_folder.name)
new_folder = new_folder.parent
path = ""
for name in folders[:-1]:
path = os.path.join(name, path)
return path
def create_folder(self):
"""
Create the folder of the path.
"""
path = os.path.join(gs.LOCATION_USER_STORAGE, self.get_path())
if not os.path.isdir(path):
os.mkdir(path)
def delete_folder(self):
"""
Get the path with the delete folder.
"""
path = os.path.join(gs.LOCATION_USER_STORAGE, self.get_path())
if os.path.isdir(path):
shutil.rmtree(path)
self.delete()
class TreeProfile(models.Model):
"""
Tree Profile is used to link with django user. This gives the user the ability to create a MPTT file structure
in the database quickly.
The User Profile model inherits from Django's Model class and linked to the base User class through a one-to-one
relationship.
:type user: user model
:type root_folder: folder root
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
root_folder = models.ForeignKey(TreeFolder, null=True, blank=True, default=True)
def __str__(self):
return self.user.username
def get_children(self):
"""
Get children
:return:
"""
root = self.root_folder
return utk.tree_item_to_dict(root)
def get_jstree(self):
"""
:return:
"""
# { id : 'ajson1', parent : '#', text : 'Simple root node', state: { opened: true} },
# { id : 'ajson2', parent : '#', text : 'Root node 2', state: { opened: true} },
# { id : 'ajson3', parent : 'ajson2', text : 'Child 1', state: { opened: true} },
# { id : 'ajson4', parent : 'ajson2', text : 'Child 2' , state: { opened: true}}
root = self.root_folder
jstree = [dict(
id=root.id,
parent='#',
text=root.name,
state=dict(opened=True)
)]
utk.jstree_item_to_dict(root, jstree)
return jstree
@staticmethod
def get_tree_folder(user, path):
"""
Get the tree folder given the path.
:param user:
:param path:
:return:
"""
folder = None
uprof = TreeProfile.objects.get(user=user)
root_folder = uprof.root_folder
if root_folder:
folder = root_folder
paths = utk.split_path(path)
for folder_name in paths[:]:
if folder_name == '' or folder_name == user.username:
continue
else:
for cur_folder in folder.get_children():
if cur_folder.name == folder_name:
folder = cur_folder
# Found the folder, so we leave the folder.
break
# If we can't find the folder, then we exit loop.
if not folder:
return None
return folder
@property
def root_path(self):
"""
Root path.
:return:
"""
return self.root_folder.name
@property
def root_virtual_path(self):
"""
Root virtual path.
:return:
"""
return os.path.join(self.root_folder.name)
def create_root(self):
"""
Create a root node in the database, and the folder in the storage disk.
"""
self.root_folder = TreeFolder.objects.create(user=self.user, name='root', parent=None)
def delete_root(self):
"""
Delete the root folder with everything underneath.
"""
pass
def create_tree_folder(self, name, parent):
"""
Create tree folder.
:param name: Name of folder
:param parent: Parent tree folder.
:return:
"""
folder = TreeFolder.objects.create(name=name, user=self.user, parent=parent)
folder.save()
return folder
def create_folder(self, path, force_path=True):
"""
Given a path, create a TreeFolder.
:param path: path of the folder to create.
:param force_path: if the intermediary folder does not exists, create it
"""
texts = utk.split_path(path)
new_folder = self.root_folder
folder_path = self.root_path
for folder in texts[1:]:
# Look inside the storage to see if the system has the folder.
folder_found = False
# Get the folders item.
for folder_item in new_folder.get_children():
if folder_item.name == folder:
new_folder = folder_item
if utk.is_dir(folder_path, folder):
folder_path = os.path.join(folder_path, folder)
folder_found = True
else:
if force_path:
folder_path = utk.make_dir(folder_path, folder)
folder_found = True
else:
return False
# Exit loop
break
# If there is no children folder - force the folder create.
if not folder_found:
if force_path:
# Create a new folder.
                    new_folder = TreeFolder.objects.create(name=folder, user=self.user, parent=new_folder, is_locked=False)
folder_path = utk.make_dir(folder_path, folder)
else:
return False
return True
def delete_folder(self, folder):
"""
Delete a folder given a path.
:param folder: path of the folder to delete.
"""
if isinstance(folder, TreeFolder):
            # The Trash model only records the profile and the original parent folder.
            trash = Trash.objects.create(profile=self, prev=folder.parent)
            trash.save()
folder.parent = None
folder.save()
else:
#TODO: Check if it's a primary key
#TODO: Otherwise check if it's a path.
pass
return True
def get_folder(self, path):
"""
Return the tree folder given the path.
:param path:
:return:
"""
folder_names = utk.split_path(path)
folder = self.root_folder
for name in folder_names[1:]:
for folder_child in folder.get_children():
if folder_child.name == name:
folder = folder_child
pass
return folder
def get_path(self, path):
"""
Pass a path and then we parse it to the real path.
:param path:
:return:
"""
texts = utk.split_path(path)
texts[0] = self.root_folder.name
return os.sep.join(texts)
def get_folder_json(self, show_files):
"""
Get the json folder structure.
:param show_files:
:return:
"""
data = {
'data': utk.tree_item_to_dict(self.root_folder, show_files)
}
# Change the first root node label to the current user name.
data['data']['text'] = self.user.username
return json.dumps(data)
class ProjectFolder(TreeFolder):
"""
Project folder.
:type app_type: application type
"""
app_type = models.IntegerField(choices=gs.JOB_TYPE, default=1)
def get_file_type(self):
"""
Return the folder file type.
:return:
"""
return 'project_folder'
def get_path(self):
"""
Get the path of the folder including the home folder.
:return:
"""
path = self.name
new_folder = self.parent
while new_folder:
path = os.path.join(new_folder.name, path)
new_folder = new_folder.parent
return path
class TreeFile(models.Model):
"""
Parent tree file for application type file.
    Files will only exist within project folders, ensuring that there is no subdirectory outside
    of the project folder app.
:type name:
:type user:
:type folder: project folder model
:type is_executable: check if the files is executable.
:type is_locked: folder/files locked from changes.
:type created:
:type modified:
"""
name = models.CharField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User)
folder = models.ForeignKey(ProjectFolder, null=True, blank=True)
is_executable = models.BooleanField(default=False, blank=True)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(null=False, blank=True, auto_now_add=True)
modified = models.DateTimeField(null=False, blank=True, auto_now_add=True)
def __str__(self):
return 'File: %s' % self.name
class Meta:
abstract = True
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
# Update modified date.
self.modified = timezone.now()
super(TreeFile, self).save(force_insert=force_insert, force_update=force_update, using=using,
update_fields=update_fields)
def is_valid(self, error, **kwargs):
return True
@property
def real_path(self):
"""
Find the real path of the code.
:return:
"""
return os.path.join(gs.LOCATION_USER_STORAGE, self.folder.get_path(), self.get_file_name())
@property
def virtual_path(self):
"""
Virtual path.
:return:
"""
return os.path.join(self.folder.get_path(), self.get_file_name())
def create_file(self):
"""
Create a new file.
"""
root_folder = self.folder
def get_file_name(self):
"""
Base class needs to override this method.
OVERRIDE THIS METHOD
:return:
"""
return self.name
def delete_file(self):
pass
class Trash(models.Model):
"""
Trash folder.
:type profile: one-to-many relationship
:type prev: original parent folder
"""
profile = models.ForeignKey(TreeProfile, on_delete=models.CASCADE)
prev = models.ForeignKey(TreeFolder, null=True, blank=True)
class InputFile(TreeFile):
"""
Input File.
:type name:
:type user:
:type folder: project folder model - one input file equals to one project folder
:type is_executable: check if the files is executable.
:type is_locked: folder/files locked from changes.
:type created:
:type modified:
"""
name = models.CharField(max_length=255, null=True, blank=True)
user = models.ForeignKey(User)
folder = models.OneToOneField(ProjectFolder, on_delete=models.CASCADE)
is_executable = models.BooleanField(default=False, blank=True)
is_locked = models.BooleanField(default=False)
created = models.DateTimeField(null=False, blank=True, auto_now_add=True)
modified = models.DateTimeField(null=False, blank=True, auto_now_add=True)
def header(self):
return "#!^%s^!#"
def folder_name(self):
return "%s_%d_%s" % (self.header(), self.id, self.name)
class Meta:
abstract = True
@property
def real_folder(self):
"""
Read folder.
:return:
"""
return os.path.join(gs.LOCATION_USER_STORAGE, self.folder.get_path())
@property
def virtual_folder(self):
"""
Virtual folder
:return:
"""
return os.path.join(self.folder.get_path())
@property
def real_path(self):
"""
Find the real path of the code.
:return:
"""
return os.path.join(self.real_folder, self.get_file_name())
# @property
# def virtual_path(self):
# """
# Virtual path of the input path.
# :return:
# """
# return os.path.join(self.virtual_folder, self.get_file_name())
def create_input_folder(self):
"""
Create input folder.
"""
path = self.real_folder
if not os.path.isdir(path):
os.mkdir(path)
class ImageFile(TreeFile):
"""
Create an image file.
:type file_type:
:type photo:
"""
file_type = models.IntegerField(choices=gs.IMAGE_TYPE, default=-1)
photo = models.ImageField(upload_to=user_file_path)
class GeneralFile(TreeFile):
"""
Create results field for the files that exist in the storage bin.
:type file_type:
:type file:
"""
file_type = models.IntegerField(choices=gs.FILE_TYPE, default=-1)
file = models.FileField(upload_to=user_file_path, default='default.txt')
def set_ext(self, ext_name):
"""
        Determine the file_type from the extension name.
:param ext_name:
"""
for id, name in gs.FILE_TYPE:
if name == ext_name.lower()[1:]:
self.file_type = id
break
def get_file_name(self):
"""
Return the filename with extension.
:return:
"""
return self.name + '.' + gs.FILE_TYPE[self.file_type][1]
def get_file_type(self):
"""
Return file type.
:return:
"""
return gs.FILE_TYPE[self.file_type][1]
def get_mime(self):
"""
Return the mime type for the file.
:return:
"""
return gs.get_mime(self.file_type)
def send_message(self, email):
"""
Send message of the file.
:param email: email address
:return:
"""
subject = 'Subject here'
message = 'Here is the message'
try:
attachment = self.folder
mail = EmailMessage(subject, message, settings.DEFAULT_FROM_EMAIL, [email])
mail.send()
except SystemError:
log.error('Send Message.')
|
"""MMT_STACK Configs."""
from enum import Enum
from typing import Optional
import pydantic
class DeploymentStrategyEnum(str, Enum):
application = 'application'
pipeline = 'pipeline'
class StackSettings(pydantic.BaseSettings):
"""Application settings"""
name: str = "maap-mmt"
stage: str = "production"
owner: Optional[str]
client: Optional[str]
# AWS ECS settings
min_ecs_instances: int = 2
max_ecs_instances: int = 10
task_cpu: int = 1024
task_memory: int = 2048
# Needed for CodePipeline
codestar_connection_arn: Optional[str]
# Necessary for HTTPS load balancer
certificate_arn: str
permissions_boundary_name: Optional[str]
vpc_id: Optional[str]
deployment_strategy: DeploymentStrategyEnum
class Config:
"""model config"""
env_file = ".env"
env_prefix = "MMT_STACK_"
|
import argparse
import logging
from . import main
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
parser = argparse.ArgumentParser(
"Case study of generating a Markov chain with RNN.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
"mode", choices=["train", "sample"],
help="The mode to run. Use `train` to train a new model"
" and `sample` to sample a sequence generated by an"
" existing one.")
    parser.add_argument(
        "save_path", nargs="?", default="chain",
        help="The path to save the training process.")
parser.add_argument(
"--steps", type=int, default=1000,
help="Number of steps to samples.")
parser.add_argument(
"--num-batches", default=1000, type=int,
help="Train on this many batches.")
args = parser.parse_args()
main(**vars(args))
|
import os
LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")
DROPBOX_ACCESS_TOKEN = os.environ["DROPBOX_ACCESS_TOKEN"]
DROPBOX_ROOT_FOLDER = os.environ["DROPBOX_ROOT_FOLDER"]
IFQ_USERNAME = os.environ["IFQ_USERNAME"]
IFQ_PASSWORD = os.environ["IFQ_PASSWORD"]
SLACK_WEBHOOK_URL = os.environ["SLACK_WEBHOOK_URL"]
SLACK_SIGNING_SECRET = os.environ["SLACK_SIGNING_SECRET"]
TOGGL_WORKSPACE_ID = os.environ["TOGGL_WORKSPACE_ID"]
TOGGL_USER_AGENT = os.environ["TOGGL_USER_AGENT"]
TOGGL_API_TOKEN = os.environ["TOGGL_API_TOKEN"]
SNS_COMMANDS_TOPIC_ARN = os.environ["SNS_COMMANDS_TOPIC_ARN"]
|
from binascii import hexlify
from .definitions import COMMON_PROLOGUES
class FunctionCandidate(object):
def __init__(self, binary_info, addr):
self.bitness = binary_info.bitness
self.addr = addr
rel_start_addr = addr - binary_info.base_addr
self.bytes = binary_info.binary[rel_start_addr:rel_start_addr + 5]
self.lang_spec = None
self.call_ref_sources = []
self.finished = False
self.is_symbol = False
self.is_gap_candidate = False
self.is_tailcall = False
        self.alignment = 0
        if addr % 16 == 0:
            self.alignment = 16
        elif addr % 4 == 0:
            self.alignment = 4
self.analysis_aborted = False
self.abortion_reason = ""
self._score = None
self._tfidf_score = None
self._confidence = None
self.function_start_score = None
self.is_stub = False
self.is_initial_candidate = False
def setTfIdf(self, tfidf_score):
self._tfidf_score = tfidf_score
def getTfIdf(self):
return round(self._tfidf_score, 3)
def getConfidence(self):
if self._confidence is None:
# based on evaluation over Andriesse, Bao, and Plohmann data sets
weighted_confidence = 0.298 * (1 if self.hasCommonFunctionStart() else 0)
if self._tfidf_score is not None:
weighted_confidence += (
0.321 * (1 if self._tfidf_score < 0 else 0) +
0.124 * (1 if self._tfidf_score < -2 else 0) +
0.120 * (1 if self._tfidf_score < -4 else 0) +
0.101 * (1 if self._tfidf_score < -1 else 0) +
0.025 * (1 if self._tfidf_score < -8 else 0)
)
# above experiments show that multiple inbound call references are basically always indeed functions
if len(self.call_ref_sources) > 1:
self._confidence = 1.0
# initially recognized candidates are also almost always functions as they follow this heuristic
elif self.is_initial_candidate:
self._confidence = round(0.5 + 0.5 * (weighted_confidence), 3)
else:
self._confidence = round(weighted_confidence, 3)
return self._confidence
def hasCommonFunctionStart(self):
for length in sorted([int(l) for l in COMMON_PROLOGUES], reverse=True):
byte_sequence = self.bytes[:length]
if byte_sequence in COMMON_PROLOGUES["%d" % length][self.bitness]:
return True
return False
def getFunctionStartScore(self):
if self.function_start_score is None:
for length in sorted([int(l) for l in COMMON_PROLOGUES], reverse=True):
byte_sequence = self.bytes[:length]
if byte_sequence in COMMON_PROLOGUES["%d" % length][self.bitness]:
self.function_start_score = COMMON_PROLOGUES["%d" % length][self.bitness][byte_sequence]
break
self.function_start_score = self.function_start_score if self.function_start_score else 0
return self.function_start_score
def addCallRef(self, source_addr):
if source_addr not in self.call_ref_sources:
self.call_ref_sources.append(source_addr)
self._score = None
def removeCallRefs(self, source_addrs):
for addr in source_addrs:
if addr in self.call_ref_sources:
self.call_ref_sources.remove(addr)
self._score = None
def setIsTailcallCandidate(self, is_tailcall):
self.is_tailcall = is_tailcall
def setInitialCandidate(self, initial):
self.is_initial_candidate = initial
def setIsGapCandidate(self, gap):
self.is_gap_candidate = gap
def setLanguageSpec(self, lang_spec):
self.lang_spec = lang_spec
self._score = None
def setIsSymbol(self, is_symbol):
self.is_symbol = is_symbol
self._score = None
def setIsStub(self, is_stub):
self.is_stub = is_stub
self._score = None
def setAnalysisAborted(self, reason):
self.finished = True
self.analysis_aborted = True
self.abortion_reason = reason
def setAnalysisCompleted(self):
self.finished = True
def isFinished(self):
return self.finished
def calculateScore(self):
score = 0
score += 10000 if self.is_symbol else 0
score += 1000 if self.is_stub else 0
score += 100 if self.lang_spec is not None else 0
score += self.getFunctionStartScore()
num_call_refs = len(self.call_ref_sources)
if num_call_refs >= 10:
call_ref_score = 10 + int(num_call_refs / 10)
else:
call_ref_score = num_call_refs
score += 10 * call_ref_score
score += 1 if self.alignment else 0
return score
def getScore(self):
if self._score is None:
self._score = self.calculateScore()
return self._score
def __lt__(self, other):
own_score = self.getScore()
other_score = other.getScore()
if own_score == other_score:
return self.addr > other.addr
return own_score < other_score
def getCharacteristics(self):
is_aligned = "a" if self.alignment else "-"
is_finished = "f" if self.finished else "-"
is_gap = "g" if self.is_gap_candidate else "-"
is_initial = "i" if self.is_initial_candidate else "-"
is_lang_spec = "l" if self.lang_spec is not None else "-"
is_prologue = "p" if self.hasCommonFunctionStart() else "-"
is_ref = "r" if self.call_ref_sources else "-"
is_symbol = "s" if self.is_symbol else "-"
is_tailcall = "t" if self.is_tailcall else "-"
is_stub = "u" if self.is_stub else "-"
is_aborted = "x" if self.analysis_aborted else "-"
characteristics = is_initial + is_symbol + is_stub + is_aligned + is_lang_spec + is_prologue + is_ref + is_tailcall + is_gap + is_finished + is_aborted
return characteristics
def __str__(self):
characteristics = self.getCharacteristics()
prologue_score = "%d" % self.getFunctionStartScore()
ref_summary = "{}".format(len(self.call_ref_sources)) if len(self.call_ref_sources) != 1 else "{}: 0x{:x}".format(len(self.call_ref_sources), self.call_ref_sources[0])
return "0x{:x}: {} -> {} (total score: {}), inref: {} | {}".format(self.addr, hexlify(self.bytes), prologue_score, self.getScore(), ref_summary, characteristics)
def toJson(self):
return {
"addr": self.addr,
"bytes": self.bytes.hex(),
"alignment": self.alignment,
"reason": self.abortion_reason,
"num_refs": len(self.call_ref_sources),
"characteristics": self.getCharacteristics(),
"prologue_score": self.getFunctionStartScore(),
"score": self.calculateScore(),
"confidence": self.getConfidence()
}
|
"""
lec 8 , functions
"""
def cal_pi(m, n):
    pass  # stub left over from the lecture; the worked examples below are standalone
#def my_function(a,b=0):
# print('a is ',a)
# print('b is ',b)
# return a + b
#print(my_function(a=1))
#ex1
#def calculate_abs(a):
# if type (a) is str:
# return ('wrong data type')
# if a > 0:
# return a
# if a < 0:
# return -a
#print(calculate_abs('a'))
#ex2
#def function_1(m,n):
#
# result=0
# for i in range (n,m+1):
# result=result+i
# return result
#print(function_1(m=5,n=3))
#ex3
#def function_2(m,n):
# result=1
# for i in range (n,m+1):
# result=result*i
# print(i)
# print(result)
#return result
#print(function_2(m=5,n=3))
#ex4
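# cal_f(m) computes m! recursively; cal_pr(m, n) = m!/(m-n)! below counts the
# ordered selections (permutations) of n items out of m.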
def cal_f(m):
if m == 0:
return(1)
else:
return m * cal_f(m-1)
print(cal_f(3))
def cal_pr(m,n):
return(cal_f(m)/cal_f(m-n))
print(cal_pr(5, 3))
|
#!/usr/bin/env python
from datetime import datetime, timedelta
from typing import List
from .io import load, loadkeogram, download # noqa: F401
def datetimerange(start: datetime, stop: datetime, step: timedelta) -> List[datetime]:
return [start + i * step for i in range((stop - start) // step)]
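# A small usage sketch: datetimerange(datetime(2020, 1, 1), datetime(2020, 1, 2),
# timedelta(hours=6)) returns the four timestamps 00:00, 06:00, 12:00 and 18:00;
# the stop value is excluded, mirroring range().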
|
import time
import random
def timeit(func, *args):
t1 = time.time()
ret = func(*args)
cost_time = (time.time() - t1) * 1000
print("cost time: %sms" % cost_time)
try:
    randint_wrap = random.randint
except AttributeError:
    # micropython's random module has no randint; emulate an inclusive range
    def randint_wrap(a, b):
        return a + random.getrandbits(32) % (b - a + 1)
def main(n):
print("main: n=%d" % n)
for i in range(n):
randint_wrap(1,1000)
def test_range(n):
print("test_range: n=%d" % n)
for i in range(n):
pass
timeit(test_range, 100000)
timeit(main, 100000)
|
"""URL configuration for djoser reset password functionality"""
from django.urls import path
from mitol.authentication.views.djoser_views import CustomDjoserAPIView
urlpatterns = [
path(
"password_reset/",
CustomDjoserAPIView.as_view({"post": "reset_password"}),
name="password-reset-api",
),
path(
"password_reset/confirm/",
CustomDjoserAPIView.as_view({"post": "reset_password_confirm"}),
name="password-reset-confirm-api",
),
path(
"set_password/",
CustomDjoserAPIView.as_view({"post": "set_password"}),
name="set-password-api",
),
]
|
# Copyright (C) 2013 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
__author__ = "David Rusk <drusk@uvic.ca>"
import logging
import os
import time
from osstrends import pipeline
def current_dir():
return os.path.dirname(os.path.abspath(__file__))
def main():
logname = "datapipeline.log"
    log_dir = current_dir()
    fullpath = os.path.join(log_dir, logname)
    # Keep the old log files.
    if os.path.exists(fullpath):
        os.rename(fullpath,
                  os.path.join(log_dir, "%s.%d" % (logname, int(time.time()))))
logging.basicConfig(filename=logname, level=logging.INFO,
format="%(levelname)s %(asctime)-15s %(message)s")
pipeline.execute()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 15 02:04:24 2018
@author: Kundan
"""
import matplotlib.pyplot as plt
import csv
x = []
y = []
with open('dataset.txt','r') as csvfile:
plots = csv.reader(csvfile, delimiter=' ')
for row in plots:
x.append(float(row[0]))
y.append(float(row[1]))
plt.scatter(x,y)
#plt.scatter(x, color='b')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
|
"""Create caches of read-prep steps for Virtool analysis workflows."""
# pylint: disable=redefined-outer-name
# pylint: disable=too-many-arguments
import shutil
from pathlib import Path
from typing import Dict, Any, Optional
import virtool_core.caches.db
from virtool_workflow import fixture
from virtool_workflow.analysis import utils
from virtool_workflow.analysis.analysis_info import AnalysisArguments
from virtool_workflow.execute import FunctionExecutor
from virtool_workflow.storage.utils import copy_paths
from virtool_workflow_runtime.db import VirtoolDatabase
from virtool_workflow_runtime.db.fixtures import Collection
TRIMMING_PROGRAM = "skewer-0.2.2"
@fixture
async def cache_document(
trimming_parameters: Dict[str, Any],
sample_id: str,
caches: Collection,
) -> Optional[Dict[str, Any]]:
"""Fetch the cache document for a given sample if it exists."""
cache_document = await caches.find_one({
"hash": virtool_core.caches.db.calculate_cache_hash(trimming_parameters),
"missing": False,
"program": TRIMMING_PROGRAM,
"sample.id": sample_id,
})
if cache_document:
cache_document["id"] = cache_document["_id"]
return cache_document
async def fetch_cache(
cache_document: Dict[str, Any],
cache_path: Path,
reads_path: Path,
run_in_executor: FunctionExecutor
):
"""Copy cached read files to the reads_path."""
cached_read_paths = utils.make_read_paths(
reads_dir_path=cache_path/cache_document["_id"],
paired=cache_document["paired"]
)
await copy_paths(
{path: reads_path/path.name for path in cached_read_paths}.items(),
run_in_executor
)
async def create_cache_document(
database: VirtoolDatabase,
analysis_args: AnalysisArguments,
trimming_parameters: Dict[str, Any]
):
"""
Create a new cache document in the database.
This document will be used to check for the presence of cached prepared reads.
:param database: The Virtool database object
:param analysis_args: The AnalysisArguments fixture
:param trimming_parameters: The trimming parameters (see virtool_workflow.analysis.trimming)
:return:
"""
cache = await virtool_core.caches.db.create(
database,
analysis_args.sample_id,
trimming_parameters,
analysis_args.paired
)
await database["analyses"].update_one({"_id": analysis_args.analysis_id}, {
"$set": {
"cache": {
"id": cache["id"]
}
}
})
return cache
async def create_cache(
fastq: Dict[str, Any],
database: VirtoolDatabase,
analysis_args: AnalysisArguments,
trimming_parameters: Dict[str, Any],
trimming_output_path: Path,
cache_path: Path,
):
"""Create a new cache once the trimming program and fastqc have been run."""
cache = await create_cache_document(database, analysis_args, trimming_parameters)
await database["caches"].update_one({"_id": cache["id"]}, {"$set": {
"quality": fastq
}
})
shutil.copytree(trimming_output_path, cache_path/cache["id"])
|
import os
def get_specs_dir():
return os.path.join(get_data_dir(), 'cs251tk', 'data')
def get_data_dir():
return os.getenv('XDG_DATA_HOME', os.path.join(os.path.expanduser('~'), '.local', 'share'))
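# With XDG_DATA_HOME unset this resolves to ~/.local/share, so the specs end up
# in ~/.local/share/cs251tk/data.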
|
"""This module contains the general information for ConfigSearchResult ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class ConfigSearchResultConsts():
IS_RENAMEABLE_FALSE = "false"
IS_RENAMEABLE_NO = "no"
IS_RENAMEABLE_TRUE = "true"
IS_RENAMEABLE_YES = "yes"
PARENT_ORG_TYPE_DOMAIN_ORG = "DomainOrg"
PARENT_ORG_TYPE_ORG_ORG = "OrgOrg"
PARENT_ORG_TYPE_UNSPECIFIED = "Unspecified"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
POLICY_OWNER_UNSPECIFIED = "unspecified"
class ConfigSearchResult(ManagedObject):
"""This is ConfigSearchResult class."""
consts = ConfigSearchResultConsts()
naming_props = set([u'convertedDn', u'domainId'])
mo_meta = MoMeta("ConfigSearchResult", "configSearchResult", "policy-[converted_dn]-domain-[domain_id]", VersionMeta.Version112a, "InputOutput", 0x3f, [], ["read-only"], [], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version112a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"converted_dn": MoPropertyMeta("converted_dn", "convertedDn", "string", VersionMeta.Version112a, MoPropertyMeta.NAMING, 0x2, 1, 510, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"domain_group": MoPropertyMeta("domain_group", "domainGroup", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"domain_id": MoPropertyMeta("domain_id", "domainId", "uint", VersionMeta.Version112a, MoPropertyMeta.NAMING, 0x8, None, None, None, [], []),
"domain_name": MoPropertyMeta("domain_name", "domainName", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"is_renameable": MoPropertyMeta("is_renameable", "isRenameable", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"object_type": MoPropertyMeta("object_type", "objectType", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"parent_org_type": MoPropertyMeta("parent_org_type", "parentOrgType", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["DomainOrg", "OrgOrg", "Unspecified"], []),
"policy_dn": MoPropertyMeta("policy_dn", "policyDn", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version112a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version112a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"convertedDn": "converted_dn",
"descr": "descr",
"dn": "dn",
"domainGroup": "domain_group",
"domainId": "domain_id",
"domainName": "domain_name",
"isRenameable": "is_renameable",
"name": "name",
"objectType": "object_type",
"parentOrgType": "parent_org_type",
"policyDn": "policy_dn",
"policyOwner": "policy_owner",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, converted_dn, domain_id, **kwargs):
self._dirty_mask = 0
self.converted_dn = converted_dn
self.domain_id = domain_id
self.child_action = None
self.descr = None
self.domain_group = None
self.domain_name = None
self.is_renameable = None
self.name = None
self.object_type = None
self.parent_org_type = None
self.policy_dn = None
self.policy_owner = None
self.status = None
ManagedObject.__init__(self, "ConfigSearchResult", parent_mo_or_dn, **kwargs)
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2016, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A model that posts that all columns are independently Gaussian with
unknown parameters.
The parameters are taken from the normal and inverse-gamma conjuate
prior.
This module implements the :class:`bayeslite.IBayesDBMetamodel`
interface for the NIG-Normal model.
"""
import math
import random
import bayeslite.core as core
import bayeslite.metamodel as metamodel
from bayeslite.exception import BQLError
from bayeslite.math_util import logmeanexp
from bayeslite.metamodel import bayesdb_metamodel_version
from bayeslite.sqlite3_util import sqlite3_quote_name
from bayeslite.util import cursor_value
nig_normal_schema_1 = '''
INSERT INTO bayesdb_metamodel (name, version) VALUES ('nig_normal', 1);
CREATE TABLE bayesdb_nig_normal_column (
generator_id INTEGER NOT NULL REFERENCES bayesdb_generator(id),
colno INTEGER NOT NULL,
count INTEGER NOT NULL,
sum REAL NOT NULL,
sumsq REAL NOT NULL,
PRIMARY KEY(generator_id, colno),
FOREIGN KEY(generator_id, colno)
REFERENCES bayesdb_generator_column(generator_id, colno)
);
CREATE TABLE bayesdb_nig_normal_model (
generator_id INTEGER NOT NULL REFERENCES bayesdb_generator(id),
colno INTEGER NOT NULL,
modelno INTEGER NOT NULL,
mu REAL NOT NULL,
sigma REAL NOT NULL,
PRIMARY KEY(generator_id, colno, modelno),
FOREIGN KEY(generator_id, modelno)
REFERENCES bayesdb_generator_model(generator_id, modelno),
FOREIGN KEY(generator_id, colno)
REFERENCES bayesdb_nig_normal_column(generator_id, colno)
);
'''
nig_normal_schema_2 = '''
UPDATE bayesdb_metamodel SET version = 2 WHERE name = 'nig_normal';
CREATE TABLE bayesdb_nig_normal_deviation (
population_id INTEGER NOT NULL REFERENCES bayesdb_population(id),
generator_id INTEGER NOT NULL REFERENCES bayesdb_generator(id),
deviation_colno INTEGER NOT NULL,
observed_colno INTEGER NOT NULL,
PRIMARY KEY(generator_id, deviation_colno),
FOREIGN KEY(generator_id, deviation_colno)
REFERENCES bayesdb_variable(generator_id, colno),
FOREIGN KEY(population_id, observed_colno)
REFERENCES bayesdb_variable(population_id, colno)
);
'''
class NIGNormalMetamodel(metamodel.IBayesDBMetamodel):
"""Normal-Inverse-Gamma-Normal metamodel for BayesDB.
The metamodel is named ``nig_normal`` in BQL::
CREATE GENERATOR t_nig FOR t USING nig_normal(..)
    Internally, the NIG-Normal metamodel adds SQL tables to the
    database with names that begin with ``bayesdb_nig_normal_``.
"""
def __init__(self, hypers=(0, 1, 1, 1), seed=0):
self.hypers = hypers
self.prng = random.Random(seed)
def name(self): return 'nig_normal'
def register(self, bdb):
with bdb.savepoint():
version = bayesdb_metamodel_version(bdb, self.name())
if version is None:
bdb.sql_execute(nig_normal_schema_1)
version = 1
if version == 1:
bdb.sql_execute(nig_normal_schema_2)
version = 2
if version != 2:
raise BQLError(bdb, 'NIG-Normal already installed'
' with unknown schema version: %d' % (version,))
def create_generator(self, bdb, generator_id, schema, **kwargs):
# XXX Do something with the schema.
insert_column_sql = '''
INSERT INTO bayesdb_nig_normal_column
(generator_id, colno, count, sum, sumsq)
VALUES (:generator_id, :colno, :count, :sum, :sumsq)
'''
population_id = core.bayesdb_generator_population(bdb, generator_id)
table = core.bayesdb_population_table(bdb, population_id)
for colno in core.bayesdb_variable_numbers(bdb, population_id, None):
column_name = core.bayesdb_variable_name(bdb, population_id, colno)
stattype = core.bayesdb_variable_stattype(
bdb, population_id, colno)
if not stattype == 'numerical':
raise BQLError(bdb, 'NIG-Normal only supports'
' numerical columns, but %s is %s'
% (repr(column_name), repr(stattype)))
(count, xsum, sumsq) = data_suff_stats(bdb, table, column_name)
bdb.sql_execute(insert_column_sql, {
'generator_id': generator_id,
'colno': colno,
'count': count,
'sum': xsum,
'sumsq': sumsq,
})
# XXX Make the schema a little more flexible.
if schema == [[]]:
return
for clause in schema:
if not (len(clause) == 3 and \
isinstance(clause[0], str) and \
clause[1] == 'deviation' and \
isinstance(clause[2], list) and \
len(clause[2]) == 1 and \
isinstance(clause[2][0], str)):
raise BQLError(bdb, 'Invalid nig_normal clause: %r' %
(clause,))
dev_var = clause[0]
obs_var = clause[2][0]
if not core.bayesdb_has_variable(bdb, population_id, None,
obs_var):
raise BQLError(bdb, 'No such variable: %r' % (obs_var,))
obs_colno = core.bayesdb_variable_number(bdb, population_id, None,
obs_var)
dev_colno = core.bayesdb_add_latent(bdb, population_id,
generator_id, dev_var, 'numerical')
bdb.sql_execute('''
INSERT INTO bayesdb_nig_normal_deviation
(population_id, generator_id, deviation_colno,
observed_colno)
VALUES (?, ?, ?, ?)
''', (population_id, generator_id, dev_colno, obs_colno))
def drop_generator(self, bdb, generator_id):
with bdb.savepoint():
self.drop_models(bdb, generator_id)
delete_columns_sql = '''
DELETE FROM bayesdb_nig_normal_column
WHERE generator_id = ?
'''
bdb.sql_execute(delete_columns_sql, (generator_id,))
delete_deviations_sql = '''
DELETE FROM bayesdb_nig_normal_deviation
WHERE generator_id = ?
'''
bdb.sql_execute(delete_deviations_sql, (generator_id,))
def initialize_models(self, bdb, generator_id, modelnos):
insert_sample_sql = '''
INSERT INTO bayesdb_nig_normal_model
(generator_id, colno, modelno, mu, sigma)
VALUES (:generator_id, :colno, :modelno, :mu, :sigma)
'''
self._set_models(bdb, generator_id, modelnos, insert_sample_sql)
def drop_models(self, bdb, generator_id, modelnos=None):
with bdb.savepoint():
if modelnos is None:
delete_models_sql = '''
DELETE FROM bayesdb_nig_normal_model
WHERE generator_id = ?
'''
bdb.sql_execute(delete_models_sql, (generator_id,))
else:
delete_models_sql = '''
DELETE FROM bayesdb_nig_normal_model
WHERE generator_id = ? AND modelno = ?
'''
for modelno in modelnos:
bdb.sql_execute(delete_models_sql, (generator_id, modelno))
def analyze_models(self, bdb, generator_id, modelnos=None, iterations=1,
max_seconds=None, ckpt_iterations=None, ckpt_seconds=None,
program=None):
if program is not None:
# XXX
raise NotImplementedError('nig_normal analysis programs')
# Ignore analysis timing control, because one step reaches the
# posterior anyway.
# NOTE: Does not update the model iteration count. This would
# manifest as failing to count the number of inference
# iterations taken. Since inference converges in one step,
# this consists of failing to track the metadata of whether
# that one step was done or not.
update_sample_sql = '''
UPDATE bayesdb_nig_normal_model SET mu = :mu, sigma = :sigma
WHERE generator_id = :generator_id
AND colno = :colno
AND modelno = :modelno
'''
if modelnos is None:
# This assumes that models x columns forms a dense
# rectangle in the database, which it should.
modelnos = self._modelnos(bdb, generator_id)
self._set_models(bdb, generator_id, modelnos, update_sample_sql)
def _set_models(self, bdb, generator_id, modelnos, sql):
collect_stats_sql = '''
SELECT colno, count, sum, sumsq FROM
bayesdb_nig_normal_column WHERE generator_id = ?
'''
with bdb.savepoint():
cursor = bdb.sql_execute(collect_stats_sql, (generator_id,))
for (colno, count, xsum, sumsq) in cursor:
stats = (count, xsum, sumsq)
for modelno in modelnos:
(mu, sig) = self._gibbs_step_params(self.hypers, stats)
bdb.sql_execute(sql, {
'generator_id': generator_id,
'colno': colno,
'modelno': modelno,
'mu': mu,
'sigma': sig,
})
def _modelnos(self, bdb, generator_id):
modelnos_sql = '''
SELECT DISTINCT modelno FROM bayesdb_nig_normal_model
WHERE generator_id = ?
'''
with bdb.savepoint():
return [modelno for (modelno,) in bdb.sql_execute(modelnos_sql,
(generator_id,))]
def simulate_joint(
self, bdb, generator_id, modelnos, rowid, targets, _constraints,
num_samples=1, accuracy=None):
# Note: The constraints are irrelevant because columns are
# independent in the true distribution (except in the case of
# shared, unknown hyperparameters), and cells in a column are
# independent conditioned on the latent parameters mu and
# sigma. This method does not expose the inter-column
# dependence induced by approximating the true distribution
# with a finite number of full-table models.
with bdb.savepoint():
if modelnos is None:
modelnos = self._modelnos(bdb, generator_id)
modelno = self.prng.choice(modelnos)
(mus, sigmas) = self._model_mus_sigmas(bdb, generator_id, modelno)
return [[self._simulate_1(bdb, generator_id, mus, sigmas, colno)
for colno in targets]
for _ in range(num_samples)]
def _simulate_1(self, bdb, generator_id, mus, sigmas, colno):
if colno < 0:
dev_colno = colno
cursor = bdb.sql_execute('''
SELECT observed_colno FROM bayesdb_nig_normal_deviation
WHERE generator_id = ? AND deviation_colno = ?
''', (generator_id, dev_colno))
obs_colno = cursor_value(cursor)
return self.prng.gauss(0, sigmas[obs_colno])
else:
return self.prng.gauss(mus[colno], sigmas[colno])
def _model_mus_sigmas(self, bdb, generator_id, modelno):
# TODO Filter in the database by the columns I will actually use?
# TODO Cache the results using bdb.cache?
params_sql = '''
SELECT colno, mu, sigma FROM bayesdb_nig_normal_model
WHERE generator_id = ? AND modelno = ?
'''
cursor = bdb.sql_execute(params_sql, (generator_id, modelno))
mus = {}
sigmas = {}
for (colno, mu, sigma) in cursor:
assert colno not in mus
mus[colno] = mu
assert colno not in sigmas
sigmas[colno] = sigma
return (mus, sigmas)
def logpdf_joint(self, bdb, generator_id, modelnos, rowid, targets,
_constraints,):
# Note: The constraints are irrelevant for the same reason as
# in simulate_joint.
(all_mus, all_sigmas) = self._all_mus_sigmas(bdb, generator_id)
def model_log_pdf(modelno):
mus = all_mus[modelno]
sigmas = all_sigmas[modelno]
            def logpdf_1(target):
                (colno, x) = target
                return self._logpdf_1(bdb, generator_id, mus, sigmas, colno, x)
return sum(map(logpdf_1, targets))
# XXX Ignore modelnos and aggregate over all of them.
modelwise = [model_log_pdf(m) for m in sorted(all_mus.keys())]
return logmeanexp(modelwise)
def _logpdf_1(self, bdb, generator_id, mus, sigmas, colno, x):
if colno < 0:
dev_colno = colno
cursor = bdb.sql_execute('''
SELECT observed_colno FROM bayesdb_nig_normal_deviation
WHERE generator_id = ? AND deviation_colno = ?
''', (generator_id, dev_colno))
obs_colno = cursor_value(cursor)
return logpdf_gaussian(x, 0, sigmas[obs_colno])
else:
return logpdf_gaussian(x, mus[colno], sigmas[colno])
def _all_mus_sigmas(self, bdb, generator_id):
params_sql = '''
SELECT colno, modelno, mu, sigma FROM bayesdb_nig_normal_model
WHERE generator_id = :generator_id
''' # TODO Filter in the database by the columns I will actually use?
with bdb.savepoint():
cursor = bdb.sql_execute(params_sql, (generator_id,))
all_mus = {}
all_sigmas = {}
for (colno, modelno, mu, sigma) in cursor:
if modelno not in all_mus:
all_mus[modelno] = {}
if modelno not in all_sigmas:
all_sigmas[modelno] = {}
assert colno not in all_mus[modelno]
all_mus[modelno][colno] = mu
assert colno not in all_sigmas[modelno]
all_sigmas[modelno][colno] = sigma
return (all_mus, all_sigmas)
def column_dependence_probability(self, bdb, generator_id, modelnos, colno0,
colno1):
# XXX Fix me!
return 0
def column_mutual_information(self, bdb, generator_id, modelnos, colnos0,
colnos1, constraints, numsamples):
# XXX Fix me!
return [0]
def row_similarity(self, bdb, generator_id, modelnos, rowid, target_rowid,
colnos):
# XXX Fix me!
return 0
def predict_confidence(self, bdb, generator_id, modelnos, rowid, colno,
numsamples=None):
if colno < 0:
return (0, 1) # deviation of mode from mean is zero
if modelnos is None:
modelnos = self._modelnos(bdb, generator_id)
modelno = self.prng.choice(modelnos)
mus, _sigmas = self._model_mus_sigmas(bdb, generator_id, modelno)
return (mus[colno], 1.)
def insert(self, bdb, generator_id, item):
(_, colno, value) = item
# Theoretically, I am supposed to detect and report attempted
# repeat observations of already-observed cells, but since
# there is no per-row latent structure, I will just treat all
# row ids as fresh and not keep track of it.
update_sql = '''
UPDATE bayesdb_nig_normal_column
SET count = count + 1, sum = sum + :x, sumsq = sumsq + :xsq
WHERE generator_id = :generator_id
AND colno = :colno
'''
# This is Venture's SuffNormalSPAux.incorporate
with bdb.savepoint():
bdb.sql_execute(update_sql, {
'generator_id': generator_id,
'colno': colno,
'x': value,
'xsq': value * value
})
def remove(self, bdb, generator_id, item):
(_, colno, value) = item
update_sql = '''
UPDATE bayesdb_nig_normal_column
SET count = count - 1, sum = sum - :x, sumsq = sumsq - :xsq
WHERE generator_id = :generator_id
AND colno = :colno
'''
# This is Venture's SuffNormalSPAux.unincorporate
with bdb.savepoint():
bdb.sql_execute(update_sql, {
'generator_id': generator_id,
'colno': colno,
'x': value,
'xsq': value * value
})
def infer(self, *args): return self.analyze_models(*args)
def _gibbs_step_params(self, hypers, stats):
# This is Venture's UNigNormalAAALKernel.simulate packaged differently.
(mn, Vn, an, bn) = posterior_hypers(hypers, stats)
new_var = self._inv_gamma(an, bn)
new_mu = self.prng.gauss(mn, math.sqrt(new_var*Vn))
ans = (new_mu, math.sqrt(new_var))
return ans
def _inv_gamma(self, shape, scale):
return float(scale) / self.prng.gammavariate(shape, 1.0)
HALF_LOG2PI = 0.5 * math.log(2 * math.pi)
def logpdf_gaussian(x, mu, sigma):
deviation = x - mu
ans = - math.log(sigma) - HALF_LOG2PI \
- (0.5 * deviation * deviation / (sigma * sigma))
return ans
def data_suff_stats(bdb, table, column_name):
# This is incorporate/remove in bulk, reading from the database.
qt = sqlite3_quote_name(table)
qcn = sqlite3_quote_name(column_name)
# TODO Do this computation inside the database?
gather_data_sql = '''
SELECT %s FROM %s
''' % (qcn, qt)
cursor = bdb.sql_execute(gather_data_sql)
count = 0
xsum = 0
sumsq = 0
for (item,) in cursor:
count += 1
xsum += item
sumsq += item * item
return (count, xsum, sumsq)
def posterior_hypers(hypers, stats):
# This is Venture's CNigNormalOutputPSP.posteriorHypersNumeric
(m, V, a, b) = hypers
[ctN, xsum, xsumsq] = stats
Vn = 1 / (1.0/V + ctN)
mn = Vn*((1.0/V)*m + xsum)
an = a + ctN / 2.0
bn = b + 0.5*(m**2/float(V) + xsumsq - mn**2/Vn)
ans = (mn, Vn, an, bn)
return ans
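# A minimal sanity-check sketch of the conjugate update above (an illustration,
# not part of the original module): with hypers (m, V, a, b) = (0, 1, 1, 1) and
# sufficient statistics (count, sum, sum of squares) = (3, 6.0, 14.0) for the
# observations [1.0, 2.0, 3.0], the posterior mean mn = 1.5 sits between the
# prior mean 0 and the sample mean 2.0, as expected for a shrinkage estimate.
if __name__ == '__main__':
    print(posterior_hypers((0, 1, 1, 1), (3, 6.0, 14.0)))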
|
"""
MacroecoDesktop script for making standalone executable
"""
import sys as _sys
from macroeco import desktop
if len(_sys.argv) > 1:
desktop(_sys.argv[1])
else:
desktop()
|
from django.contrib import admin
from .models import OrderCampProduct
admin.site.register(OrderCampProduct)
|