start = int(input('enter lower limit of the range='))
end = int(input('enter upper limit of the range='))
for i in range(start, end+1):
if i>1:
for j in range(2,i):
            if i % j == 0:
break
else:
print(i)
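# Added sketch (not part of the original snippet): trial division only needs to
# test divisors up to the square root of n, which is much faster for large ranges.
def is_prime(n):
    """Return True if n is prime, checking divisors only up to sqrt(n)."""
    if n < 2:
        return False
    j = 2
    while j * j <= n:
        if n % j == 0:
            return False
        j += 1
    return True
# e.g. [n for n in range(start, end + 1) if is_prime(n)] reproduces the loop above.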
|
# plot a histogram of scores for cse 473 project 1
# author: nicholas ruhland
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors
import sys
import os
import seaborn as sns
sns.set()
if len(sys.argv) != 2:
    print('Usage: python plot_scores.py <student code output directory>')
    sys.exit(1)
_, d = sys.argv
if d[-1] != '/':
d += '/'
# Read in scores from student output files
all_txt = sorted(i for i in os.listdir(d) if i.endswith('.txt'))
scores = {}
for txt in all_txt:
with open(d + txt, 'r') as f:
data = f.read()
if 'expanded nodes:' in data:
remainder=data[data.index('expanded nodes:')+len('expanded nodes:'):]
output_val = remainder.split()[0]
if output_val.isdigit():
scores[txt[:-4]] = int(output_val)
best_score = min(scores.values())
best_names = [name for name, score in scores.items() if score == best_score]
print('Best score students:', best_names)
# Create histogram plot of scores
x = list(scores.values())
n_bins=20
N, bins, patches = plt.hist(x, bins=n_bins)
fracs = N.astype(float) / N.max()
norm = colors.Normalize(fracs.min(), fracs.max())
for thisfrac, thispatch in zip(fracs, patches):
color = plt.cm.Reds(norm(thisfrac))
thispatch.set_facecolor(color)
plt.title('Project 1 - Question 7 expanded nodes')
plt.show()
|
from flask import Flask, jsonify, request
app = Flask(__name__) # Gives a unique name
stores = [
{
'name': 'MyStore',
'items': [
{
'name': 'My Item',
'price': 15.99
}
]
}
]
"""
@app.route('/') # Route of the endpoint 'http://www.google.com/'
def home():
return "Hello, world!"
"""
# POST /store. data: {name: }
@app.route('/store', methods=['POST'])
def create_store():
request_data = request.get_json()
new_store = {'name': request_data['name'], 'items': []}
stores.append(new_store)
return jsonify(new_store)
# GET /store/<string:name>
@app.route('/store/<string:name>')
def get_store(name):
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify(store)
# GET /store
@app.route('/store')
def get_stores():
return jsonify({'stores': stores})
# POST /store/<string:name>/item
@app.route('/store/<string:name>/item', methods=['POST'])
def create_item_in_store(name):
request_data = request.get_json()
store = list(filter(lambda store: store['name'] == name, stores))
new_item = {'name': request_data['name'], 'price': request_data['price']}
if store == []:
return jsonify({'message': 'store not found'})
store[0]['items'].append(new_item)
return jsonify(new_item)
# GET /store/<string:name>/item
@app.route('/store/<string:name>/item')
def get_items_in_store(name):
store = list(filter(lambda store: store['name'] == name, stores))
if store == []:
return jsonify({'message': 'store not found'})
else:
return jsonify({'items': store[0]['items']})
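# Added illustration (not part of the original app): the endpoints above can be
# exercised without a running server via Flask's built-in test client. Call
# _smoke_test() manually (e.g. from a REPL) to print the JSON responses.
def _smoke_test():
    client = app.test_client()
    client.post('/store', json={'name': 'Bookshop'})
    print(client.get('/store').get_json())
    client.post('/store/Bookshop/item', json={'name': 'Pen', 'price': 1.25})
    print(client.get('/store/Bookshop/item').get_json())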
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
|
import unittest
import test_setup
from core.test_block import TestBlock
from core.test_transaction import TestTransaction
from core.test_blockchain import TestBlockchain
import yadacoin.core.config
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False)
|
import copy
from unittest import TestCase
from theano.compile.pfunc import pfunc
from theano import gradient
from theano import tensor
from theano.tests import unittest_tools
import numpy
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if not cuda_ndarray.cuda_available:
    raise SkipTest('Optional package cuda disabled')
import theano.sandbox.cuda as tcn
from theano.tensor.signal.downsample import (DownsampleFactorMax,
DownsampleFactorMaxGrad, DownsampleFactorMaxGradGrad)
import theano.compile.mode
from theano.tensor.tests.test_blas import BaseGemv, TestBlasStrides, TestGer
from theano.sandbox.cuda.blas import gpu_gemv_no_inplace, gpu_gemv_inplace
from theano.sandbox.cuda.blas import gpu_ger_inplace, gpu_ger_no_inplace
from theano.sandbox.cuda.blas import batched_dot
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode(
'FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
# The CPU tests already compare C/Py, so we only check C/GPU
mode_with_gpu = copy.copy(mode_with_gpu)
mode_without_gpu = copy.copy(mode_without_gpu)
mode_with_gpu.check_py_code = False
mode_without_gpu.check_py_code = False
def my_rand(*shape):
return theano._asarray(numpy.random.rand(*shape), dtype='float32')
class TestBatchedDot(TestCase):
def test_batched_dot_correctness(self):
def cmp(a_shp, b_shp):
a=numpy.random.randn(*a_shp).astype(numpy.float32)
b=numpy.random.randn(*b_shp).astype(numpy.float32)
x=tensor.ftensor3()
y=tensor.ftensor3()
f=theano.function([x,y], batched_dot(x,y), mode=mode_with_gpu)
z0=numpy.asarray(f(a,b))
ga = cuda_ndarray.CudaNdarray(a)
gb = cuda_ndarray.CudaNdarray(b)
z1=numpy.asarray(f(ga,gb))
z_test = numpy.sum(a[:,:,:,None]*b[:,None,:,:],axis=-2)
unittest_tools.assert_allclose(z0, z_test)
unittest_tools.assert_allclose(z1, z_test)
cmp((5,4,3), (5,3,2))
cmp((5,3,3), (5,3,3))
cmp((5,2,6), (5,6,3))
# Test dimensions of 0
cmp((0,2,6), (0,6,3))
cmp((5,0,3), (5,3,2))
cmp((5,4,0), (5,0,2))
cmp((5,4,3), (5,3,0))
cmp((0,0,0), (0,0,0))
# Test dimensions of 1
cmp((1,2,6), (1,6,3))
cmp((5,1,3), (5,3,2))
cmp((5,4,1), (5,1,2))
cmp((5,4,3), (5,3,1))
def test_batched_dot_errors(self):
def fail(a_shp, b_shp):
a=numpy.random.randn(*a_shp).astype(numpy.float32)
b=numpy.random.randn(*b_shp).astype(numpy.float32)
x=tensor.ftensor3()
y=tensor.ftensor3()
f=theano.function([x,y], batched_dot(x,y), mode=mode_with_gpu)
z = f(a,b)
# Different batch size
self.assertRaises(RuntimeError, fail, (5,4,3), (6,3,2))
# Shape mismatch
self.assertRaises(RuntimeError, fail, (5,4,3), (5,2,2))
def test_batched_dot_gradient(self):
unittest_tools.verify_grad(
batched_dot,
[numpy.random.randn(5,7,2).astype(numpy.float32),
numpy.random.randn(5,2,6).astype(numpy.float32)],
mode=mode_with_gpu)
def test_dot22():
def cmp(a_shp, b_shp):
a0 = my_rand(*a_shp)
a = tcn.shared_constructor(a0, 'a')
b = tensor.fmatrix()
f = pfunc([b], [], updates=[(a, tensor.dot(a, b))], mode=mode_with_gpu)
bval = my_rand(*b_shp)
f(bval)
assert numpy.allclose(numpy.dot(a0, bval), a.get_value())
# Try with a matrix equal to a0, but with strides in both dims
a.set_value(a0)
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
f(bval)
cmp((3, 4), (4, 5))
cmp((0, 4), (4, 5))
cmp((3, 4), (4, 0))
cmp((3, 0), (0, 5))
cmp((0, 4), (4, 0))
cmp((0, 0), (0, 0))
def test_dot22scalar():
def cmp(a_shp, b_shp):
a = tensor.fmatrix()
b = tensor.fmatrix()
scalar = tensor.fscalar()
av = my_rand(*a_shp)
bv = my_rand(*b_shp)
f = theano.function(
[a, b],
tensor.dot(a, b) * numpy.asarray(4, 'float32'),
mode=mode_with_gpu)
f2 = theano.function(
[a, b],
tensor.dot(a, b) * numpy.asarray(4, 'float32'))
t = f.maker.fgraph.toposort()
assert any([isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t])
# assert any([isinstance(n.op, tcn.basic_ops.GpuAllocEmpty)
# for n in t])
assert numpy.allclose(f(av, bv), f2(av, bv))
f = theano.function([a, b, scalar], tensor.dot(a, b) * scalar,
mode=mode_with_gpu)
f2 = theano.function([a, b, scalar], tensor.dot(a, b) * scalar)
t = f.maker.fgraph.toposort()
assert any([isinstance(n.op, tcn.blas.GpuDot22Scalar) for n in t])
# assert any([isinstance(n.op, tcn.basic_ops.GpuAllocEmpty)
# for n in t])
assert numpy.allclose(f(av, bv, 0.5), f2(av, bv, 0.5))
f = theano.function([a, b, scalar],
tensor.blas._dot22scalar(a, b, scalar),
mode=mode_with_gpu)
f2 = theano.function([a, b, scalar], tensor.dot(a, b) * scalar)
t = f.maker.fgraph.toposort()
assert len(t) == 4
assert isinstance(t[0].op, tcn.GpuFromHost)
assert isinstance(t[1].op, tcn.GpuFromHost)
assert isinstance(t[2].op, tcn.blas.GpuDot22Scalar)
assert isinstance(t[3].op, tcn.HostFromGpu)
assert numpy.allclose(f(av, bv, 0.5), f2(av, bv, 0.5))
cmp((3, 4), (4, 5))
cmp((0, 4), (4, 5))
cmp((3, 4), (4, 0))
cmp((3, 0), (0, 5))
cmp((0, 4), (4, 0))
cmp((0, 0), (0, 0))
def test_gemm():
def cmp(a_shp, b_shp):
a0 = my_rand(*a_shp)
a = tcn.shared_constructor(a0, 'a')
b = tensor.fmatrix('b')
c = tensor.fmatrix('c')
f = pfunc([b, c], [], updates=[(a, tensor.dot(a, b) + tensor.exp(c))],
mode=mode_with_gpu)
assert any([node.op == tcn.blas.gpu_gemm_inplace
for node in f.maker.fgraph.toposort()])
bval = my_rand(*b_shp)
cval = my_rand(a_shp[0], b_shp[1])
f(bval, cval)
assert numpy.allclose(numpy.dot(a0, bval) + numpy.exp(cval),
a.get_value())
# Try with a matrix equal to a0, but with strides in both dims
a.set_value(a0)
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
f(bval, cval)
cmp((3, 4), (4, 5))
cmp((0, 4), (4, 5))
cmp((3, 4), (4, 0))
cmp((3, 0), (0, 5))
cmp((0, 4), (4, 0))
cmp((0, 0), (0, 0))
def test_gemm_no_inplace():
def cmp(a_shp, b_shp):
a0 = my_rand(*a_shp)
a = tcn.shared_constructor(a0, 'a')
cval = my_rand(a_shp[0], b_shp[1])
c = tcn.shared_constructor(cval.copy(), 'c')
b = tcn.fmatrix('b')
b2 = tcn.fmatrix('b2')
f = pfunc(
[b, b2],
[tensor.dot(a, b2) + c],
updates=[(a, tensor.dot(a, b) + c)],
mode=mode_with_gpu)
assert any([node.op == tcn.blas.gpu_gemm_no_inplace
for node in f.maker.fgraph.toposort()])
bval = my_rand(*b_shp)
bval2 = my_rand(*b_shp)
rval = f(bval, bval2)
assert numpy.allclose(numpy.dot(a0, bval) + cval, a.get_value())
assert numpy.allclose(numpy.dot(a0, bval2) + cval, rval)
# Try with a matrix equal to a0, but with strides in both dims
a.set_value(a0)
a.set_value(
a.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
f(bval, bval2)
cmp((3, 4), (4, 5))
cmp((0, 4), (4, 5))
cmp((3, 4), (4, 0))
cmp((3, 0), (0, 5))
cmp((0, 4), (4, 0))
cmp((0, 0), (0, 0))
class TestBlasStridesGpu(TestBlasStrides):
dtype = 'float32'
shared = staticmethod(tcn.shared_constructor)
mode = mode_with_gpu
if 0:
# This is commented out because it doesn't make sense...
# tcn.blas has no op called DownsampleFactorMax
# tcn.blas has an op called GpuDownsampleFactorMax, but that op requires arguments that are
# CudaNdarrayType variables... so rethink this test?
def test_maxpool():
"""TODO: test the gpu version!!! """
for d0, d1, r_true, r_false in [(4, 4, [[[[5, 7], [13, 15]]]], [[[[5, 7], [13, 15]]]]),
(5, 5, [[[[6, 8], [ 16, 18], [ 21, 23]]]],
[[[[6, 8, 9], [ 16, 18, 19], [ 21, 23, 24]]]])]:
for border, ret in [(True, r_true), (False, r_false)]:
ret = numpy.array(ret)
a = tcn.blas.DownsampleFactorMax((2, 2), border)
dmatrix4 = tensor.TensorType("float32", (False, False, False, False))
b = dmatrix4()
f = pfunc([b], [a(b)], mode=mode_with_gpu)
bval = numpy.arange(0, d0*d1).reshape(1, 1, d0, d1)
r = f(bval)[0]
# print bval, bval.shape, border
# print r, r.shape
assert (ret == r).all()
def test_downsample():
shps = [(1, 1, 1, 12),
(1, 1, 2, 2),
(1, 1, 1, 1),
(1, 1, 4, 4),
(1, 1, 10, 11),
(1, 2, 2, 2),
(3, 5, 4, 4),
(25, 1, 7, 7),
(1, 1, 12, 12),
(1, 1, 2, 14),
(1, 1, 12, 14),
(1, 1, 14, 14),
(1, 1, 16, 16),
(1, 1, 18, 18),
(1, 1, 24, 24),
(1, 6, 24, 24),
(10, 1, 24, 24),
(10, 6, 24, 24),
(30, 6, 12, 12),
(30, 2, 24, 24),
(30, 6, 24, 24),
(10, 10, 10, 11),
(1, 1, 10, 1025),
(1, 1, 10, 1023),
(1, 1, 1025, 10),
(1, 1, 1023, 10),
(65536, 1, 10, 10),
(1, 65536, 10, 10),
]
numpy.random.RandomState(unittest_tools.fetch_seed()).shuffle(shps)
for shp in shps:
for ds in (2, 2), (3, 2), (1, 1):
if ds[0] > shp[2]:
continue
if ds[1] > shp[3]:
continue
# GpuDownsampleFactorMax doesn't like having more than 512 columns
# in the output tensor.
if float(shp[3]) / ds[1] > 512:
continue
for ignore_border in (True, False):
# print 'test_downsample', shp, ds, ignore_border
ds_op = DownsampleFactorMax(ds, ignore_border=ignore_border)
a = tcn.shared_constructor(my_rand(*shp), 'a')
f = pfunc([], ds_op(tensor.as_tensor_variable(a)),
mode=mode_with_gpu.excluding('cudnn'))
f2 = pfunc([], ds_op(tensor.as_tensor_variable(a)),
mode=mode_without_gpu)
assert any([isinstance(node.op,
tcn.blas.GpuDownsampleFactorMax)
for node in f.maker.fgraph.toposort()])
assert any([isinstance(node.op, DownsampleFactorMax)
for node in f2.maker.fgraph.toposort()])
assert numpy.allclose(f(), f2())
                # The grad is too slow on GT220 GPUs and can cause the computer
                # to freeze, so remove this workaround once the grad gets
                # optimized enough. It only bypasses the last 2 checks; those
                # tests were passing in all modes on a GTX 470.
if shp[0] > 30000 or shp[1] > 30000:
continue
g = pfunc(
[],
tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(),
a),
mode=mode_with_gpu.excluding('cudnn'))
g2 = pfunc(
[],
tensor.grad(ds_op(tensor.as_tensor_variable(a)).sum(),
a),
mode=mode_without_gpu)
assert any([isinstance(node.op,
tcn.blas.GpuDownsampleFactorMaxGrad)
for node in g.maker.fgraph.toposort()])
assert any([isinstance(node.op, DownsampleFactorMaxGrad)
for node in g2.maker.fgraph.toposort()])
assert numpy.allclose(g(), g2()), shp
ggf = gradient.Lop(tensor.grad((ds_op(
tensor.as_tensor_variable(a))**2).sum(), a), a, a)
ref_mode = copy.copy(mode_without_gpu)
ref_mode.check_py_code = False
gpu_mode = copy.copy(mode_with_gpu)
gpu_mode.check_py_code = False
gg = pfunc([], ggf, mode=gpu_mode)
gg2 = pfunc([], ggf, mode=ref_mode)
assert any([isinstance(node.op,
tcn.blas.GpuDownsampleFactorMaxGradGrad)
for node in gg.maker.fgraph.toposort()])
assert any([isinstance(node.op, DownsampleFactorMaxGradGrad)
for node in gg2.maker.fgraph.toposort()])
assert numpy.allclose(gg(), gg2()), shp
                # We already check that the gpu version returns the same value
                # as the cpu version for GpuDownsampleFactorMaxGrad, so there
                # is no need to call verify_grad here.
class TestGpuGemv(TestCase, BaseGemv,
unittest_tools.TestOptimizationMixin):
mode = mode_with_gpu
dtype = 'float32'
gemv = gpu_gemv_no_inplace
gemv_inplace = gpu_gemv_inplace
# Mimic shared constructors registry
@staticmethod
def shared(val):
# If we don't put shared on the GPU, we won't be able to test
# the no inplace version as the added transfer will make them inplace.
try:
return tcn.shared_constructor(val)
except TypeError:
return theano.shared(val)
class TestGpuGemvNoTransfer(TestCase, BaseGemv,
unittest_tools.TestOptimizationMixin):
mode = mode_with_gpu
dtype = 'float32'
# Mimic shared constructors registry
@staticmethod
def shared(val):
try:
return tcn.shared_constructor(val)
except TypeError:
return theano.shared(val)
    # In this test, inputs are not always transferred to the GPU
gemv = gpu_gemv_no_inplace
gemv_inplace = gpu_gemv_inplace
class TestVectorMatrixDot(TestCase):
    # Tolerance factor used in these tests
atol = 1e-6
##########################
def test_dot_vm(self):
''' Test vector dot matrix '''
v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
m = theano.shared(numpy.array(numpy.random.rand(2, 5),
dtype='float32'))
no_gpu_f = theano.function([], theano.dot(v, m), mode=mode_without_gpu)
gpu_f = theano.function([], theano.dot(v, m), mode=mode_with_gpu)
# gpu_f2 is needed to test the case when the input is not on the gpu
# but the output is moved to the gpu.
gpu_f2 = theano.function([], tcn.gpu_from_host(theano.dot(v, m)),
mode=mode_with_gpu)
# Assert they produce the same output
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
# Assert that the gpu version actually uses gpu
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f.maker.fgraph.toposort()]) == 1
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f2.maker.fgraph.toposort()]) == 1
# Check double-strided m
m.set_value(
m.get_value(borrow=True,
return_internal_type=True)[::-1, ::-1],
borrow=True)
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
def test_dot_mv(self):
''' Test matrix dot vector '''
v = theano.shared(numpy.array(numpy.random.rand(2), dtype='float32'))
m = theano.shared(numpy.array(numpy.random.rand(5, 2),
dtype='float32'))
no_gpu_f = theano.function([], theano.dot(m, v), mode=mode_without_gpu)
gpu_f = theano.function([], theano.dot(m, v), mode=mode_with_gpu)
# gpu_f2 is needed to test the case when the input is not on the gpu
# but the output is moved to the gpu.
gpu_f2 = theano.function([], tcn.gpu_from_host(theano.dot(m, v)),
mode=mode_with_gpu)
# Assert they produce the same output
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
# Assert that the gpu version actually uses gpu
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f.maker.fgraph.toposort()]) == 1
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f2.maker.fgraph.toposort()]) == 1
def test_gemv1(self):
''' test vector1+dot(matrix,vector2) '''
v1 = theano.tensor._shared(numpy.array(numpy.random.rand(2),
dtype='float32'))
v2 = theano.tensor._shared(numpy.array(numpy.random.rand(5),
dtype='float32'))
m = theano.tensor._shared(numpy.array(numpy.random.rand(5, 2),
dtype='float32'))
no_gpu_f = theano.function([], v2 + theano.dot(m, v1),
mode=mode_without_gpu)
gpu_f = theano.function([], v2 + theano.dot(m, v1), mode=mode_with_gpu)
# gpu_f2 is needed to test the case when the input is not on the gpu
# but the output is moved to the gpu.
gpu_f2 = theano.function([], tcn.gpu_from_host(v2 + theano.dot(m, v1)),
mode=mode_with_gpu)
# Assert they produce the same output
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
# Assert that the gpu version actually uses gpu
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f2.maker.fgraph.toposort()]) == 1
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f.maker.fgraph.toposort()]) == 1
def test_gemv2(self):
''' test vector1+dot(vector2,matrix) '''
v1 = theano.shared(numpy.array(numpy.random.rand(5), dtype='float32'))
v2 = tensor._shared(numpy.array(numpy.random.rand(2), dtype='float32'))
m = theano.shared(numpy.array(numpy.random.rand(5, 2),
dtype='float32'))
no_gpu_f = theano.function([], v2 + theano.dot(v1, m),
mode=mode_without_gpu)
gpu_f = theano.function([], v2 + theano.dot(v1, m),
mode=mode_with_gpu)
# gpu_f2 is needed to test the case when the input is not on the gpu
# but the output is moved to the gpu.
gpu_f2 = theano.function([], tcn.gpu_from_host(v2 + theano.dot(v1, m)),
mode=mode_with_gpu)
# Assert they produce the same output
assert numpy.allclose(no_gpu_f(), gpu_f(), atol=self.atol)
assert numpy.allclose(no_gpu_f(), gpu_f2(), atol=self.atol)
# Assert that the gpu version actually uses gpu
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f2.maker.fgraph.toposort()]) == 1
assert sum([node.op is gpu_gemv_inplace for node in
gpu_f.maker.fgraph.toposort()]) == 1
class TestGpuGer(TestGer):
def setUp(self):
self.mode = mode_with_gpu
dtype = self.dtype = 'float32' # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.ger = gpu_ger_no_inplace
self.ger_destructive = gpu_ger_inplace
self.gemm = tcn.blas.gpu_gemm_no_inplace
        # data on the gpu makes the op always inplace
self.ger = gpu_ger_inplace
self.gemm = tcn.blas.gpu_gemm_inplace
class TestGpuGerNoTransfer(TestGer):
@staticmethod
def shared(val):
try:
return tcn.shared_constructor(val)
except TypeError:
return theano.shared(val)
def setUp(self):
self.mode = mode_with_gpu
dtype = self.dtype = 'float32' # optimization isn't dtype-dependent
self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))
self.a = tensor.tensor(dtype=dtype, broadcastable=())
self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))
self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))
        # data on the gpu makes the op always inplace
self.ger = gpu_ger_inplace
self.ger_destructive = gpu_ger_inplace
self.gemm = tcn.blas.gpu_gemm_inplace
class TestGpuGer_OpContract(TestCase, unittest_tools.T_OpContractMixin):
def setUp(self):
self.ops = [gpu_ger_no_inplace, gpu_ger_inplace]
def clone(self, op):
return tcn.blas.GpuGer(op.inplace)
|
import numpy as np
from time import sleep
from os import SEEK_END
from multiprocessing import Value
from .utils import align_to_page
import ctypes
class MemoryAllocator():
def __init__(self, fname, offset_start, page_size):
self.fname = fname
self.offset = align_to_page(offset_start, page_size)
self.next_page_allocated = Value(ctypes.c_uint64, 0)
self.next_page_written = Value(ctypes.c_uint64, 0)
self.page_size = page_size
self.page_offset = 0
self.my_page = -1
self.page_data = np.zeros(self.page_size, '<u1')
self.allocations = []
self.current_sample_id = None
    def __enter__(self):
        self.fp = open(self.fname, 'ab', buffering=0)
        return self
def set_current_sample(self, current_sample_id):
self.current_sample_id = current_sample_id
@property
def space_left_in_page(self):
# We don't have a page allocated yet
if self.my_page < 0:
return 0
return self.page_size - self.page_offset
def malloc(self, size):
# print(f"Allocating {size} bytes")
if size > self.page_size:
raise ValueError(f"Tried allocating {size} but" +
" page size is {self.page_size}")
if size > self.space_left_in_page:
self.flush_page()
# We book the next available page in the file
with self.next_page_allocated.get_lock():
self.my_page = self.next_page_allocated.value
self.next_page_allocated.value = self.my_page + 1
self.page_offset = 0
            # This is a new page, so we erase the content of the buffer
self.page_data.fill(0)
# We check if we already allocated space for this sample on
# the page that is now full
region_in_previous_page = False
while self.allocations and self.allocations[-1][0] == self.current_sample_id:
# We have to revert the allocations we did and we are giving
# up on this sample.
self.allocations.pop()
                # We found at least one memory region from the previous page
region_in_previous_page = True
# The writer will restart from this freshly allocated page
if region_in_previous_page:
raise MemoryError("Not enough memory to fit the whole sample")
previous_offset = self.page_offset
self.page_offset += size
buffer = self.page_data[previous_offset:self.page_offset]
ptr = self.offset + self.my_page * self.page_size + previous_offset
# We return the pointer to the location in file and where to write
# the data
self.allocations.append((self.current_sample_id, ptr, size))
return ptr, buffer
def flush_page(self):
        # If we haven't allocated any page yet, there is nothing to flush
if self.my_page < 0:
return
# We shouldn't have allocated a page and have nothing to write on it
assert self.page_offset != 0
# Wait until it's my turn to write
while self.next_page_written.value != self.my_page:
# Essentially a spin lock
# TODO we could replace it with like exponential backoff
sleep(0.001)
pass
# Now it's my turn to write
expected_file_offset = self.offset + self.my_page * self.page_size
# in order to be aligned with page size
# If this is the first page we have to pad with zeros
if self.my_page == 0:
# print("Padding headers to align with page size")
current_location = self.fp.seek(0, SEEK_END)
null_bytes_to_write = expected_file_offset - current_location
self.fp.write(np.zeros(null_bytes_to_write, dtype='<u1').tobytes())
# print(f"current file pointer is no {self.fp.tell()} and should be {expected_file_offset}")
self.fp.seek(expected_file_offset)
# print(f"Writing page {self.my_page} at offset {self.fp.tell()}")
self.fp.write(self.page_data.tobytes())
# print(f"Done writing {self.my_page} at offset {self.fp.tell()}")
# We warn other processes that they are free to write the next page
with self.next_page_written.get_lock():
self.next_page_written.value += 1
    # Flush the last page and close the file on exit
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush_page()
self.fp.close()
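# Added usage sketch (not part of the original module). Assumptions: the file
# lives in a package so the relative import of align_to_page resolves (run it
# with `python -m <package>.<module>`), and align_to_page(offset, page_size)
# rounds the offset up to the next multiple of page_size.
if __name__ == '__main__':
    demo = MemoryAllocator('allocator_demo.bin', offset_start=100, page_size=4096)
    with demo:
        demo.set_current_sample(0)
        ptr, buf = demo.malloc(16)
        buf[:] = np.arange(16, dtype='<u1')  # fill the reserved 16-byte region
        demo.set_current_sample(1)
        ptr, buf = demo.malloc(32)
        buf[:] = 7
    # Each entry is (sample_id, offset in file, size in bytes)
    print(demo.allocations)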
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright (c) 2014 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Jesse Griffin <jesse@distantshores.org>
#
'''
Writes a JSON catalog of in progress OBS translations based on door43.org.
'''
import os
import sys
import json
import shlex
import codecs
import urllib2
import datetime
from subprocess import *
pages = "/var/www/vhosts/door43.org/httpdocs/data/gitrepo/pages"
lang_names = u'http://td.unfoldingword.org/exports/langnames.json'
obs_cat = u'https://api.unfoldingword.org/obs/txt/1/obs-catalog.json'
obsinprogress = u'/var/www/vhosts/api.unfoldingword.org/httpdocs/obs/txt/1/obs-in-progress.json'
def getURL(url):
try:
request = urllib2.urlopen(url).read()
return request
except:
print ' => ERROR retrieving {0}\nCheck the URL'.format(url)
return
def runCommand(c):
'''
Runs a command in a shell. Returns output and return code of command.
'''
command = shlex.split(c)
com = Popen(command, shell=False, stdout=PIPE, stderr=PIPE)
comout = ''.join(com.communicate()).strip()
return comout, com.returncode
def writeJSON(outfile, p):
f = codecs.open(outfile, 'w', encoding='utf-8')
f.write(getDump(p))
f.close()
def getDump(j):
return json.dumps(j, sort_keys=True)
def main(cat, today, pub_cat):
pub_list = [x['language'] for x in pub_cat]
in_progress = []
out, ret = runCommand('find {0} -maxdepth 2 -type d -name obs'.format(
pages))
for line in out.split('\n'):
lc = line.split('/')[9]
if lc in pub_list: continue
for x in cat:
if lc == x['lc']:
ln = x['ln']
in_progress.append({ 'lc': lc, 'ln': ln })
in_progress.sort(key=lambda x: x['lc'])
in_progress.append({'date_modified': today})
writeJSON(obsinprogress, in_progress)
if __name__ == '__main__':
today = ''.join(str(datetime.date.today()).rsplit('-')[0:3])
cat = json.loads(getURL(lang_names))
pub_cat = json.loads(getURL(obs_cat))
main(cat, today, pub_cat)
|
import os, random
def _ifacePath(ifname):
return "/sys/class/net/%s" % ifname
def _brifPath(brname):
return os.path.join(_ifacePath(brname), "brif")
def ifaceExists(ifname):
return os.path.exists(_ifacePath(ifname))
def ifaceList():
return os.listdir(_ifacePath(""))
def bridgeExists(brname):
return os.path.exists(_brifPath(brname))
def bridgeList():
    return [iface for iface in ifaceList() if bridgeExists(iface)]
def bridgeInterfaces(brname):
return os.listdir(_brifPath(brname))
def ifaceBridge(ifname):
brlink = os.path.join(_ifacePath(ifname), "brport/bridge")
if not os.path.exists(brlink):
return None
return os.path.basename(os.readlink(brlink))
def trafficInfo(ifname):
if not ifaceExists(ifname):
return None, None
with open(os.path.join(_ifacePath(ifname), "statistics/tx_bytes")) as fp:
tx = int(fp.readline().strip())
with open(os.path.join(_ifacePath(ifname), "statistics/rx_bytes")) as fp:
rx = int(fp.readline().strip())
return rx, tx
def randomMac():
    mac_bytes = [random.randint(0x00, 0xff) for _ in range(6)]
    mac_bytes[0] = mac_bytes[0] & 0xfc | 0x02  # locally administered, unicast
    return ':'.join("%02x" % x for x in mac_bytes)
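# Added self-test sketch (not part of the original module): print a randomly
# generated locally-administered MAC and, when /sys/class/net exists (Linux),
# the bridges currently defined on the host.
if __name__ == '__main__':
    print(randomMac())
    if os.path.isdir(_ifacePath("")):
        print(bridgeList())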
|
from deap import base, creator, gp, tools
from deap import algorithms as algo
import numpy as np
import networkx as nx
from sklearn import preprocessing
from scipy.stats.stats import spearmanr
import ctypes as ctypes
import itertools as itertool
import copy
import pickle
from random import random, randint, sample, choice
import math
from collections import defaultdict
from itertools import chain
from operator import attrgetter
import gc
import pandas as pd
class ruleMaker:
def __makeToolBox(self, graph):
"""sets up GA toolbox from deap"""
weightTup = (-1.0,) # specify weights of the errors
for i in range(len(self.nodeList) - 1):
weightTup += (-1.0,)
# MAKE TYPES
creator.create(
"FitnessMin", base.Fitness, weights=weightTup
) # make a fitness minimization function #the objective function has to be MINIMIZED
creator.create(
"individual", list, fitness=creator.FitnessMin
) # create a class of individuals that are lists of floats
# INITIALIZATION
        # register our bitstring generator and how to create an individual and a population
toolbox = base.Toolbox() # build baseline toolbox
toolbox.register("genRandomBitString", self.__genBits) # , model=self)
toolbox.register(
"individual",
tools.initIterate,
creator.individual,
toolbox.genRandomBitString,
)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# REGISTER STATISTICS
# create statistics toolbox and give it functions
stats = tools.Statistics(key=lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
# REGISTER CROSSOVER, MUTATION, AND SELECTION FUNCTIONS
# finish registering the toolbox functions
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=self.params.bitFlipProb)
toolbox.register("select", self.__selNSGA2)
toolbox.register("similar", np.array_equal)
# ADD TOOLBOX TO OBJECT
self.toolbox = toolbox
self.stats = stats
def __init__(
self,
graph,
removeSelfEdges=False,
restrictIncomingEdges=True,
maxIncomingEdges=3,
groundTruth=False,
graphName="",
):
"""Initialize a ruleMaker object for rule inference with scBONITA - RD"""
if maxIncomingEdges < 3:
print(
"The maximum number of incoming edges has been set to less than 3. Meaningless results await you."
)
nodeList = list(
graph.nodes
) # define the node list simply as the nodes in the graph.
ruleGraph = nx.empty_graph(0, create_using=nx.DiGraph) # Create an empty graph
# remove self loops from the graph
if removeSelfEdges:
for node in nodeList:
repeat = True
while repeat:
repeat = False
if node in list(graph.successors(node)):
graph.remove_edge(node, node)
repeat = True
self.nodePositions = [
self.geneList.index(node) for node in nodeList if node in self.geneList
] # node positions in geneList
self.nodeList = nodeList
print("Nodelist: " + str(self.nodeList))
self.permList = []
# set up empty lists and dicts for later
self.rvalues = [] # stores the correlations
individualParse = (
[]
) # list of the number of shadow and nodes that contribute to each node, in order by index num
andNodeList = (
[]
) # a list of the shadow nodes that represent and relations between incoming edge
andNodeInvertList = (
[]
) # keeps track of which incoming nodes for each node need to be inverted
andLenList = (
[]
) # keeps track of how many nodes are coming into each shadow AND node
nodeDict = (
{}
        ) # identifies names of nodes with their index in the node list - provide name, get index
possibilityLister = []
possibilityInverter = []
succnum = []
for i in range(0, len(nodeList)):
nodeDict[
nodeList[i]
] = i # constructs the node dict so we can easily look up nodes
counter = int(0) # keeps track of where we are in the generic individual
for i in range(0, len(nodeList)):
predecessors_temp = list(
graph.predecessors(nodeList[i])
) # get NAMES of predecessors of node as documented in the original graph
successors_temp = list(
graph.successors(nodeList[i])
) # get NAMES of successors of node as documented in the original graph
succnum.append(len(successors_temp))
possibilitytemp = [nodeDict[predder] for predder in predecessors_temp]
possibilityLister.append(list(possibilitytemp))
# Find correlation between the predecessors and the node
nodeData = (
self.binMat[self.nodePositions[i], :].todense().tolist()[0]
) # find binarized expression data for node "i"
predCorr_temp = (
[]
) # temporarily store correlations between node "i" and all its predecessors
for k in predecessors_temp:
predIndex = self.geneList.index(
k
) # find index of predecessor in the geneList from the data
predData = (
self.binMat[predIndex, :].todense().tolist()[0]
) # find binarized expression data for predecessor "k"
mi, pvalue = spearmanr(nodeData, predData)
if np.isnan(mi):
predCorr_temp.append(0)
else:
predCorr_temp.append(mi) # store the calculated correlation
predecessors_final = sorted(
zip(predecessors_temp, predCorr_temp),
reverse=True,
key=lambda corrs: corrs[1],
)[
:3
] # find the top predecessors of the node "i"
self.rvalues.append(
sorted(predCorr_temp, reverse=True)[:3]
) # stores the correlations
self.permList.append([pred[0] for pred in predecessors_final])
for parent in predecessors_final:
if "interaction" in list(graph[parent[0]][nodeList[i]].keys()):
ruleGraph.add_edge(
parent[0],
nodeList[i],
weight=parent[1],
activity=graph[parent[0]][nodeList[i]]["interaction"],
)
if "signal" in list(graph[parent[0]][nodeList[i]].keys()):
ruleGraph.add_edge(
parent[0],
nodeList[i],
weight=parent[1],
activity=graph[parent[0]][nodeList[i]]["signal"],
)
# the following section constructs a list of possible node orders
# this is accomplished by finding all possible subsets of the list of predecessor nodes
withNones = zip(
[nodeList.index(corr_tuple[0]) for corr_tuple in predecessors_final],
itertool.repeat("empty"),
)
possibilities = list(itertool.product(*withNones))
for j in range(0, len(possibilities)):
possibilities[j] = list(possibilities[j])
while "empty" in possibilities[j]:
possibilities[j].remove("empty")
while [] in possibilities[j]:
possibilities[j].remove([])
while [] in possibilities:
possibilities.remove([])
# create a list of the activities of each node and store alongside the contributors to each and node for easy reference later
activities = [] # list to store activities of nodes (a vs i)
activity = []
for sequence in possibilities:
activity = []
for node in sequence:
# check the 'interaction' edge attribute
if "interaction" in list(graph[nodeList[node]][nodeList[i]].keys()):
if graph[nodeList[node]][nodeList[i]]["interaction"] == "a":
activity.append(False)
else:
if graph[nodeList[node]][nodeList[i]]["interaction"] == "i":
activity.append(True)
else:
if (
graph[nodeList[node]][nodeList[i]]["interaction"]
== "u"
):
print(
"Unknown interaction type, assigning activation..."
)
activity.append(False)
else:
if (
graph[nodeList[node]][nodeList[i]][
"interaction"
]
== "g"
):
print(
"Group edge/interaction type, assigning activation..."
)
activity.append(False)
else:
print(
"Unknown interaction, assigning activation..."
)
activity.append(False)
# check the 'signal' edge attribute
if "signal" in list(graph[nodeList[node]][nodeList[i]].keys()):
if graph[nodeList[node]][nodeList[i]]["signal"] == "a":
activity.append(False)
else:
if graph[nodeList[node]][nodeList[i]]["signal"] == "i":
activity.append(True)
else:
if graph[nodeList[node]][nodeList[i]]["signal"] == "u":
print(
"Unknown interaction type, assigning activation..."
)
activity.append(False)
else:
if (
graph[nodeList[node]][nodeList[i]]["signal"]
== "g"
):
print(
"Group edge/interaction type, assigning activation..."
)
activity.append(False)
else:
print(
"Unknown interaction, assigning activation..."
)
activity.append(False)
# If neither edge attribute is present, assign activation
if not "interaction" in list(
graph[nodeList[node]][nodeList[i]].keys()
) and not "signal" in list(
graph[nodeList[node]][nodeList[i]].keys()
):
print("Group edge/interaction type, assigning activation...")
activity.append(False)
activities.append(activity)
andNodeList.append(possibilities)
andNodeInvertList.append(activities)
andLenList.append(len(possibilities))
possibilityInverter.append(list(activity))
            # construct the list of lengths of possibilities for each node, and add to the counter that keeps track of how many bits are necessary
individualParse.append(counter)
counter = counter + len(possibilities)
self.size = counter
individualParse.append(counter)
self.individualParse = (
individualParse # index of start value of current node on the individual
)
self.andNodeList = andNodeList # shadow and node inputs
self.andNodeInvertList = andNodeInvertList # keeps track of which incoming nodes for each node need to be inverted
self.andLenList = (
andLenList # keeps track of length of above inputOrderList for each node
)
self.possibilityList = possibilityLister
self.possibilityInverter = possibilityInverter
self.nodeNum = len(nodeList)
self.params = self.Params()
self.params._Params__simParams()
self.__makeToolBox(graph)
self.ruleGraph = ruleGraph
self.nodeDict = nodeDict # identifies names of nodes with their index in the node list.. provide name, get index
self.successorNums = succnum
# nx.write_graphml(ruleGraph, graphName+"_ruleGraph.graphml")
print("\nIndividual parse: " + str(self.individualParse))
print("\nNodelist: " + str(self.nodeList))
print("\nNode positions: " + str(self.nodePositions))
print("\nPossibilityList: " + str(self.possibilityList))
def __update_upstream(self, node, newUpstreams):
withNones = zip(newUpstreams, itertool.repeat("empty"))
possibilities = list(itertool.product(*withNones))
for j in range(0, len(possibilities)):
possibilities[j] = list(possibilities[j])
while "empty" in possibilities[j]:
possibilities[j].remove("empty")
while [] in possibilities[j]:
possibilities[j].remove([])
while [] in possibilities:
possibilities.remove([])
# create a list of the activities of each node and store alongside the contributors to each and node for easy reference later
activities = [] # list to store activities of nodes (a vs i)
for sequence in possibilities:
activity = []
for node1 in sequence:
if (
self.possibilityInverter[self.possibilityList[node].index(node1)]
== "a"
):
activity.append(False)
else:
activity.append(True)
activities.append(activity)
self.andNodeList[node] = possibilities
self.andNodeInvertList[node] = activities
def __updateCpointers(self):
"""set up C pointers with correct lengths to pass to simulation software in C"""
tempandnoder = []
tempandinverter = []
for currentNode in range(len(self.nodeList)):
tempAndNodes = []
tempandNodeInvertList = []
if currentNode < len(self.nodeList):
tempAndNodes = [
xi + [-1] * (3 - len(xi)) for xi in self.andNodeList[currentNode]
]
tempandNodeInvertList = [
xi + [-1] * (3 - len(xi))
for xi in self.andNodeInvertList[currentNode]
]
while len(tempAndNodes) < 7:
tempAndNodes.append([0, 0, 0])
tempandNodeInvertList.append([0, 0, 0])
tempandnoder.append(tempAndNodes)
tempandinverter.append(tempandNodeInvertList)
# self.andNodeInvert = np.array(tempandinverter, dtype=np.intc, order="C")
self.andNodeInvert = np.array(tempandinverter, dtype=object, order="C")
# self.andNodes = np.array(tempandnoder, dtype=np.intc, order="C")
self.andNodes = np.array(tempandnoder, dtype=object, order="C")
def __genRandBits(self):
"""generates a random bitstring"""
arr = np.random.randint(2, size=(int(self.size),))
return list(arr)
def __findEnd(self, node):
if node == len(self.nodeList) - 1:
end = self.size
else:
end = self.individualParse[node + 1]
return end
def __cxTwoPointNode(self, ind1, ind2):
"""Executes a two-point crossover on the input :term:`sequence`
individuals. The two individuals are modified in place and both keep
their original length.
:returns: A tuple of two individuals.
This function uses the :func:`~random.randint` function from the Python
base :mod:`random` module.
        Modified from deap to cross over between rules - needed to account for the bitstring being only one of the two components of an individual
"""
size = len(ind1[0].nodeList)
cxpointer1 = randint(1, size)
cxpointer2 = randint(1, size - 1)
# make sure pointers are in right order
if cxpointer2 >= cxpointer1:
cxpointer2 += 1
else: # Swap the two cx points
cxpointer1, cxpointer2 = cxpointer2, cxpointer1
cxpoint1 = ind1[0].individualParse[cxpointer1]
cxpoint2 = ind1[0].individualParse[cxpointer2]
# cross over both bitlists and the andNodeLists (as well as andNodeInvertLists)
ind1[1][cxpoint1:cxpoint2], ind2[1][cxpoint1:cxpoint2] = (
ind2[1][cxpoint1:cxpoint2],
ind1[1][cxpoint1:cxpoint2],
)
(
ind1[0].andNodeList[cxpointer1:cxpointer2],
ind2[0].andNodeList[cxpointer1:cxpointer2],
) = (
ind2[0].andNodeList[cxpointer1:cxpointer2],
ind1[0].andNodeList[cxpointer1:cxpointer2],
)
(
ind1[0].andNodeInvertList[cxpointer1:cxpointer2],
ind2[0].andNodeInvertList[cxpointer1:cxpointer2],
) = (
ind2[0].andNodeInvertList[cxpointer1:cxpointer2],
ind1[0].andNodeInvertList[cxpointer1:cxpointer2],
)
# update the arrays seen by C code updateBool
ind1[0]._ruleMaker__updateCpointers()
ind2[0]._ruleMaker__updateCpointers()
return ind1, ind2
def __findPopBest(self, population):
"""finds the lowest error individual in a population"""
saveVal = -1
minny = float("Inf")
for i in range(len(population)):
if np.sum(population[i].fitness.values) < minny:
minny = np.sum(population[i].fitness.values)
saveVal = i
ultimate = population[saveVal]
minvals = population[saveVal].fitness.values
return minvals, ultimate[1], ultimate[0]
def __NP(self, individual, model, cells, params, KOs, KIs, scSyncBoolC):
"""NP simulation code for synchronous simulation"""
cellArray = []
# set up knockin and knockout lists
knockins = np.zeros(len(model.nodeList), dtype=np.intc, order="C")
knockouts = np.zeros(len(model.nodeList), dtype=np.intc, order="C")
for knocker in KOs:
knockouts[knocker] = 1
for knocker in KIs:
knockins[knocker] = 1
# put objects in correct format for passing to C
nodeIndividual = np.array(individual, dtype=np.intc, order="C")
indLen = len(nodeIndividual)
andNodes = np.array(model.andNodes, dtype=np.intc, order="C")
nodeNum = len(model.nodeList)
andNodeInvert = np.array(model.andNodeInvert, dtype=np.intc, order="C")
individualParse = np.array(model.individualParse, dtype=np.intc, order="C")
andLenList = np.array(model.andLenList, dtype=np.intc, order="C")
nodePositions1 = model.nodePositions
nodePositionsC = np.array(nodePositions1, dtype=np.intc, order="C")
simSteps = self.params.simSteps
lenSamples1 = len(model.sampleList)
binMatC1 = self.binMat.toarray(order="C")
binMatC3 = np.transpose(
np.array(copy.deepcopy(binMatC1), order="C", dtype=np.intc)
)
binMatCPointer = ctypes.c_void_p(
binMatC3.ctypes.data
) # put input array as C pointer
# convert objects into C pointers
nodeIndividual1 = ctypes.c_void_p(nodeIndividual.ctypes.data)
indLen1 = ctypes.c_void_p(indLen)
andNodes1 = ctypes.c_void_p(andNodes.ctypes.data)
individualParse1 = ctypes.c_void_p(individualParse.ctypes.data)
andLenList1 = ctypes.c_void_p(andLenList.ctypes.data)
andNodeInvertList1 = ctypes.c_void_p(andNodeInvert.ctypes.data)
nodeNum1 = ctypes.c_void_p(nodeNum)
simSteps1 = ctypes.c_void_p(simSteps)
knockouts1 = ctypes.c_void_p(knockouts.ctypes.data)
knockins1 = ctypes.c_void_p(knockins.ctypes.data)
nodePositionsCPointer = ctypes.c_void_p(nodePositionsC.ctypes.data)
vals = np.full(
shape=(self.maxNodes, self.params.simSteps, self.maxSamples),
fill_value=2,
dtype=np.intc,
order="C",
) # initiate output array - sim data is nodes * sim steps * cells. Max sim steps hard coded to 200
valsubmit = ctypes.c_void_p(vals.ctypes.data) # put output array into C pointer
lenSamples = ctypes.c_void_p(lenSamples1)
scSyncBoolC(
valsubmit,
nodeIndividual1,
indLen1,
nodeNum1,
andLenList1,
individualParse1,
andNodes1,
andNodeInvertList1,
simSteps1,
knockouts1,
knockins1,
lenSamples,
binMatCPointer,
nodePositionsCPointer,
)
return vals
def __varOrAdaptive(
self, population, toolbox, lambda_, cxpb, mutpb, genfrac, mutModel
):
"""generates list of offspring to be compared... decides to do crossover or mutation"""
# def varOrAdaptive(population, toolbox, model, lambda_, cxpb, mutpb, genfrac, mutModel):
# algorithm for generating a list of offspring... copied and pasted from DEAP with modification for adaptive mutation
assert (cxpb + mutpb) <= 1.0, (
"The sum of the crossover and mutation "
"probabilities must be smaller or equal to 1.0."
)
offspring = []
for _ in range(lambda_):
op_choice = random()
if op_choice < cxpb: # Apply crossover
inds = []
for samp in sample(population, 2):
ind = toolbox.clone(samp)
inds.append(ind)
ind1, ind2 = inds
ind1, ind2 = self.__cxTwoPointNode(ind1, ind2)
del ind1.fitness.values
offspring.append(ind1)
elif op_choice < cxpb + mutpb: # Apply mutation
ind = toolbox.clone(choice(population))
(ind,) = self.__mutFlipBitAdapt(ind, genfrac, mutModel)
del ind.fitness.values
offspring.append(ind)
else: # shouldn't happen... clone existing individual
offspring.append(choice(population))
return offspring
def __selectMutNode(self, errors):
"""select node to mutate"""
normerrors = [
1.0 * error / np.sum(errors) for error in errors
] # normalize errors to get a probability that the node is modified
probs = np.cumsum(normerrors)
randy = random() # randomly select a node to mutate
return next(i for i in range(len(probs)) if probs[i] > randy)
def __mutFlipBitAdapt(self, indyIn, genfrac, mutModel):
"""mutation algorithm"""
errors = list(indyIn.fitness.values) # get errors
individual = indyIn[1]
model = indyIn[0]
# get rid of errors in nodes that can't be changed
errorNodes = 0
for j in range(len(errors)):
if model.andLenList[j] < 2:
errors[j] = 0
else:
errorNodes = errorNodes + 1
if np.sum(errors) < 0.05 * errorNodes or errorNodes == 0:
# condition selection on number of incoming edges + downstream edges
pseudoerrors = [
len(model.possibilityList[i])
if model.successorNums[i] == 0
else len(model.possibilityList[i]) * model.successorNums[i]
for i in range(len(model.nodeList))
]
# zero out nodes that can't be changed
for j in range(len(pseudoerrors)):
if model.andLenList[j] < 2:
pseudoerrors[j] = 0
focusNode = self.__selectMutNode(pseudoerrors)
else:
# if errors are relatively high, focus on nodes that fit the worst and have highest in-degree
# calculate probabilities for mutating each node
for i in range(len(errors)):
temper = model.successorNums[i]
if temper == 0:
errors[i] = errors[i] * len(model.possibilityList[i])
else:
errors[i] = errors[i] * len(model.possibilityList[i]) * temper
focusNode = self.__selectMutNode(errors)
# perform mutation
if model.andLenList[focusNode] > 1:
# find ends of the node of interest in the individual
start = model.individualParse[focusNode]
end = model._ruleMaker__findEnd(focusNode)
# mutate the inputs some of the time
if len(model.possibilityList[focusNode]) > 3 and random() < mutModel:
temppermup = [] # temporary upstream nodes
upstreamAdders = list(model.possibilityList[focusNode])
rvals = list(model.rvalues[focusNode])
while len(temppermup) < 3:
randy = random() # randomly select a node to mutate
tempsum = sum(rvals)
if tempsum == 0:
addNoder = randint(
0, len(rvals) - 1
) # int(math.floor(random()*len(upstreamAdders)))
# print(addNoder)
else:
recalc = np.cumsum([1.0 * rval / tempsum for rval in rvals])
# print(recalc)
addNoder = next(
i for i in range(len(recalc)) if recalc[i] > randy
)
# print(addNoder)
temppermup.append(upstreamAdders.pop(addNoder))
# print(rvals)
rvals.pop(addNoder)
model._ruleMaker__update_upstream(focusNode, temppermup)
model._ruleMaker__updateCpointers()
for i in range(start, end):
# print("i: " + str(i))
if random() < 2 / (end - start + 1):
individual[i] = 1
else:
individual[i] = 0
# ensure that there is at least one shadow and node turned on
if np.sum(individual[start:end]) == 0:
individual[start] = 1
indyIn[0] = model
indyIn[1] = individual
else:
print("did not actually check")
return (indyIn,)
def __genBits(self):
# generate random bitlist
startInd = list(self.__genRandBits())
counter = 0
# make sure bitlist isn't zero
while np.sum(startInd) == 0 and counter < float("Inf"):
startInd = list(self.__genRandBits())
counter += 1
# go through nodes and make sure that there are 1-5 ones in the random list
for node in range(0, len(self.nodeList)):
end = self.__findEnd(node)
start = self.individualParse[node]
if (end - start) > 1:
counter = 0
while np.sum(startInd[start:end]) > 5 and counter < float("Inf"):
chosen = math.floor(random() * (end - start))
startInd[start + int(chosen)] = 0
counter += 1
if np.sum(startInd[start:end]) == 0:
chosen = math.floor(random() * (end - start))
startInd[start + int(chosen)] = 1
elif (end - start) == 1:
startInd[start] = 1
return [copy.deepcopy(self), startInd]
def __sortNondominatedAdapt(self, individuals, k, first_front_only=False):
"""
Taken from deap and modified slightly to make pareto sorting less strict
Sort the first *k* *individuals* into different nondomination levels
using the "Fast Nondominated Sorting Approach" proposed by Deb et al.,
see [Deb2002]_. This algorithm has a time complexity of :math:`O(MN^2)`,
where :math:`M` is the number of objectives and :math:`N` the number of
individuals.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param first_front_only: If :obj:`True` sort only the first front and
exit.
:returns: A list of Pareto fronts (lists), the first list includes
nondominated individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
if k == 0:
return []
map_fit_ind = defaultdict(list)
for ind in individuals:
map_fit_ind[ind.fitness].append(ind)
fits = list(map_fit_ind)
current_front = []
next_front = []
dominating_fits = defaultdict(int)
dominated_fits = defaultdict(list)
# Rank first Pareto front
for i, fit_i in enumerate(fits):
for fit_j in fits[i + 1 :]:
if self.__dominated(fit_i, fit_j):
dominating_fits[fit_j] += 1
dominated_fits[fit_i].append(fit_j)
elif self.__dominated(fit_j, fit_i):
dominating_fits[fit_i] += 1
dominated_fits[fit_j].append(fit_i)
if dominating_fits[fit_i] == 0:
current_front.append(fit_i)
fronts = [[]]
for fit in current_front:
fronts[-1].extend(map_fit_ind[fit])
pareto_sorted = len(fronts[-1])
# Rank the next front until all individuals are sorted or
        # the given number of individuals has been sorted.
if not first_front_only:
N = min(len(individuals), k)
while pareto_sorted < N:
fronts.append([])
for fit_p in current_front:
for fit_d in dominated_fits[fit_p]:
dominating_fits[fit_d] -= 1
if dominating_fits[fit_d] == 0:
next_front.append(fit_d)
pareto_sorted += len(map_fit_ind[fit_d])
fronts[-1].extend(map_fit_ind[fit_d])
current_front = next_front
next_front = []
return fronts
def __dominated(self, ind1, ind2):
"""TTaken from deap and modified slightly to make pareto sorting less strict.
Return true if each objective of *self* is not strictly worse than
the corresponding objective of *other* and at least one objective is
strictly better.
:param obj: Slice indicating on which objectives the domination is
tested. The default value is `slice(None)`, representing
every objectives.
"""
not_equal = False
mean1 = np.mean(ind1.wvalues)
mean2 = np.mean(ind2.wvalues)
std1 = np.std(ind1.wvalues)
if mean1 > mean2:
not_equal = True
elif mean1 < mean2:
return False
return not_equal
def __assignCrowdingDist(self, individuals):
"""taken from deap. Assign a crowding distance to each individual's fitness. The
crowding distance can be retrieve via the :attr:`crowding_dist`
attribute of each individual's fitness.
"""
if len(individuals) == 0:
return
distances = [0.0] * len(individuals)
crowd = [(ind.fitness.values, i) for i, ind in enumerate(individuals)]
nobj = len(individuals[0].fitness.values)
for i in range(nobj):
crowd.sort(key=lambda element: element[0][i])
distances[crowd[0][1]] = float("inf")
distances[crowd[-1][1]] = float("inf")
if crowd[-1][0][i] == crowd[0][0][i]:
continue
norm = nobj * float(crowd[-1][0][i] - crowd[0][0][i])
for prev, cur, next in zip(crowd[:-2], crowd[1:-1], crowd[2:]):
distances[cur[1]] += 1.0 * (next[0][i] - prev[0][i]) / norm
for i, dist in enumerate(distances):
individuals[i].fitness.crowding_dist = dist
def __selNSGA2(self, individuals, k):
"""Calculate fitness for an individual. NSGA2 selection taken from deap
Apply NSGA-II selection operator on the *individuals*. Usually, the
size of *individuals* will be larger than *k* because any individual
present in *individuals* will appear in the returned list at most once.
Having the size of *individuals* equals to *k* will have no effect other
than sorting the population according to their front rank. The
list returned contains references to the input *individuals*. For more
details on the NSGA-II operator see [Deb2002]_.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
.. [Deb2002] Deb, Pratab, Agarwal, and Meyarivan, "A fast elitist
non-dominated sorting genetic algorithm for multi-objective
optimization: NSGA-II", 2002.
"""
pareto_fronts = self.__sortNondominatedAdapt(individuals, k)
for front in pareto_fronts:
self.__assignCrowdingDist(front)
chosen = list(chain(*pareto_fronts[:-1]))
k = k - len(chosen)
if k > 0:
sorted_front = sorted(
pareto_fronts[-1], key=attrgetter("fitness.crowding_dist"), reverse=True
)
chosen.extend(sorted_front[:k])
return chosen
def __bitList(self, n, x):
templist = [1 if digit == "1" else 0 for digit in bin(n)[::-1]]
while len(templist) < x:
templist.append(0)
while (len(templist)) > x:
templist.pop()
return templist
def writeModel(self, individual, model):
"""iterate over nodes to generate a BooleanNet representation for the entire model"""
addString = ""
for i in range(0, len(model.nodePositions)):
addString = addString + model._ruleMaker__writeNode(
i,
individual[model.individualParse[i] : model.individualParse[i + 1]],
model,
)
addString = addString + "\n"
return addString[:-1]
def __findInEdges(self, model, node):
"""find the incoming edges to each 'and' connection for a given node"""
inEdges = []
for lister in model.andNodeList[node]:
tempTup = tuple(lister)
inEdges.append(set(tempTup))
return inEdges
def __simplifyRule(self, rule, inEdges):
"""find the simplest form of a rule"""
for i in range(len(rule)):
if rule[i] == 1:
for j in range(len(rule)):
if rule[j] == 1 and not i == j:
if inEdges[i].issubset(inEdges[j]):
rule[j] = 0
return rule
def __writeNode(self, currentNode, nodeIndividual, model):
"""write out evaluation instructions in BooleanNet format. This follows the exact same code as updateNode (for switch=0), but writes a string instead of actually updating the values of the nodes"""
andNodes = model.andNodeList[
currentNode
] # find the list of shadow and nodes we must compute before computing value of current nodes
andNodeInvertList = model.andNodeInvertList[
currentNode
] # find list of lists of whether input nodes need to be inverted (corresponds to inputOrder)
writenode = (
"" + model.nodeList[currentNode] + " *= "
) # set up the initial string to use to write node
inEdges = self.__findInEdges(model, currentNode)
nodeIndividual = self.__simplifyRule(nodeIndividual, inEdges)
if model.andLenList[currentNode] == 0 or sum(nodeIndividual) == 0:
# print(writenode + ' ' + model.nodeList[currentNode])
return (
writenode + " " + model.nodeList[currentNode]
) # if no inputs, maintain value
elif len(andNodes) == 1:
# if only one input, then can either affect or not affect the node. so either keep the value or update to the single input's value
value = ""
# if only one input, then set to that number
if andNodeInvertList[0][0] == 0:
value = value + model.nodeList[andNodes[0][0]]
else:
value = value + "not " + model.nodeList[andNodes[0][0]]
print(writenode + value)
return writenode + value
else:
# update nodes with more than one input
# first deal with case of simple logic without need of linear regression
orset = []
# go through list of possible shadow and nodes to see which ones actually contribute
for andindex in range(len(nodeIndividual)):
newval = "("
if nodeIndividual[andindex] == 1:
# if a shadow and contributes, compute its value using its upstream nodes
if andNodeInvertList[andindex][0]:
newval = newval + "not "
newval = newval + self.nodeList[andNodes[andindex][0]]
for addnode in range(1, len(andNodes[andindex])):
newval = newval + " and "
if andNodeInvertList[andindex][addnode]:
newval = newval + " not "
newval = newval + self.nodeList[andNodes[andindex][addnode]]
orset.append(newval + ")")
# combine the shadow and nodes with or operations
writenode = writenode + orset.pop()
for val in orset:
writenode = writenode + " or " + val
# print(writenode)
return writenode
def __writeNode_BoolNet(self, currentNode, nodeIndividual, model):
"""write out evaluation instructions in BoolNet format.
This follows the exact same code as updateNode (for switch=0), but writes a string instead of actually updating the values of the nodes"""
andNodes = model.andNodeList[
currentNode
] # find the list of shadow and nodes we must compute before computing value of current nodes
andNodeInvertList = model.andNodeInvertList[
currentNode
] # find list of lists of whether input nodes need to be inverted (corresponds to inputOrder)
writenode = (
"" + model.nodeList[currentNode] + " , "
) # set up the initial string to use to write node
inEdges = self.__findInEdges(model, currentNode)
nodeIndividual = self.__simplifyRule(nodeIndividual, inEdges)
if model.andLenList[currentNode] == 0 or sum(nodeIndividual) == 0:
return (
writenode + " " + model.nodeList[currentNode]
) # if no inputs, maintain value
elif len(andNodes) == 1:
# if only one input, then can either affect or not affect the node. so either keep the value or update to the single input's value
value = ""
# if only one input, then set to that number
if andNodeInvertList[0][0] == 0:
value = value + model.nodeList[andNodes[0][0]]
else:
value = value + "!" + model.nodeList[andNodes[0][0]]
print(writenode + value)
return writenode + value
else:
# update nodes with more than one input
# first deal with case of simple logic without need of linear regression
orset = []
# go through list of possible shadow and nodes to see which ones actually contribute
for andindex in range(len(nodeIndividual)):
newval = ""
if nodeIndividual[andindex] == 1:
# if a shadow and contributes, compute its value using its upstream nodes
if andNodeInvertList[andindex][0]:
newval = newval + "!"
newval = newval + self.nodeList[andNodes[andindex][0]]
for addnode in range(1, len(andNodes[andindex])):
newval = newval + " & "
if andNodeInvertList[andindex][addnode]:
newval = newval + " !"
newval = newval + self.nodeList[andNodes[andindex][addnode]]
orset.append(newval)
# combine the shadow and nodes with or operations
writenode = writenode + orset.pop()
for val in orset:
writenode = writenode + " | " + val
# print(writenode)
return writenode
def writeModel_BoolNet(self, individual, model):
"""iterate over nodes to generate a BooleanNet representation for the entire model"""
addString = ""
for i in range(0, len(model.nodePositions)):
            addString = addString + model._ruleMaker__writeNode_BoolNet(
i,
individual[model.individualParse[i] : model.individualParse[i + 1]],
model,
)
addString = addString + "\n"
return addString[:-1]
def __eaMuPlusLambdaAdaptive(self, scSyncBoolC, graph, verbose=True):
params = self.params
toolbox = self.toolbox
mutModel = self.params.mutModel
logbook = tools.Logbook()
mu = self.params.mu
lambda_ = self.params.lambd
stats = self.stats
cxpb = self.params.crossoverProb
mutpb = self.params.mutationProb
ngen = self.params.generations
sampleList = self.sampleList
KOlist = self.knockoutLists
KIlist = self.knockinLists
population = self.toolbox.population(n=self.params.popSize)
logbook.header = ["gen", "nevals"] + (self.stats.fields if self.stats else [])
lastcheck = []
modellist = []
fitnesslist = []
popList = []
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
# print("Invalid individuals: " + str(invalid_ind))
updateBooler = ctypes.cdll.LoadLibrary("./simulator.so")
scSyncBoolC = updateBooler.scSyncBool
fitnesses = [
indy[0]._ruleMaker__evaluateByNode(indy[1], KOlist, KIlist, scSyncBoolC)
for indy in invalid_ind
]
print("Fitnesses: " + str(fitnesses))
print(len(fitnesses))
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
fitnesslist.append([list(ind.fitness.values) for ind in population])
popList.append([list(inder[1]) for inder in population])
modellist.append(
[
[
(modeler[0].size),
list(modeler[0].nodeList),
list(modeler[0].individualParse),
list(modeler[0].andNodeList),
list(modeler[0].andNodeInvertList),
list(modeler[0].andLenList),
list(modeler[0].nodeList),
dict(modeler[0].nodeDict),
]
for modeler in population
]
)
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
breaker = False
for ind in population:
if np.sum(ind.fitness.values) < 0.01 * len(ind.fitness.values):
breaker = True
if breaker:
return population, logbook
# Begin the generational process
for gen in range(1, ngen + 1):
offspring = self.__varOrAdaptive(
population, toolbox, lambda_, cxpb, mutpb, (1.0 * gen / ngen), mutModel
)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = [
indy[0]._ruleMaker__evaluateByNode(indy[1], KOlist, KIlist, scSyncBoolC)
for indy in invalid_ind
]
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
population[:] = toolbox.select(population + offspring, mu)
fitnesslist.append([list(ind.fitness.values) for ind in population])
popList.append([list(inder[1]) for inder in population])
modellist.append(
[
[
(modeler[0].size),
list(modeler[0].nodeList),
list(modeler[0].individualParse),
list(modeler[0].andNodeList),
list(modeler[0].andNodeInvertList),
list(modeler[0].andLenList),
list(modeler[0].nodeList),
dict(modeler[0].nodeDict),
]
for modeler in population
]
)
# Update the statistics with the new population
record = stats.compile(population) if stats is not None else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
breaker = False
for ind in population:
if np.sum(ind.fitness.values) < 0.01 * len(ind.fitness.values):
breaker = True
saveInd = ind
if breaker:
errorTemp = saveInd.fitness.values
for value in errorTemp:
if value > 0.1:
breaker = False
if breaker:
outputList = [fitnesslist, popList, modellist]
return population, logbook
return population, logbook
def __evaluateByNode(
self,
individual,
KOlist,
KIlist,
cFunction,
localSearch=False,
importanceScores=False,
):
"""Includes Network Propagation"""
model = self
cellArray = []
knockins = np.zeros(len(model.nodeList), dtype=np.intc, order="C")
knockouts = np.zeros(len(model.nodeList), dtype=np.intc, order="C")
for knocker in KOlist:
knockouts[knocker] = 1
        for knocker in KIlist:
knockins[knocker] = 1
# put objects in correct format for passing to C
nodeIndividual = np.array(individual, dtype=np.intc, order="C")
indLen = len(nodeIndividual)
andNodes = np.array(model.andNodes, dtype=np.intc, order="C")
nodeNum = len(model.nodeList)
andNodeInvert = np.array(model.andNodeInvert, dtype=object, order="C")
individualParse = np.array(model.individualParse, dtype=np.intc, order="C")
andLenList = np.array(model.andLenList, dtype=np.intc, order="C")
nodePositions1 = model.nodePositions
nodePositionsC = np.array(nodePositions1, dtype=np.intc, order="C")
simSteps = self.params.simSteps
lenSamples1 = len(model.sampleList)
binMatC3 = np.array(
copy.deepcopy(self.binMat.toarray(order="C")), order="C", dtype=np.intc
)
binMatCPointer = ctypes.c_void_p(
binMatC3.ctypes.data
) # put input array as C pointer
# convert objects into C pointers
nodeIndividual1 = ctypes.c_void_p(nodeIndividual.ctypes.data)
indLen1 = ctypes.c_void_p(indLen)
andNodes1 = ctypes.c_void_p(andNodes.ctypes.data)
individualParse1 = ctypes.c_void_p(individualParse.ctypes.data)
andLenList1 = ctypes.c_void_p(andLenList.ctypes.data)
andNodeInvertList1 = ctypes.c_void_p(andNodeInvert.ctypes.data)
nodeNum1 = ctypes.c_void_p(nodeNum)
simSteps1 = ctypes.c_void_p(simSteps)
knockouts1 = ctypes.c_void_p(knockouts.ctypes.data)
knockins1 = ctypes.c_void_p(knockins.ctypes.data)
nodePositionsCPointer = ctypes.c_void_p(nodePositionsC.ctypes.data)
vals = np.full(
shape=(100, self.maxNodes), fill_value=0, dtype=np.intc, order="C"
) # simData[STEP][NODE]
valsubmit = ctypes.c_void_p(vals.ctypes.data)
lenSamples = ctypes.c_void_p(lenSamples1)
localSearchC = ctypes.c_void_p(int(localSearch))
importanceScoresC = ctypes.c_void_p(int(importanceScores))
# errors = np.array(np.full(10000, fill_value=0, dtype=np.intc, order='C'))
# errorsSubmit=ctypes.c_void_p(errors.ctypes.data)
if localSearch:
# look at errors node wise
errors = np.array(
np.full(self.maxNodes, fill_value=0, dtype=np.intc, order="C")
)
errorsSubmit = ctypes.c_void_p(errors.ctypes.data)
cFunction(
valsubmit,
nodeIndividual1,
indLen1,
nodeNum1,
andLenList1,
individualParse1,
andNodes1,
andNodeInvertList1,
simSteps1,
knockouts1,
knockins1,
lenSamples,
binMatCPointer,
nodePositionsCPointer,
errorsSubmit,
localSearchC,
importanceScoresC,
) # in this case scSyncBoolC
errors = errors.tolist()
errors = errors[:nodeNum]
return errors
else:
if importanceScores:
importanceScores = np.array(
np.full(1, fill_value=0.0, dtype=np.float64, order="C")
)
importanceScoresC = ctypes.c_void_p(importanceScores.ctypes.data)
cFunction(
valsubmit,
nodeIndividual1,
indLen1,
nodeNum1,
andLenList1,
individualParse1,
andNodes1,
andNodeInvertList1,
simSteps1,
knockouts1,
knockins1,
lenSamples,
binMatCPointer,
nodePositionsCPointer,
importanceScoresC,
) # in this case importanceScore
return importanceScores.tolist()
else:
# look at errors by sample
errors = np.array(
np.full(self.maxSamples, fill_value=0, dtype=np.intc, order="C")
)
errorsSubmit = ctypes.c_void_p(errors.ctypes.data)
cFunction(
valsubmit,
nodeIndividual1,
indLen1,
nodeNum1,
andLenList1,
individualParse1,
andNodes1,
andNodeInvertList1,
simSteps1,
knockouts1,
knockins1,
lenSamples,
binMatCPointer,
nodePositionsCPointer,
errorsSubmit,
localSearchC,
importanceScoresC,
) # in this case scSyncBoolC
errors = errors.tolist()
return [sum(errors)]
def __processERS(self, equivsName):
"""Create an individual from the ERS generated by the local search, for importance score calculation"""
ersFile = open(str(equivsName), "rb")
ers = pickle.load(ersFile)
ersFile.close()
# randomly sample the ers to make an individual
individual = []
for i in range(len(ers)):
individual.extend(ers[i][randint(0, len(ers[i]) - 1)])
return individual
def __checkNodePossibilities(self, node, indy, KOlist, KIlist, scSyncBoolC):
model = self
tol = 0.0 # .01*len(newSSS) # set tolerance for equivalence
end = model._ruleMaker__findEnd(node) # find end of model for this node
start = model.individualParse[node] # find start of model for this node
truth = list(indy[start:end])
equivs = [truth]
if (end - start) == 0:
return truth, equivs, equivs, 0.0
indOptions = []
indErrors = []
# iterate over possibilities for this node
for i in range(1, 2 ** (end - start)):
tempultimate = list(indy)
tempInd = model._ruleMaker__bitList(i, len(truth))
tempultimate[start:end] = tempInd # set rule to one being checked
currentsumtemp = self._ruleMaker__evaluateByNode(
tempultimate, KOlist, KIlist, scSyncBoolC, localSearch=True
)
currentsum = currentsumtemp[
node
] # save the error found #subset complete error to
indOptions.append(tempInd)
indErrors.append(currentsum)
gc.collect()
minny = min(indErrors)
equivs = []
for i in range(len(indOptions)):
if indErrors[i] <= minny + tol: # changed from < to <=
equivs.append(indOptions[i])
truth = equivs[0]
return (truth, equivs, minny, indErrors)
def __calcImportance(self, equivs, model, importanceScore, graphName):
# Create holder for importance scores
importanceScoresDict = {}
importanceScoreStdev = {}
strat2_IS = {}
strat3_IS = {}
strat4_IS = {}
tempList = list(range(0, len(self.nodeList)))
# shuffle(tempList)
# print(tempList)
# print(len(tempList))
for node in range(0, len(self.nodeList)):
importanceScoresDict[self.nodeList[node]] = []
importanceScoreStdev[self.nodeList[node]] = 0.0
# Try 3 randomly sampled rule sets
i = 0
while i < 3:
individual = self._ruleMaker__processERS(graphName + "_equivs1.pickle")
for node in tempList:
print(
"Node: "
+ str(self.nodeList[node])
+ ", Node Position: "
+ str(node)
)
temp = self._ruleMaker__evaluateByNode(
individual,
[node],
[node],
importanceScore,
localSearch=False,
importanceScores=True,
)
print("Trial: " + str(i) + " Unprocessed IS: " + str(temp))
importanceScoresDict[self.nodeList[node]].append(temp[0])
i = i + 1
print(importanceScoresDict)
# Find maximum node importance score
maxScore = max(importanceScoresDict.values())
print("Max IS: " + str(maxScore))
minScore = min(importanceScoresDict.values())
print("Min IS: " + str(maxScore))
# Rescaling to [0,1] using featureReScale
for node in range(0, len(self.nodeList)):
importanceScoresDict[self.nodeList[node]] = (
importanceScoresDict[self.nodeList[node]][0] - minScore[0]
) / (maxScore[0] - minScore[0])
print(importanceScoreStdev)
ersFile = open(str(graphName + "_equivs1.pickle"), "rb")
ers = pickle.load(ersFile)
obsERS = {}
maxERS = {}
inDegreeNet = nx.read_graphml(graphName)
# Normalize by number of rule sets that were tried
for node in range(0, len(self.nodeList)):
obsERS[self.nodeList[node]] = len(ers[node])
inDegree = inDegreeNet.in_degree(self.nodeList[node])
if inDegree == 0:
maxERS[self.nodeList[node]] = 1
else:
inDegree = min(inDegree, 3)
maxERS[self.nodeList[node]] = (
2 ** (len(ers[node][0])) - 1
) # 2**(inDegree+1) - 1 #
# print(node)
# print(obsERS[self.nodeList[node]])
# print(maxERS[self.nodeList[node]])
# Strategy 3: scale IS by (maxERS - obsERS + 1)/max ERS
importanceScoresDict[self.nodeList[node]] = np.mean(
importanceScoresDict[self.nodeList[node]]
)
importanceScoresDict[self.nodeList[node]] = importanceScoresDict[
self.nodeList[node]
] * (
(maxERS[self.nodeList[node]] - obsERS[self.nodeList[node]] + 1)
/ maxERS[self.nodeList[node]]
)
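            # Added note: the factor (maxERS - obsERS + 1) / maxERS shrinks the score of
            # nodes whose observed equivalent rule set is large (rule poorly determined)
            # and leaves nodes with a single equivalent rule essentially unscaled.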
# importanceScoresDict[self.nodeList[node]] = np.mean(importanceScoresDict[self.nodeList[node]])
# Strategy 1: divide by the standard deviation of the scores across rule set trials. This should be BEFORE rescaling to [0,1]
# importanceScoreStdev[scObject.nodeList[node]] = np.std(importanceScoresDict[scObject.nodeList[node]])
# newIS[scObject.nodeList[node]] = importanceScoresDict[scObject.nodeList[node]]/float(importanceScoreStdev[scObject.nodeList[node]])
# Strategy 2: scale IS by log2((obsERS + 1)/max ERS)
# strat2_IS[self.nodeList[node]] = importanceScoresDict[self.nodeList[node]] * (np.log2((obsERS[self.nodeList[node]] + 1)/maxERS[self.nodeList[node]]))
# Strategy 3: scale IS by (maxERS - obsERS + 1)/max ERS
# strat3_IS[self.nodeList[node]] = importanceScoresDict[self.nodeList[node]] * ((maxERS[self.nodeList[node]] - obsERS[self.nodeList[node]] + 1)/maxERS[self.nodeList[node]])
# Strategy 4: abs(strat2)/max(strat2)
# for node in range(0, len(self.nodeList)):
# strat4_IS[self.nodeList[node]] = abs(strat2_IS[self.nodeList[node]])/max([abs(val) for val in strat2_IS.values()])
# Print out file of importance scores
IS_df = pd.DataFrame(
importanceScoresDict.items(), columns=["Node", "Importance Score"]
)
# IS_df["Std"] = IS_df["Node"].map(importanceScoreStdev)
# IS_df["newScore"] = IS_df["Node"].map(newIS)
# IS_df["Strat2_IS"] = IS_df["Node"].map#(strat2_IS)
# IS_df["Strat3_IS"] = IS_df["Node"].map#(strat3_IS)
# IS_df["Strat4_IS"] = IS_df["Node"].map(strat4_IS)
IS_df["ObsERS"] = IS_df["Node"].map(obsERS)
IS_df["MaxERS"] = IS_df["Node"].map(maxERS)
IS_df.to_csv(
str(graphName + "_importanceScores.csv"),
sep=",",
encoding="utf-8",
index=False,
)
# Make graphml with importance scores as attributes
net = self.ruleGraph
nx.set_node_attributes(net, values=importanceScoresDict, name="importanceScore")
# nx.set_node_attributes(net, values=strat2_IS, name='strat2_IS')
# nx.set_node_attributes(net, values=strat3_IS, name='strat3_IS')
# nx.set_node_attributes(net, values=strat4_IS, name='strat4_IS')
nx.set_node_attributes(net, values=maxERS, name="maxERS")
nx.set_node_attributes(net, values=obsERS, name="obsERS")
# add abundance as attribute to graph
binMat2 = self.binMat.A
abundance = {}
abundance_sd = {}
numZeros = {}
numOnes = {}
for node in list(importanceScoresDict.keys()):
node_index = self.geneList.index(node)
expression = binMat2[node_index, :].tolist()
abundance[node] = np.mean(expression)
abundance_sd[node] = np.std(expression)
expression = np.array(expression)
numZeros[node] = (expression == 0).sum()
numOnes[node] = (expression == 1).sum()
nx.set_node_attributes(net, values=abundance, name="abundanceMean")
nx.set_node_attributes(net, values=abundance_sd, name="abundanceStdev")
nx.set_node_attributes(net, values=numZeros, name="abundanceZeros")
nx.set_node_attributes(net, values=numOnes, name="abundanceOnes")
nx.write_graphml_lxml(net, graphName[:-26] + "_IS.graphml")
return importanceScoresDict
class Params:
def __init__(self):
pass
def __simParams(
self,
mutModel=0.25,
cells=1,
samples=1,
generations=5,
popSize=24,
mu=10,
lambd=24,
iters=100,
genSteps=100,
simSteps=100,
crossoverProb=0.1,
mutationProb=0.9,
bitFlipProb=0.5,
):
self.mutModel = mutModel
self.cells = cells
self.samples = samples
self.generations = generations # generations to run #100
self.popSize = popSize # size of population #24
self.mu = mu # individuals selected #24
self.lambd = lambd # children produced #24
self.iters = iters # number of simulations to try in asynchronous mode
self.genSteps = genSteps # steps to find steady state with fake data
self.simSteps = (
simSteps # number of steps each individual is run when evaluating
)
self.crossoverProb = (
crossoverProb # prob of crossing over a particular parent
)
self.mutationProb = mutationProb # prob of mutating a particular parent
self.bitFlipProb = bitFlipProb # prob of flipping bits inside mutation
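# Usage sketch (an assumption about the surrounding pipeline, not original code):
# the name-mangled setter is reachable from outside the class as
#   params = Params()
#   params._Params__simParams(generations=10, popSize=48)
# after which `params` carries the GA settings (mu, lambd, simSteps, ...) that the
# ruleMaker methods above read from self.params.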
|
"""
Tests for financialaid models
"""
from django.db.models.signals import post_save
from factory.django import mute_signals
from rest_framework.exceptions import ValidationError
from financialaid.constants import FinancialAidStatus
from financialaid.factories import (
TierFactory,
FinancialAidFactory
)
from financialaid.models import (
Tier,
FinancialAidAudit,
)
from micromasters.utils import serialize_model_object
from profiles.factories import ProfileFactory
from search.base import MockedESTestCase
class FinancialAidModelsTests(MockedESTestCase):
"""
Tests for financialaid models
"""
def test_timestamped_model_update(self):
"""
Tests that timestamped models have update_on updated regardless of whether using .save() or .update()
"""
tier = TierFactory.create()
first_timestamp = tier.updated_on
tier.save()
second_timestamp = tier.updated_on
assert first_timestamp != second_timestamp
Tier.objects.filter(id=tier.id).update(name="new_tier")
        third_timestamp = Tier.objects.get(id=tier.id).updated_on  # re-fetch to read the refreshed timestamp
assert second_timestamp != third_timestamp
def test_financial_aid_model_unique(self):
"""
Tests that FinancialAid objects are unique per User and Program
"""
financial_aid = FinancialAidFactory.create()
# Test creation of FinancialAid that isn't unique_together with "user" and "tier_program__program"
# financial aid with same user and different program (new program created by the factory)
FinancialAidFactory.create(user=financial_aid.user)
# financial aid with same program and different user (new user created by the factory)
FinancialAidFactory.create(tier_program=financial_aid.tier_program)
# Test updating the original FinancialAid doesn't raise ValidationError
financial_aid.income_usd = 100
financial_aid.save()
# Test creation should fail for FinancialAid already existing with the same "user" and "tier_program__program"
with self.assertRaises(ValidationError):
FinancialAidFactory.create(
user=financial_aid.user,
tier_program=financial_aid.tier_program
)
def test_financial_aid_model_duplicate_if_reset(self):
"""
        Tests that duplicate FinancialAid objects can be created for the same User
        and Program as long as the existing ones are in the reset status
"""
financial_aid = FinancialAidFactory.create()
        # changing the first one to any status other than `reset` means creating a new financial aid will fail
for status in FinancialAidStatus.ALL_STATUSES:
if status == FinancialAidStatus.RESET:
continue
financial_aid.status = status
financial_aid.save()
with self.assertRaises(ValidationError):
FinancialAidFactory.create(
user=financial_aid.user,
tier_program=financial_aid.tier_program
)
# reset status will allow a new financial aid
financial_aid.status = FinancialAidStatus.RESET
financial_aid.save()
FinancialAidFactory.create(
user=financial_aid.user,
tier_program=financial_aid.tier_program
)
def test_save_and_log(self):
"""
Tests that FinancialAid.save_and_log() creates an audit record with the correct information.
"""
with mute_signals(post_save):
profile = ProfileFactory.create()
acting_user = profile.user
financial_aid = FinancialAidFactory.create()
original_before_json = serialize_model_object(financial_aid)
# Make sure audit object is created
assert FinancialAidAudit.objects.count() == 0
financial_aid.status = FinancialAidStatus.AUTO_APPROVED
financial_aid.save_and_log(acting_user)
assert FinancialAidAudit.objects.count() == 1
# Make sure the before and after data are correct
financial_aid.refresh_from_db()
original_after_json = serialize_model_object(financial_aid)
financial_aid_audit = FinancialAidAudit.objects.first()
before_json = financial_aid_audit.data_before
after_json = financial_aid_audit.data_after
for field, value in before_json.items():
# Data before
if isinstance(value, float):
# JSON serialization of FloatField is precise, so we need to do almost equal
self.assertAlmostEqual(value, original_before_json[field])
else:
assert value == original_before_json[field]
for field, value in after_json.items():
# Data after
if isinstance(value, float):
# JSON serialization of FloatField is precise, so we need to do almost equal
self.assertAlmostEqual(value, original_after_json[field])
else:
assert value == original_after_json[field]
def test_to_dict(self):
"""
assert output of to_dict
"""
financial_aid = FinancialAidFactory.create()
assert financial_aid.to_dict() == serialize_model_object(financial_aid)
|
from twisted.internet.defer import inlineCallbacks
import hathor
from hathor.version_resource import VersionResource
from tests import unittest
from tests.resources.base_resource import StubSite, _BaseResourceTest
class BaseVersionTest(_BaseResourceTest._ResourceTest):
__test__ = False
def setUp(self):
super().setUp()
self.web = StubSite(VersionResource(self.manager))
@inlineCallbacks
def test_get(self):
response = yield self.web.get("version")
data = response.json_value()
self.assertEqual(data['version'], hathor.__version__)
class SyncV1VersionTest(unittest.SyncV1Params, BaseVersionTest):
__test__ = True
class SyncV2VersionTest(unittest.SyncV2Params, BaseVersionTest):
__test__ = True
# sync-bridge should behave like sync-v2
class SyncBridgeVersionTest(unittest.SyncBridgeParams, SyncV2VersionTest):
pass
|
import time
import os
import numpy as np
from nibabel import trackvis as tv
from dipy.viz import fos
from dipy.io import pickles as pkl
from dipy.core import track_learning as tl
from dipy.core import track_performance as pf
from dipy.core import track_metrics as tm
fname='/home/eg01/Data/PBC/pbc2009icdm/brain1/brain1_scan1_fiber_track_mni.trk'
C_fname='/tmp/larch_tree.pkl'
appr_fname='/tmp/larch_tracks.trk'
print 'Loading trackvis file...'
streams,hdr=tv.read(fname)
print 'Copying tracks...'
tracks=[i[0] for i in streams]
#tracks=tracks[:1000]
#print 'Deleting unnecessary data...'
del streams#,hdr
if not os.path.isfile(C_fname):
print 'Starting LARCH ...'
tim=time.clock()
C,atracks=tl.larch(tracks,[50.**2,20.**2,5.**2],True,True)
#tracks=[tm.downsample(t,3) for t in tracks]
#C=pf.local_skeleton_clustering(tracks,20.)
print 'Done in total of ',time.clock()-tim,'seconds.'
print 'Saving result...'
pkl.save_pickle(C_fname,C)
streams=[(i,None,None)for i in atracks]
tv.write(appr_fname,streams,hdr)
else:
print 'Loading result...'
C=pkl.load_pickle(C_fname)
skel=[]
for c in C:
skel.append(C[c]['repz'])
print 'Showing dataset after clustering...'
r=fos.ren()
fos.clear(r)
colors=np.zeros((len(skel),3))
for (i,s) in enumerate(skel):
color=np.random.rand(1,3)
colors[i]=color
fos.add(r,fos.line(skel,colors,opacity=1))
fos.show(r)
|
from sympy import symbols, Dij, LeviCivita
x, y = symbols('x,y')
def test_Dij():
assert Dij(1, 1) == 1
assert Dij(1, 2) == 0
assert Dij(x, x) == 1
assert Dij(x**2-y**2, x**2-y**2) == 1
def test_levicivita():
assert LeviCivita(1, 2, 3) == 1
assert LeviCivita(1, 3, 2) == -1
assert LeviCivita(1, 2, 2) == 0
i,j,k = symbols('i j k')
assert LeviCivita(i, j, k) == LeviCivita(i,j,k, evaluate=False)
assert LeviCivita(i, j, i) == 0
assert LeviCivita(1, i, i) == 0
assert LeviCivita(i, j, k).doit() == (j - i)*(k - i)*(k - j)/2
assert LeviCivita(1, 2, 3, 1) == 0
assert LeviCivita(4, 5, 1, 2, 3) == 1
assert LeviCivita(4, 5, 2, 1, 3) == -1
|
import ast
import types
import signal
import ctypes
import ctypes.util
import threading
c_off_t = ctypes.c_int64
from ebml.exceptions import UnexpectedEndOfData
# Imports kept for compatibility, in case some modules still expect these
# functions to be importable from here.
from .vint import (detectVintSize, getVintSize, fromVint, toVint, parseVint,
parseVints, readVint, peekVint, parseFile, parseElements)
def toVints(a):
return b"".join(map(toVint, a))
def formatBytes(data):
return " ".join(f"{x:02X}" for x in data)
class Constant(object):
def __init__(self, value):
self.value = value
def __get__(self, inst=None, cls=None):
if inst is None:
return self
return self.value
def make_fallocate():
libc_name = ctypes.util.find_library('c')
libc = ctypes.CDLL(libc_name)
_fallocate = libc.fallocate
_fallocate.restype = ctypes.c_int
_fallocate.argtypes = [ctypes.c_int, ctypes.c_int, c_off_t, c_off_t]
del libc
del libc_name
def fallocate(fd, mode, offset, len_):
res = _fallocate(fd.fileno(), mode, offset, len_)
if res != 0:
raise IOError(res, 'fallocate')
return fallocate
_fallocate = make_fallocate()
del make_fallocate
FALLOC_FL_KEEP_SIZE = 0x01
FALLOC_FL_PUNCH_HOLE = 0x02
FALLOC_FL_COLLAPSE_RANGE = 0x08
FALLOC_FL_INSERT_RANGE = 0x20
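# Usage sketch (added comment, not original code): punching a hole in an already
# open file without changing its apparent size would look like
#   fallocate(fileobj, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, length)
# Note that this relies on the Linux-only libc fallocate(); it is unavailable on
# macOS and Windows.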
class NoInterrupt(object):
"""
Context manager used to perform a sequence of IO operations that
must not be interrupted with KeyboardInterrupt.
"""
def __enter__(self):
self._signal_received = False
        if threading.current_thread() is threading.main_thread():
self._old_handler = signal.signal(signal.SIGINT, self.handler)
def handler(self, sig, frame):
self._signal_received = (sig, frame)
def __exit__(self, type, value, traceback):
        if threading.current_thread() is threading.main_thread():
signal.signal(signal.SIGINT, self._old_handler)
if self._signal_received:
self._old_handler(*self._signal_received)
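# Minimal usage sketch (not part of the original module): NoInterrupt records a
# SIGINT delivered inside the block and only re-raises it once the block exits.
if __name__ == "__main__":
    import time

    with NoInterrupt():
        # A Ctrl-C here is captured by handler(); __exit__ re-raises it afterwards.
        time.sleep(2)
    print("protected block finished")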
|
""" Helper module to run HVE scheme
"""
from searchableencryption.hve import util
def run_hve_multiple(hve_quadruple: tuple,
indices: list,
queries: list,
groupParam: dict):
""" Run HVE with multiple indices and queries
:param tuple hve_quadruple: quadruple of HVE functions (setup, encrypt, gen_token, query)
    :param list indices: list of index vectors with 0/1 entries, where 1s indicate locations
:param list queries: list of queries with 0, 1, or `WILDCARD`
:param dict groupParam: group parameters
:returns: list of list of matched indices of each query
"""
setup = hve_quadruple[0]
encrypt = hve_quadruple[1]
gen_token = hve_quadruple[2]
query = hve_quadruple[3]
width = util.check_size(indices, queries)
print('width:', width)
(pk, sk) = setup(width=width, group_param=groupParam)
print('Done setup')
C = []
for index in indices:
C.append(encrypt(pk, index))
print('Done encrypt')
matches = []
for qi, I_star in enumerate(queries):
token = gen_token(sk, I_star)
print('Done gen token')
matched_items = list()
for ci, cipher in enumerate(C):
matched = query(token, cipher, predicate_only=True, group=pk['group'])
if matched:
# matched
matched_items.append(ci)
matches.append(matched_items)
return matches
|
import intake
import intake.config
from intake.source.cache import CacheMetadata
import os
import subprocess
from intake.source.tests.util import temp_cache
cpath = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'../../../catalog/tests/catalog_caching.yml'))
def test_help(tmpdir):
out = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache' % tmpdir,
shell=True).decode()
assert 'usage: ' in out
out2 = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache -h' % tmpdir,
shell=True).decode()
assert out2 == out
def test_list_keys(temp_cache):
tmpdir = intake.config.confdir
out = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache'
' list-keys' % tmpdir,
shell=True).decode()
assert '[]\n' in out # empty cache
cat = intake.open_catalog(cpath)
cat.test_cache.read()
out = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache list-keys'
'' % tmpdir,
shell=True).decode()
assert 'states.csv' in out
def test_precache(temp_cache):
tmpdir = intake.config.confdir
out = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache list-keys'
'' % tmpdir,
shell=True).decode()
assert out == "[]\n\n"
out = subprocess.check_output('INTAKE_CONF_DIR=%s INTAKE_CACHE_DIR=%s '
'intake precache %s ' %
(tmpdir, tmpdir, cpath), shell=True).decode()
assert out.count('Caching for entry') > 1
out = subprocess.check_output('INTAKE_CONF_DIR=%s intake cache list-keys'
'' % tmpdir,
shell=True).decode()
assert 'states.csv' in out
assert 'small.npy' in out
def test_clear_all(temp_cache):
tmpdir = intake.config.confdir
cat = intake.open_catalog(cpath)
cat.test_cache.read()
md = CacheMetadata()
assert len(md) == 1
assert 'states' in list(md)[0]
subprocess.call('INTAKE_CONF_DIR=%s intake cache clear'
'' % tmpdir,
shell=True)
md = CacheMetadata()
assert len(md) == 0
def test_clear_one(temp_cache):
tmpdir = intake.config.confdir
cat = intake.open_catalog(cpath)
cat.test_cache.read()
cat.arr_cache.read()
md = CacheMetadata()
keys = list(md)
assert len(keys) == 2
subprocess.call('INTAKE_CONF_DIR=%s intake cache clear %s'
'' % (tmpdir, keys[0]),
shell=True)
md = CacheMetadata()
assert len(md) == 1
assert list(md)[0] == keys[1]
def test_usage(temp_cache):
tmpdir = intake.config.confdir
from intake.source.cache import BaseCache
BaseCache(None, None).clear_all()
out = subprocess.check_output('INTAKE_CONF_DIR=%s INTAKE_CACHE_DIR=%s'
' intake cache usage' % (tmpdir, tmpdir),
shell=True).decode()
assert '0.0' in out # empty!
|
import os
import sys
import colorama
import logging
import urllib.request
logger = logging.getLogger('main')
def start_fix():
import zipfile
import platform
print(f'Copying sqlite3.dll to the current directory: {os.getcwd()} ... ', end='')
work_dir = os.path.dirname(os.path.abspath(__file__))
filename = 'sqlite-dll-win64-x64-3350500.zip' if platform.architecture()[0] == '64bit' \
else 'sqlite-dll-win32-x86-3350500.zip'
url = 'https://www.sqlite.org/2021/' + filename
src = os.path.join(work_dir, 'sqlite.zip')
try:
with urllib.request.urlopen(url) as f:
with open(src, 'wb') as f_out:
f_out.write(f.read())
except:
print(colorama.Fore.LIGHTRED_EX + "\nCan't download sqlite.zip. Please, download it manually:\n" + url)
print(colorama.Fore.WHITE)
exit()
with zipfile.ZipFile(src, 'r') as zip_ref:
zip_ref.extractall('.')
print('finished')
print(colorama.Fore.LIGHTGREEN_EX + '\nPlease restart Label Studio to load the updated sqlite.dll\n')
print(colorama.Fore.WHITE)
exit()
def windows_dll_fix():
""" Copy sqlite.dll to the current directory and use it """
# check if it is not on windows
if sys.platform != 'win32':
return
    print(f'Current platform is {sys.platform}, checking whether the sqlite fix is needed')
# set env
import ctypes
path_to_dll = os.path.abspath('.')
os.environ['PATH'] = path_to_dll + os.pathsep + os.environ['PATH']
try:
ctypes.CDLL(os.path.join(path_to_dll, 'sqlite3.dll'))
print('Add current directory to PATH for DLL search: ' + path_to_dll)
except OSError:
print("Can't load sqlite3.dll from current directory")
# check sqlite version
import sqlite3
v = sqlite3.sqlite_version_info
# if v[0] >= 3 and v[1] >= 35:
    #     print("sqlite3 version doesn't need a fix")
# return
# check python version and warn
    print(f'python version: {sys.version_info.major}.{sys.version_info.minor}, sqlite version: {v[0]}.{v[1]}.{v[2]}')
if sys.version_info.major == 3 and sys.version_info.minor in [6, 7, 8]:
print('\n' + colorama.Fore.LIGHTYELLOW_EX +
'You are on ' +
colorama.Fore.LIGHTRED_EX +
f'Windows Python {sys.version_info.major}.{sys.version_info.minor}.\n' +
colorama.Fore.LIGHTYELLOW_EX +
f"This Python version uses SQLite "
f"{colorama.Fore.LIGHTRED_EX}{v[0]}.{v[1]}.{v[2]} " +
colorama.Fore.LIGHTYELLOW_EX +
f"which does not support JSON Field.\n" +
'Read more about this issue: ' +
colorama.Fore.LIGHTWHITE_EX +
'https://code.djangoproject.com/wiki/JSON1Extension [Windows section]\n')
auto_agree = any([a == '--agree-fix-sqlite' for a in sys.argv])
agree = 'n'
if not auto_agree:
print(colorama.Fore.WHITE +
'Label Studio can try to resolve this issue by downloading the correct '
'sqlite.dll from https://sqlite.org in the current directory, '
'do you want to proceed? \n [y/n] > ', end='')
agree = input()
if agree == 'y' or auto_agree:
start_fix()
print(colorama.Fore.WHITE)
|
class Node(object):
__slots__ = ['obj', 'next']
def __init__(self, obj):
self.obj = obj
self.next = None
class LinkedList(object):
__slots__ = ['head', 'tail', 'length']
def __init__(self, *init_list):
self.head = None
self.tail = None
self.length = 0
for obj in init_list:
self.push(obj)
def push(self, obj):
if not self.head:
self.head = self.tail = Node(obj)
else:
self.tail.next = Node(obj)
self.tail = self.tail.next
self.length += 1
def pop(self) -> Node:
if (not self.head):
raise StopIteration
else:
self.length = self.length - 1
retval = self.head.obj
self.head = self.head.next
if not self.head:
self.tail = None
return retval
def clear(self):
self.head = self.tail = None
self.length = 0
def __iter__(self):
return self
def __next__(self) -> Node:
return self.pop()
def __bool__(self) -> bool:
        return self.head is not None
def __len__(self) -> int:
return self.length
def __str__(self) -> str:
return f'<Linked List> Size: {self.length}'
def __repr__(self) -> str:
return self.__str__()
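# Minimal usage sketch (not part of the original module): note that iterating the
# list consumes it, because __next__ simply delegates to pop().
if __name__ == '__main__':
    ll = LinkedList(1, 2, 3)
    print(len(ll))    # 3
    print(ll.pop())   # 1
    print(list(ll))   # [2, 3] -- iteration drains the remaining nodes
    print(bool(ll))   # False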
|
from __future__ import absolute_import, unicode_literals
import datetime
import random
import string
today = datetime.date.today()
def random_string_generator(size=10, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def random_number_generator(size=6, chars=string.digits):
return ''.join(random.choice(chars) for _ in range(size))
def unique_number_generator(instance, matnumber=None):
"""
    Generates a unique investor id (mat number), retrying with a random numeric suffix until it does not collide with an existing record.
"""
if matnumber is not None:
investor_id = matnumber
else:
investor_id = "INV_01293"
Klass = instance.__class__
qs_exists = Klass.objects.filter(investor_id=investor_id).exists()
if qs_exists:
matnumber = "INV_{matnumber}".format(matnumber=random_number_generator(size=6))
return unique_number_generator(instance, matnumber=matnumber)
return investor_id
def unique_staff_id(instance, staffid):
"""
Generate a unique staff id.
"""
school_initials = instance.school.initials
year = today.year
if staffid is not None:
staffID = staffid
else:
staffID = f"{school_initials}-{year}-STAFF"
Klass = instance.__class__
qs_exists = Klass.objects.filter(staffID=staffID).exists()
if qs_exists:
staffid = "{school_initials}-{year}-{staffid}".format(school_initials=school_initials, year=year, staffid=random_string_generator(size=6))
        return unique_staff_id(instance, staffid=staffid)
return staffID
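# Typical wiring (a sketch with assumed model names, not part of this module): these
# helpers are usually called from a pre_save signal receiver, e.g.
#
#   @receiver(pre_save, sender=Investor)
#   def set_investor_id(sender, instance, **kwargs):
#       if not instance.investor_id:
#           instance.investor_id = unique_number_generator(instance)
#
# where `Investor` and the receiver registration belong to the host app.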
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-26 12:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('opportunity', '0001_initial'),
('contacts', '0001_initial'),
('common', '0001_initial'),
('leads', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comment',
name='contact',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='contact_comments', to='contacts.Contact'),
),
migrations.AddField(
model_name='comment',
name='lead',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='leads', to='leads.Lead'),
),
migrations.AddField(
model_name='comment',
name='opportunity',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='opportunity_comments', to='opportunity.Opportunity'),
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
|
import sys
import time
import datetime
import argparse
import struct
import math
import serial
import pygame
from pygame.locals import *
sys.path.append('../lib')
import hdlc
SCR_WIDTH = 800
SCR_HEIGHT = 600
black = (0,0,0)
light_gray = (224,224,224)
white = (255,255,255)
red = (255,0,0)
FIX_DIV = 65536.0
VAL_SQRT_1_DIV_2 = 0.70710678118654752440084436210485
VAL_SQRT_3_DIV_2 = 0.86602540378443864676372317075294
VAL_1_DIV_45 = 1. / 45.
VAL_1_DIV_128 = 1. / 128.
SIDE_X_OFFSET = 3.5
SIDE_Y_OFFSET = 5
FRONT_SIDE_X_OFFSET = 2.5
FRONT_SIDE_Y_OFFSET = 1
FRONT_Y_OFFSET = 4
parser = argparse.ArgumentParser(description="RustTelemetry", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--port", type=str, default="/dev/tty.usbserial-A8008iwL", help="usb serial port device eg. /dev/ttyUSB0")
args = parser.parse_args()
s = serial.Serial(args.port, 57600, timeout=0.001);
s.flushInput()
s.flushOutput()
parser = hdlc.HdlcChecksummed()
# header bytes
BC_TELEMETRY = 0x01
CB_MOTOR_COMMAND = 0x02
AUTOMATIC_DEFAULT = 0
STEERING_PWM_DEFAULT = 90
DRIVING_PWM_DEFAULT = 90
def send_packet(data):
data = hdlc.add_checksum(data)
data = hdlc.escape_delimit(data)
s.write(data)
def p(x, y):
return (int(SCR_WIDTH / 2 + 3*x), int(SCR_HEIGHT - 3*y - SCR_HEIGHT / 8))
def run():
time = cycles = left = right = front_left = front_right = front = mc_x = mc_y = mc_dist = mc_angle = steerPwm = speedPwm = battery = 0
lx = ly = flx = fly = fx = fy = frx = fry = rx = ry = 0
accel_x = accel_y = accel_z = speed_x = speed_y = speed_z = 0.0
last_time = last_cycles = 0
state = 0
pygame.init()
pygame.display.set_caption("RustTelemetry")
# initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
myfont = pygame.font.SysFont("monospace", 20)
screen = pygame.display.set_mode((SCR_WIDTH, SCR_HEIGHT))
# by default the key repeat is disabled, enable it
pygame.key.set_repeat(50, 50)
running = True
automatic = AUTOMATIC_DEFAULT
steering_pwm = STEERING_PWM_DEFAULT # center
drive_pwm = DRIVING_PWM_DEFAULT # stop
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == KEYUP:
if event.key == K_SPACE:
automatic = 0 if automatic == 1 else 1
steering_pwm = STEERING_PWM_DEFAULT # center
drive_pwm = DRIVING_PWM_DEFAULT # stop
motor_command = struct.pack("<BBBB", CB_MOTOR_COMMAND, automatic, steering_pwm, drive_pwm)
send_packet(motor_command)
print(automatic, steering_pwm, drive_pwm)
elif event.type == KEYDOWN:
if event.key == K_RIGHT:
automatic = 0
steering_pwm += 1
if steering_pwm > 180:
steering_pwm = 180
print 'steering pwm %u' % steering_pwm
elif event.key == K_LEFT:
automatic = 0
steering_pwm -= 1
if steering_pwm < 0:
steering_pwm = 0
print 'steering pwm %u' % steering_pwm
elif event.key == K_UP:
automatic = 0
drive_pwm += 1
if drive_pwm > 180:
drive_pwm = 180
print 'drive pwm %u' % drive_pwm
elif event.key == K_DOWN:
automatic = 0
drive_pwm -= 1
if drive_pwm < 0:
drive_pwm = 0
print 'drive pwm %u' % drive_pwm
elif event.key == K_p:
automatic = 0
drive_pwm = 105
print 'drive pwm %u' % drive_pwm
elif event.key == K_SPACE:
continue # handled in event.type == KEYUP
elif event.key == K_ESCAPE:
running = False
#kill
automatic = AUTOMATIC_DEFAULT
steering_pwm = STEERING_PWM_DEFAULT # center
drive_pwm = DRIVING_PWM_DEFAULT # stop
continue
else:
#kill
automatic = AUTOMATIC_DEFAULT
steering_pwm = STEERING_PWM_DEFAULT # center
drive_pwm = DRIVING_PWM_DEFAULT # stop
motor_command = struct.pack("<BBBB", CB_MOTOR_COMMAND, automatic, steering_pwm, drive_pwm)
send_packet(motor_command)
print(automatic, steering_pwm, drive_pwm)
# read serial
data = s.read(20)
if data:
parser.put(data)
for packet in parser:
header, = struct.unpack("<B", packet[:1])
if header == BC_TELEMETRY:
last_cycles = cycles
last_time = time
time, cycles, left, right, front_left, front_right, front, mc_x, mc_y, mc_dist, mc_angle, accel_x, accel_y, accel_z, speed_x, speed_y, speed_z, automatic, steerPwm, speedPwm, battery, state = struct.unpack("<IIiiiiiiiiiffffffBBBHB", packet[1:])
left /= FIX_DIV
front_left /= FIX_DIV
front /= FIX_DIV
front_right /= FIX_DIV
right /= FIX_DIV
mc_x /= FIX_DIV
mc_y /= FIX_DIV
mc_dist /= FIX_DIV
mc_angle /= FIX_DIV
#mc_angle_rad = math.radians(mc_angle)
#mc_x_calc = mc_dist * math.cos(mc_angle_rad)
#mc_y_calc = mc_dist * math.sin(mc_angle_rad)
a1 = left * VAL_SQRT_1_DIV_2
lx = -(a1 + SIDE_X_OFFSET)
ly = a1 - SIDE_Y_OFFSET
flx = -(front_left * 0.5 + FRONT_SIDE_X_OFFSET)
fly = front_left * VAL_SQRT_3_DIV_2 + FRONT_SIDE_Y_OFFSET
fx = 0
fy = front + FRONT_Y_OFFSET
frx = front_right * 0.5 + FRONT_SIDE_X_OFFSET
fry = front_right * VAL_SQRT_3_DIV_2 + FRONT_SIDE_Y_OFFSET
a2 = right * VAL_SQRT_1_DIV_2
rx = a2 + SIDE_X_OFFSET
ry = a2 - SIDE_Y_OFFSET
#print("battery: %u" % battery)
if automatic:
print("%.2f %.2f %.2f %.2f %.2f (%.f,%.2f;%.2f,%.2f %3u %3u %u)" % (left, front_left, front, front_right, right, mc_x, mc_y, mc_dist, mc_angle, steerPwm, speedPwm, state))
#if math.sqrt(accel_x*accel_x + accel_y*accel_y + accel_z*accel_z) > 0.1:
# print("%f\t%f %3u" % (accel_x, speed_x, speedPwm))
sys.stdout.flush()
# erase the screen
screen.fill(white)
if lx != 0:
pts = [p(lx, 0), p(lx, ly), p(flx, fly), p(fx, fy), p(frx, fry), p(rx, ry), p(rx, 0)]
pygame.draw.polygon(screen, light_gray, pts, 0)
pygame.draw.polygon(screen, black, pts, 3)
pygame.draw.lines(screen, red, False, [p(0,0), p(mc_x, mc_y)], 3)
pygame.draw.circle(screen, black, p(0, 0), 10, 0)
pygame.draw.circle(screen, red, p(mc_x, mc_y), 10, 0)
# render text
label = myfont.render("battery: %.3fV" % (battery / 1000.0,), 1, (255,125,125))
screen.blit(label, (10, 10))
if automatic == 1:
label = myfont.render("automatic: yes", 1, (255,0,0))
else:
label = myfont.render("automatic: no", 1, (0,255,0))
screen.blit(label, (10, 40))
label = myfont.render("accel: %+0.4f %+0.4f %+0.4f" % (accel_x, accel_y, accel_z), 1, (125,125,255))
screen.blit(label, (10, 70))
label = myfont.render("speed: %+0.4f %+0.4f %+0.4f" % (speed_x, speed_y, speed_z), 1, (125,125,255))
screen.blit(label, (10, 100))
label = myfont.render("cycles per millisecond: %0.2f" % (float(cycles - last_cycles) / float(time - last_time)), 1, (125,255,125))
screen.blit(label, (10, 130))
label = myfont.render("steer: %3u drive: %3u" % (steerPwm, speedPwm), 1, (255,125,255))
screen.blit(label, (10, SCR_HEIGHT - 30))
# update the screen
pygame.display.update()
if __name__=="__main__":
run()
|
''' mbinary
#########################################################################
# File : rotate.py
# Author: mbinary
# Mail: zhuheqin1@gmail.com
# Blog: https://mbinary.xyz
# Github: https://github.com/mbinary
# Created Time: 2018-05-19 21:54
# Description: three methods of rotating a list
1. Three-reversal method: using ba = (b^r)^T (a^r)^T = (a^r b^r)^T, reverse the front part of the sequence, then the back part, then the whole sequence.
2. Block swapping (keeping as much of the desired prefix in place as possible): if a is longer than b, split ab into a0 a1 b, swap a0 with b to get b a1 a0, and only a1 and a0 remain to be swapped; if a is shorter than b, split ab into a b0 b1, swap a with b0 to get b0 a b1, and only a and b0 remain to be swapped. Keep partitioning and swapping until nothing is left to split; the process closely mirrors computing the greatest common divisor.
3. All positions of the form (j + i*m) % n (j is the start of each cycle, i a counter, m the number of positions rotated left, n the length of the sequence) form a cycle; there are gcd(n, m) such cycles, and every element only has to move once along its cycle, so the whole process performs n moves in total (n/gcd(n, m) moves per cycle times gcd(n, m) cycles).
#########################################################################
'''
def rotate(s,k,right=False):
def reverse(a,b):
while a<b:
s[a],s[b]=s[b],s[a]
a+=1
b-=1
n=len(s)
k = k%n if not right else n-k%n
reverse(0,k-1)
reverse(k,n-1)
reverse(0,n-1)
return s
def rotate2(s,k,right=False):
def swap(a,b,c):
for i in range(c):
s[a+i],s[b+i] = s[b+i],s[a+i]
def _rot(pl,pr):
''' swap s[pl,pr) , s[pr:]'''
if pr==n:return
if pr-pl<=n-pr:
swap(pl,pr,pr-pl)
_rot(pr,2*pr-pl)
else:
swap(pl,pr,n-pr)
_rot(n-pr+pl,pr)
n=len(s)
k = k%n if not right else n-k%n
_rot(0,k)
return s
def rotate3(s,k,right=False):
def gcd(a,b):
if b==0:return a
return gcd(b,a%b)
n=len(s)
k = k%n if not right else n-k%n
r=gcd(n,k)
for i in range(r):
tmp = s[i]
j = (i+k)%n
while j!=i:
s[j-k] = s[j]
j = (j+k)%n
s[(j-k+n)%n] = tmp
return s
def test():
def f(func,*args,right=False):
print(' '.join(['testing:',func.__name__,str(args),'right=',str(right)]))
rst = func(*args,right=right)
print('result',rst)
print()
return f
if __name__=='__main__':
s=[i for i in range(10)]
tester= test()
tester(rotate,s,4,right=True)
tester(rotate,s,4)
tester(rotate2,s,2,right=True)
tester(rotate2,s,2)
tester(rotate3,s,132,right=True)
tester(rotate3,s,132)
'''
testing: rotate ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 4) right= True
result [6, 7, 8, 9, 0, 1, 2, 3, 4, 5]
testing: rotate ([6, 7, 8, 9, 0, 1, 2, 3, 4, 5], 4) right= False
result [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
testing: rotate2 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 2) right= True
result [8, 9, 0, 1, 2, 3, 4, 5, 6, 7]
testing: rotate2 ([8, 9, 0, 1, 2, 3, 4, 5, 6, 7], 2) right= False
result [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
testing: rotate3 ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 132) right= True
result [8, 9, 0, 1, 2, 3, 4, 5, 6, 7]
testing: rotate3 ([8, 9, 0, 1, 2, 3, 4, 5, 6, 7], 132) right= False
result [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
'''
|
from . import register
from django.utils.safestring import mark_safe
from django.conf import settings
from django.template import loader
@register.inclusion_tag("banner.html")
def cookie_banner():
return {}
def render_gtm_template(template_filename, gtm_container_id):
t = loader.get_template(template_filename)
return t.render({"GTM_CONTAINER_ID": gtm_container_id})
@register.simple_tag()
def google_tag_manager():
if not settings.IEE_GA_GTM:
return mark_safe("<!-- missing GTM container id -->")
return render_gtm_template("gtm.html", settings.IEE_GA_GTM)
@register.simple_tag()
def google_tag_manager_noscript():
if not settings.IEE_GA_GTM:
return mark_safe("<!-- missing GTM container id -->")
return render_gtm_template("gtm_noscript.html", settings.IEE_GA_GTM)
|
from behave import given, when, then
@given(u'we have irisvmpy installed')
def step_impl(context):
raise NotImplementedError(u'STEP: Given we have irisvmpy installed')
@when(u'we run program')
def step_impl(context):
raise NotImplementedError(u'STEP: When we run program')
@then(u'<species> will be displayed')
def step_impl(context):
raise NotImplementedError(u'STEP: Then Something will be displayed')
|
"""
Lambda responsible for handling API requests to show the list of batches, or to
show a particular batch.
"""
import json
import botocore
from shared import db
from shared.api_helpers import input_batch_to_human_readable
from shared.constants import BatchMetadataType, BatchStatus
from shared.log import log_request_and_context, logger
def get_all_batches():
"""
Generate a dictionary of batch ids by status
:returns: json serializable dictionary indexed by batch status and containing
list of batch ids
"""
batch_ids_by_status = {}
for status in [
BatchStatus.IN_PROGRESS,
BatchStatus.VALIDATION_FAILURE,
BatchStatus.INTERNAL_ERROR,
BatchStatus.COMPLETE,
]:
batches = db.get_batches_by_type_status(BatchMetadataType.INPUT, status)
batch_ids = [batch["BatchId"] for batch in batches]
batch_ids_by_status[status] = batch_ids
return batch_ids_by_status
def get_batch_description(batch_id):
"""
Looks up a batch using the given batch id and validates that the batch
is of appropriate type, then returns a human readable representation.
:param batch_id: Id of batch to convert to human readable description
:returns: json serializable description of a given batch
"""
batch_metadata = db.get_batch_metadata(batch_id)
# User should only be querying for parent batches of type "INPUT", not frame
# level batches.
if batch_metadata["BatchMetadataType"] != BatchMetadataType.INPUT:
logger.error(
"User requested existing batch, but it is of the wrong type (not INPUT): %s", batch_id
)
return None
# Convert batch metadata to something user presentable.
return input_batch_to_human_readable(batch_metadata)
def handle_request(request):
"""
Handles requests for all batches or specific batch information
:param request: Dictionary containing "batchId"
:returns: Dictionary consisting of the api response body.
"""
batch_id = request["batchId"]
if batch_id is None:
return get_all_batches()
return get_batch_description(batch_id)
def parse_request(event):
"""
Parses a given request's url params.
:param event: API gateway input event for GET request
:returns: Parsed request params dictionary
"""
url_params = event.get("multiValueQueryStringParameters")
if url_params is None:
return {"batchId": None}
batch_ids = url_params.get("batchId")
    if not batch_ids or len(batch_ids) != 1:
return {"batchId": None}
batch_id = batch_ids[0]
return {
"batchId": batch_id,
}
def lambda_handler(event, context):
"""Lambda function that responds shows active batch information.
Parameters
----------
    event: dict, required API gateway GET request, optionally carrying a "batchId" query parameter
context: object, required Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
Lambda Output Format: dict
Return doc:
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
log_request_and_context(event, context)
try:
request = parse_request(event)
except (KeyError, ValueError) as err:
logger.error("Failed to parse request: %s", err)
return {
"statusCode": 400,
"body": "Error: failed to parse request.",
}
try:
batch_info = handle_request(request)
except botocore.exceptions.ClientError as err:
logger.error("Boto call failed to execute during request handling: {err}")
return {
"statusCode": 500,
"body": "Error: internal error",
}
if batch_info is None:
logger.error("Batch id not found, request: %s", request)
return {
"statusCode": 400,
"body": f"batch id: {request['batchId']} not found",
"headers": {"X-Amzn-ErrorType": "InvalidParameterException"},
}
response = {
"statusCode": 200,
"body": json.dumps(batch_info, indent=4, default=str),
"isBase64Encoded": False,
}
return response
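# Local smoke test (a sketch, not part of the deployed Lambda): handle_request needs
# DynamoDB access through `shared.db`, so only the request-parsing step is exercised.
if __name__ == "__main__":
    fake_event = {"multiValueQueryStringParameters": {"batchId": ["batch-0001"]}}
    print(parse_request(fake_event))  # {'batchId': 'batch-0001'}
    print(parse_request({"multiValueQueryStringParameters": None}))  # {'batchId': None}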
|
import serial
import time
class WifiInterface():
__debug = True
__serial_interface_port = "/dev/ttyAMA0"
__last_cmd = ''
__wifi_serial = None
# AP
__ap_name = "PiCopter-Wifi"
__ap_password = ""
__ap_channel = 2
__ap_sec = 0
def __init__(self, baud, wifi_mode, server_ip, server_port):
self.__baud_rate = baud
self.__ip = server_ip
self.__port = server_port
self.__WIFI_MODE = wifi_mode
def __init_serial_interface(self):
# todo: finish
self.__wifi_serial = serial.Serial()
self.__wifi_serial.port = self.__serial_interface_port
self.__wifi_serial.baudrate = self.__baud_rate
self.__wifi_serial.timeout = 2
self.__wifi_serial.writeTimeout = 2
# needed ?
self.__wifi_serial.bytesize = serial.EIGHTBITS
self.__wifi_serial.parity = serial.PARITY_NONE
self.__wifi_serial.stopbits = serial.STOPBITS_ONE
self.__wifi_serial.xonxoff = False
self.__wifi_serial.rtscts = False
self.__wifi_serial.open()
return self.__wifi_serial.isOpen()
#-------------------------------------------------------------------
#initialization and open the port.
#possible timeout values:
# 1. None: wait forever, block call
# 2. 0: non-blocking mode, return immediately
# 3. x, x is bigger than 0, float allowed, timeout block call
# ser = serial.Serial()
# ser.port = SerialPort
# ser.baudrate = SerialBaudrate
# ser.bytesize = serial.EIGHTBITS #number of bits per bytes
# ser.parity = serial.PARITY_NONE #set parity check: no parity
# ser.stopbits = serial.STOPBITS_ONE #number of stop bits
# #ser.timeout = 1 #non-block read
# ser.timeout = 2.5 #timeout block call
# ser.xonxoff = False #disable software flow control
# ser.rtscts = False #disable hardware (RTS/CTS) flow control
# ser.dsrdtr = False #disable hardware (DSR/DTR) flow control
# ser.writeTimeout = 2 #timeout for write
#-------------------------------------------------------------------
def init_interface(self):
if (self.__init_serial_interface()):
print "serial interface set up"
else:
print "serial interface error"
# check if esp alive
resp = self.send_cmd('AT')
if (resp == 'OK'):
print "OK"
else:
print "Error!!!"
self.send_cmd("AT+RST", 2, "ready")
time.sleep(0.5)
if "ACCESS_POINT" == self.__WIFI_MODE:
# setup server
self.send_cmd('AT+CIPMUX=1')
self.send_cmd("AT+CIPSERVER=1," + str(self.__port))
# setup mode
self.send_cmd("AT+CWMODE=3")
self.send_cmd("AT+CWMODE?")
# setup ap
self.send_cmd("AT+CWSAP=\"" + self.__ap_name + "\",\"" + self.__ap_password + "\"," + str(self.__ap_channel) + "," + str(self.__ap_sec))
self.send_cmd("AT+CWSAP?")
elif "CLIENT" == self.__WIFI_MODE:
self.send_cmd("AT+CWMODE=2")
self.send_cmd("AT+CWMODE?")
# self.send_cmd("AT+CWJAP=\"" + self.__ap_name + "\",\"" + self.__ap_password + "\"", 10)
# self.send_cmd("AT+CWJAP?")
self.send_cmd("AT+CWSAP=\"" + self.__ap_name + "\",\"" + self.__ap_password + "\"," + str(self.__ap_channel) + "," + str(self.__ap_sec), 10)
self.send_cmd("AT+CWSAP?")
# self.send_cmd("AT+CIPMUX=1")
# AT+CIPSTART="UDP","0",0,10000,2 //set udp local port , remote ip and port is irrespective until send data...
self.send_cmd("AT+CIPSTART=\"UDP\",\"0\",0,8888,2", 5)
# get ip address
self.send_cmd("AT+CIFSR")
def send_cmd(self, cmd, timo = 1, term='OK'):
# TODO: check flushInput cmd
self.__wifi_serial.flushInput()
if(self.__debug):
print("Send command: " + cmd)
self.__wifi_serial.write(cmd + "\r\n")
# check response
resp_buffer = self.__wifi_serial.readline()
time.sleep( 0.2 )
        # default return value if no terminator arrives before the timeout below
        ret = 'ERROR'
start_time = time.clock()
if(self.__debug):
print("Start time: " + str(start_time))
while( time.clock() - start_time < timo ):
while( self.__wifi_serial.inWaiting() ):
resp_buffer += self.__wifi_serial.readline() #.strip( "\r\n" )
if term in resp_buffer:
ret = term
break
if 'ready' in resp_buffer:
ret = 'ready'
break
if 'ERROR' in resp_buffer:
ret = 'ERROR'
break
if 'Error' in resp_buffer:
ret = 'Error'
break
if(self.__debug):
print(resp_buffer)
print("Return value: " + ret)
print("Runtime: " + str(time.clock() - start_time) + " sec")
return ret
# ------------------------------------------
# def wifiCheckRxStream():
# while( ser.inWaiting() ):
# s = ser.readline().strip( "\r\n" )
# +IPD,0,213:POST / HTTP/1.1
# Host: 192.168.4.1:8888
# Connection: keep-alive
# Accept: */*
# User-Agent: HTTPea/1.1.1 CFNetwork/758.1.6 Darwin/15.0.0
# Accept-Language: de-de
# Content-Length: 0
# Accept-Encoding: gzip, deflate
def get_cmd(self):
cmd = ""
if(self.__wifi_serial.inWaiting()):
while (self.__wifi_serial.inWaiting()):
cmd += self.__wifi_serial.readline()
# print self.__wifi_serial.inWaiting()
# print cmd
# if(cmd.find("+IPD,") > 0):
# print "IPD gefunden"
# Todo prufen auf gewunschte lange
# self.send_response()
if( cmd != "" ):
print( cmd )
if "IPD" in cmd:
cmd = cmd.split(":", 1)[1]
print(cmd)
self.__last_cmd = cmd
print("Return value: " + cmd)
return cmd
def send_response(self):
# s = wifiCommand( "AT+CIPSTART=\"TCP\",\""+servIP+"\","+str(servPort), 10, sTerm="Linked" )
# // wifi_send_cmd("AT+CIPSTART=0,TCP,192.168.4.2,8888");
# self.send_cmd("AT+CIPSTART=0,TCP,192.168.4.2,8888")
response = "HTTP/1.1 200 OK\r\n"
response += "Content-Type: text/plain\r\n\r\n"
response += "Super!\r\n"
resp_cmd = "AT+CIPSEND=0,"
resp_cmd += str(len(response))
self.send_cmd(resp_cmd)
time.sleep(0.2)
self.send_cmd(response)
# / HTTP Header
# String response = "HTTP/1.1 200 OK\r\n";
# response += "Content-Type: text/plain\r\n\r\n";
# response += "Super!\r\n";
# // setup esp 4 resp
# // AT+CIPSEND= <id>,<length>
# String at_cmd = "AT+CIPSEND=0,";
# at_cmd += String(response.length());
# Debug_Serial.print(F("Lange HTTP Response: "));
# Debug_Serial.println(String(response.length()));
# wifi_send_cmd(at_cmd);
# delay(20);
# wifi_send_cmd(response);
|
"""
AxographRawIO
=============
RawIO class for reading AxoGraph files (.axgd, .axgx)
Original author: Jeffrey Gill
Documentation of the AxoGraph file format provided by the developer is
incomplete and in some cases incorrect. The primary sources of official
documentation are found in two out-of-date documents:
- AxoGraph X User Manual, provided with AxoGraph and also available online:
https://axograph.com/documentation/AxoGraph%20User%20Manual.pdf
- AxoGraph_ReadWrite.h, a header file that is part of a C++ program
provided with AxoGraph, and which is also available here:
https://github.com/CWRUChielLab/axographio/blob/master/
axographio/include/axograph_readwrite/AxoGraph_ReadWrite.h
These were helpful starting points for building this RawIO, especially for
reading the beginnings of AxoGraph files, but much of the rest of the file
format was deciphered by reverse engineering and guess work. Some portions of
the file format remain undeciphered.
The AxoGraph file format is versatile in that it can represent both time series
data collected during data acquisition and non-time series data generated
through manual or automated analysis, such as power spectrum analysis. This
implementation of an AxoGraph file format reader makes no effort to decode
non-time series data. For simplicity, it makes several assumptions that should
be valid for any file generated directly from episodic or continuous data
acquisition without significant post-acquisition modification by the user.
Detailed logging is provided during header parsing for debugging purposes:
>>> import logging
>>> r = AxographRawIO(filename)
>>> r.logger.setLevel(logging.DEBUG)
>>> r.parse_header()
Background and Terminology
--------------------------
Acquisition modes:
AxoGraph can operate in two main data acquisition modes:
- Episodic "protocol-driven" acquisition mode, in which the program records
from specified signal channels for a fixed duration each time a trigger
is detected. Each trigger-activated recording is called an "episode".
From files acquired in this mode, AxographRawIO creates multiple Neo
Segments, one for each episode, unless force_single_segment=True.
- Continuous "chart recorder" acquisition mode, in which it creates a
continuous recording that can be paused and continued by the user
whenever they like. From files acquired in this mode, AxographRawIO
creates a single Neo Segment.
"Episode": analogous to a Neo Segment
See descriptions of acquisition modes above and of groups below.
"Column": analogous to a Quantity array
A column is a 1-dimensional array of data, stored in any one of a number of
data types (e.g., scaled ints or floats). In the oldest version of the
AxoGraph file format, even time was stored as a 1-dimensional array. In
newer versions, time is stored as a special type of "column" that is really
just a starting time and a sampling period.
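For example, after parse_header() both of these values are exposed in
self.info, so the full time vector can be reconstructed with a short sketch
like the one below (n_samples is a placeholder for the signal length, not an
actual attribute):
>>> import numpy as np
>>> t = r.info['t_start'] + np.arange(n_samples) * r.info['sampling_period']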
Column data appears in series in the file, i.e., all of the first column's
data appears before the second column's. As an aside, because of this
design choice AxoGraph cannot write data to disk as it is collected but
must store it all in memory until data acquisition ends. This also affected
how file slicing was implemented for this RawIO: Instead of using a single
memmap to address into a 2-dimensional block of data, AxographRawIO
constructs multiple 1-dimensional memmaps, one for each column, each with
its own offset.
Each column's data array is preceded by a header containing the column
title, which normally contains the units (e.g., "Current (nA)"). Data
recorded in episodic acquisition mode will contain a repeating sequence of
column names, where each repetition corresponds to an episode (e.g.,
"Time", "Column A", "Column B", "Column A", "Column B", etc.).
AxoGraph offers a spreadsheet view for viewing all column data.
"Trace": analogous to a single-channel Neo AnalogSignal
A trace is a 2-dimensional series. Raw data is not stored in the part of
the file concerned with traces. Instead, in the header for each trace are
indexes pointing to two data columns, defined earlier in the file,
corresponding to the trace's x and y data. These indexes can be changed in
AxoGraph under the "Assign X and Y Columns" tool, though doing so may
violate assumptions made by AxographRawIO.
For time series data collected under the usual data acquisition modes that
has not been modified after collection by the user, the x-index always
points to the time column; one trace exists for each non-time column, with
the y-index pointing to that column.
Traces are analogous to AnalogSignals in Neo. However, for simplicity of
implementation, AxographRawIO does not actually check the pairing of
columns in the trace headers. Instead it assumes the default pairing
described above when it creates signal channels while scanning through
columns. Older versions of the AxoGraph file format lack trace headers
entirely, so this is the most general solution.
Trace headers contain additional information about the series, such as plot
style, which is parsed by AxographRawIO and made available in
self.info['trace_header_info_list'] but is otherwise unused.
"Group": analogous to a Neo ChannelIndex for matching channels across Segments
A group is a collection of one or more traces. Like traces, raw data is not
stored in the part of the file concerned with groups. Instead, each trace
header contains an index pointing to the group it is assigned to. Group
assignment of traces can be changed in AxoGraph under the "Group Traces"
tool, or by using the "Merge Traces" or "Separate Traces" commands, though
doing so may violate assumptions made by AxographRawIO.
Files created in episodic acquisition mode contain multiple traces per
group, one for each episode. In that mode, a group corresponds to a signal
channel and is analogous to a ChannelIndex in Neo; the traces within the
group represent the time series recorded for that channel across episodes
and are analogous to AnalogSignals from multiple Segments in Neo.
In contrast, files created in continuous acquisition mode contain one trace
per group, each corresponding to a signal channel. In that mode, groups and
traces are basically conceptually synonymous, though the former can still
be thought of as analogous to ChannelIndexes in Neo for a single Segment.
Group headers are only consulted by AxographRawIO to determine if it is safe
to interpret a file as episodic and therefore translatable to multiple
Segments in Neo. Certain criteria have to be met, such as all groups
containing equal numbers of traces and each group having homogeneous signal
parameters. If trace grouping was modified by the user after data
acquisition, this may result in the file being interpreted as
non-episodic. Older versions of the AxoGraph file format lack group headers
entirely, so these files are never deemed safe to interpret as episodic,
even if the column names follow a repeating sequence as described above.
"Tag" / "Event marker": analogous to a Neo Event
In continuous acquisition mode, the user can press a hot key to tag a
moment in time with a short label. Additionally, if the user stops or
restarts data acquisition in this mode, a tag is created automatically with
the label "Stop" or "Start", respectively. These are displayed by AxoGraph
as event markers. AxographRawIO will organize all event markers into a
single Neo Event channel with the name "AxoGraph Tags".
In episodic acquisition mode, the tag hot key behaves differently. The
current episode number is recorded in a user-editable notes section of the
file, made available by AxographRawIO in self.info['notes']. Because these
do not correspond to moments in time, they are not processed into Neo
Events.
"Interval bar": analogous to a Neo Epoch
After data acquisition, the user can annotate an AxoGraph file with
horizontal, labeled bars called interval bars that span a specified period
of time. These are not episode specific. AxographRawIO will organize all
interval bars into a single Neo Epoch channel with the name "AxoGraph
Intervals".
"""
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import os
from datetime import datetime
from io import open, BufferedReader
from struct import unpack, calcsize
import numpy as np
class AxographRawIO(BaseRawIO):
"""
RawIO class for reading AxoGraph files (.axgd, .axgx)
Args:
filename (string):
File name of the AxoGraph file to read.
force_single_segment (bool):
Episodic files are normally read as multi-Segment Neo objects. This
parameter can force AxographRawIO to put all signals into a single
Segment. Default: False.
Example:
>>> import neo
>>> r = neo.rawio.AxographRawIO(filename=filename)
>>> r.parse_header()
>>> print(r)
>>> # get signals
>>> raw_chunk = r.get_analogsignal_chunk(
... block_index=0, seg_index=0,
... i_start=0, i_stop=1024,
... channel_names=channel_names)
>>> float_chunk = r.rescale_signal_raw_to_float(
... raw_chunk,
... dtype='float64',
... channel_names=channel_names)
>>> print(float_chunk)
>>> # get event markers
>>> ev_raw_times, _, ev_labels = r.get_event_timestamps(
... event_channel_index=0)
>>> ev_times = r.rescale_event_timestamp(
... ev_raw_times, dtype='float64')
>>> print([ev for ev in zip(ev_times, ev_labels)])
>>> # get interval bars
>>> ep_raw_times, ep_raw_durations, ep_labels = r.get_event_timestamps(
... event_channel_index=1)
>>> ep_times = r.rescale_event_timestamp(
... ep_raw_times, dtype='float64')
>>> ep_durations = r.rescale_epoch_duration(
... ep_raw_durations, dtype='float64')
>>> print([ep for ep in zip(ep_times, ep_durations, ep_labels)])
>>> # get notes
>>> print(r.info['notes'])
>>> # get other miscellaneous info
>>> print(r.info)
"""
name = 'AxographRawIO'
description = 'This IO reads .axgd/.axgx files created with AxoGraph'
extensions = ['axgd', 'axgx']
rawmode = 'one-file'
def __init__(self, filename, force_single_segment=False):
BaseRawIO.__init__(self)
self.filename = filename
self.force_single_segment = force_single_segment
def _parse_header(self):
self.header = {}
self._scan_axograph_file()
if not self.force_single_segment and self._safe_to_treat_as_episodic():
self.logger.debug('Will treat as episodic')
self._convert_to_multi_segment()
else:
self.logger.debug('Will not treat as episodic')
self.logger.debug('')
self._generate_minimal_annotations()
blk_annotations = self.raw_annotations['blocks'][0]
blk_annotations['format_ver'] = self.info['format_ver']
if self.info['format_ver'] >= 3:
blk_annotations['comment'] = self.info['comment']
blk_annotations['notes'] = self.info['notes']
blk_annotations['rec_datetime'] = self._get_rec_datetime()
# modified time is not ideal but less prone to
# cross-platform issues than created time (ctime)
blk_annotations['file_datetime'] = datetime.fromtimestamp(
os.path.getmtime(self.filename))
def _source_name(self):
return self.filename
def _segment_t_start(self, block_index, seg_index):
# same for all segments
return self._t_start
def _segment_t_stop(self, block_index, seg_index):
# same for all signals in all segments
t_stop = self._t_start + \
len(self._raw_signals[seg_index][0]) * self._sampling_period
return t_stop
###
# signal and channel zone
def _get_signal_size(self, block_index, seg_index, channel_indexes):
# same for all signals in all segments
return len(self._raw_signals[seg_index][0])
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
# same for all signals in all segments
return self._t_start
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop,
channel_indexes):
if channel_indexes is None or \
np.all(channel_indexes == slice(None, None, None)):
channel_indexes = range(self.signal_channels_count())
raw_signals = [self._raw_signals
[seg_index]
[channel_index]
[slice(i_start, i_stop)]
for channel_index in channel_indexes]
raw_signals = np.array(raw_signals).T # loads data into memory
return raw_signals
###
# spiketrain and unit zone
def _spike_count(self, block_index, seg_index, unit_index):
# not supported
return None
def _get_spike_timestamps(self, block_index, seg_index, unit_index,
t_start, t_stop):
# not supported
return None
def _rescale_spike_timestamp(self, spike_timestamps, dtype):
# not supported
return None
###
# spike waveforms zone
def _get_spike_raw_waveforms(self, block_index, seg_index, unit_index,
t_start, t_stop):
# not supported
return None
###
# event and epoch zone
def _event_count(self, block_index, seg_index, event_channel_index):
# Retrieve size of either event or epoch channel:
# event_channel_index: 0 AxoGraph Tags, 1 AxoGraph Intervals
# AxoGraph tags can only be inserted in continuous data acquisition
# mode. When the tag hot key is pressed in episodic acquisition mode,
# the notes are updated with the current episode number instead of an
# instantaneous event marker being created. This means that Neo-like
# Events cannot be generated by AxoGraph for multi-Segment (episodic)
# files. Furthermore, Neo-like Epochs (interval markers) are not
# episode specific. For these reasons, this function ignores seg_index.
return self._raw_event_epoch_timestamps[event_channel_index].size
def _get_event_timestamps(self, block_index, seg_index,
event_channel_index, t_start, t_stop):
# Retrieve either event or epoch data, unscaled:
# event_channel_index: 0 AxoGraph Tags, 1 AxoGraph Intervals
# AxoGraph tags can only be inserted in continuous data acquisition
# mode. When the tag hot key is pressed in episodic acquisition mode,
# the notes are updated with the current episode number instead of an
# instantaneous event marker being created. This means that Neo-like
# Events cannot be generated by AxoGraph for multi-Segment (episodic)
# files. Furthermore, Neo-like Epochs (interval markers) are not
# episode specific. For these reasons, this function ignores seg_index.
timestamps = self._raw_event_epoch_timestamps[event_channel_index]
durations = self._raw_event_epoch_durations[event_channel_index]
labels = self._event_epoch_labels[event_channel_index]
if durations is None:
# events
if t_start is not None:
# keep if event occurs after t_start ...
keep = timestamps >= int(t_start / self._sampling_period)
timestamps = timestamps[keep]
labels = labels[keep]
if t_stop is not None:
# ... and before t_stop
keep = timestamps <= int(t_stop / self._sampling_period)
timestamps = timestamps[keep]
labels = labels[keep]
else:
# epochs
if t_start is not None:
# keep if epoch ends after t_start ...
keep = timestamps + durations >= \
int(t_start / self._sampling_period)
timestamps = timestamps[keep]
durations = durations[keep]
labels = labels[keep]
if t_stop is not None:
# ... and starts before t_stop
keep = timestamps <= int(t_stop / self._sampling_period)
timestamps = timestamps[keep]
durations = durations[keep]
labels = labels[keep]
return timestamps, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
# Scale either event or epoch start times from sample index to seconds
# (t_start shouldn't be added)
event_times = event_timestamps.astype(dtype) * self._sampling_period
return event_times
def _rescale_epoch_duration(self, raw_duration, dtype):
# Scale epoch durations from samples to seconds
epoch_durations = raw_duration.astype(dtype) * self._sampling_period
return epoch_durations
###
# multi-segment zone
def _safe_to_treat_as_episodic(self):
"""
The purpose of this function is to determine if the file contains any
irregularities in its grouping of traces such that it cannot be treated
as episodic. Even "continuous" recordings can be treated as
single-episode recordings and could be identified as safe by this
function. Recordings in which the user has changed groupings to create
irregularities should be caught by this function.
"""
# First check: Old AxoGraph file formats do not contain enough metadata
# to know for certain that the file is episodic.
if self.info['format_ver'] < 3:
self.logger.debug('Cannot treat as episodic because old format '
'contains insufficient metadata')
return False
# Second check: If the file is episodic, it should report that it
# contains more than 1 episode.
if self.info['n_episodes'] == 1:
self.logger.debug('Cannot treat as episodic because file reports '
'one episode')
return False
# Third check: If the file is episodic, groups of traces should all
# contain the same number of traces, one for each episode. This is
# generally true of "continuous" (single-episode) recordings as well,
# which normally have 1 trace per group.
group_id_to_col_indexes = {}
for group_id in self.info['group_header_info_list']:
col_indexes = []
for trace_header in self.info['trace_header_info_list'].values():
if trace_header['group_id_for_this_trace'] == group_id:
col_indexes.append(trace_header['y_index'])
group_id_to_col_indexes[group_id] = col_indexes
n_traces_by_group = {k: len(v) for k, v in
group_id_to_col_indexes.items()}
all_groups_have_same_number_of_traces = len(np.unique(list(
n_traces_by_group.values()))) == 1
if not all_groups_have_same_number_of_traces:
self.logger.debug('Cannot treat as episodic because groups differ '
'in number of traces')
return False
# Fourth check: The number of traces in each group should equal
# n_episodes.
n_traces_per_group = np.unique(list(n_traces_by_group.values()))
if n_traces_per_group != self.info['n_episodes']:
self.logger.debug('Cannot treat as episodic because n_episodes '
'does not match number of traces per group')
return False
# Fifth check: If the file is episodic, all traces within a group
# should have identical signal channel parameters (e.g., name, units)
# except for their unique ids. This too is generally true of
# "continuous" (single-episode) files, which normally have 1 trace per
# group.
signal_channels_with_ids_dropped = \
self.header['signal_channels'][
[n for n in self.header['signal_channels'].dtype.names
if n != 'id']]
group_has_uniform_signal_parameters = {}
for group_id, col_indexes in group_id_to_col_indexes.items():
# subtract 1 from indexes in next statement because time is not
# included in signal_channels
signal_params_for_group = np.array(
signal_channels_with_ids_dropped[np.array(col_indexes) - 1])
group_has_uniform_signal_parameters[group_id] = \
len(np.unique(signal_params_for_group)) == 1
all_groups_have_uniform_signal_parameters = \
np.all(list(group_has_uniform_signal_parameters.values()))
if not all_groups_have_uniform_signal_parameters:
self.logger.debug('Cannot treat as episodic because some groups '
'have heterogeneous signal parameters')
return False
# all checks passed
self.logger.debug('Can treat as episodic')
return True
def _convert_to_multi_segment(self):
"""
Reshape signal headers and signal data for an episodic file
"""
self.header['nb_segment'] = [self.info['n_episodes']]
# drop repeated signal headers
self.header['signal_channels'] = \
self.header['signal_channels'].reshape(
self.info['n_episodes'], -1)[0]
# reshape signal memmap list
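# e.g., with 2 channels and 3 episodes (an illustrative sketch of the loop
# below), the flat per-column list [m0, m1, m2, m3, m4, m5] is regrouped
# per episode into [[m0, m1], [m2, m3], [m4, m5]]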
new_sig_memmaps = []
n_channels = len(self.header['signal_channels'])
sig_memmaps = self._raw_signals[0]
for first_index in np.arange(0, len(sig_memmaps), n_channels):
new_sig_memmaps.append(
sig_memmaps[first_index:first_index + n_channels])
self._raw_signals = new_sig_memmaps
self.logger.debug('New number of segments: {}'.format(
self.info['n_episodes']))
return
def _get_rec_datetime(self):
"""
Determine the date and time at which the recording was started from
automatically generated notes. How these notes should be parsed differs
depending on whether the recording was obtained in episodic or
continuous acquisition mode.
"""
rec_datetime = None
date_string = ''
time_string = ''
datetime_string = ''
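# For example (illustrative note lines consistent with the parsing below):
# 'Created on Wed Jan 1 2020' plus 'Start data acquisition at 12:34:56'
# combine to 'Wed Jan 1 2020 12:34:56', which matches the
# '%a %b %d %Y %H:%M:%S' format passed to strptime at the end.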
for note_line in self.info['notes'].split('\n'):
# episodic acquisition mode
if note_line.startswith('Created on '):
date_string = note_line.strip('Created on ')
if note_line.startswith('Start data acquisition at '):
time_string = note_line.strip('Start data acquisition at ')
# continuous acquisition mode
if note_line.startswith('Created : '):
datetime_string = note_line.strip('Created : ')
if date_string and time_string:
datetime_string = ' '.join([date_string, time_string])
if datetime_string:
try:
rec_datetime = datetime.strptime(datetime_string,
'%a %b %d %Y %H:%M:%S')
except ValueError:
pass
return rec_datetime
def _scan_axograph_file(self):
"""
This function traverses the entire AxoGraph file, constructing memmaps
for signals and collecting channel information and other metadata
"""
with open(self.filename, 'rb') as fid:
f = StructFile(fid)
self.logger.debug('filename: {}'.format(self.filename))
self.logger.debug('')
# the first 4 bytes are always a 4-character file type identifier
# - for early versions of AxoGraph, this identifier was 'AxGr'
# - starting with AxoGraph X, the identifier is 'axgx'
header_id = f.read(4).decode('utf-8')
assert header_id in ['AxGr', 'axgx'], \
'not an AxoGraph binary file! "{}"'.format(self.filename)
self.logger.debug('header_id: {}'.format(header_id))
# the next two numbers store the format version number and the
# number of data columns to follow
# - for 'AxGr' files, these numbers are 2-byte unsigned short ints
# - for 'axgx' files, these numbers are 4-byte long ints
# - the 4-character identifier changed from 'AxGr' to 'axgx' with
# format version 3
if header_id == 'AxGr':
format_ver, n_cols = f.read_f('HH')
assert format_ver == 1 or format_ver == 2, \
'mismatch between header identifier "{}" and format ' \
'version "{}"!'.format(header_id, format_ver)
elif header_id == 'axgx':
format_ver, n_cols = f.read_f('ll')
assert format_ver >= 3, \
'mismatch between header identifier "{}" and format ' \
'version "{}"!'.format(header_id, format_ver)
else:
raise NotImplementedError(
'unimplemented file header identifier "{}"!'.format(
header_id))
self.logger.debug('format_ver: {}'.format(format_ver))
self.logger.debug('n_cols: {}'.format(n_cols))
self.logger.debug('')
##############################################
# BEGIN COLUMNS
sig_memmaps = []
sig_channels = []
for i in range(n_cols):
self.logger.debug('== COLUMN INDEX {} =='.format(i))
##############################################
# NUMBER OF DATA POINTS IN COLUMN
n_points = f.read_f('l')
self.logger.debug('n_points: {}'.format(n_points))
##############################################
# COLUMN TYPE
# depending on the format version, data columns may have a type
# - prior to version 3, column types did not exist and data was
# stored in a fixed pattern
# - beginning with version 3, several data types are available
# as documented in AxoGraph_ReadWrite.h
if format_ver == 1 or format_ver == 2:
col_type = None
elif format_ver >= 3:
col_type = f.read_f('l')
else:
raise NotImplementedError(
'unimplemented file format version "{}"!'.format(
format_ver))
self.logger.debug('col_type: {}'.format(col_type))
##############################################
# COLUMN NAME AND UNITS
# depending on the format version, column titles are stored
# differently
# - prior to version 3, column titles were stored as
# fixed-length 80-byte Pascal strings
# - beginning with version 3, column titles are stored as
# variable-length strings (see StructFile.read_string for
# details)
if format_ver == 1 or format_ver == 2:
title = f.read_f('80p').decode('utf-8')
elif format_ver >= 3:
title = f.read_f('S')
else:
raise NotImplementedError(
'unimplemented file format version "{}"!'.format(
format_ver))
self.logger.debug('title: {}'.format(title))
# units are given in parentheses at the end of a column title,
# unless units are absent
if len(title.split()) > 0 and title.split()[-1][0] == '(' and \
title.split()[-1][-1] == ')':
name = ' '.join(title.split()[:-1])
units = title.split()[-1].strip('()')
else:
name = title
units = ''
self.logger.debug('name: {}'.format(name))
self.logger.debug('units: {}'.format(units))
##############################################
# COLUMN DTYPE, SCALE, OFFSET
if format_ver == 1:
# for format version 1, all columns are arrays of floats
dtype = 'f'
gain, offset = 1, 0 # data is neither scaled nor off-set
if i == 0:
# there is no guarantee that this time column is
# regularly sampled, and in fact the test file for
# version 1 has slight variations in the intervals
# between samples (due to numerical imprecision,
# probably), so technically an IrregularlySampledSignal
# is needed here, but I'm going to cheat by assuming
# regularity
# create a memory map that allows accessing parts of
# the file without loading it all into memory
array = np.memmap(
self.filename,
mode='r',
dtype=f.byte_order + dtype,
offset=f.tell(),
shape=n_points)
# advance the file position to after the data array
f.seek(array.nbytes, 1)
first_value, increment = \
array[0], \
np.median(np.diff(array)) # here's the cheat
self.logger.debug(
'interval: {}, freq: {}'.format(
increment, 1 / increment))
self.logger.debug(
'start: {}, end: {}'.format(
first_value,
first_value + increment * (n_points - 1)))
# assume this is the time column
t_start, sampling_period = first_value, increment
self.logger.debug('')
continue # skip saving memmap, chan info for time col
elif format_ver == 2:
# for format version 2, the first column is a "series" of
# regularly spaced values specified merely by a first value
# and an increment, and all subsequent columns are arrays
# of shorts with a scaling factor
if i == 0:
# series
first_value, increment = f.read_f('ff')
self.logger.debug(
'interval: {}, freq: {}'.format(
increment, 1 / increment))
self.logger.debug(
'start: {}, end: {}'.format(
first_value,
first_value + increment * (n_points - 1)))
# assume this is the time column
t_start, sampling_period = first_value, increment
self.logger.debug('')
continue # skip memmap, chan info for time col
else:
# scaled short
dtype = 'h'
gain, offset = \
f.read_f('f'), 0 # data is scaled without offset
elif format_ver >= 3:
# for format versions 3 and later, the column type
# determines how the data should be read
# - column types 1, 2, 3, and 8 are not defined in
# AxoGraph_ReadWrite.h
# - column type 9 is different from the others in that it
# represents regularly spaced values
# (such as times at a fixed frequency) specified by a
# first value and an increment, without storing a large
# data array
if col_type == 9:
# series
first_value, increment = f.read_f('dd')
self.logger.debug(
'interval: {}, freq: {}'.format(
increment, 1 / increment))
self.logger.debug(
'start: {}, end: {}'.format(
first_value,
first_value + increment * (n_points - 1)))
if i == 0:
# assume this is the time column
t_start, sampling_period = first_value, increment
self.logger.debug('')
continue # skip memmap, chan info for time col
else:
raise NotImplementedError(
'series data are supported only for the first '
'data column (time)!')
elif col_type == 4:
# short
dtype = 'h'
gain, offset = 1, 0 # data neither scaled nor off-set
elif col_type == 5:
# long
dtype = 'l'
gain, offset = 1, 0 # data neither scaled nor off-set
elif col_type == 6:
# float
dtype = 'f'
gain, offset = 1, 0 # data neither scaled nor off-set
elif col_type == 7:
# double
dtype = 'd'
gain, offset = 1, 0 # data neither scaled nor off-set
elif col_type == 10:
# scaled short
dtype = 'h'
gain, offset = f.read_f('dd') # data scaled w/ offset
else:
raise NotImplementedError(
'unimplemented column type "{}"!'.format(col_type))
else:
raise NotImplementedError(
'unimplemented file format version "{}"!'.format(
format_ver))
##############################################
# COLUMN MEMMAP AND CHANNEL INFO
# create a memory map that allows accessing parts of the file
# without loading it all into memory
array = np.memmap(
self.filename,
mode='r',
dtype=f.byte_order + dtype,
offset=f.tell(),
shape=n_points)
# advance the file position to after the data array
f.seek(array.nbytes, 1)
self.logger.debug('gain: {}, offset: {}'.format(gain, offset))
self.logger.debug('initial data: {}'.format(
array[:5] * gain + offset))
# channel_info will be cast to _signal_channel_dtype
channel_info = (
name, i, 1 / sampling_period, f.byte_order + dtype,
units, gain, offset, 0)
self.logger.debug('channel_info: {}'.format(channel_info))
self.logger.debug('')
sig_memmaps.append(array)
sig_channels.append(channel_info)
# END COLUMNS
##############################################
if format_ver == 1 or format_ver == 2:
# for format versions 1 and 2, metadata like graph display
# information was stored separately in the "resource fork" of
# the file, so there is nothing more to do here, and the rest
# of the file is empty
rest_of_the_file = f.read()
assert rest_of_the_file == b''
raw_event_timestamps = []
raw_epoch_timestamps = []
raw_epoch_durations = []
event_labels = []
epoch_labels = []
elif format_ver >= 3:
# for format versions 3 and later, there is a lot more!
##############################################
# COMMENT
self.logger.debug('== COMMENT ==')
comment = f.read_f('S')
self.logger.debug(comment if comment else 'no comment!')
self.logger.debug('')
##############################################
# NOTES
self.logger.debug('== NOTES ==')
notes = f.read_f('S')
self.logger.debug(notes if notes else 'no notes!')
self.logger.debug('')
##############################################
# TRACES
self.logger.debug('== TRACES ==')
n_traces = f.read_f('l')
self.logger.debug('n_traces: {}'.format(n_traces))
self.logger.debug('')
trace_header_info_list = {}
group_ids = []
for i in range(n_traces):
# AxoGraph traces are 1-indexed in GUI, so use i+1 below
self.logger.debug('== TRACE #{} =='.format(i + 1))
trace_header_info = {}
if format_ver < 6:
# before format version 6, there was only one version
# of the header, and version numbers were not provided
trace_header_info['trace_header_version'] = 1
else:
# for format versions 6 and later, the header version
# must be read
trace_header_info['trace_header_version'] = \
f.read_f('l')
if trace_header_info['trace_header_version'] == 1:
TraceHeaderDescription = TraceHeaderDescriptionV1
elif trace_header_info['trace_header_version'] == 2:
TraceHeaderDescription = TraceHeaderDescriptionV2
else:
raise NotImplementedError(
'unimplemented trace header version "{}"!'.format(
trace_header_info['trace_header_version']))
for key, fmt in TraceHeaderDescription:
trace_header_info[key] = f.read_f(fmt)
# AxoGraph traces are 1-indexed in GUI, so use i+1 below
trace_header_info_list[i + 1] = trace_header_info
group_ids.append(
trace_header_info['group_id_for_this_trace'])
self.logger.debug(trace_header_info)
self.logger.debug('')
##############################################
# GROUPS
self.logger.debug('== GROUPS ==')
n_groups = f.read_f('l')
group_ids = \
np.sort(list(set(group_ids))) # remove duplicates and sort
assert n_groups == len(group_ids), \
'expected group_ids to have length {}: {}'.format(
n_groups, group_ids)
self.logger.debug('n_groups: {}'.format(n_groups))
self.logger.debug('group_ids: {}'.format(group_ids))
self.logger.debug('')
group_header_info_list = {}
for i in group_ids:
# AxoGraph groups are 0-indexed in GUI, so use i below
self.logger.debug('== GROUP #{} =='.format(i))
group_header_info = {}
if format_ver < 6:
# before format version 6, there was only one version
# of the header, and version numbers were not provided
group_header_info['group_header_version'] = 1
else:
# for format versions 6 and later, the header version
# must be read
group_header_info['group_header_version'] = \
f.read_f('l')
if group_header_info['group_header_version'] == 1:
GroupHeaderDescription = GroupHeaderDescriptionV1
else:
raise NotImplementedError(
'unimplemented group header version "{}"!'.format(
group_header_info['group_header_version']))
for key, fmt in GroupHeaderDescription:
group_header_info[key] = f.read_f(fmt)
# AxoGraph groups are 0-indexed in GUI, so use i below
group_header_info_list[i] = group_header_info
self.logger.debug(group_header_info)
self.logger.debug('')
##############################################
# UNKNOWN
self.logger.debug('>> UNKNOWN 1 <<')
# 36 bytes of undeciphered data (types here are guesses)
unknowns = f.read_f('9l')
self.logger.debug(unknowns)
self.logger.debug('')
##############################################
# EPISODES
self.logger.debug('== EPISODES ==')
# a subset of episodes can be selected for "review", or
# episodes can be paged through one by one, and the indexes of
# those currently in review appear in this list
episodes_in_review = []
n_episodes = f.read_f('l')
for i in range(n_episodes):
episode_bool = f.read_f('Z')
if episode_bool:
episodes_in_review.append(i + 1)
self.logger.debug('n_episodes: {}'.format(n_episodes))
self.logger.debug('episodes_in_review: {}'.format(
episodes_in_review))
if format_ver == 5:
# the test file for version 5 contains this extra list of
# episode indexes with unknown purpose
old_unknown_episode_list = []
n_episodes2 = f.read_f('l')
for i in range(n_episodes2):
episode_bool = f.read_f('Z')
if episode_bool:
old_unknown_episode_list.append(i + 1)
self.logger.debug('old_unknown_episode_list: {}'.format(
old_unknown_episode_list))
if n_episodes2 != n_episodes:
self.logger.debug(
'n_episodes2 ({}) and n_episodes ({}) '
'differ!'.format(n_episodes2, n_episodes))
# another list of episode indexes with unknown purpose
unknown_episode_list = []
n_episodes3 = f.read_f('l')
for i in range(n_episodes3):
episode_bool = f.read_f('Z')
if episode_bool:
unknown_episode_list.append(i + 1)
self.logger.debug('unknown_episode_list: {}'.format(
unknown_episode_list))
if n_episodes3 != n_episodes:
self.logger.debug(
'n_episodes3 ({}) and n_episodes ({}) '
'differ!'.format(n_episodes3, n_episodes))
# episodes can be masked to be removed from the pool of
# reviewable episodes completely until unmasked, and the
# indexes of those currently masked appear in this list
masked_episodes = []
n_episodes4 = f.read_f('l')
for i in range(n_episodes4):
episode_bool = f.read_f('Z')
if episode_bool:
masked_episodes.append(i + 1)
self.logger.debug('masked_episodes: {}'.format(
masked_episodes))
if n_episodes4 != n_episodes:
self.logger.debug(
'n_episodes4 ({}) and n_episodes ({}) '
'differ!'.format(n_episodes4, n_episodes))
self.logger.debug('')
##############################################
# UNKNOWN
self.logger.debug('>> UNKNOWN 2 <<')
# 68 bytes of undeciphered data (types here are guesses)
unknowns = f.read_f('d 9l d 4l')
self.logger.debug(unknowns)
self.logger.debug('')
##############################################
# FONTS
if format_ver >= 6:
font_categories = ['axis titles', 'axis labels (ticks)',
'notes', 'graph title']
else:
# would need an old version of AxoGraph to determine how it
# used these settings
font_categories = ['everything (?)']
font_settings_info_list = {}
for i in font_categories:
self.logger.debug('== FONT SETTINGS FOR {} =='.format(i))
font_settings_info = {}
for key, fmt in FontSettingsDescription:
font_settings_info[key] = f.read_f(fmt)
# I don't know why two arbitrary values were selected to
# represent this switch, but it seems they were
# - setting1 could contain other undeciphered data as a
# bitmask, like setting2
assert font_settings_info['setting1'] in \
[FONT_BOLD, FONT_NOT_BOLD], \
'expected setting1 ({}) to have value FONT_BOLD ' \
'({}) or FONT_NOT_BOLD ({})'.format(
font_settings_info['setting1'],
FONT_BOLD,
FONT_NOT_BOLD)
# size is stored 10 times bigger than real value
font_settings_info['size'] = \
font_settings_info['size'] / 10.0
font_settings_info['bold'] = \
bool(font_settings_info['setting1'] == FONT_BOLD)
font_settings_info['italics'] = \
bool(font_settings_info['setting2'] & FONT_ITALICS)
font_settings_info['underline'] = \
bool(font_settings_info['setting2'] & FONT_UNDERLINE)
font_settings_info['strikeout'] = \
bool(font_settings_info['setting2'] & FONT_STRIKEOUT)
font_settings_info_list[i] = font_settings_info
self.logger.debug(font_settings_info)
self.logger.debug('')
##############################################
# X-AXIS SETTINGS
self.logger.debug('== X-AXIS SETTINGS ==')
x_axis_settings_info = {}
for key, fmt in XAxisSettingsDescription:
x_axis_settings_info[key] = f.read_f(fmt)
self.logger.debug(x_axis_settings_info)
self.logger.debug('')
##############################################
# UNKNOWN
self.logger.debug('>> UNKNOWN 3 <<')
# 108 bytes of undeciphered data (types here are guesses)
unknowns = f.read_f('8l 3d 13l')
self.logger.debug(unknowns)
self.logger.debug('')
##############################################
# EVENTS / TAGS
self.logger.debug('=== EVENTS / TAGS ===')
n_events, n_events_again = f.read_f('ll')
self.logger.debug('n_events: {}'.format(n_events))
# event / tag timing is stored as an index into time
raw_event_timestamps = []
event_labels = []
for i in range(n_events_again):
event_index = f.read_f('l')
raw_event_timestamps.append(event_index)
n_events_yet_again = f.read_f('l')
for i in range(n_events_yet_again):
title = f.read_f('S')
event_labels.append(title)
event_list = []
for event_label, event_index in \
zip(event_labels, raw_event_timestamps):
# t_start shouldn't be added here
event_time = event_index * sampling_period
event_list.append({
'title': event_label,
'index': event_index,
'time': event_time})
for event in event_list:
self.logger.debug(event)
self.logger.debug('')
##############################################
# UNKNOWN
self.logger.debug('>> UNKNOWN 4 <<')
# 28 bytes of undeciphered data (types here are guesses)
unknowns = f.read_f('7l')
self.logger.debug(unknowns)
self.logger.debug('')
##############################################
# EPOCHS / INTERVAL BARS
self.logger.debug('=== EPOCHS / INTERVAL BARS ===')
n_epochs = f.read_f('l')
self.logger.debug('n_epochs: {}'.format(n_epochs))
epoch_list = []
for i in range(n_epochs):
epoch_info = {}
for key, fmt in EpochInfoDescription:
epoch_info[key] = f.read_f(fmt)
epoch_list.append(epoch_info)
# epoch / interval bar timing and duration are stored in
# seconds, so here they are converted to (possibly non-integer)
# indexes into time to fit into the procrustean beds of
# _rescale_event_timestamp and _rescale_epoch_duration
raw_epoch_timestamps = []
raw_epoch_durations = []
epoch_labels = []
for epoch in epoch_list:
raw_epoch_timestamps.append(
epoch['t_start'] / sampling_period)
raw_epoch_durations.append(
(epoch['t_stop'] - epoch['t_start']) / sampling_period)
epoch_labels.append(epoch['title'])
self.logger.debug(epoch)
self.logger.debug('')
##############################################
# UNKNOWN
self.logger.debug(
'>> UNKNOWN 5 (includes y-axis plot ranges) <<')
# lots of undeciphered data
rest_of_the_file = f.read()
self.logger.debug(rest_of_the_file)
self.logger.debug('')
##############################################
# RAWIO HEADER
# event_channels will be cast to _event_channel_dtype
event_channels = []
event_channels.append(('AxoGraph Tags', '', 'event'))
event_channels.append(('AxoGraph Intervals', '', 'epoch'))
# organize header
self.header['nb_block'] = 1
self.header['nb_segment'] = [1]
self.header['signal_channels'] = \
np.array(sig_channels, dtype=_signal_channel_dtype)
self.header['event_channels'] = \
np.array(event_channels, dtype=_event_channel_dtype)
self.header['unit_channels'] = \
np.array([], dtype=_unit_channel_dtype)
##############################################
# DATA OBJECTS
# organize data
self._sampling_period = sampling_period
self._t_start = t_start
self._raw_signals = [sig_memmaps] # first index is seg_index
self._raw_event_epoch_timestamps = [
np.array(raw_event_timestamps),
np.array(raw_epoch_timestamps)]
self._raw_event_epoch_durations = [
None,
np.array(raw_epoch_durations)]
self._event_epoch_labels = [
np.array(event_labels, dtype='U'),
np.array(epoch_labels, dtype='U')]
##############################################
# EXTRA INFORMATION
# keep other details
self.info = {}
self.info['header_id'] = header_id
self.info['format_ver'] = format_ver
self.info['t_start'] = t_start
self.info['sampling_period'] = sampling_period
if format_ver >= 3:
self.info['n_cols'] = n_cols
self.info['n_traces'] = n_traces
self.info['n_groups'] = n_groups
self.info['n_episodes'] = n_episodes
self.info['n_events'] = n_events
self.info['n_epochs'] = n_epochs
self.info['comment'] = comment
self.info['notes'] = notes
self.info['trace_header_info_list'] = trace_header_info_list
self.info['group_header_info_list'] = group_header_info_list
self.info['event_list'] = event_list
self.info['epoch_list'] = epoch_list
self.info['episodes_in_review'] = episodes_in_review
self.info['masked_episodes'] = masked_episodes
self.info['font_settings_info_list'] = font_settings_info_list
self.info['x_axis_settings_info'] = x_axis_settings_info
class StructFile(BufferedReader):
"""
A container for the file buffer with some added convenience functions for
reading AxoGraph files
"""
def __init__(self, *args, **kwargs):
# As far as I've seen, every AxoGraph file uses big-endian encoding,
# regardless of the system architecture on which it was created, but
# here I provide means for controlling byte ordering in case a counter
# example is found.
self.byte_order = kwargs.pop('byte_order', '>')
if self.byte_order == '>':
# big-endian
self.utf_16_decoder = 'utf-16-be'
elif self.byte_order == '<':
# little-endian
self.utf_16_decoder = 'utf-16-le'
else:
# unspecified
self.utf_16_decoder = 'utf-16'
super().__init__(*args, **kwargs)
def read_and_unpack(self, fmt):
"""
Calculate the number of bytes corresponding to the format string, read
in that number of bytes, and unpack them according to the format string
"""
return unpack(
self.byte_order + fmt,
self.read(calcsize(self.byte_order + fmt)))
def read_string(self):
"""
The most common string format in AxoGraph files is a variable length
string with UTF-16 encoding, preceded by a 4-byte integer (long)
specifying the length of the string in bytes. Unlike a Pascal string
('p' format), these strings are not stored in a fixed number of bytes
with padding at the end. This function reads in one of these variable
length strings
"""
# length may be -1, 0, or a positive integer
length = self.read_and_unpack('l')[0]
if length > 0:
return self.read(length).decode(self.utf_16_decoder)
else:
return ''
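# For example (an illustrative byte layout, not taken from a real file): the
# title 'Time' would be stored as the 4-byte big-endian length 8,
# b'\x00\x00\x00\x08', followed by its UTF-16-BE encoding
# b'\x00T\x00i\x00m\x00e', and read_string() would return 'Time'.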
def read_bool(self):
"""
AxoGraph files encode each boolean as 4-byte integer (long) with value
1 = True, 0 = False. This function reads in one of these booleans.
"""
return bool(self.read_and_unpack('l')[0])
def read_f(self, fmt, offset=None):
"""
This function is a wrapper for read_and_unpack that adds compatibility
with two new format strings:
'S': a variable length UTF-16 string, readable with read_string
'Z': a boolean encoded as a 4-byte integer, readable with read_bool
This method does not implement support for numbers before the new
format strings, such as '3Z' to represent 3 bools (use 'ZZZ' instead).
"""
if offset is not None:
self.seek(offset)
# place commas before and after each instance of S or Z
for special in ['S', 'Z']:
fmt = fmt.replace(special, ',' + special + ',')
# split S and Z into isolated strings
fmt = fmt.split(',')
# construct a tuple of unpacked data
data = ()
for subfmt in fmt:
if subfmt == 'S':
data += (self.read_string(),)
elif subfmt == 'Z':
data += (self.read_bool(),)
else:
data += self.read_and_unpack(subfmt)
if len(data) == 1:
return data[0]
else:
return data
FONT_BOLD = 75 # mysterious arbitrary constant
FONT_NOT_BOLD = 50 # mysterious arbitrary constant
FONT_ITALICS = 1
FONT_UNDERLINE = 2
FONT_STRIKEOUT = 4
TraceHeaderDescriptionV1 = [
# documented in AxoGraph_ReadWrite.h
('x_index', 'l'),
('y_index', 'l'),
('err_bar_index', 'l'),
('group_id_for_this_trace', 'l'),
('hidden', 'Z'), # AxoGraph_ReadWrite.h incorrectly states "shown" instead
('min_x', 'd'),
('max_x', 'd'),
('min_positive_x', 'd'),
('x_is_regularly_spaced', 'Z'),
('x_increases_monotonically', 'Z'),
('x_interval_if_regularly_spaced', 'd'),
('min_y', 'd'),
('max_y', 'd'),
('min_positive_y', 'd'),
('trace_color', 'xBBB'),
('display_joined_line_plot', 'Z'),
('line_thickness', 'd'),
('pen_style', 'l'),
('display_symbol_plot', 'Z'),
('symbol_type', 'l'),
('symbol_size', 'l'),
('draw_every_data_point', 'Z'),
('skip_points_by_distance_instead_of_pixels', 'Z'),
('pixels_between_symbols', 'l'),
('display_histogram_plot', 'Z'),
('histogram_type', 'l'),
('histogram_bar_separation', 'l'),
('display_error_bars', 'Z'),
('display_pos_err_bar', 'Z'),
('display_neg_err_bar', 'Z'),
('err_bar_width', 'l'),
]
# documented in AxoGraph_ReadWrite.h
# - only one difference exists between versions 1 and 2
TraceHeaderDescriptionV2 = list(TraceHeaderDescriptionV1) # make a copy
TraceHeaderDescriptionV2.insert(3, ('neg_err_bar_index', 'l'))
GroupHeaderDescriptionV1 = [
# undocumented and reverse engineered
('title', 'S'),
('unknown1', 'h'), # 2 bytes of undeciphered data (types are guesses)
('units', 'S'),
('unknown2', 'hll'), # 10 bytes of undeciphered data (types are guesses)
]
FontSettingsDescription = [
# undocumented and reverse engineered
('font', 'S'),
('size', 'h'), # divide this 2-byte integer by 10 to get font size
('unknown1', '5b'), # 5 bytes of undeciphered data (types are guesses)
('setting1', 'B'), # includes bold setting
('setting2', 'B'), # italics, underline, strikeout specified in bitmap
]
XAxisSettingsDescription = [
# undocumented and reverse engineered
('unknown1', '3l2d'), # 28 bytes of undeciphered data (types are guesses)
('plotted_x_range', 'dd'),
('unknown2', 'd'), # 8 bytes of undeciphered data (types are guesses)
('auto_x_ticks', 'Z'),
('x_minor_ticks', 'd'),
('x_major_ticks', 'd'),
('x_axis_title', 'S'),
('unknown3', 'h'), # 2 bytes of undeciphered data (types are guesses)
('units', 'S'),
('unknown4', 'h'), # 2 bytes of undeciphered data (types are guesses)
]
EpochInfoDescription = [
# undocumented and reverse engineered
('title', 'S'),
('t_start', 'd'),
('t_stop', 'd'),
('y_pos', 'd'),
]
|
import os
import sys
import imageio
import numpy as np
import utils
class VideoRecorder(object):
def __init__(self, view, root_dir, height=256, width=256, fps=60):
self.view = view
self.save_dir = utils.make_dir(root_dir, 'video') if root_dir else None
self.height = height
self.width = width
self.fps = fps
if str(self.view) == 'both':
self.frames1 = []
self.frames3 = []
else:
self.frames = []
def init(self, enabled=True):
if str(self.view) == 'both':
self.frames1 = []
self.frames3 = []
else:
self.frames = []
self.enabled = self.save_dir is not None and enabled
def record(self, env):
if self.enabled:
if str(self.view) == 'both':
frame1 = env.env.render_overwrite(offscreen=True, overwrite_view='view_1', resolution=(84, 84))
frame1 = np.transpose(frame1, (1, 2, 0)) # e.g. (3, 84, 84) -> (84, 84, 3) because the latter is needed to save a gif
self.frames1.append(frame1)
frame3 = env.env.render_overwrite(offscreen=True, overwrite_view='view_3', resolution=(84, 84))
frame3 = np.transpose(frame3, (1, 2, 0)) # e.g. (3, 84, 84) -> (84, 84, 3) because the latter is needed to save a gif
self.frames3.append(frame3)
else:
frame = env.env.render(offscreen=True, camera_name="configured_view", resolution=(84, 84))
frame = np.transpose(frame, (1, 2, 0)) # e.g. (3, 84, 84) -> (84, 84, 3) because the latter is needed to save a gif
self.frames.append(frame)
def save(self, file_name):
if self.enabled:
if str(self.view) == 'both':
path = os.path.join(self.save_dir, file_name + '-view_1.gif')
imageio.mimsave(path, self.frames1, fps=self.fps)
path = os.path.join(self.save_dir, file_name + '-view_3.gif')
imageio.mimsave(path, self.frames3, fps=self.fps)
else:
path = os.path.join(self.save_dir, file_name + '.gif')
imageio.mimsave(path, self.frames, fps=self.fps)
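# A minimal usage sketch (illustrative only; 'env' and 'agent' are
# hypothetical stand-ins for a gym-style environment exposing the render
# methods assumed in record() above and a policy with an act() method):
#
#     recorder = VideoRecorder(view='view_1', root_dir='./logs')
#     recorder.init(enabled=True)
#     obs, done = env.reset(), False
#     while not done:
#         obs, reward, done, info = env.step(agent.act(obs))
#         recorder.record(env)
#     recorder.save('episode_0')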
|
import ifcb
from ifcb.io.client import Client
from ifcb.io.path import Filesystem
from ifcb.io.stitching import stitch, find_pairs, StitchedBin
import os
import sys
def test_bin(pid):
#client = Client()
client = Filesystem(['../exampleData'])
catch = False
dir = os.path.join('stitch',ifcb.lid(pid))
try:
os.mkdir(dir)
except OSError:  # ignore if the directory already exists
pass
unstitched = client.resolve(pid)
stitched = StitchedBin(unstitched)
print stitched
pairs = list(find_pairs(unstitched))
for target,ignore in pairs:
t = stitched.target(target)
print 'Got %s' % t
basename = ifcb.lid(t.pid)
t.image().save(os.path.join(dir,basename+'.png'),'png')
t.mask().save(os.path.join(dir,basename+'_mask.png'),'png')
t.stitch_raw().save(os.path.join(dir,basename+'_raw.png'),'png')
def test_rotate(pid):
for client,fn in zip([Client(), Filesystem(['../exampleData'])],['target_web','target_file']):
target = client.resolve(pid)
print target.info
fn = os.path.join('/Users/jfutrelle/Pictures',fn+'.png')
target.image().save(fn,'png')
print 'saved %s' % fn
if __name__=='__main__':
#test_bin('http://ifcb-data.whoi.edu/IFCB5_2010_273_135001')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_294_114650')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_282_235113')
#test_bin('http://ifcb-data.whoi.edu/IFCB5_2010_264_121939')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_287_152253')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_295_022253')
#test_bin('http://ifcb-data.whoi.edu/IFCB5_2010_273_121647')
#test_bin('http://ifcb-data.whoi.edu/IFCB5_2010_242_133222')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_297_142938')
#test_bin('http://ifcb-data.whoi.edu/IFCB5_2011_305_135951')
test_bin('http://ifcb-data.whoi.edu/IFCB5_2010_264_102403')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2011_231_182610')
#test_bin('http://ifcb-data.whoi.edu/IFCB1_2009_216_075913')
|
from plumbum import local
import benchbuild as bb
from benchbuild.environments.domain.declarative import ContainerImage
from benchbuild.source import HTTP
from benchbuild.utils.cmd import cat, make, mkdir, mv, unzip
class Crafty(bb.Project):
""" crafty benchmark """
NAME = 'crafty'
DOMAIN = 'scientific'
GROUP = 'benchbuild'
SOURCE = [
HTTP(
remote={
'25.2': (
'http://www.craftychess.com/downloads/source/'
'crafty-25.2.zip'
)
},
local='crafty.zip'
),
HTTP(
remote={
'1.0': 'http://www.craftychess.com/downloads/book/book.bin'
},
local='book.bin'
),
HTTP(
remote={
'2016-11-crafty.tar.gz':
'http://lairosiel.de/dist/2016-11-crafty.tar.gz'
},
local='inputs.tar.gz'
)
]
CONTAINER = ContainerImage().from_('benchbuild:alpine')
def compile(self):
crafty_source = local.path(self.source_of('crafty.zip'))
book_source = local.path(self.source_of('book.bin'))
unpack_dir = "crafty.src"
mkdir(unpack_dir)
with local.cwd(unpack_dir):
unzip(crafty_source)
mv(book_source, unpack_dir)
clang = bb.compiler.cc(self)
with local.cwd(unpack_dir):
target_opts = ["-DCPUS=1", "-DSYZYGY", "-DTEST"]
_make = bb.watch(make)
_make(
"target=UNIX", "CC=" + str(clang),
"opt=" + " ".join(target_opts), "crafty-make"
)
def run_tests(self):
unpack_dir = local.path('crafty.src')
test_source = local.path(self.source_of('inputs.tar.gz'))
with local.cwd(unpack_dir):
crafty = bb.wrap("./crafty", self)
_test1 = bb.watch((cat[test_source / "test1.sh"] | crafty))
_test2 = bb.watch((cat[test_source / "test2.sh"] | crafty))
_test1(retcode=[0, 120])
_test2(retcode=[0, 120])
|
from Tkinter import *
import tkMessageBox
def doNothing():
print "OK OK I won't"
root = Tk()
canvas = Canvas(root, width = 200, height = 100)
canvas.pack()
blackLine = canvas.create_line(0,0,200,50)
redLine = canvas.create_line(0,100,200,50, fill = 'red')
greenBox = canvas.create_rectangle(25, 25, 130, 60, fill = 'green')
canvas.delete(redLine) # if you want to delete an object
canvas.delete(ALL) # if you want to delete all objects
root.mainloop()
|
from flask_restx import Namespace, Resource
from flask import request
from .view import TradeView
trade = Namespace('trade', path='/trades')
view = TradeView()
@trade.route('')
class TradeList(Resource):
@trade.doc('get trade list')
def get(self):
'''List all trades'''
return view.get_trades()
@trade.doc('create a trade')
def post(self):
return view.create_trade(request.json)
@trade.route('/instructions')
class TradeInstructions(Resource):
@trade.doc('get instructions')
def post(self):
return view.get_instructions(request.json)
@trade.route('/<int:trade_id>')
class Trade(Resource):
@trade.doc('get trade')
def get(self, trade_id: int):
return view.get_trade(trade_id)
@trade.doc('delete a trade')
def delete(self, trade_id: int):
return view.delete_trade(trade_id)
@trade.doc('update a trade')
def put(self, trade_id: int):
return view.update_trade(trade_id, request.json)
@trade.route('/<int:trade_id>/portfolios')
class TradePortfolios(Resource):
@trade.doc('add portfolios')
def post(self, trade_id: int):
return view.update_portfolios(trade_id, request.json)
@trade.route('/<int:trade_id>/positions')
class TradePositions(Resource):
@trade.doc('get positions')
def get(self, trade_id: int):
args = request.args
return view.get_positions(trade_id, args)
@trade.doc('update positions')
def put(self, trade_id: int):
return view.update_positions(trade_id, request.json)
@trade.route('/<int:trade_id>/prices')
class TradePrices(Resource):
@trade.doc('get prices')
def get(self, trade_id: int):
return view.get_prices(trade_id)
@trade.doc('update iex prices')
def post(self, trade_id: int):
return view.update_prices(trade_id, request.json)
@trade.route('/<int:trade_id>/requests')
class TradeRequests(Resource):
@trade.doc('get requests')
def get(self, trade_id: int):
return view.get_requests(trade_id)
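# A minimal wiring sketch (illustrative; in this project the Flask app and Api
# are presumably created elsewhere, so the names below are placeholders):
#
#     from flask import Flask
#     from flask_restx import Api
#
#     app = Flask(__name__)
#     api = Api(app)
#     api.add_namespace(trade)  # exposes /trades, /trades/<id>, /trades/<id>/positions, ...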
|
# ref: https://github.com/xianchen2/Text_Retrieval_BM25/blob/master/document_retrieval.ipynb
import pickle
import re
from nltk.stem import PorterStemmer
import os
from .documentParser import DocumentParser
class LocalBM25:
b = 0.75
k = 1.2
def __init__(self):
# some preparation
self.refDocuments = []
script_dir = os.path.dirname(__file__) #<-- absolute dir the script is in
rel_path = "db/db_singledocs.list"
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path, 'rb') as document_file:
self.refDocuments = pickle.load(document_file)
self.refDocumentsParsed = DocumentParser(self.refDocuments, 'value')
# BM25
self.avgdl = self.refDocumentsParsed.avgdl
self.idf = self.refDocumentsParsed.idf
def rankDocuments(self, query, documents):
self.queryDocumentsParsed = DocumentParser(documents, 'text')
self.file_to_terms = self.queryDocumentsParsed.file_to_terms
self.regdex = self.queryDocumentsParsed.regdex
self.invertedIndex = self.queryDocumentsParsed.invertedIndex
self.dl = self.queryDocumentsParsed.dl
self.total_score = self.BM25scores(self.queryParser(query))
return self.ranked_docs()
def queryParser(self, query):
q = query.lower()
# substitute all non-word characters with whitespace
pattern = re.compile(r'\W+')
q = pattern.sub(' ', q)
# split text into words (tokenized list for a document)
q = q.split()
# stemming words
stemmer = PorterStemmer()
q = [stemmer.stem(w) for w in q ]
return q
def get_score (self,filename,qlist):
'''
filename: filename
qlist: termlist of the query
output: the score for one document
'''
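# Okapi BM25 contribution of each query term w, as implemented below:
#     idf(w) * tf * (k + 1) / (tf + k * (1 - b + b * |d| / avgdl))
# where tf is the count of w in this document, |d| the document length in
# tokens, and avgdl the average document length of the reference collection.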
score = 0
#print('filename: ' + filename)
for w in qlist:
if w not in self.file_to_terms[filename]:
continue
if w not in self.idf:
self.idf[w] = 10 # <-- default high value for rare items
#print('the word: '+ w)
wc = len(self.invertedIndex[w][filename])
score += self.idf[w] * ((wc)* (self.k+1)) / (wc + self.k *
(1 - self.b + self.b * self.dl[filename] / self.avgdl))
#print(score)
return score
def BM25scores(self,qlist):
'''
output: a dictionary with filename as key, score as value
'''
total_score = {}
for doc in self.file_to_terms.keys():
total_score[doc] = self.get_score(doc,qlist)
return total_score
def ranked_docs(self):
ranked_docs = sorted(self.total_score.items(), key=lambda x: x[1], reverse=True)
return ranked_docs
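# A minimal usage sketch (illustrative only; it assumes the pickled reference
# corpus under db/ is present and that each query document is shaped the way
# DocumentParser(documents, 'text') expects, e.g. carrying its text under a
# 'text' field):
#
#     bm25 = LocalBM25()
#     docs = [{'text': 'heart rate sensor readings'},
#             {'text': 'ambient temperature log'}]
#     print(bm25.rankDocuments('heart rate', docs))  # [(doc_key, score), ...]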
|
# Imports here
from torchvision import transforms, models
import torch
from PIL import Image
import json
import argparse
def load_model(model_path , checkpoint = False):
load_point = torch.load(model_path)
if checkpoint == True: # Loading the checkpoint
if load_point['name'] == 'vgg19':
my_model = models.vgg19(pretrained = True)
elif load_point['name'] == 'vgg16':
my_model = models.vgg16(pretrained = True)
elif load_point['name'] == 'vgg13':
my_model = models.vgg13(pretrained = True)
elif load_point['name'] == 'vgg11':
my_model = models.vgg11(pretrained = True)
for param in my_model.parameters():
param.requires_grad = False #turning off tuning of the model
my_model.classifier = load_point['classifier']
my_model.load_state_dict(load_point['state_dict'])
my_model.class_to_idx = load_point['mapping']
my_model.name = load_point['name']
else: # Loading the complete model
my_model = torch.load(model_path)
my_model.to(device)
return my_model
def process_image(image):
pil_image = Image.open(image)
transform = transforms.Compose([transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# transform image to tensor
pil_transform = transform(pil_image)
return pil_transform
def predict(image_path, model, topk=5):
# processing image
test_image = process_image(image_path)
test_image = test_image.to(device)
test_image = test_image.unsqueeze(dim = 0)
model.eval()
with torch.no_grad():
test_prediction = model(test_image)
test_prediction = torch.exp(test_prediction)
probs, classes = test_prediction.topk(topk)
model.train()
class_list = []
for c in classes[0]:
for k,v in model.class_to_idx.items():
if c == v:
class_list.append(k)
return [round(p, 5) for p in probs[0].tolist()], class_list
def main():
#initialize the parser
parser = argparse.ArgumentParser(description='to get the prediction type: python predict.py <image_path> <model_path> --checkpoint <to load a checkpoint> --top_k <k> --category_names <label_path> --gpu')
#Add the positional parameters
parser.add_argument('image', help='Path to the image', type = str)
parser.add_argument('model', help='Path to load model', type=str)
#Add the optional parameters
parser.add_argument('--checkpoint', help='load model checkpoint', default=False, action='store_true')
parser.add_argument('--top_k', help='Top k predictions', type=int, default=5)
parser.add_argument('--category_names', help='path to labels map', type=str, default='cat_to_name.json')
parser.add_argument('--gpu', help='Enable gpu', default=False, action='store_true')
#Parse the argument
args = parser.parse_args()
# setting the device
global device
device = torch.device('cuda' if args.gpu and torch.cuda.is_available() else 'cpu')
#get the model from the model path
model = load_model(args.model, args.checkpoint)
# get the labels
with open(args.category_names, 'r') as f:
cat_to_name = json.load(f)
#predict the image
prob, classes = predict(args.image, model, args.top_k)
print('prediction: ', cat_to_name[classes[0]])
print('top {} probabilities: {}'.format(args.top_k, prob))
print('top {} classes: {}'.format(args.top_k, classes))
if __name__ == '__main__':
main()
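
# Example invocation (hypothetical file names, shown only to illustrate the
# command-line interface defined by the argparse setup above):
#   python predict.py my_flower.jpg my_checkpoint.pth --checkpoint --top_k 3 --gpu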
|
import requests
import json
se = requests.session()
def register_test():
register_info = json.dumps({'phone_number': '18798318100', 'username': 'skil', 'password': '123456'})
r = requests.post('http://127.0.0.1:5000/register', data=register_info)
print(r.text)
def login_test():
login_info = json.dumps({'phone_number':'18798318100', 'password': '123456', 'remember_me': 'True'})
r = se.post('http://127.0.0.1:5000/login', data=login_info)
print(r.text, r.cookies)
return r.cookies
def Edit_test():
cookies = login_test()
index = se.get('http://127.0.0.1:5000/').text
print(index)
user_info = json.dumps({'phone_number': '18798318100', 'location': 'Beijing',
'about_me': 'player one', 'age': 24, 'sex': 'male'})
r = se.post('http://127.0.0.1:5000/api/user_info', data=user_info)
print(r.text)
index = se.get('http://127.0.0.1:5000/').text
print(index)
register_test()
Edit_test()
|
import json
import pytest
from podle.views import NewsletterWebhook
pytestmark = pytest.mark.django_db()
class TestNewsletterWebhook:
def test_post_works(self, newsletter, request_builder):
# GIVEN
assert not newsletter.audio_url
filename = "tests/fixtures/webhook.json"
with open(filename) as f:
webhook_response = json.load(f)
request = request_builder.post(data=webhook_response)
# WHEN
response = NewsletterWebhook.as_view()(request)
# THEN
assert response.status_code == 200
newsletter.refresh_from_db()
assert newsletter.audio_url == webhook_response["newsletter_url"]
def test_post_404(self, request_builder):
# GIVEN
filename = "tests/fixtures/webhook.json"
with open(filename) as f:
webhook_response = json.load(f)
request = request_builder.post(data=webhook_response)
# WHEN
response = NewsletterWebhook.as_view()(request)
# THEN
assert response.status_code == 404
|
#!/usr/bin/env python3
import json
import time
import os
import shutil
import numpy as np
from coco_helper import (load_preprocessed_batch, image_filenames, original_w_h,
class_labels, num_classes, bg_class_offset, class_map,
MODEL_DATA_LAYOUT, MODEL_COLOURS_BGR, MODEL_INPUT_DATA_TYPE, MODEL_DATA_TYPE, MODEL_USE_DLA,
MODEL_IMAGE_WIDTH, MODEL_IMAGE_HEIGHT, MODEL_IMAGE_CHANNELS,
IMAGE_DIR, IMAGE_LIST_FILE, MODEL_NORMALIZE_DATA, SUBTRACT_MEAN, GIVEN_CHANNEL_MEANS, BATCH_SIZE, BATCH_COUNT)
from tensorrt_helper import (initialize_predictor, inference_for_given_batch)
## Post-detection filtering by confidence score:
#
SCORE_THRESHOLD = float(os.getenv('CK_DETECTION_THRESHOLD', 0.0))
## Model properties:
#
MODEL_MAX_PREDICTIONS = int(os.getenv('ML_MODEL_MAX_PREDICTIONS', 100))
## Writing the results out:
#
CUR_DIR = os.getcwd()
DETECTIONS_OUT_DIR = os.path.join(CUR_DIR, os.environ['CK_DETECTIONS_OUT_DIR'])
ANNOTATIONS_OUT_DIR = os.path.join(CUR_DIR, os.environ['CK_ANNOTATIONS_OUT_DIR'])
RESULTS_OUT_DIR = os.path.join(CUR_DIR, os.environ['CK_RESULTS_OUT_DIR'])
FULL_REPORT = os.getenv('CK_SILENT_MODE', '0') in ('NO', 'no', 'OFF', 'off', '0')
def main():
setup_time_begin = time.time()
# Cleanup results directory
if os.path.isdir(DETECTIONS_OUT_DIR):
shutil.rmtree(DETECTIONS_OUT_DIR)
os.mkdir(DETECTIONS_OUT_DIR)
pycuda_context, max_batch_size, input_volume, output_volume, num_layers = initialize_predictor()
print('Images dir: ' + IMAGE_DIR)
print('Image list file: ' + IMAGE_LIST_FILE)
print('Batch size: {}'.format(BATCH_SIZE))
print('Batch count: {}'.format(BATCH_COUNT))
    print('Detections dir: ' + DETECTIONS_OUT_DIR)
print('Normalize: {}'.format(MODEL_NORMALIZE_DATA))
print('Subtract mean: {}'.format(SUBTRACT_MEAN))
print('Per-channel means to subtract: {}'.format(GIVEN_CHANNEL_MEANS))
print("Data layout: {}".format(MODEL_DATA_LAYOUT) )
print("DLA mode used: {}".format(MODEL_USE_DLA) )
print('Model image height: {}'.format(MODEL_IMAGE_HEIGHT))
print('Model image width: {}'.format(MODEL_IMAGE_WIDTH))
print('Model image channels: {}'.format(MODEL_IMAGE_CHANNELS))
print('Model input data type: {}'.format(MODEL_INPUT_DATA_TYPE))
print('Model (internal) data type: {}'.format(MODEL_DATA_TYPE))
print('Model BGR colours: {}'.format(MODEL_COLOURS_BGR))
print('Model max_batch_size: {}'.format(max_batch_size))
print('Model output volume (number of outputs per one prediction): {}'.format(output_volume))
print('Model num_layers: {}'.format(num_layers))
print('Number of class_labels: {}'.format(num_classes))
print('Post-detection confidence score threshold: {}'.format(SCORE_THRESHOLD))
print("")
setup_time = time.time() - setup_time_begin
# Run batched mode
test_time_begin = time.time()
total_load_time = 0
next_batch_offset = 0
total_inference_time = 0
first_inference_time = 0
images_loaded = 0
for batch_index in range(BATCH_COUNT):
batch_number = batch_index+1
begin_time = time.time()
current_batch_offset = next_batch_offset
batch_data, next_batch_offset = load_preprocessed_batch(image_filenames, current_batch_offset)
load_time = time.time() - begin_time
total_load_time += load_time
images_loaded += BATCH_SIZE
trimmed_batch_results, inference_time_s = inference_for_given_batch(batch_data)
print("[batch {} of {}] loading={:.2f} ms, inference={:.2f} ms".format(
batch_number, BATCH_COUNT, load_time*1000, inference_time_s*1000))
total_inference_time += inference_time_s
# Remember inference_time for the first batch
if batch_index == 0:
first_inference_time = inference_time_s
# Process results
for index_in_batch in range(BATCH_SIZE):
single_image_predictions = trimmed_batch_results[index_in_batch]
num_boxes = single_image_predictions[MODEL_MAX_PREDICTIONS*7].view('int32')
global_image_index = current_batch_offset + index_in_batch
width_orig, height_orig = original_w_h[global_image_index]
filename_orig = image_filenames[global_image_index]
detections_filename = os.path.splitext(filename_orig)[0] + '.txt'
detections_filepath = os.path.join(DETECTIONS_OUT_DIR, detections_filename)
with open(detections_filepath, 'w') as det_file:
det_file.write('{:d} {:d}\n'.format(width_orig, height_orig))
for row in range(num_boxes):
(image_id, ymin, xmin, ymax, xmax, confidence, class_number) = single_image_predictions[row*7:(row+1)*7]
if confidence >= SCORE_THRESHOLD:
class_number = int(class_number)
if class_map:
class_number = class_map[class_number]
image_id = int(image_id)
x1 = xmin * width_orig
y1 = ymin * height_orig
x2 = xmax * width_orig
y2 = ymax * height_orig
class_label = class_labels[class_number - bg_class_offset]
det_file.write('{:.2f} {:.2f} {:.2f} {:.2f} {:.3f} {} {}\n'.format(
x1, y1, x2, y2, confidence, class_number, class_label))
pycuda_context.pop()
test_time = time.time() - test_time_begin
if BATCH_COUNT > 1:
avg_inference_time = (total_inference_time - first_inference_time) / (images_loaded - BATCH_SIZE)
else:
avg_inference_time = total_inference_time / images_loaded
avg_load_time = total_load_time / images_loaded
# Store benchmarking results:
output_dict = {
'run_time_state': {
'setup_time_s': setup_time,
'test_time_s': test_time,
'images_load_time_total_s': total_load_time,
'images_load_time_avg_s': avg_load_time,
'prediction_time_total_s': total_inference_time,
'prediction_time_avg_s': avg_inference_time,
'avg_time_ms': avg_inference_time * 1000,
'avg_fps': 1.0 / avg_inference_time,
'batch_time_ms': avg_inference_time * 1000 * BATCH_SIZE,
'batch_size': BATCH_SIZE,
}
}
with open('tmp-ck-timer.json', 'w') as out_file:
json.dump(output_dict, out_file, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
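
# A minimal sketch (not used by the benchmark itself) showing how the per-image
# files written to DETECTIONS_OUT_DIR above could be read back: the first line
# holds the original image width and height, and each following line holds one
# detection as "x1 y1 x2 y2 confidence class_number class_label".
def read_detections_file(path):
    with open(path) as det_file:
        width, height = (int(v) for v in det_file.readline().split())
        detections = []
        for line in det_file:
            x1, y1, x2, y2, confidence, class_number, class_label = line.split(None, 6)
            detections.append((float(x1), float(y1), float(x2), float(y2),
                               float(confidence), int(class_number), class_label.strip()))
    return (width, height), detections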
|
import string
def MOS_ASYMPTOTE(
states,
bandgap=None,
title="Title",
draw_band=False,
draw_occupation=False,
draw_energy=False
):
"""
:states: TODO
:returns: TODO
"""
energies = "{"
spins = "{"
occupations = "{"
bands = "{"
STATES_STRING=""
if not bandgap:
VB = max([s["energy"] for s in states])
LB = min([s["energy"] for s in states])
draw_band_gap = "false"
else:
VB = bandgap[1]
LB = bandgap[0]
draw_band_gap = "true"
for state in states:
energy = str(state["energy"])
spin = str(state["spin"])
occupation = str(state["occupation"])
band = str(state["number"])
STATES_STRING += """\
//energy, spin, occupation, band
state(%s, %s, %s, %s)
.setAutoPosition()
.draw(
draw_band = DRAW_BAND,
draw_occupation = DRAW_OCCUPATION,
draw_energy = DRAW_ENERGY
);\n\n"""%(energy, spin, occupation, band)
return string.Template("""
currentpen = fontsize(20);
string LUMO_TITLE = "$title";
real ENERGIE_LB_PRISTINE = $LB;
real ENERGIE_VB_PRISTINE = $VB;
real OBERKANTE = 100;
real UNTERKANTE = 0;
real IMG_WIDTH = 50;
real KANTEN_HEIGHT = 20;
bool DRAW_ENERGY = $draw_energy;
bool DRAW_BAND = $draw_band;
bool DRAW_BAND_GAP = $draw_band_gap;
bool DRAW_OCCUPATION = $draw_occupation;
unitsize(.2cm);
struct state {
static int state_count;
real energy;
real occupation;
real band;
real value;
pen color;
pen spin_color;
pen spin_occupied_color = black;
pen spin_unoccupied_color = 0.7*white+dashed;
pen occupied_color = red;
pen unoccupied_color = gray;
string title = "";
real spin = 0;
real VB = ENERGIE_VB_PRISTINE;
real LB = ENERGIE_LB_PRISTINE;
real DASH_WIDTH = 25;
real DASH_HEIGHT = 1.8;
real X_COORD = 0;
real Y_OFFSET = 0;
real OCCUPATION_CUTOFF = 0.1;
real getPlottingValue (){
real val = 100*(energy - VB)/(LB-VB);
return val + Y_OFFSET;
};
bool isOccupied(){
if ( occupation >= OCCUPATION_CUTOFF ) {
return true;
} else {
return false;
}
};
state setStyle(){
real parameter;
if ( spin != 0 ) {
parameter = occupation;
} else {
parameter = occupation/2;
}
color = parameter*occupied_color+(1-parameter)*unoccupied_color;
spin_color = parameter*spin_occupied_color+(1-parameter)*spin_unoccupied_color+linewidth(3);
return this;
};
void operator init(real energy, real spin, real occupation, real band){
this.energy = energy;
this.occupation = occupation;
this.band = band;
this.spin = spin;
this.value = getPlottingValue();
state_count += 1;
setStyle();
};
pair getMiddlePoint ( ){
real x,y;
x = X_COORD+(DASH_WIDTH)/2;
y = value + (DASH_HEIGHT)/2;
return (x,y);
};
state setColors (pen color, pen spin_color=this.spin_color){
this.color = color;
this.spin_color = spin_color;
return this;
};
state setAutoPosition (){
int controller = state_count%%2;
X_COORD=0+controller*(DASH_WIDTH);
return this;
};
bool isLeft (){
if ( getMiddlePoint().x >= IMG_WIDTH/2 ) {
return false;
}
else{
return true;
}
};
bool isUp (){ return spin == 1?true:false; };
bool isDown (){ return spin == 2?true:false; };
pair getSpinPosition (bool up=false){
real x_deviation = 0.25*DASH_WIDTH;
pair middle = getMiddlePoint();
if (up) {
return (middle - (-x_deviation,0));
} else {
return (middle + (-x_deviation,0));
}
};
path getSpinArrow (){
bool up = isUp();
pair position = getSpinPosition(up);
real height = 3*DASH_HEIGHT;
if (isUp()) {
return position - (0,height/2) -- position + (0,height/2);
} else {
return position + (0,height/2) -- position - (0,height/2);
}
};
state draw_energy (){
if ( isLeft() ) {
label((string)energy, (X_COORD,value), W, red);
} else {
label((string)energy, (X_COORD+DASH_WIDTH, value), E, red);
}
return this;
};
state draw_spin(){
path spinArrow = getSpinArrow();
draw(spinArrow, spin_color,Arrow(15));
return this;
};
state draw (
bool draw_band = false,
bool draw_occupation = true,
bool draw_energy = true
){
filldraw(
box(
(X_COORD,value)
,(X_COORD+DASH_WIDTH,value+DASH_HEIGHT)
),
color,color*0
);
if ( draw_band )
label(scale(1)*(string)band , getMiddlePoint() - (DASH_WIDTH/4 , 0) , black);
if ( draw_occupation && occupation != 0)
label(scale(1)*(string)occupation , getSpinPosition(!isUp()) , black);
if ( draw_energy ) draw_energy();
if ( spin != 0 ) draw_spin();
return this;
};
};
//----------------------------
//- Valence and Cond bands -
//----------------------------
label(LUMO_TITLE, (25, 100+KANTEN_HEIGHT/1.1), 0.8*blue);
path UNTERKANTE_BOX = box((0 , UNTERKANTE) , (IMG_WIDTH , UNTERKANTE - KANTEN_HEIGHT));
path OBERKANTE_BOX = box((0 , OBERKANTE) , (IMG_WIDTH , OBERKANTE + KANTEN_HEIGHT));
pen bandStyle = .8*white;
filldraw(OBERKANTE_BOX , bandStyle, bandStyle);
filldraw(UNTERKANTE_BOX , bandStyle, bandStyle);
/* DRAW STATES */
/***************/
$states
//-----------
//- SCALE -
//-----------
real pointsToEnergy ( real point ){
return (ENERGIE_LB_PRISTINE-ENERGIE_VB_PRISTINE)*point/100 + ENERGIE_VB_PRISTINE;
};
int steps = 2;
real width = 100/steps;
// Bandgap
draw((50,0)--(50,100),dashed+linewidth(.5), Bars(4mm));
label((string)(ENERGIE_LB_PRISTINE-ENERGIE_VB_PRISTINE)+" eV", (50,50), Fill(white));
label("VB", (IMG_WIDTH,0)+UNTERKANTE, N, Fill(white));
label("CB", (IMG_WIDTH,100)+UNTERKANTE, S, Fill(white));
// SCALE
draw((0,0)--(0,100), linewidth(1));
for ( int i = 0; i <= steps; i+=1 ) {
// SCALE TICKS
draw((0,width*i)--(2,width*i));
// SCALE LABELS
label(scale(0.7)*(string)pointsToEnergy(width*i), (1,width*i), E, Fill(white));
}
// vim: nospell
""").safe_substitute(
title=title,
LB=LB,
VB=VB,
draw_energy=str(draw_energy).lower(),
draw_band_gap=str(draw_band_gap).lower(),
draw_band=str(draw_band).lower(),
draw_occupation=str(draw_occupation).lower(),
states=STATES_STRING
)
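
# A minimal usage sketch, assuming a list of state dictionaries with the keys
# consumed above ("energy", "spin", "occupation", "number"); the band edges and
# the output file name are illustrative only.
if __name__ == "__main__":
    example_states = [
        {"energy": 1.2, "spin": 1, "occupation": 1.0, "number": 101},
        {"energy": 2.8, "spin": 2, "occupation": 0.0, "number": 102},
    ]
    asy_source = MOS_ASYMPTOTE(example_states,
                               bandgap=(5.0, 0.0),
                               title="Example defect states",
                               draw_energy=True)
    with open("states.asy", "w") as handle:
        handle.write(asy_source)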
|
"""
@author: Badita Marin-Georgian
@email: geo.badita@gmail.com
@date: 29.03.2020 20:58
"""
import numpy as np
from env_interpretation.state_utils import get_state_from_observation
from env_interpretation.utils import n_from_prod
def test_env_3(env3_robots):
env_data = env3_robots.get_env_metadata()
env3_robots.reset()
state = env3_robots.get_current_state()
action = [0, 0, 0]
action = n_from_prod(env_data['sets'], action)
new_state, reward, done, _ = env3_robots.step(action)
new_state_from_obs = get_state_from_observation(new_state)
assert done == False
assert reward == -0.666666
assert state.robots_data == new_state_from_obs.robots_data
assert state.time + 1 == new_state_from_obs.time
assert new_state_from_obs.positions == [2, 2, 3]
assert env3_robots.state.all() == np.array(new_state).all()
for i in range(3):
action = [0, 0, 0]
action = n_from_prod(env_data['sets'], action)
new_state, reward, done, _ = env3_robots.step(action)
action = [0, 0, 70]
action = n_from_prod(env_data['sets'], action)
new_state, reward, done, _ = env3_robots.step(action)
assert done == False
assert reward == 0.001
assert get_state_from_observation(new_state).time == 6
assert get_state_from_observation(new_state).robots_data == [15, 30, 0]
def test_env_termination(env4_robots):
env_data = env4_robots.get_env_metadata()
env4_robots.reset()
action_map_terminate = {
'-1': [0] * 6,
'2': [25] + [0] * 5,
'4': [0, 25] + [0] * 4,
'6': [0] * 5 + [25],
'8': [0, 25] + [0] * 4
}
for i in range(1, 9):
if action_map_terminate.get(str(i), None):
action = action_map_terminate[str(i)]
action = n_from_prod(env_data['sets'], action)
_, __, done, ___ = env4_robots.step(action)
else:
action = action_map_terminate['-1']
action = n_from_prod(env_data['sets'], action)
_, __, done, ___ = env4_robots.step(action)
assert done == True
|
from knowledgerepr import fieldnetwork
from ontomatch import glove_api
from nltk.corpus import stopwords
import numpy as np
import pickle
import itertools
import operator
from ontomatch import javarandom
from dataanalysis import nlp_utils as nlp
# minhash variables
k = 512
mersenne_prime = 536870911
rnd_seed = 1
rnd = javarandom.Random(rnd_seed)
random_seeds = []
a = np.int64()
for i in range(k):
randoms = [rnd.nextLong(), rnd.nextLong()]
random_seeds.append(randoms)
def minhash(str_values):
def java_long(number):
return (number + 2**63) % 2**64 - 2**63
def remainder(a, b):
return a - (b * int(a/b))
def hash_this(value):
h = mersenne_prime
length = len(value)
for i in range(length):
h = 31 * h + ord(value[i])
return h
mh = [9223372036854775807 for i in range(k)]
for v in str_values:
v = nlp.camelcase_to_snakecase(v)
v = v.replace('_', ' ')
v = v.replace('-', ' ')
v = v.lower()
for token in v.split(' '):
if token not in stopwords.words('english'):
raw_hash = hash_this(token)
for i in range(k):
first_part = java_long(random_seeds[i][0] * raw_hash)
second_part = java_long(random_seeds[i][1])
nomodule = java_long(first_part + second_part)
h = java_long(remainder(nomodule, mersenne_prime))
if h < mh[i]:
mh[i] = h
return mh
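
# Illustrative sketch (not used elsewhere in this module): two signatures
# produced by minhash() above approximate the Jaccard similarity of the
# underlying token sets by the fraction of hash slots on which they agree.
def estimate_jaccard(signature_a, signature_b):
    matches = sum(1 for a, b in zip(signature_a, signature_b) if a == b)
    return matches / len(signature_a)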
def extract_cohesive_groups(table_name, attrs, sem_sim_threshold=0.7, group_size_cutoff=0):
def does_it_keep_group_coherent(running_group, a, b, threshold):
if len(running_group) == 0:
return True
av = glove_api.get_embedding_for_word(a)
bv = glove_api.get_embedding_for_word(b)
for el in running_group:
elv = glove_api.get_embedding_for_word(el)
sim_a = glove_api.semantic_distance(elv, av)
if sim_a > threshold:
sim_b = glove_api.semantic_distance(elv, bv)
if sim_b > threshold:
return True
else:
return False
else:
return False
tokens = set()
ctb = nlp.curate_string(table_name)
tokens |= set(ctb.split(' '))
for attr in attrs:
cattr = nlp.curate_string(attr)
tokens |= set(cattr.split(' '))
tokens = [t for t in tokens if t not in stopwords.words('english') and len(t) > 1]
running_groups = [set()]
for a, b in itertools.combinations(tokens, 2):
av = glove_api.get_embedding_for_word(a)
bv = glove_api.get_embedding_for_word(b)
if av is None or bv is None:
continue
sim = glove_api.semantic_distance(av, bv)
if sim > sem_sim_threshold: # try to add to existing group
added_to_existing_group = False
for running_group in running_groups:
ans = does_it_keep_group_coherent(running_group, a, b, sem_sim_threshold)
if ans: # Add to as many groups as necessary
added_to_existing_group = True
running_group.add(a)
running_group.add(b)
if not added_to_existing_group:
running_group = set()
running_group.add(a)
running_group.add(b)
running_groups.append(running_group)
return [(sem_sim_threshold, group) for group in running_groups if len(group) > group_size_cutoff]
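
# A minimal usage sketch (illustrative table and column names; glove_api must
# have a language model loaded first): group semantically related tokens drawn
# from a table name and its attributes.
#   groups = extract_cohesive_groups("employee_records",
#                                    ["first_name", "last_name", "hire_date"])
#   for threshold, tokens in groups:
#       print(threshold, tokens)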
def extract_cohesive_groups1(table_name, attrs):
tokens = set()
ctb = nlp.curate_string(table_name)
tokens |= set(ctb.split(' '))
for attr in attrs:
cattr = nlp.curate_string(attr)
tokens |= set(cattr.split(' '))
#tokens = [t for t in tokens if t not in stopwords.words('english') and len(t) > 1]
token_vector = [(t, glove_api.get_embedding_for_word(t)) for t in tokens
if t not in stopwords.words('english') and len(t) > 1
and glove_api.get_embedding_for_word(t) is not None]
threshold = 0.5
group = set()
for a, b in itertools.combinations(token_vector, 2):
sim = glove_api.semantic_distance(a[1], b[1])
if sim > threshold:
group.add(a[0])
group.add(b[0])
#group2 = extract_cohesive_groups2(table_name, attrs)
return [(threshold, group)] #, group2
def extract_cohesive_groups2(table_name, attrs):
def maybe_add_new_set(groups, current):
# Right now, filter duplicate sets, and subsumed sets as well
score, current_set = current
for score, set_attrs in groups:
if len(current_set) == len(set_attrs) and len(current_set - set_attrs) == 0:
return # if repeated, then just return without adding
len_a = len(current_set)
len_b = len(set_attrs)
if len_a > len_b:
if len(set_attrs - current_set) == 0:
return
else:
if len((current_set - set_attrs)) == 0:
return
groups.append(current) # otherwise add and finish
groups = []
tokens = set()
ctb = nlp.curate_string(table_name)
tokens |= set(ctb.split(' '))
for attr in attrs:
cattr = nlp.curate_string(attr)
tokens |= set(cattr.split(' '))
tokens = [t for t in tokens if t not in stopwords.words('english') and len(t) > 1]
for anchor in tokens:
threshold = 0.7
current = (threshold, set()) # keeps (score, []) cohesiveness score and list of attrs that honor it
for t in tokens:
if anchor == t: # not interested in self-comparison
continue
anchor_v = glove_api.get_embedding_for_word(anchor)
t_v = glove_api.get_embedding_for_word(t)
if anchor_v is not None and t_v is not None:
ss = glove_api.semantic_distance(anchor_v, t_v)
if ss > current[0]:
new_set = current[1]
new_set.add(anchor)
new_set.add(t)
#current = (ss, new_set)
current = (threshold, new_set)
if len(current[1]) > 0:
maybe_add_new_set(groups, current)
return groups
def store_signatures(signatures, path):
f = open(path + '/semantic_vectors.pkl', 'wb')
pickle.dump(signatures, f)
f.close()
def load_signatures(path):
f = open(path + '/semantic_vectors.pkl', 'rb')
semantic_vectors = pickle.load(f)
f.close()
return semantic_vectors
def read_table_columns(path_to_serialized_model, network=False):
# If the network is not provided, then we use the path to deserialize from disk
if not network:
network = fieldnetwork.deserialize_network(path_to_serialized_model)
source_ids = network._get_underlying_repr_table_to_ids()
col_info = network._get_underlying_repr_id_to_field_info()
cols = []
# for table_name, field_ids in ...
for k, v in source_ids.items():
db_name = None
for el in v:
(db_name, sn_name, fn_name, data_type) = col_info[el]
cols.append(fn_name)
yield (db_name, k, cols)
cols.clear()
def generate_table_vectors(path_to_serialized_model, network=False):
table_vectors = dict()
for db_name, table_name, cols in read_table_columns(path_to_serialized_model, network=network):
semantic_vectors = []
seen_tokens = []
for c in cols:
c = c.replace('_', ' ')
tokens = c.split(' ')
for token in tokens:
token = token.lower()
if token not in stopwords.words('english'):
if token not in seen_tokens:
seen_tokens.append(token)
vec = glove_api.get_embedding_for_word(token)
if vec is not None:
semantic_vectors.append(vec)
print("Table: " + str(table_name) + " has: " + str(len(semantic_vectors)))
table_vectors[(db_name, table_name)] = semantic_vectors
return table_vectors
def get_semantic_vectors_for(tokens):
s_vectors = []
for t in tokens:
vec = glove_api.get_embedding_for_word(t)
if vec is not None:
s_vectors.append(vec)
return s_vectors
def compute_internal_cohesion(sv):
semantic_sim_array = []
for a, b in itertools.combinations(sv, 2):
sem_sim = glove_api.semantic_distance(a, b)
semantic_sim_array.append(sem_sim)
coh = 0
if len(semantic_sim_array) > 1: # if not empty slice
coh = np.mean(semantic_sim_array)
return coh
def compute_internal_cohesion_elementwise(x, sv):
semantic_sim_array = []
for el in sv:
if x is not None and el is not None:
sem_sim = glove_api.semantic_distance(x, el)
semantic_sim_array.append(sem_sim)
coh = 0
if len(semantic_sim_array) > 1:
coh = np.mean(semantic_sim_array)
return coh
def compute_sem_distance_with(x, sv):
semantic_sim_array = []
for el in sv:
if x is not None and el is not None:
sem_sim = glove_api.semantic_distance(x, el)
semantic_sim_array.append(sem_sim)
ssim = 0
if len(semantic_sim_array) > 1:
ssim = np.mean(semantic_sim_array)
return ssim
def groupwise_semantic_sim(sv1, sv2, threshold):
to_ret = False # the default is false
for a, b in itertools.product(sv1, sv2):
sim = glove_api.semantic_distance(a, b)
if sim < threshold:
return False # return False and terminate as soon as one combination does not satisfy the threshold
to_ret = True # if at least we iterate once, the default changes to True
return to_ret
def compute_semantic_similarity(sv1, sv2,
penalize_unknown_word=False,
add_exact_matches=True,
signal_strength_threshold=0.5):
total_comparisons = 0
skipped_comparisons = 0
accum = []
for a, b in itertools.product(sv1, sv2):
if a is not None and b is not None:
if not (a == b).all() or add_exact_matches: # otherwise this just does not add up
total_comparisons += 1
sim = glove_api.semantic_distance(a, b)
accum.append(sim)
elif (a == b).all() and not add_exact_matches:
skipped_comparisons += 1
elif penalize_unknown_word: # if one is None and penalize is True, then sim = 0
skipped_comparisons += 1
sim = 0
accum.append(sim)
sim = 0
if len(accum) > 0:
sim = np.mean(accum)
strong_signal = False
# in this case we cannot judge the semantic as the word is not in the dict
if total_comparisons == 0:
# capturing the case of [] - [a, ...n] when n > 1: intuition is that many words convey a lot of "meaning"
if len(sv1) > 2 or len(sv2) > 2:
return sim, True
return sim, strong_signal
total_of_all_comparisons = skipped_comparisons + total_comparisons
ratio_of_strong_signal = 0
if total_of_all_comparisons > 0:
ratio_of_strong_signal = float(total_comparisons/total_of_all_comparisons)
# if not many skipped comparisons, then this is a strong signal
if ratio_of_strong_signal >= signal_strength_threshold:
strong_signal = True
return sim, strong_signal
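
# Worked example of the strong-signal bookkeeping above (illustrative counts):
# 8 scored comparisons and 2 skipped ones give
#   ratio_of_strong_signal = 8 / (8 + 2) = 0.8
# which exceeds the default signal_strength_threshold of 0.5, so the mean
# similarity is returned together with strong_signal=True.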
def __compute_semantic_similarity(sv1, sv2):
products = 0
accum = 0
for x in sv1:
products += 1
internal_cohesion = compute_internal_cohesion_elementwise(x, sv1)
distance = compute_sem_distance_with(x, sv2)
denominator = 2 * max(internal_cohesion, distance)
if (internal_cohesion + distance) < 0:
value = 0
else:
if denominator > 0:
                value = (internal_cohesion + distance) / denominator
else:
value = 0
accum += value
ss = accum / products
return ss
def compute_semantic_similarity_cross_average(sv1, sv2):
global_sim = []
for v1 in sv1:
local_sim = []
for v2 in sv2:
sem_sim = glove_api.semantic_distance(v1, v2)
local_sim.append(sem_sim)
ls = 0
if len(local_sim) > 1:
ls = np.mean(local_sim)
elif len(local_sim) == 1:
ls = local_sim[0]
global_sim.append(ls)
gs = 0
if len(global_sim) > 1:
gs = np.mean(global_sim)
elif len(global_sim) == 1:
gs = global_sim[0]
return gs
def compute_semantic_similarity_max_average(sv1, sv2):
global_sim = []
for v1 in sv1:
local_sim = []
for v2 in sv2:
sem_sim = glove_api.semantic_distance(v1, v2)
local_sim.append(sem_sim)
if len(local_sim) > 0:
ls = max(local_sim)
else:
continue
global_sim.append(ls)
gs = 0
if len(global_sim) > 1:
gs = np.mean(global_sim)
elif len(global_sim) == 1:
gs = global_sim[0]
return gs
def compute_semantic_similarity_min_average(sv1, sv2):
global_sim = []
for v1 in sv1:
local_sim = []
for v2 in sv2:
sem_sim = glove_api.semantic_distance(v1, v2)
local_sim.append(sem_sim)
if len(local_sim) > 0:
ls = min(local_sim)
else:
continue
global_sim.append(ls)
gs = 0
if len(global_sim) > 1:
gs = np.mean(global_sim)
elif len(global_sim) == 1:
gs = global_sim[0]
return gs
def compute_semantic_similarity_median(sv1, sv2):
global_sim = []
for v1 in sv1:
local_sim = []
for v2 in sv2:
sem_sim = glove_api.semantic_distance(v1, v2)
local_sim.append(sem_sim)
ls = 0
if len(local_sim) > 1:
ls = np.median(local_sim)
elif len(local_sim) == 1:
ls = local_sim[0]
global_sim.append(ls)
gs = 0
if len(global_sim) > 1:
gs = np.median(global_sim)
elif len(global_sim) == 1:
gs = global_sim[0]
return gs
def compute_semantic_similarity_table(table, semantic_vectors):
sv1 = semantic_vectors[table]
results = dict()
for k, v in semantic_vectors.items():
if sv1 != k:
avg_sim = compute_semantic_similarity_cross_average(sv1, v)
median_sim = compute_semantic_similarity_median(sv1, v)
max_sim = compute_semantic_similarity_max_average(sv1, v)
min_sim = compute_semantic_similarity_min_average(sv1, v)
results[k] = (avg_sim, max_sim, min_sim, median_sim)
return results
def compute_new_ss(table, semantic_vectors):
sv1 = semantic_vectors[table]
res = dict()
for k, v in semantic_vectors.items():
if table != k:
if k == "molecule_hierarchy":
a = 1
ss, strong_signal = compute_semantic_similarity(sv1, v)
#print(str(k) + " -> " + str(ss))
res[k] = ss
return res
def test(path_to_serialized_model):
# Load glove model
print("Loading language model...")
path_to_glove_model = "../glove/glove.6B.100d.txt"
glove_api.load_model(path_to_glove_model)
print("Loading language model...OK")
total_tables = 0
avg_attrs_per_table = 0
avg_groups_per_table = 0
for db, t, attrs in read_table_columns(path_to_serialized_model):
total_tables += 1
groups = extract_cohesive_groups(t, attrs)
avg_attrs_per_table += len(attrs)
avg_groups_per_table += len(groups)
print("Table: " + str(t))
print("num groups: " + str(len(groups)))
for score, tokens in groups:
print("Score: " + str(score))
print(tokens)
print("#####")
avg_attrs_per_table = avg_attrs_per_table / total_tables
avg_groups_per_table = avg_groups_per_table / total_tables
print("Avg attr per table: " + str(avg_attrs_per_table))
print("Avg group per table: " + str(avg_groups_per_table))
if __name__ == "__main__":
path_to_serialized_model = "../models/massdata/"
test(path_to_serialized_model)
exit()
"""
# Load glove model
print("Loading glove model...")
glove_api.load_model("../glove/glove.6B.100d.txt")
print("Loading glove model...OK")
# For the rest of operations, raise all errors
np.seterr(all='raise')
table_vectors = generate_table_vectors(path_to_serialized_model)
print("Storing semantic vectors...")
store_signatures(table_vectors, "data/chemical/")
print("Storing semantic vectors...OK")
"""
values = ["test", "test1", "torpedo", "raiz", "agua", "water"]
print("MP: " + str(mersenne_prime))
for el in random_seeds:
print("SEED0: " + str(el[0]))
print("SEED1: " + str(el[1]))
mh = minhash(values)
print(mh)
exit()
semantic_vectors = load_signatures("data/chemical")
"""
tables_coh = []
for t, vecs in semantic_vectors.items():
coh = compute_internal_cohesion(vecs)
tables_coh.append((coh, t))
tables_coh = sorted(tables_coh, reverse=True)
for coh, t in tables_coh:
print(str(t) + " -> " + str(coh))
res = compute_semantic_similarity_table("Cambridge Home Page Featured Story_mfs6-yu9a.csv", semantic_vectors)
only_cross_average = []
only_max_average = []
only_min_average = []
only_median_average = []
for k, v in res.items():
print(str(k) + " - " + str(v))
only_cross_average.append((v[0], k))
only_max_average.append((v[1], k))
only_min_average.append((v[2], k))
only_median_average.append((v[3], k))
oca = sorted(only_cross_average, reverse=True)
omx = sorted(only_max_average, reverse=True)
omi = sorted(only_min_average, reverse=True)
oma = sorted(only_median_average, reverse=True)
print("Average")
for i in range(len(oca)):
print(oca[i])
print("")
print("")
print("")
print("")
print("Max")
for i in range(len(oca)):
print(oma[i])
"""
# New metrics
table = "parameter_type"
table_sim = compute_new_ss(table, semantic_vectors)
table_sim = sorted(table_sim.items(), key=operator.itemgetter(1), reverse=True)
for k, v in table_sim:
print(str(k) + " -> " + str(v))
|
import pytest
from tabulation import SNIa, Lifetimes, IMF
from scipy import integrate
imf = IMF("Kroupa", 0.1, 50)
lifetimes = Lifetimes("Raiteri_96")
number_sn_ia = 1.6E-3
@pytest.fixture
def sn_ia_old():
return SNIa("old ART", "Nomoto_18", lifetimes, imf,
exploding_fraction=0.015, min_mass=3, max_mass=8)
@pytest.fixture
def sn_ia_new():
return SNIa("ART power law", "Nomoto_18", lifetimes, imf,
number_sn_ia=number_sn_ia)
# hack stolen from github to use parametrize on fixtures
# https://github.com/pytest-dev/pytest/issues/349#issuecomment-189370273
@pytest.fixture(params=['sn_ia_old', 'sn_ia_new'])
def both_sn(request):
    return request.getfixturevalue(request.param)
def test_number_sn_old(sn_ia_old):
assert number_sn_ia / 10 < sn_ia_old.number_sn_Ia < number_sn_ia
def test_number_sn_new(sn_ia_new):
assert sn_ia_new.number_sn_Ia == number_sn_ia
def test_normalization_old(sn_ia_old):
# Want to integrate to infinity, but scipy runs into trouble doing that,
# so I'll just do a very long time with larger tolerance.
integral = integrate.quad(sn_ia_old.old_art_phi_per_dt, 0, 10 ** 13)[0]
assert pytest.approx(1, rel=0.05) == integral
def test_normalization_new(sn_ia_new):
    integral = integrate.quad(sn_ia_new.sn_dtd, 0, 13.79E9, args=(0.02,))[0]
assert pytest.approx(number_sn_ia, rel=0.05) == integral
def test_sn_ia_rate_turn_on(sn_ia_new):
z = 0.01
age_8 = lifetimes.lifetime(8.0, z)
assert sn_ia_new.sn_dtd(0, z) == 0
assert sn_ia_new.sn_dtd(age_8 / 2, z) == 0
assert sn_ia_new.sn_dtd(age_8 - 1, z) == 0
assert sn_ia_new.sn_dtd(age_8 + 1, z) != 0
def test_sn_ia_rate_new_late_times_no_z_change(sn_ia_new):
age = 1E9
rate_1 = sn_ia_new.sn_dtd(age, 0)
rate_2 = sn_ia_new.sn_dtd(age, 0.0001)
rate_3 = sn_ia_new.sn_dtd(age, 0.001)
rate_4 = sn_ia_new.sn_dtd(age, 0.02)
assert rate_1 == rate_2 == rate_3 == rate_4
def test_sn_ia_rate_new_late_times(sn_ia_new):
# plotted this and guessed at values
z = 0.02
assert 1E-12 < sn_ia_new.sn_dtd(1E8, z) < 1E11
assert 1E-13 < sn_ia_new.sn_dtd(1E9, z) < 1E12
assert 1E-14 < sn_ia_new.sn_dtd(1E9, z) < 1E13
def test_ejected_mass_error_checking(both_sn):
for z in [0.0001, 0.001, 0.01, 0.03]:
with pytest.raises(ValueError):
both_sn.ejected_mass("C", z)
def test_ejected_mass_correct(both_sn):
assert both_sn.ejected_mass("C", 0.02) == 4.75E-2 + 5.17E-8
assert both_sn.ejected_mass("N", 0.02) == 1.1E-5 + 5.46E-8
assert both_sn.ejected_mass("O", 0.02) == 5.0E-2 + 4.6E-6 + 1.43E-7
assert both_sn.ejected_mass("Fe", 0.02) == sum([0.131, 0.741, 2.7E-2,
6.24E-4, 1.21E-8])
assert both_sn.ejected_mass("C", 0.002) == 6.67E-2 + 1.28E-12
assert both_sn.ejected_mass("N", 0.002) == 7.83E-10 + 1.32E-8
assert both_sn.ejected_mass("O", 0.002) == 9.95E-2 + 1.32E-11 + 7.60E-13
assert both_sn.ejected_mass("Fe", 0.002) == sum([0.18, 0.683, 1.85E-2,
5.64E-4, 1.10E-8])
def test_ejected_mass_elts_not_present(both_sn):
assert both_sn.ejected_mass("H", 0.02) == 0
assert both_sn.ejected_mass("He", 0.02) == 0
assert both_sn.ejected_mass("H", 0.002) == 0
assert both_sn.ejected_mass("He", 0.002) == 0
def test_ejected_mass_total_metals(both_sn):
assert 1.2 < both_sn.ejected_mass("total_metals", 0.02) < 1.4
assert 1.2 < both_sn.ejected_mass("total_metals", 0.002) < 1.4
|
from output.models.ms_data.regex.bopomofo_xsd.bopomofo import Doc
__all__ = [
"Doc",
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, TextAreaField, \
HiddenField, RadioField, FileField, SubmitField, IntegerField
from wtforms.validators import DataRequired, Length, Email, Regexp
from . import models
class PostForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
category = StringField('Category')
tags = StringField('Tags')
abstract = TextAreaField('Abstract')
content = TextAreaField('Content')
status = HiddenField('status')
post_id = HiddenField('post_id')
class CommentForm(FlaskForm):
email = StringField('* Email', validators=[DataRequired(), Length(1, 128), Email()])
author = StringField('* Name', validators=[DataRequired(), Length(1, 200)])
body = TextAreaField('* Comment <small><span class="label label-info">markdown</span></small>',
validators=[DataRequired()])
comment_id = HiddenField('comment_id')
|
import six
import django
from django.db import models
from django.db.models import F, Max, Min
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext, ugettext_lazy as _
try:
from django.db.transaction import atomic
except ImportError:
from django.db.transaction import commit_on_success as atomic
try:
from django_comments.managers import CommentManager
from django_comments.models import Comment
except ImportError:
from django.contrib.comments.managers import CommentManager
from django.contrib.comments.models import Comment
from django_comments_xtd.conf import settings
LIKEDIT_FLAG = "I liked it"
DISLIKEDIT_FLAG = "I disliked it"
def max_thread_level_for_content_type(content_type):
app_model = "%s.%s" % (content_type.app_label, content_type.model)
if app_model in settings.COMMENTS_XTD_MAX_THREAD_LEVEL_BY_APP_MODEL:
return settings.COMMENTS_XTD_MAX_THREAD_LEVEL_BY_APP_MODEL[app_model]
else:
return settings.COMMENTS_XTD_MAX_THREAD_LEVEL
class MaxThreadLevelExceededException(Exception):
def __init__(self, content_type=None):
self.max_by_app = max_thread_level_for_content_type(content_type)
def __str__(self):
        return (ugettext("Can not post comments over the thread level "
                         "%(max_thread_level)s") %
                {"max_thread_level": self.max_by_app})
class XtdCommentManager(CommentManager):
if django.VERSION[:2] < (1, 6):
get_queryset = models.Manager.get_query_set
def for_app_models(self, *args):
"""Return XtdComments for pairs "app.model" given in args"""
content_types = []
for app_model in args:
app, model = app_model.split(".")
content_types.append(ContentType.objects.get(app_label=app,
model=model))
return self.for_content_types(content_types)
def for_content_types(self, content_types):
qs = self.get_queryset().filter(content_type__in=content_types)\
.reverse()
return qs
class XtdComment(Comment):
thread_id = models.IntegerField(default=0, db_index=True)
parent_id = models.IntegerField(default=0)
level = models.SmallIntegerField(default=0)
order = models.IntegerField(default=1, db_index=True)
followup = models.BooleanField(blank=True, default=False,
help_text=_("Receive by email further "
"comments in this conversation"))
objects = XtdCommentManager()
class Meta:
ordering = settings.COMMENTS_XTD_LIST_ORDER
def save(self, *args, **kwargs):
is_new = self.pk is None
super(Comment, self).save(*args, **kwargs)
if is_new:
if not self.parent_id:
self.parent_id = self.id
self.thread_id = self.id
else:
if max_thread_level_for_content_type(self.content_type):
with atomic():
self._calculate_thread_data()
else:
raise MaxThreadLevelExceededException(self.content_type)
kwargs["force_insert"] = False
super(Comment, self).save(*args, **kwargs)
def _calculate_thread_data(self):
# Implements the following approach:
# http://www.sqlteam.com/article/sql-for-threaded-discussion-forums
parent = XtdComment.objects.get(pk=self.parent_id)
if parent.level == max_thread_level_for_content_type(self.content_type):
raise MaxThreadLevelExceededException(self.content_type)
self.thread_id = parent.thread_id
self.level = parent.level + 1
qc_eq_thread = XtdComment.objects.filter(thread_id=parent.thread_id)
qc_ge_level = qc_eq_thread.filter(level__lte=parent.level,
order__gt=parent.order)
if qc_ge_level.count():
min_order = qc_ge_level.aggregate(Min('order'))['order__min']
XtdComment.objects.filter(thread_id=parent.thread_id,
order__gte=min_order)\
.update(order=F('order')+1)
self.order = min_order
else:
max_order = qc_eq_thread.aggregate(Max('order'))['order__max']
self.order = max_order + 1
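
    # Worked example of the ordering computed above (illustrative values): in a
    # thread whose root comment has (level=0, order=1) and whose only reply has
    # (level=1, order=2), a second reply to the root finds no rows with
    # level <= 0 and order > 1, so it falls into the else branch and is
    # appended with level=1 and order = max_order + 1 = 3, preserving a
    # depth-first ordering within the thread.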
@models.permalink
def get_reply_url(self):
return ("comments-xtd-reply", None, {"cid": self.pk})
def allow_thread(self):
if self.level < max_thread_level_for_content_type(self.content_type):
return True
else:
return False
@classmethod
def tree_from_queryset(cls, queryset, with_participants=False):
"""Converts a XtdComment queryset into a list of nested dictionaries.
The queryset has to be ordered by thread_id, order.
Each dictionary contains two attributes::
{
'comment': the comment object itself,
'children': [list of child comment dictionaries]
}
"""
def get_participants(comment):
return {'likedit': comment.users_who_liked_it(),
'dislikedit': comment.users_who_disliked_it()}
def add_children(children, obj):
for item in children:
if item['comment'].pk == obj.parent_id:
child_dict = {'comment': obj, 'children': []}
if with_participants:
child_dict.update(get_participants(obj))
item['children'].append(child_dict)
return True
elif item['children']:
if add_children(item['children'], obj):
return True
return False
dic_list = []
cur_dict = None
for obj in queryset:
# A new comment at the same level as thread_dict.
if cur_dict and obj.level == cur_dict['comment'].level:
dic_list.append(cur_dict)
cur_dict = None
if not cur_dict:
cur_dict = {'comment': obj, 'children': []}
if with_participants:
cur_dict.update(get_participants(obj))
continue
if obj.parent_id == cur_dict['comment'].pk:
child_dict = {'comment': obj, 'children': []}
if with_participants:
child_dict.update(get_participants(obj))
cur_dict['children'].append(child_dict)
else:
add_children(cur_dict['children'], obj)
if cur_dict:
dic_list.append(cur_dict)
return dic_list
def users_who_liked_it(self):
return [flag.user for flag in self.flags.filter(flag=LIKEDIT_FLAG)]
def users_who_disliked_it(self):
return [flag.user for flag in self.flags.filter(flag=DISLIKEDIT_FLAG)]
class DummyDefaultManager:
"""
Dummy Manager to mock django's CommentForm.check_for_duplicate method.
"""
def __getattr__(self, name):
return lambda *args, **kwargs: []
def using(self, *args, **kwargs):
return self
class TmpXtdComment(dict):
"""
Temporary XtdComment to be pickled, ziped and appended to a URL.
"""
_default_manager = DummyDefaultManager()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
return None
def __setattr__(self, key, value):
self[key] = value
def save(self, *args, **kwargs):
pass
def _get_pk_val(self):
if self.xtd_comment:
return self.xtd_comment._get_pk_val()
else:
return ""
def __reduce__(self):
return (TmpXtdComment, (), None, None, six.iteritems(self))
# ----------------------------------------------------------------------
class BlackListedDomain(models.Model):
"""
A blacklisted domain from which comments should be discarded.
Automatically populated with a small amount of spamming domains,
gathered from http://www.joewein.net/spam/blacklist.htm
You can download for free a recent version of the list, and subscribe
to get notified on changes. Changes can be fetched with rsync for a
small fee (check their conditions, or use any other Spam filter).
"""
domain = models.CharField(max_length=200, db_index=True)
def __str__(self):
return self.domain
class Meta:
ordering = ('domain',)
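
# Illustrative sketch (not part of the models): the nested structure returned
# by XtdComment.tree_from_queryset for a thread with a single reply, assuming
# the queryset is ordered by (thread_id, order) as its docstring requires:
#
#   [
#       {
#           "comment": <root XtdComment>,
#           "children": [
#               {"comment": <reply XtdComment>, "children": []},
#           ],
#       },
#   ]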
|
import json
import os
import sys
import unittest
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from hocr_parser import parser
if sys.version_info < (3, 0):
from io import open
class BaseTestClass(unittest.TestCase):
"""Super class for all test cases"""
@classmethod
def setup_class(cls):
"""
Sets up fixtures used during tests.
Creates a parser instance and saves it in cls.document.
Additionally, parses the hocr document again with BeautifulSoup and
saves the result in cls.soup so the parsed document can later be
checked against the original html.
"""
own_dir = os.path.dirname(os.path.abspath(__file__))
hocr_file = "output.tesseract.hocr"
hocr_path = os.path.join(own_dir, "data", hocr_file)
with open(hocr_path) as f:
hocr_data = f.read()
expected_file = hocr_file.rsplit(".", 1)[0] + ".expected.json"
expected_path = os.path.join(own_dir, "data", expected_file)
with open(expected_path, encoding="utf-8") as f:
expected_data = f.read()
cls.document = parser.HOCRParser(hocr_path, is_path=True)
cls.soup = BeautifulSoup(hocr_data, "html.parser")
cls.expected = json.loads(expected_data)
@staticmethod
def get_children_of_node(node):
def child_node_filter(node):
if isinstance(node, NavigableString):
return False
if not node.has_attr("id"):
return False
return True
return list(filter(child_node_filter, node.contents))
def recursively_compare_tree_against_html(self, func):
"""
Utility function for the common task of looping through the document
and html trees and comparing the obj and html nodes to each other.
Takes a comparator function as argument. Comparator functions receive
the following keyword arguments when they get called:
- obj: The current ocr object
- node: The current node in the html tree
Defines an inner function that takes obj, node, parent as arguments.
The inner function executes the comparator function with its input
arguments. Then it loops through the children, calling itself
with the child nodes as arguments.
The inner function is invoked with the root nodes.
:param func: A function object. Comparator function that gets called
for each element on each level. The comparator function
receives the three previous arguments as keyword arguments
on invocation
"""
def inner(obj, node):
# invoke comparator function
func(obj=obj, node=node)
# filter
child_nodes = self.get_children_of_node(node)
# same number of object children and html child nodes
self.assertEqual(len(obj.children), len(child_nodes))
# loop over children and call recursive compare on them
for (child_obj, child_node) in zip(obj.children, child_nodes):
inner(obj=child_obj, node=child_node)
# call inner() with root elements
inner(obj=self.document.root, node=self.soup.body)
class TreeStructureTests(BaseTestClass):
def test_equivalency(self):
"""
test_equivalency (test_hocr.TreeStructureTests)
Recursively compares an obj against the html node and checks different
aspects to see if the generated object and the html node are
equivalent, i.e. the object was generated from this node and all
information was parsed correctly.
Tests:
- same id
- same html
- parents have same id
- same number of children
- children have same ids
"""
def compare_func(obj, node):
# same id
self.assertEqual(obj.id, node.get("id"))
# same html
self.assertEqual(obj.html.prettify, node.prettify)
# parents have same id (only for non-root elements)
if not obj == self.document.root:
self.assertEqual(obj.parent.id, node.parent.get("id"))
# same number of children
child_nodes = self.get_children_of_node(node)
self.assertEqual(len(obj.children), len(child_nodes))
# children have same ids
for (child_obj, child_node) in zip(obj.children, child_nodes):
self.assertEqual(child_obj.id, child_node.get("id"))
self.recursively_compare_tree_against_html(compare_func)
def test_parent_link(self):
"""
test_parent_link (test_hocr.TreeStructureTests)
Recursively compares the parent node of the current obj
to the parent element of the html node.
Tests for parent-child link
The parent object in obj.parent must contain obj in its
children list.
"""
def compare_func(obj, node):
# no need to test for parents on root level of the tree
if obj == self.document.root:
return
# parent-child link. obj must be in obj.parent.children
self.assertTrue(obj in obj.parent.children)
self.recursively_compare_tree_against_html(compare_func)
def test_child_link(self):
"""
test_child_link (test_hocr.TreeStructureTests)
Recursively compares the child elements of an object against the
child nodes of the corresponding html node.
Tests for parent-child link
Child objects must have obj as their parent
"""
def compare_func(obj, node):
child_nodes = self.get_children_of_node(node)
for (child_obj, child_node) in zip(obj.children, child_nodes):
# parent-child link (children must have obj as their parent)
self.assertEqual(child_obj.parent, obj)
self.recursively_compare_tree_against_html(compare_func)
class HOCRParserTests(BaseTestClass):
def test_parsing(self):
# Strings next to other siblings shouldn't be parsed as nodes.
html = BeautifulSoup("""
<div id='node'>
I am noise. Have some newlines.
\n\n
<p id='child'>I am content</p>
</div>
""", "html.parser")
node = parser.HOCRNode(html.div)
self.assertEqual(len(node.children), 1)
self.assertEqual(node.ocr_text, "I am content")
# Strings inside tags should be parsed as ocr_text but not as children
html = BeautifulSoup("""
<div id='node'>I am not noise</div>
""", "html.parser")
node = parser.HOCRNode(html.div)
self.assertEqual(len(node.children), 0)
self.assertEqual(node.ocr_text, "I am not noise")
# tags without id should not be parsed
html = BeautifulSoup("""
<div id='node'>
<p>I don't have an id</p>
<p id='child'>I have an id</p>
</div>
""", "html.parser")
node = parser.HOCRNode(html.div)
self.assertEqual(len(node.children), 1)
self.assertEqual(node.children[0].ocr_text, "I have an id")
def test_consistency(self):
"""
test_consistency (test_ocr.HOCRParserTests)
- number of children must be consistent
obj.nchildren == len(obj._children)
== len(obj.children)
- obj.html equals node.prettify()
- coordinates
obj.__coordinates == obj.coordinates == expected_coordinates
"""
def compare_func(obj, node):
# number of children must be consistent
self.assertEqual(
len(obj.children),
len(obj._children)
)
# obj.html equals node
self.assertEqual(obj._html, node)
# coordinates
            self.assertEqual(obj._coordinates, obj.coordinates)
            self.assertEqual(
                obj.coordinates,
                tuple(self.expected["coordinates"][obj.id or "document"])
            )
# confidence
self.assertAlmostEqual(
obj.confidence,
self.expected["confidence"][obj.id or "document"]
)
self.recursively_compare_tree_against_html(compare_func)
def test_ocr_text(self):
expected_text = self.expected["ocr_text"]
def compare_func(obj, node):
if obj == self.document.root:
expected = expected_text["document"]
else:
expected = expected_text[obj.id]
self.assertEqual(obj.ocr_text, expected)
self.recursively_compare_tree_against_html(compare_func)
def test_page_coordinates(self):
expected_coordinates = self.expected["coordinates"]
def compare_func(obj, node):
if obj == self.document.root:
expected = expected_coordinates["document"]
else:
expected = expected_coordinates[obj.id]
self.assertEqual(obj.coordinates, tuple(expected))
self.recursively_compare_tree_against_html(compare_func)
def test_creation_method_equality(self):
doc1 = self.document
doc2 = parser.HOCRParser(self.soup.prettify(), is_path=False)
self.assertEqual(doc1.ocr_text, doc2.ocr_text)
|
from django.apps import AppConfig
class WallpaperConfig(AppConfig):
name = 'wallpaper'
|
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
self.m=m=matrix
self.maxi=len(m)
self.maxj=len(m[0])
self.BIT=[[0 for _ in range(self.maxj+1)] for _2 in range(self.maxi+1)]
self.temp=[[0 for _ in range(self.maxj+1)] for _2 in range(self.maxi+1)]
for i in range(1, self.maxi+1):
for j,num in enumerate(m[i-1]):
k=j+1
while k<=self.maxj:
self.temp[i][k]+=num
                    k += k & -k  # advance index by its lowest set bit (Fenwick step)
for j in range(1,self.maxj+1):
for i in range(1,self.maxi+1):
k=i
while k<=self.maxi:
self.BIT[k][j]+=self.temp[i][j]
k+=k&-k
# print self.temp
# print self.BIT
def update(self, row, col, val):
"""
update the element at matrix[row,col] to val.
:type row: int
:type col: int
:type val: int
:rtype: void
"""
val_change=val-self.m[row][col]
self.m[row][col]=val
i=row+1
while i<=self.maxi:
j=col+1
while j<=self.maxj:
self.BIT[i][j]+=val_change
j+=j&-j
i+=i&-i
# print self.BIT
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sumRect(row2, col2)+self.sumRect(row1-1, col1-1)-self.sumRect(row1-1, col2)-self.sumRect(row2, col1-1)
def sumRect(self, row, col):
i,j,res=row+1,col+1,0
while i>0:
j=col+1
while j>0:
res+=self.BIT[i][j]
j-=j&-j
i-=i&-i
return res
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.update(1, 1, 10)
# numMatrix.sumRegion(1, 2, 3, 4)
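#
# Worked example (illustrative): for matrix = [[1, 2], [3, 4]],
#   NumMatrix(matrix).sumRegion(0, 0, 1, 1) returns 10   # 1 + 2 + 3 + 4
# and after update(0, 0, 5) the same region sums to 14   # 5 + 2 + 3 + 4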
|
# Copyright 2015 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
from oslo_versionedobjects import fields
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder.objects import base
from cinder.objects import fields as c_fields
from cinder import utils
LOG = logging.getLogger(__name__)
@base.CinderObjectRegistry.register
class Service(base.CinderPersistentObject, base.CinderObject,
base.CinderObjectDictCompat, base.CinderComparableObject,
base.ClusteredObject):
# Version 1.0: Initial version
# Version 1.1: Add rpc_current_version and object_current_version fields
# Version 1.2: Add get_minimum_rpc_version() and get_minimum_obj_version()
# Version 1.3: Add replication fields
# Version 1.4: Add cluster fields
# Version 1.5: Add UUID field
# Version 1.6: Modify UUID field to be not nullable
VERSION = '1.6'
OPTIONAL_FIELDS = ('cluster',)
fields = {
'id': fields.IntegerField(),
'host': fields.StringField(nullable=True),
'binary': fields.StringField(nullable=True),
'cluster_name': fields.StringField(nullable=True),
'cluster': fields.ObjectField('Cluster', nullable=True,
read_only=True),
'topic': fields.StringField(nullable=True),
'report_count': fields.IntegerField(default=0),
'disabled': fields.BooleanField(default=False, nullable=True),
'availability_zone': fields.StringField(nullable=True,
default='cinder'),
'disabled_reason': fields.StringField(nullable=True),
'modified_at': fields.DateTimeField(nullable=True),
'rpc_current_version': fields.StringField(nullable=True),
'object_current_version': fields.StringField(nullable=True),
# Replication properties
'replication_status': c_fields.ReplicationStatusField(nullable=True),
'frozen': fields.BooleanField(default=False),
'active_backend_id': fields.StringField(nullable=True),
'uuid': fields.StringField(),
}
def obj_make_compatible(self, primitive, target_version):
"""Make a service representation compatible with a target version."""
# Convert all related objects
super(Service, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
# Before v1.4 we didn't have cluster fields so we have to remove them.
if target_version < (1, 4):
for obj_field in ('cluster', 'cluster_name'):
primitive.pop(obj_field, None)
if target_version < (1, 5) and 'uuid' in primitive:
del primitive['uuid']
@staticmethod
def _from_db_object(context, service, db_service, expected_attrs=None):
expected_attrs = expected_attrs or []
for name, field in service.fields.items():
if ((name == 'uuid' and not db_service.get(name)) or
name in service.OPTIONAL_FIELDS):
continue
value = db_service.get(name)
if isinstance(field, fields.IntegerField):
value = value or 0
elif isinstance(field, fields.DateTimeField):
value = value or None
service[name] = value
service._context = context
if 'cluster' in expected_attrs:
db_cluster = db_service.get('cluster')
# If this service doesn't belong to a cluster the cluster field in
# the ORM instance will have value of None.
if db_cluster:
service.cluster = objects.Cluster(context)
objects.Cluster._from_db_object(context, service.cluster,
db_cluster)
else:
service.cluster = None
service.obj_reset_changes()
return service
def obj_load_attr(self, attrname):
if attrname not in self.OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
# NOTE(geguileo): We only have 1 optional field, so we don't need to
# confirm that we are loading the cluster.
# If this service doesn't belong to a cluster (cluster_name is empty),
# then cluster field will be None.
if self.cluster_name:
self.cluster = objects.Cluster.get_by_id(self._context, None,
name=self.cluster_name)
else:
self.cluster = None
self.obj_reset_changes(fields=(attrname,))
@classmethod
def get_by_host_and_topic(cls, context, host, topic, disabled=False):
db_service = db.service_get(context, disabled=disabled, host=host,
topic=topic)
return cls._from_db_object(context, cls(context), db_service)
@classmethod
def get_by_args(cls, context, host, binary_key):
db_service = db.service_get(context, host=host, binary=binary_key)
return cls._from_db_object(context, cls(context), db_service)
@classmethod
def get_by_uuid(cls, context, service_uuid):
db_service = db.service_get_by_uuid(context, service_uuid)
return cls._from_db_object(context, cls(), db_service)
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason=_('already created'))
updates = self.cinder_obj_get_changes()
if 'cluster' in updates:
raise exception.ObjectActionError(
action='create', reason=_('cluster assigned'))
if 'uuid' not in updates:
updates['uuid'] = uuidutils.generate_uuid()
self.uuid = updates['uuid']
db_service = db.service_create(self._context, updates)
self._from_db_object(self._context, self, db_service)
def save(self):
updates = self.cinder_obj_get_changes()
if 'cluster' in updates:
raise exception.ObjectActionError(
action='save', reason=_('cluster changed'))
if updates:
db.service_update(self._context, self.id, updates)
self.obj_reset_changes()
def destroy(self):
with self.obj_as_admin():
updated_values = db.service_destroy(self._context, self.id)
self.update(updated_values)
self.obj_reset_changes(updated_values.keys())
@classmethod
def _get_minimum_version(cls, attribute, context, binary):
services = ServiceList.get_all_by_binary(context, binary)
min_ver = None
min_ver_str = None
for s in services:
ver_str = getattr(s, attribute)
if ver_str is None:
# NOTE(dulek) None in *_current_version means that this
# service is in Liberty version, which we now don't provide
# backward compatibility to.
msg = _('Service %s is in Liberty version. We do not provide '
'backward compatibility with Liberty now, so you '
'need to upgrade it, release by release if live '
'upgrade is required. After upgrade you may need to '
'remove any stale service records via '
'"cinder-manage service remove".') % s.binary
raise exception.ServiceTooOld(msg)
ver = versionutils.convert_version_to_int(ver_str)
if min_ver is None or ver < min_ver:
min_ver = ver
min_ver_str = ver_str
return min_ver_str
@classmethod
def get_minimum_rpc_version(cls, context, binary):
return cls._get_minimum_version('rpc_current_version', context, binary)
@classmethod
def get_minimum_obj_version(cls, context, binary=None):
return cls._get_minimum_version('object_current_version', context,
binary)
@property
def is_up(self):
"""Check whether a service is up based on last heartbeat."""
return (self.updated_at and
self.updated_at >= utils.service_expired_time(True))
@base.CinderObjectRegistry.register
class ServiceList(base.ObjectListBase, base.CinderObject):
# Version 1.0: Initial version
# Version 1.1: Service object 1.2
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('Service'),
}
@classmethod
def get_all(cls, context, filters=None):
services = db.service_get_all(context, **(filters or {}))
return base.obj_make_list(context, cls(context), objects.Service,
services)
@classmethod
def get_all_by_topic(cls, context, topic, disabled=None):
services = db.service_get_all(context, topic=topic, disabled=disabled)
return base.obj_make_list(context, cls(context), objects.Service,
services)
@classmethod
def get_all_by_binary(cls, context, binary, disabled=None):
services = db.service_get_all(context, binary=binary,
disabled=disabled)
return base.obj_make_list(context, cls(context), objects.Service,
services)
|
import pygplates
import numpy as np
import geopandas as gpd
from shapely.geometry import LineString, Polygon
from .sphere import healpix_mesh
import os, tempfile
def create_gpml_crustal_thickness(longitude_array,latitude_array,thickness,filename=None):
multi_point = pygplates.MultiPointOnSphere(zip(latitude_array,longitude_array))
scalar_coverages = {
pygplates.ScalarType.create_gpml('CrustalThickness'): thickness}
ct_feature = pygplates.Feature()
ct_feature.set_geometry((multi_point,scalar_coverages))
ct_feature.set_name('Crustal Thickness')
output_feature_collection = pygplates.FeatureCollection(ct_feature)
if filename is not None:
output_feature_collection.write(filename)
else:
return output_feature_collection
def create_gpml_velocity_feature(longitude_array,latitude_array,filename=None,feature_type=None):
    # function to make velocity mesh nodes at an arbitrary set of points defined in Lat/Long.
    # Long and Lat are assumed to be 1d arrays.
multi_point = pygplates.MultiPointOnSphere(zip(latitude_array,longitude_array))
# Create a feature containing the multipoint feature.
# optionally, define as 'MeshNode' type, so that GPlates will recognise it as a velocity layer
if feature_type=='MeshNode':
meshnode_feature = pygplates.Feature(pygplates.FeatureType.create_from_qualified_string('gpml:MeshNode'))
meshnode_feature.set_name('Velocity Mesh Nodes')
else:
meshnode_feature = pygplates.Feature()
meshnode_feature.set_name('Multipoint Feature')
meshnode_feature.set_geometry(multi_point)
output_feature_collection = pygplates.FeatureCollection(meshnode_feature)
if filename is not None:
output_feature_collection.write(filename)
else:
return output_feature_collection
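# Example (illustrative sketch, not part of the original module; the file name below is
# an arbitrary placeholder): build a small velocity mesh from two 1-d arrays and write
# it out as a 'MeshNode' feature so GPlates recognises it as a velocity layer.
#
#     lons = np.array([0., 10., 20.])
#     lats = np.array([-5., 0., 5.])
#     create_gpml_velocity_feature(lons, lats, filename='velocity_mesh.gpml',
#                                  feature_type='MeshNode')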
def create_gpml_healpix_mesh(nSide,filename=None,feature_type=None):
    # call the function to create a healpix array
    longitude_array,latitude_array = healpix_mesh(nSide)
    # call the function to create a multipoint feature, with user-defined type.
    # create_gpml_velocity_feature already writes the file when a filename is given
    # (and returns None in that case), so its result is passed straight through.
    return create_gpml_velocity_feature(longitude_array,latitude_array,filename,feature_type)
def create_gpml_regular_long_lat_mesh(Sampling=1,filename=None,feature_type=None):
    # create a regular long/lat grid of points at the requested sampling (in degrees)
    longitude_array,latitude_array = np.meshgrid(np.arange(-180.,180.001,Sampling),np.arange(-90.,90.001,Sampling))
    longitude_array = longitude_array.flatten()
    latitude_array = latitude_array.flatten()
    # call the function to create a multipoint feature, with user-defined type.
    # As above, the file (if any) is written inside create_gpml_velocity_feature.
    return create_gpml_velocity_feature(longitude_array,latitude_array,filename,feature_type)
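# Example (illustrative sketch, not part of the original module): generate a 1-degree
# regular mesh and keep it in memory as a FeatureCollection by omitting the filename.
#
#     mesh_fc = create_gpml_regular_long_lat_mesh(Sampling=1, feature_type='MeshNode')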
def geometries_to_geodataframe(geometries, geometry_type='polygon'):
gdf = gpd.GeoDataFrame()
gdf['geometry'] = None
for i,geometry in enumerate(geometries):
if geometry_type in ['PolyLine','Polyline']:
poly = LineString([tuple(coord) for coord in np.fliplr(geometry)])
else:
poly = Polygon([tuple(coord) for coord in np.fliplr(geometry)])
gdf.loc[i, 'geometry'] = poly
return gdf
def geodataframe_to_geometries(gdf):
    # from a geopandas geodataframe, return a list of geometries, each as a list of (lat, lon) coordinate tuples
geometry_list = []
gdf = gdf.explode()
for i,row in gdf.iterrows():
geometry_list.append([(lat,lon) for lat,lon in zip(row.geometry.xy[1], row.geometry.xy[0])])
return geometry_list
def gdf2gpml(gdf):
"""
Given a geopandas geodataframe, returns a gplates feature collection
"""
temporary_file = tempfile.NamedTemporaryFile(delete=True, suffix='.geojson')
temporary_file.close()
gdf.to_file(temporary_file.name, driver='GeoJSON')
feature_collection = pygplates.FeatureCollection(temporary_file.name)
os.unlink(temporary_file.name)
return feature_collection
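# Example (illustrative sketch, not part of the original module; 'coastlines.shp' is a
# placeholder path): round-trip a geopandas layer into a pygplates FeatureCollection via
# the temporary GeoJSON file created above.
#
#     gdf = gpd.read_file('coastlines.shp')
#     fc = gdf2gpml(gdf)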
def gpml2gdf(features):
"""
Given a gplates feature collection, or a list of features, or a list
of reconstructed features, returns a geopandas geodataframe containing
the same features
"""
if isinstance(features, pygplates.FeatureCollection):
pass
elif isinstance(features, list):
if isinstance(features[0], pygplates.Feature):
features = pygplates.FeatureCollection(features)
elif isinstance(features[0], pygplates.ReconstructedFeatureGeometry):
features = _reconstructed_features_to_features(features)
features = pygplates.FeatureCollection(features)
else:
raise TypeError('Unexpected list item of type {:s} for gpml2gdf input'.format(type(features[0])))
else:
raise TypeError('Unexpected type {:s} for gpml2gdf input'.format(type(features)))
temporary_file = tempfile.NamedTemporaryFile(delete=True, suffix='.geojson')
temporary_file.close()
features.write(temporary_file.name)
gdf = gpd.read_file(temporary_file.name)
gdf['NAME'] = gdf['NAME'].astype(str)
os.unlink(temporary_file.name)
return gdf
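# Example (illustrative sketch, not part of the original module; the feature file,
# rotation file and reconstruction time are placeholders): reconstructed feature
# geometries can also be passed in, since they are first converted back to plain features.
#
#     reconstructed = []
#     pygplates.reconstruct('features.gpml', 'rotations.rot', reconstructed, 50.)
#     gdf_50Ma = gpml2gdf(reconstructed)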
def _reconstructed_features_to_features(reconstructed_features):
features = []
for feature in reconstructed_features:
f = feature.get_feature().clone()
f.set_geometry(feature.get_reconstructed_geometry())
features.append(f)
return features
|
from django.urls import path
from homework.views import presentation_views as views
urlpatterns = [
path("", views.get_presentations, name="presentations"),
path("add/", views.add_presentation, name="presentation-add"),
path("upload/", views.upload_image, name="image-upload"),
path("<str:pk>/update/", views.update_presentation, name="presentation-update"),
path("<str:pk>/delete/", views.delete_presentation, name="presentation-delete"),
]
|
import torch
from torch.utils._pytree import tree_map
from functools import partial
from torch.fx.operator_schemas import normalize_function
from torch.utils._mode_utils import no_dispatch
from torch._subclasses.meta_utils import MetaConverter
from typing import Union
from torch._ops import OpOverload
from torch.utils._python_dispatch import TorchDispatchMode
import functools
aten = torch.ops.aten
_device_not_kwarg_ops = (
aten._resize_output_.default,
aten.nested_tensor.default,
aten.pin_memory.default,
aten.is_pinned.default,
aten.to.device,
aten.to.prim_Device,
aten._pin_memory.default,
aten._resize_output.functional,
aten._resize_output.out,
)
# this op is never actually used
_non_kwarg_device_constructors = (torch.ops.aten._list_to_tensor,)
def contains_tensor_types(type):
tensor_type = torch._C.TensorType.get()
return type.isSubtypeOf(tensor_type) or any(
contains_tensor_types(e) for e in type.containedTypes()
)
@functools.lru_cache(None)
def _is_tensor_constructor(func: OpOverload):
assert isinstance(func, OpOverload)
schema = func._schema
if any(contains_tensor_types(arg.type) for arg in schema.arguments):
return False
# TODO: no real reason to restrict multiple outputs
return (
len(schema.returns) == 1 and schema.returns[0].type is torch._C.TensorType.get()
)
# Similar to `MetaConverter`, this is a class for converting
# multiple tensors into fake tensors which share the same view/storage
# structure. Like `MetaConverter`, it will keep alive all
# tensors that are converted to FakeTensors.
class FakeTensorConverter(MetaConverter):
def __init__(self):
self.tensor_memo = {}
self.meta_converter = MetaConverter()
def from_real_tensor(self, t):
existing_device = t.device
self.tensor_memo[t] = FakeTensor(self.meta_converter(t), existing_device)
return self.tensor_memo[t]
def from_meta_and_device(self, t, device):
if t in self.tensor_memo:
return self.tensor_memo[t]
self.tensor_memo[t] = FakeTensor(t, device)
return self.tensor_memo[t]
def __call__(self, t, device=None):
assert t.device.type != 'meta' or device is not None
if t in self.tensor_memo:
return self.tensor_memo[t]
elif t.device.type != 'meta':
return self.from_real_tensor(t)
else:
return self.from_meta_and_device(t, device)
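# Illustrative sketch (not part of the original file): the converter memoizes its inputs,
# so converting the same real tensor twice yields the same FakeTensor, and the FakeTensor
# remembers the device the real tensor lived on.
#
#     converter = FakeTensorConverter()
#     x = torch.rand(2, 2, device="cpu")
#     fake_x = converter(x)
#     assert converter(x) is fake_x
#     assert fake_x.fake_device == torch.device("cpu")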
# Meta tensors give you the ability to run PyTorch code without having to
# actually do computation through tensors allocated on a `meta` device.
# Because the device is `meta`, meta tensors do not model device propagation.
# FakeTensor extends MetaTensors to also carry an additional `fake_device`
# which tracks devices that would have been used.
def torch_dispatch_impl(cls_or_mode_instance, func, types, args, kwargs, run_function):
kwargs = kwargs if kwargs else {}
in_fake_mode = isinstance(cls_or_mode_instance, FakeTensorMode)
converter = cls_or_mode_instance.fake_tensor_converter if in_fake_mode else FakeTensorConverter()
    # This class virtualizes .device() calls; we need to short-circuit them here
    # instead of dispatching device again, or we would recurse forever
if func == torch.ops.prim.device.default:
assert len(args) == 1 and isinstance(args[0], FakeTensor)
return args[0].fake_device
def wrap(e, device=None):
if isinstance(e, torch.Tensor) and not isinstance(e, FakeTensor):
return converter(e, device)
else:
return e
    # if we are in the dispatch mode, we will enter this function even if the inputs
    # are not FakeTensors. For now, throw if there are any non-FakeTensor inputs
    # and just support constructors. TODO: extend more broadly
if isinstance(cls_or_mode_instance, FakeTensorMode):
conversion_made = False
def check_non_fake_tensor(x):
nonlocal conversion_made
conversion_made = conversion_made or (isinstance(x, torch.Tensor) and not isinstance(x, FakeTensor))
tree_map(check_non_fake_tensor, args)
tree_map(check_non_fake_tensor, kwargs)
if conversion_made:
raise Exception(
"Invoking operators with non-Fake Tensor inputs in FakeTensorMode is not yet supported. "
f"Please convert all Tensors to FakeTensors first. Found in {func}"
)
# _to_copy fails when run with FakeTensors to cuda device
# TODO: debug
if func == torch.ops.aten._to_copy.default:
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
out_device = new_kwargs.pop("device", new_kwargs["input"].device)
with no_dispatch():
input = new_kwargs.pop("input").to("meta")
return FakeTensor(
torch.ops.aten._to_copy(input, **new_kwargs), out_device
)
if _is_tensor_constructor(func):
assert func not in _non_kwarg_device_constructors
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# cpu is default device if none is specified
out_device = new_kwargs.pop("device", torch.device("cpu"))
new_kwargs["device"] = torch.device("meta")
r = run_function(func, types, (), new_kwargs)
return FakeTensor(r, out_device)
r = run_function(func, types, args, kwargs)
# TODO: handle non-kwarg devices
assert func not in _device_not_kwarg_ops, f"NYI: {func}"
# if device is specified, use that
if kwargs.get("device", None):
return tree_map(partial(wrap, device=kwargs["device"]), r)
# operators which copy size from another tensor do not
# also take device from the size tensor
# other size_as operators are not builtin operators
if func == aten.resize_as_.default:
_, new_kwargs = normalize_function(
func, args=args, kwargs=kwargs, normalize_to_only_use_kwargs=True
)
# device of the input is returned
return tree_map(partial(wrap, device=new_kwargs["input"].device), r)
common_device = FakeTensor._find_common_device(func, args, kwargs)
return tree_map(partial(wrap, device=common_device), r)
class FakeTensor(torch.Tensor):
fake_device: torch.device
@staticmethod
def __new__(cls, elem, device):
return torch.Tensor._make_subclass(
cls, elem, elem.requires_grad, dispatch_device=True
)
def __init__(self, elem, device: Union[torch.device, str]):
# elem does not need to be recorded, because FakeTensor *is a* elem
assert elem.device.type == "meta"
device = device if isinstance(device, torch.device) else torch.device(device)
assert device.type != "meta"
self.fake_device = device
@staticmethod
def from_tensor(t):
existing_device = t.device
return FakeTensor(t.to(device="meta"), existing_device)
# TODO: resolve error in default __repr__
def __repr__(self):
return f"FakeTensor({self.fake_device})"
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def run_fn(func, types, args, kwargs):
return torch.Tensor.__torch_dispatch__(func, types, args, kwargs)
return torch_dispatch_impl(cls, func, types, args, kwargs, run_fn)
@staticmethod
def _find_common_device(func, args, kwargs):
        # cpu zero-dim tensors can be used in cuda kernels,
        # so overwrite the common_device if the only existing
        # device comes from a cpu zero-dim tensor
common_device = None
is_cpu_zero_dim = None
def cpu_zero_dim(t):
return t.device.type == "cpu" and t.dim() == 0
def merge_devices(t):
nonlocal common_device
nonlocal is_cpu_zero_dim
if not isinstance(t, FakeTensor):
return
if common_device is None:
common_device = t.device
is_cpu_zero_dim = cpu_zero_dim(t)
return
t_is_cpu_zero_dim = cpu_zero_dim(t)
if t.device == common_device:
if is_cpu_zero_dim:
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices !
# if current tensor is cpu 0 dim, defer to existing device
if t_is_cpu_zero_dim:
return
# current device is from cpu 0 dim tensor, overwrite
if is_cpu_zero_dim:
common_device = t.device
is_cpu_zero_dim = t_is_cpu_zero_dim
return
# mismatching devices of non-zero dim tensors, throw
# This might be valid behavior and need to be explicitly modeled, e.g. reshape_as
raise Exception(
f"Unhandled FakeTensor Device Propagation for {func}, found two different devices {common_device}, {t.device}"
)
tree_map(merge_devices, args)
tree_map(merge_devices, kwargs)
assert common_device is not None, f"Could not find common device for {func}"
return common_device
__torch_function__ = torch._C._disabled_torch_function_impl
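# Illustrative sketch (not part of the original file): FakeTensor.from_tensor moves the
# data to the `meta` device while recording the original device, so metadata such as
# shape and dtype is preserved without allocating real storage.
#
#     t = torch.ones(3, dtype=torch.float32)
#     f = FakeTensor.from_tensor(t)
#     assert f.shape == t.shape and f.dtype == t.dtype
#     assert f.fake_device == torch.device("cpu")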
# We keep one instantiation of `fake_tensor_converter` active
# for the duration of `with torch_enable_mode(FakeTensorMode)`.
# This allows accurate storage aliasing across invocations of
# different operators. While this will keep all freshly allocated
# tensors alive during `FakeTensorMode`, there will be no
# new allocations of Tensors which have non-meta storage, so
# memory should not significantly increase.
class FakeTensorMode(TorchDispatchMode):
def __init__(self):
self.fake_tensor_converter = FakeTensorConverter()
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
def run_fn(func, types, args, kwargs):
return func(*args, **kwargs)
return torch_dispatch_impl(self, func, types, args, kwargs, run_fn)
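# Illustrative usage sketch (assumption: the exact helper for activating a
# TorchDispatchMode has changed across PyTorch versions; using FakeTensorMode as a
# context manager is one way it can be enabled). Inside the mode, tensor constructors
# return FakeTensors that report the requested device without real allocations.
#
#     with FakeTensorMode():
#         x = torch.empty(2, 2, device="cpu")
#         assert isinstance(x, FakeTensor)
#         assert x.fake_device == torch.device("cpu")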
|
# Generated from grammar/Java8Parser.g4 by ANTLR 4.7.1
from antlr4 import *
if __name__ is not None and "." in __name__:
from .Java8Parser import Java8Parser
else:
from gras.file_dependency.java.grammar_v8.Java8Parser import Java8Parser
# This class defines a complete generic visitor for a parse tree produced by Java8Parser.
class Java8ParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by Java8Parser#literal.
def visitLiteral(self, ctx: Java8Parser.LiteralContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primitiveType.
def visitPrimitiveType(self, ctx: Java8Parser.PrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#numericType.
def visitNumericType(self, ctx: Java8Parser.NumericTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#integralType.
def visitIntegralType(self, ctx: Java8Parser.IntegralTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#floatingPointType.
def visitFloatingPointType(self, ctx: Java8Parser.FloatingPointTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#referenceType.
def visitReferenceType(self, ctx: Java8Parser.ReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classOrInterfaceType.
def visitClassOrInterfaceType(self, ctx: Java8Parser.ClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType.
def visitClassType(self, ctx: Java8Parser.ClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType_lf_classOrInterfaceType.
def visitClassType_lf_classOrInterfaceType(self, ctx: Java8Parser.ClassType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classType_lfno_classOrInterfaceType.
def visitClassType_lfno_classOrInterfaceType(self, ctx: Java8Parser.ClassType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType.
def visitInterfaceType(self, ctx: Java8Parser.InterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType_lf_classOrInterfaceType.
def visitInterfaceType_lf_classOrInterfaceType(self, ctx: Java8Parser.InterfaceType_lf_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceType_lfno_classOrInterfaceType.
def visitInterfaceType_lfno_classOrInterfaceType(self,
ctx: Java8Parser.InterfaceType_lfno_classOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeVariable.
def visitTypeVariable(self, ctx: Java8Parser.TypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayType.
def visitArrayType(self, ctx: Java8Parser.ArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#dims.
def visitDims(self, ctx: Java8Parser.DimsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameter.
def visitTypeParameter(self, ctx: Java8Parser.TypeParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameterModifier.
def visitTypeParameterModifier(self, ctx: Java8Parser.TypeParameterModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeBound.
def visitTypeBound(self, ctx: Java8Parser.TypeBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#additionalBound.
def visitAdditionalBound(self, ctx: Java8Parser.AdditionalBoundContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArguments.
def visitTypeArguments(self, ctx: Java8Parser.TypeArgumentsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArgumentList.
def visitTypeArgumentList(self, ctx: Java8Parser.TypeArgumentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArgument.
def visitTypeArgument(self, ctx: Java8Parser.TypeArgumentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#wildcard.
def visitWildcard(self, ctx: Java8Parser.WildcardContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#wildcardBounds.
def visitWildcardBounds(self, ctx: Java8Parser.WildcardBoundsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageName.
def visitPackageName(self, ctx: Java8Parser.PackageNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeName.
def visitTypeName(self, ctx: Java8Parser.TypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageOrTypeName.
def visitPackageOrTypeName(self, ctx: Java8Parser.PackageOrTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#expressionName.
def visitExpressionName(self, ctx: Java8Parser.ExpressionNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodName.
def visitMethodName(self, ctx: Java8Parser.MethodNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#ambiguousName.
def visitAmbiguousName(self, ctx: Java8Parser.AmbiguousNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#compilationUnit.
def visitCompilationUnit(self, ctx: Java8Parser.CompilationUnitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageDeclaration.
def visitPackageDeclaration(self, ctx: Java8Parser.PackageDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#packageModifier.
def visitPackageModifier(self, ctx: Java8Parser.PackageModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#importDeclaration.
def visitImportDeclaration(self, ctx: Java8Parser.ImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#singleTypeImportDeclaration.
def visitSingleTypeImportDeclaration(self, ctx: Java8Parser.SingleTypeImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeImportOnDemandDeclaration.
def visitTypeImportOnDemandDeclaration(self, ctx: Java8Parser.TypeImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#singleStaticImportDeclaration.
def visitSingleStaticImportDeclaration(self, ctx: Java8Parser.SingleStaticImportDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#staticImportOnDemandDeclaration.
def visitStaticImportOnDemandDeclaration(self, ctx: Java8Parser.StaticImportOnDemandDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeDeclaration.
def visitTypeDeclaration(self, ctx: Java8Parser.TypeDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classDeclaration.
def visitClassDeclaration(self, ctx: Java8Parser.ClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#normalClassDeclaration.
def visitNormalClassDeclaration(self, ctx: Java8Parser.NormalClassDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classModifier.
def visitClassModifier(self, ctx: Java8Parser.ClassModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameters.
def visitTypeParameters(self, ctx: Java8Parser.TypeParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeParameterList.
def visitTypeParameterList(self, ctx: Java8Parser.TypeParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#superclass.
def visitSuperclass(self, ctx: Java8Parser.SuperclassContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#superinterfaces.
def visitSuperinterfaces(self, ctx: Java8Parser.SuperinterfacesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceTypeList.
def visitInterfaceTypeList(self, ctx: Java8Parser.InterfaceTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classBody.
def visitClassBody(self, ctx: Java8Parser.ClassBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classBodyDeclaration.
def visitClassBodyDeclaration(self, ctx: Java8Parser.ClassBodyDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classMemberDeclaration.
def visitClassMemberDeclaration(self, ctx: Java8Parser.ClassMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldDeclaration.
def visitFieldDeclaration(self, ctx: Java8Parser.FieldDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldModifier.
def visitFieldModifier(self, ctx: Java8Parser.FieldModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclaratorList.
def visitVariableDeclaratorList(self, ctx: Java8Parser.VariableDeclaratorListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclarator.
def visitVariableDeclarator(self, ctx: Java8Parser.VariableDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableDeclaratorId.
def visitVariableDeclaratorId(self, ctx: Java8Parser.VariableDeclaratorIdContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableInitializer.
def visitVariableInitializer(self, ctx: Java8Parser.VariableInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannType.
def visitUnannType(self, ctx: Java8Parser.UnannTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannPrimitiveType.
def visitUnannPrimitiveType(self, ctx: Java8Parser.UnannPrimitiveTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannReferenceType.
def visitUnannReferenceType(self, ctx: Java8Parser.UnannReferenceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassOrInterfaceType.
def visitUnannClassOrInterfaceType(self, ctx: Java8Parser.UnannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType.
def visitUnannClassType(self, ctx: Java8Parser.UnannClassTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType_lf_unannClassOrInterfaceType.
def visitUnannClassType_lf_unannClassOrInterfaceType(self,
ctx:
Java8Parser.UnannClassType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannClassType_lfno_unannClassOrInterfaceType.
def visitUnannClassType_lfno_unannClassOrInterfaceType(self,
ctx:
Java8Parser.UnannClassType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType.
def visitUnannInterfaceType(self, ctx: Java8Parser.UnannInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType_lf_unannClassOrInterfaceType.
def visitUnannInterfaceType_lf_unannClassOrInterfaceType(self,
ctx:
Java8Parser.UnannInterfaceType_lf_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannInterfaceType_lfno_unannClassOrInterfaceType.
def visitUnannInterfaceType_lfno_unannClassOrInterfaceType(self,
ctx:
Java8Parser.UnannInterfaceType_lfno_unannClassOrInterfaceTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannTypeVariable.
def visitUnannTypeVariable(self, ctx: Java8Parser.UnannTypeVariableContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unannArrayType.
def visitUnannArrayType(self, ctx: Java8Parser.UnannArrayTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodDeclaration.
def visitMethodDeclaration(self, ctx: Java8Parser.MethodDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodModifier.
def visitMethodModifier(self, ctx: Java8Parser.MethodModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodHeader.
def visitMethodHeader(self, ctx: Java8Parser.MethodHeaderContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#result.
def visitResult(self, ctx: Java8Parser.ResultContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodDeclarator.
def visitMethodDeclarator(self, ctx: Java8Parser.MethodDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameterList.
def visitFormalParameterList(self, ctx: Java8Parser.FormalParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameters.
def visitFormalParameters(self, ctx: Java8Parser.FormalParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#formalParameter.
def visitFormalParameter(self, ctx: Java8Parser.FormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableModifier.
def visitVariableModifier(self, ctx: Java8Parser.VariableModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#lastFormalParameter.
def visitLastFormalParameter(self, ctx: Java8Parser.LastFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#receiverParameter.
def visitReceiverParameter(self, ctx: Java8Parser.ReceiverParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#throws_.
def visitThrows_(self, ctx: Java8Parser.Throws_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#exceptionTypeList.
def visitExceptionTypeList(self, ctx: Java8Parser.ExceptionTypeListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#exceptionType.
def visitExceptionType(self, ctx: Java8Parser.ExceptionTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodBody.
def visitMethodBody(self, ctx: Java8Parser.MethodBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#instanceInitializer.
def visitInstanceInitializer(self, ctx: Java8Parser.InstanceInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#staticInitializer.
def visitStaticInitializer(self, ctx: Java8Parser.StaticInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorDeclaration.
def visitConstructorDeclaration(self, ctx: Java8Parser.ConstructorDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorModifier.
def visitConstructorModifier(self, ctx: Java8Parser.ConstructorModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorDeclarator.
def visitConstructorDeclarator(self, ctx: Java8Parser.ConstructorDeclaratorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#simpleTypeName.
def visitSimpleTypeName(self, ctx: Java8Parser.SimpleTypeNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constructorBody.
def visitConstructorBody(self, ctx: Java8Parser.ConstructorBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#explicitConstructorInvocation.
def visitExplicitConstructorInvocation(self, ctx: Java8Parser.ExplicitConstructorInvocationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumDeclaration.
def visitEnumDeclaration(self, ctx: Java8Parser.EnumDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumBody.
def visitEnumBody(self, ctx: Java8Parser.EnumBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumConstantList.
def visitEnumConstantList(self, ctx: Java8Parser.EnumConstantListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumConstant.
def visitEnumConstant(self, ctx: Java8Parser.EnumConstantContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumConstantModifier.
def visitEnumConstantModifier(self, ctx: Java8Parser.EnumConstantModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumBodyDeclarations.
def visitEnumBodyDeclarations(self, ctx: Java8Parser.EnumBodyDeclarationsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceDeclaration.
def visitInterfaceDeclaration(self, ctx: Java8Parser.InterfaceDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#normalInterfaceDeclaration.
def visitNormalInterfaceDeclaration(self, ctx: Java8Parser.NormalInterfaceDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceModifier.
def visitInterfaceModifier(self, ctx: Java8Parser.InterfaceModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#extendsInterfaces.
def visitExtendsInterfaces(self, ctx: Java8Parser.ExtendsInterfacesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceBody.
def visitInterfaceBody(self, ctx: Java8Parser.InterfaceBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceMemberDeclaration.
def visitInterfaceMemberDeclaration(self, ctx: Java8Parser.InterfaceMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constantDeclaration.
def visitConstantDeclaration(self, ctx: Java8Parser.ConstantDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constantModifier.
def visitConstantModifier(self, ctx: Java8Parser.ConstantModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceMethodDeclaration.
def visitInterfaceMethodDeclaration(self, ctx: Java8Parser.InterfaceMethodDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#interfaceMethodModifier.
def visitInterfaceMethodModifier(self, ctx: Java8Parser.InterfaceMethodModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotationTypeDeclaration.
def visitAnnotationTypeDeclaration(self, ctx: Java8Parser.AnnotationTypeDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotationTypeBody.
def visitAnnotationTypeBody(self, ctx: Java8Parser.AnnotationTypeBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotationTypeMemberDeclaration.
def visitAnnotationTypeMemberDeclaration(self, ctx: Java8Parser.AnnotationTypeMemberDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotationTypeElementDeclaration.
def visitAnnotationTypeElementDeclaration(self, ctx: Java8Parser.AnnotationTypeElementDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotationTypeElementModifier.
def visitAnnotationTypeElementModifier(self, ctx: Java8Parser.AnnotationTypeElementModifierContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#defaultValue.
def visitDefaultValue(self, ctx: Java8Parser.DefaultValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#annotation.
def visitAnnotation(self, ctx: Java8Parser.AnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#normalAnnotation.
def visitNormalAnnotation(self, ctx: Java8Parser.NormalAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#elementValuePairList.
def visitElementValuePairList(self, ctx: Java8Parser.ElementValuePairListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#elementValuePair.
def visitElementValuePair(self, ctx: Java8Parser.ElementValuePairContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#elementValue.
def visitElementValue(self, ctx: Java8Parser.ElementValueContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#elementValueArrayInitializer.
def visitElementValueArrayInitializer(self, ctx: Java8Parser.ElementValueArrayInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#elementValueList.
def visitElementValueList(self, ctx: Java8Parser.ElementValueListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#markerAnnotation.
def visitMarkerAnnotation(self, ctx: Java8Parser.MarkerAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#singleElementAnnotation.
def visitSingleElementAnnotation(self, ctx: Java8Parser.SingleElementAnnotationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayInitializer.
def visitArrayInitializer(self, ctx: Java8Parser.ArrayInitializerContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#variableInitializerList.
def visitVariableInitializerList(self, ctx: Java8Parser.VariableInitializerListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#block.
def visitBlock(self, ctx: Java8Parser.BlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#blockStatements.
def visitBlockStatements(self, ctx: Java8Parser.BlockStatementsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#blockStatement.
def visitBlockStatement(self, ctx: Java8Parser.BlockStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#localVariableDeclarationStatement.
def visitLocalVariableDeclarationStatement(self, ctx: Java8Parser.LocalVariableDeclarationStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#localVariableDeclaration.
def visitLocalVariableDeclaration(self, ctx: Java8Parser.LocalVariableDeclarationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#statement.
def visitStatement(self, ctx: Java8Parser.StatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#statementNoShortIf.
def visitStatementNoShortIf(self, ctx: Java8Parser.StatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#statementWithoutTrailingSubstatement.
def visitStatementWithoutTrailingSubstatement(self, ctx: Java8Parser.StatementWithoutTrailingSubstatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#emptyStatement.
def visitEmptyStatement(self, ctx: Java8Parser.EmptyStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#labeledStatement.
def visitLabeledStatement(self, ctx: Java8Parser.LabeledStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#labeledStatementNoShortIf.
def visitLabeledStatementNoShortIf(self, ctx: Java8Parser.LabeledStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#expressionStatement.
def visitExpressionStatement(self, ctx: Java8Parser.ExpressionStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#statementExpression.
def visitStatementExpression(self, ctx: Java8Parser.StatementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#ifThenStatement.
def visitIfThenStatement(self, ctx: Java8Parser.IfThenStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#ifThenElseStatement.
def visitIfThenElseStatement(self, ctx: Java8Parser.IfThenElseStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#ifThenElseStatementNoShortIf.
def visitIfThenElseStatementNoShortIf(self, ctx: Java8Parser.IfThenElseStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#assertStatement.
def visitAssertStatement(self, ctx: Java8Parser.AssertStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#switchStatement.
def visitSwitchStatement(self, ctx: Java8Parser.SwitchStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#switchBlock.
def visitSwitchBlock(self, ctx: Java8Parser.SwitchBlockContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#switchBlockStatementGroup.
def visitSwitchBlockStatementGroup(self, ctx: Java8Parser.SwitchBlockStatementGroupContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#switchLabels.
def visitSwitchLabels(self, ctx: Java8Parser.SwitchLabelsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#switchLabel.
def visitSwitchLabel(self, ctx: Java8Parser.SwitchLabelContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enumConstantName.
def visitEnumConstantName(self, ctx: Java8Parser.EnumConstantNameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#whileStatement.
def visitWhileStatement(self, ctx: Java8Parser.WhileStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#whileStatementNoShortIf.
def visitWhileStatementNoShortIf(self, ctx: Java8Parser.WhileStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#doStatement.
def visitDoStatement(self, ctx: Java8Parser.DoStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#forStatement.
def visitForStatement(self, ctx: Java8Parser.ForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#forStatementNoShortIf.
def visitForStatementNoShortIf(self, ctx: Java8Parser.ForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#basicForStatement.
def visitBasicForStatement(self, ctx: Java8Parser.BasicForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#basicForStatementNoShortIf.
def visitBasicForStatementNoShortIf(self, ctx: Java8Parser.BasicForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#forInit.
def visitForInit(self, ctx: Java8Parser.ForInitContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#forUpdate.
def visitForUpdate(self, ctx: Java8Parser.ForUpdateContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#statementExpressionList.
def visitStatementExpressionList(self, ctx: Java8Parser.StatementExpressionListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enhancedForStatement.
def visitEnhancedForStatement(self, ctx: Java8Parser.EnhancedForStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#enhancedForStatementNoShortIf.
def visitEnhancedForStatementNoShortIf(self, ctx: Java8Parser.EnhancedForStatementNoShortIfContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#breakStatement.
def visitBreakStatement(self, ctx: Java8Parser.BreakStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#continueStatement.
def visitContinueStatement(self, ctx: Java8Parser.ContinueStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#returnStatement.
def visitReturnStatement(self, ctx: Java8Parser.ReturnStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#throwStatement.
def visitThrowStatement(self, ctx: Java8Parser.ThrowStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#synchronizedStatement.
def visitSynchronizedStatement(self, ctx: Java8Parser.SynchronizedStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#tryStatement.
def visitTryStatement(self, ctx: Java8Parser.TryStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#catches.
def visitCatches(self, ctx: Java8Parser.CatchesContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#catchClause.
def visitCatchClause(self, ctx: Java8Parser.CatchClauseContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#catchFormalParameter.
def visitCatchFormalParameter(self, ctx: Java8Parser.CatchFormalParameterContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#catchType.
def visitCatchType(self, ctx: Java8Parser.CatchTypeContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#finally_.
def visitFinally_(self, ctx: Java8Parser.Finally_Context):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#tryWithResourcesStatement.
def visitTryWithResourcesStatement(self, ctx: Java8Parser.TryWithResourcesStatementContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#resourceSpecification.
def visitResourceSpecification(self, ctx: Java8Parser.ResourceSpecificationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#resourceList.
def visitResourceList(self, ctx: Java8Parser.ResourceListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#resource.
def visitResource(self, ctx: Java8Parser.ResourceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primary.
def visitPrimary(self, ctx: Java8Parser.PrimaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray.
def visitPrimaryNoNewArray(self, ctx: Java8Parser.PrimaryNoNewArrayContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lf_arrayAccess.
def visitPrimaryNoNewArray_lf_arrayAccess(self, ctx: Java8Parser.PrimaryNoNewArray_lf_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lfno_arrayAccess.
def visitPrimaryNoNewArray_lfno_arrayAccess(self, ctx: Java8Parser.PrimaryNoNewArray_lfno_arrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lf_primary.
def visitPrimaryNoNewArray_lf_primary(self, ctx: Java8Parser.PrimaryNoNewArray_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primary(self,
ctx:
Java8Parser.PrimaryNoNewArray_lf_primary_lf_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary.
def visitPrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primary(self,
ctx:
Java8Parser.PrimaryNoNewArray_lf_primary_lfno_arrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary(self, ctx: Java8Parser.PrimaryNoNewArray_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primary(self,
ctx:
Java8Parser.PrimaryNoNewArray_lfno_primary_lf_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#primaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary.
def visitPrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primary(self,
ctx:
Java8Parser.PrimaryNoNewArray_lfno_primary_lfno_arrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classInstanceCreationExpression.
def visitClassInstanceCreationExpression(self, ctx: Java8Parser.ClassInstanceCreationExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classInstanceCreationExpression_lf_primary.
def visitClassInstanceCreationExpression_lf_primary(self,
ctx:
Java8Parser.ClassInstanceCreationExpression_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#classInstanceCreationExpression_lfno_primary.
def visitClassInstanceCreationExpression_lfno_primary(self,
ctx:
Java8Parser.ClassInstanceCreationExpression_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#typeArgumentsOrDiamond.
def visitTypeArgumentsOrDiamond(self, ctx: Java8Parser.TypeArgumentsOrDiamondContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldAccess.
def visitFieldAccess(self, ctx: Java8Parser.FieldAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldAccess_lf_primary.
def visitFieldAccess_lf_primary(self, ctx: Java8Parser.FieldAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#fieldAccess_lfno_primary.
def visitFieldAccess_lfno_primary(self, ctx: Java8Parser.FieldAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayAccess.
def visitArrayAccess(self, ctx: Java8Parser.ArrayAccessContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayAccess_lf_primary.
def visitArrayAccess_lf_primary(self, ctx: Java8Parser.ArrayAccess_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayAccess_lfno_primary.
def visitArrayAccess_lfno_primary(self, ctx: Java8Parser.ArrayAccess_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodInvocation.
def visitMethodInvocation(self, ctx: Java8Parser.MethodInvocationContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodInvocation_lf_primary.
def visitMethodInvocation_lf_primary(self, ctx: Java8Parser.MethodInvocation_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodInvocation_lfno_primary.
def visitMethodInvocation_lfno_primary(self, ctx: Java8Parser.MethodInvocation_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#argumentList.
def visitArgumentList(self, ctx: Java8Parser.ArgumentListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodReference.
def visitMethodReference(self, ctx: Java8Parser.MethodReferenceContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodReference_lf_primary.
def visitMethodReference_lf_primary(self, ctx: Java8Parser.MethodReference_lf_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#methodReference_lfno_primary.
def visitMethodReference_lfno_primary(self, ctx: Java8Parser.MethodReference_lfno_primaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#arrayCreationExpression.
def visitArrayCreationExpression(self, ctx: Java8Parser.ArrayCreationExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#dimExprs.
def visitDimExprs(self, ctx: Java8Parser.DimExprsContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#dimExpr.
def visitDimExpr(self, ctx: Java8Parser.DimExprContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#constantExpression.
def visitConstantExpression(self, ctx: Java8Parser.ConstantExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#expression.
def visitExpression(self, ctx: Java8Parser.ExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#lambdaExpression.
def visitLambdaExpression(self, ctx: Java8Parser.LambdaExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#lambdaParameters.
def visitLambdaParameters(self, ctx: Java8Parser.LambdaParametersContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#inferredFormalParameterList.
def visitInferredFormalParameterList(self, ctx: Java8Parser.InferredFormalParameterListContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#lambdaBody.
def visitLambdaBody(self, ctx: Java8Parser.LambdaBodyContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#assignmentExpression.
def visitAssignmentExpression(self, ctx: Java8Parser.AssignmentExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#assignment.
def visitAssignment(self, ctx: Java8Parser.AssignmentContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#leftHandSide.
def visitLeftHandSide(self, ctx: Java8Parser.LeftHandSideContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#assignmentOperator.
def visitAssignmentOperator(self, ctx: Java8Parser.AssignmentOperatorContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#conditionalExpression.
def visitConditionalExpression(self, ctx: Java8Parser.ConditionalExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#conditionalOrExpression.
def visitConditionalOrExpression(self, ctx: Java8Parser.ConditionalOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#conditionalAndExpression.
def visitConditionalAndExpression(self, ctx: Java8Parser.ConditionalAndExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#inclusiveOrExpression.
def visitInclusiveOrExpression(self, ctx: Java8Parser.InclusiveOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#exclusiveOrExpression.
def visitExclusiveOrExpression(self, ctx: Java8Parser.ExclusiveOrExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#andExpression.
def visitAndExpression(self, ctx: Java8Parser.AndExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#equalityExpression.
def visitEqualityExpression(self, ctx: Java8Parser.EqualityExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#relationalExpression.
def visitRelationalExpression(self, ctx: Java8Parser.RelationalExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#shiftExpression.
def visitShiftExpression(self, ctx: Java8Parser.ShiftExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#additiveExpression.
def visitAdditiveExpression(self, ctx: Java8Parser.AdditiveExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#multiplicativeExpression.
def visitMultiplicativeExpression(self, ctx: Java8Parser.MultiplicativeExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unaryExpression.
def visitUnaryExpression(self, ctx: Java8Parser.UnaryExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#preIncrementExpression.
def visitPreIncrementExpression(self, ctx: Java8Parser.PreIncrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#preDecrementExpression.
def visitPreDecrementExpression(self, ctx: Java8Parser.PreDecrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#unaryExpressionNotPlusMinus.
def visitUnaryExpressionNotPlusMinus(self, ctx: Java8Parser.UnaryExpressionNotPlusMinusContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#postfixExpression.
def visitPostfixExpression(self, ctx: Java8Parser.PostfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#postIncrementExpression.
def visitPostIncrementExpression(self, ctx: Java8Parser.PostIncrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#postIncrementExpression_lf_postfixExpression.
def visitPostIncrementExpression_lf_postfixExpression(self,
ctx:
Java8Parser.PostIncrementExpression_lf_postfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#postDecrementExpression.
def visitPostDecrementExpression(self, ctx: Java8Parser.PostDecrementExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#postDecrementExpression_lf_postfixExpression.
def visitPostDecrementExpression_lf_postfixExpression(self,
ctx:
Java8Parser.PostDecrementExpression_lf_postfixExpressionContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by Java8Parser#castExpression.
def visitCastExpression(self, ctx: Java8Parser.CastExpressionContext):
return self.visitChildren(ctx)
del Java8Parser
|
#
# Created on Mon Jun 14 2021
#
# The MIT License (MIT)
# Copyright (c) 2021 Vishnu Suresh
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import os
import sys
from JAVS_Util.exceptions import JAVSError, StringNotCompleteError
from JAVS_Util.preCheckFile import PreCheckFile
from JAVS_Util.tokenize import Tokenize
from JAVS_Util.globalTape import JAVSGlobalTape
from JAVS_Util.machine import Machine
from Env.arithmeticEnv import ArithmeticEnv
# def betaCheckString(tokens):
# stringFlag=False
# stringVariable=""
# for word in tokens:
# if "'" == word:
# print("got ' ")
# stringFlag=not stringFlag
# elif "'" in word:
# print("got a word with ' ",word[1:])
# stringFlag=not stringFlag
# if stringFlag:
# stringVariable=word[1:]
# else:
# print("String Variable :- ",stringVariable)
# else:
# if stringFlag:
# stringVariable=f'{stringVariable} {word}'
# else:
# print("String Variable :- ",stringVariable)
# print(word)
# if stringFlag:
# raise StringNotCompleteError()
def main(path, print_log=False, generate_Python_code=False):
try:
input_string = PreCheckFile.load(path)
Tokenize.initNLTK()
tokenize = Tokenize.make(input_string)
if print_log:
print("Tokenized Input :- ", tokenize)
# betaCheckString(tokenize)
word_list = Tokenize.generateWordListFromEnv(ArithmeticEnv)
# print(" World_list of Env :- ", word_list)
# word_list=...(ArithmeticEnv.env_Variables)...ArithmeticEnv.env_Words_and_WordAsFunction.keys()
Tokenize.checkAllWordsInEnv(words_list=tokenize, env_words=word_list)
tape = JAVSGlobalTape.make(tokenize_Input=tokenize,
env=ArithmeticEnv, show_logs=print_log)
# print("Tape :- ", tape)
pyCode = Machine.generatePythonCode(tape, ArithmeticEnv)
if print_log:
print("Python Code :- ", "\n".join(pyCode))
Machine.executePyCode(pyCode)
if generate_Python_code:
Machine.generatePyFile(os.path.split(path)[-1][:-3], pyCode)
# print(os.path.split(path))
except JAVSError as jerror:
print(jerror)
except Exception as e:
print("Python Error :- ", e)
if __name__ == "__main__":
arguments = sys.argv[1:]
if len(arguments) == 0:
        print('''JAVS-tranScripter for Achu's Programming Language, a branch which supports artificial language.
**** Research-based project; not intended for use in a production product. ****
For more documentation, see https://github.com/VishnuSuresh2000/JAVS-tranScripter
\n.ai file not found.\n
Command :-
javs [file_name.ai] -[flag]
example :- javs main.ai
if using python, use the command :- python javs.py [file_name.ai] -[flag]
flag :-
p := To generate Python code
l := To generate Log in command Line
envWords := To get all words in Env
''')
filePath = None
generatePyCode = False
show_logs = False
show_Env_word=False
for argument in arguments:
# print(argument)
if "-" == argument[0]:
if "p" in argument:
generatePyCode = True
if "l" in argument:
show_logs = True
if "envWords" in argument:
show_Env_word=True
elif ".ai" in argument[-3:]:
# print(".ai find")
filePath = argument
if filePath:
main(filePath, print_log=show_logs, generate_Python_code=generatePyCode)
elif filePath == None and show_Env_word:
print(f'Env Name :- {ArithmeticEnv.name_of_Env}\n')
print(f'Words in the Env :-\n')
for words in Tokenize.generateWordListFromEnv(ArithmeticEnv):
print(f'{words}',end="\t")
else:
print("JAVS Error :- File Path Not Specified, Or check the Extension")
# print()
# print("path :- ",os.path.altsep)
|
import requests
from . import config
URL_OPEN_STREET_MAPS = 'https://nominatim.openstreetmap.org'
URL_TELIZE = 'https://www.telize.com'
URL_FORECAST = 'https://api.forecast.io/forecast/{}/{},{}?units=si'
def fetch_city_coords(city):
url = '{}{}'.format(URL_OPEN_STREET_MAPS, '/search')
r = requests.get(url, params={
'q': city,
'format': 'json'
})
r.raise_for_status()
json = r.json()
if len(json) == 0:
return None
json = json[0]
return {
'city': json['display_name'],
'lat': json['lat'],
'lng': json['lon']
}
def fetch_location():
url = '{}{}'.format(URL_TELIZE, '/geoip')
r = requests.get(url)
r.raise_for_status()
json = r.json()
if 'latitude' not in json or 'longitude' not in json:
return None
return {
'lat': json['latitude'],
'lng': json['longitude'],
'city': json['city'] if 'city' in json else 'Unknown',
'country': json['country'] if 'country' in json else 'Unknown'
}
def fetch_forecast(lat, lng):
api_key = config.get_config()['forecast.io']['api_key']
url = URL_FORECAST.format(api_key, lat, lng)
r = requests.get(url)
r.raise_for_status()
return r.json()
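# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module; the city name is
# arbitrary, and fetch_forecast additionally needs a forecast.io api_key in the
# config module):
#
#   coords = fetch_city_coords('Berlin')
#   if coords:
#       print(fetch_forecast(coords['lat'], coords['lng']))
# ---------------------------------------------------------------------------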
|
from typing import Optional, Any, TypeVar, Type, BinaryIO
from types import GeneratorType
from remerkleable.tree import Node, RootNode, Root, subtree_fill_to_contents, get_depth, to_gindex, \
subtree_fill_to_length, Gindex, PairNode
from remerkleable.core import View, ViewHook, zero_node, FixedByteLengthViewHelper, pack_bytes_to_chunks
from remerkleable.basic import byte, uint256
V = TypeVar('V', bound=View)
class RawBytesView(bytes, View):
def __new__(cls, *args, **kwargs):
if len(args) == 0:
return super().__new__(cls, cls.default_bytes(), **kwargs)
elif len(args) == 1:
args = args[0]
if isinstance(args, (GeneratorType, list, tuple)):
data = bytes(args)
elif isinstance(args, bytes):
data = args
elif isinstance(args, str):
if args[:2] == '0x':
args = args[2:]
data = list(bytes.fromhex(args))
else:
data = bytes(args)
return super().__new__(cls, data, **kwargs)
else:
return super().__new__(cls, bytes(args), **kwargs)
@classmethod
def default_bytes(cls) -> bytes:
raise NotImplementedError
@classmethod
def coerce_view(cls: Type[V], v: Any) -> V:
return cls(v)
@classmethod
def tree_depth(cls) -> int:
raise NotImplementedError
def set_backing(self, value):
raise Exception("cannot change the backing of a raw-bytes-like view, init a new view instead")
def __repr__(self):
return "0x" + self.hex()
def __str__(self):
return "0x" + self.hex()
@classmethod
def decode_bytes(cls: Type[V], bytez: bytes) -> V:
return cls(bytez)
def encode_bytes(self) -> bytes:
return self
def navigate_view(self, key: Any) -> View:
return byte(self.__getitem__(key))
class ByteVector(RawBytesView, FixedByteLengthViewHelper, View):
def __new__(cls, *args, **kwargs):
byte_len = cls.vector_length()
out = super().__new__(cls, *args, **kwargs)
if len(out) != byte_len:
raise Exception(f"incorrect byte length: {len(out)}, expected {byte_len}")
return out
def __class_getitem__(cls, length) -> Type["ByteVector"]:
chunk_count = (length + 31) // 32
tree_depth = get_depth(chunk_count)
class SpecialByteVectorView(ByteVector):
@classmethod
def default_node(cls) -> Node:
return subtree_fill_to_length(zero_node(0), tree_depth, chunk_count)
@classmethod
def tree_depth(cls) -> int:
return tree_depth
@classmethod
def type_byte_length(cls) -> int:
return length
return SpecialByteVectorView
@classmethod
def vector_length(cls):
return cls.type_byte_length()
@classmethod
def default_bytes(cls) -> bytes:
return b"\x00" * cls.vector_length()
@classmethod
def type_repr(cls) -> str:
return f"ByteVector[{cls.vector_length()}]"
@classmethod
def view_from_backing(cls: Type[V], node: Node, hook: Optional[ViewHook[V]] = None) -> V:
depth = cls.tree_depth()
byte_len = cls.vector_length()
if depth == 0:
return cls.decode_bytes(node.merkle_root()[:byte_len])
else:
chunk_count = (byte_len + 31) // 32
chunks = [node.getter(to_gindex(i, depth)) for i in range(chunk_count)]
bytez = b"".join(ch.merkle_root() for ch in chunks)[:byte_len]
return cls.decode_bytes(bytez)
def get_backing(self) -> Node:
if len(self) == 32: # super common case, optimize for it
return RootNode(Root(self))
elif len(self) < 32:
return RootNode(Root(self + b"\x00" * (32 - len(self))))
else:
return subtree_fill_to_contents(pack_bytes_to_chunks(self), self.__class__.tree_depth())
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key < 0 or key > cls.vector_length():
raise KeyError
return byte
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
depth = cls.tree_depth()
byte_len = cls.vector_length()
if key < 0 or key >= byte_len:
raise KeyError
chunk_i = key // 32
return to_gindex(chunk_i, depth)
# Define common special Byte vector view types, these are bytes-like:
# raw representation instead of backed by a binary tree. Inheriting Python "bytes"
Bytes1 = ByteVector[1]
Bytes4 = ByteVector[4]
Bytes8 = ByteVector[8]
Bytes32 = ByteVector[32]
Bytes48 = ByteVector[48]
Bytes96 = ByteVector[96]
class ByteList(RawBytesView, FixedByteLengthViewHelper, View):
def __new__(cls, *args, **kwargs):
byte_limit = cls.limit()
out = super().__new__(cls, *args, **kwargs)
if len(out) > byte_limit:
raise Exception(f"incorrect byte length: {len(out)}, cannot be more than limit {byte_limit}")
return out
def __class_getitem__(cls, limit) -> Type["ByteList"]:
chunk_count = (limit + 31) // 32
contents_depth = get_depth(chunk_count)
class SpecialByteListView(ByteList):
@classmethod
def contents_depth(cls) -> int:
return contents_depth
@classmethod
def limit(cls) -> int:
return limit
return SpecialByteListView
@classmethod
def limit(cls) -> int:
raise NotImplementedError
@classmethod
def default_bytes(cls) -> bytes:
return b""
@classmethod
def type_repr(cls) -> str:
return f"ByteList[{cls.limit()}]"
@classmethod
def view_from_backing(cls: Type[V], node: Node, hook: Optional[ViewHook[V]] = None) -> V:
contents_depth = cls.contents_depth()
contents_node = node.get_left()
length = uint256.view_from_backing(node.get_right())
if length > cls.limit():
raise Exception("ByteList backing declared length exceeds limit")
if contents_depth == 0:
return cls.decode_bytes(contents_node.root[:length])
else:
chunk_count = (length + 31) // 32
chunks = [contents_node.getter(to_gindex(i, contents_depth)) for i in range(chunk_count)]
bytez = b"".join(ch.root for ch in chunks)[:length]
return cls.decode_bytes(bytez)
def get_backing(self) -> Node:
return PairNode(
subtree_fill_to_contents(pack_bytes_to_chunks(self), self.__class__.contents_depth()),
uint256(len(self)).get_backing()
)
@classmethod
def contents_depth(cls) -> int:
raise NotImplementedError
@classmethod
def tree_depth(cls) -> int:
return cls.contents_depth() + 1 # 1 extra for length mix-in
@classmethod
def default_node(cls) -> Node:
return PairNode(zero_node(cls.contents_depth()), zero_node(0)) # mix-in 0 as list length
@classmethod
def navigate_type(cls, key: Any) -> Type[View]:
if key < 0 or key > cls.limit():
raise KeyError
return byte
@classmethod
def key_to_static_gindex(cls, key: Any) -> Gindex:
depth = cls.tree_depth()
byte_limit = cls.limit()
if key < 0 or key >= byte_limit:
raise KeyError
chunk_i = key // 32
return to_gindex(chunk_i, depth)
@classmethod
def is_fixed_byte_length(cls) -> bool:
return False
@classmethod
def min_byte_length(cls) -> int:
return 0
@classmethod
def max_byte_length(cls) -> int:
return cls.limit()
@classmethod
def deserialize(cls: Type[V], stream: BinaryIO, scope: int) -> V:
return cls.decode_bytes(stream.read(scope))
def value_byte_length(self) -> int:
return len(self)
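# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It only
# exercises behaviour defined above: hex-string construction, length checks,
# and the encode/decode round-trip. The ExampleList alias is made up for the
# demo.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    v = Bytes4("0x01020304")           # fixed length: exactly 4 bytes accepted
    assert v.encode_bytes() == b"\x01\x02\x03\x04"
    assert Bytes4.decode_bytes(b"\x01\x02\x03\x04") == v
    ExampleList = ByteList[64]         # list type with a 64-byte limit
    data = ExampleList(b"hello")       # anything up to 64 bytes is allowed
    assert data.value_byte_length() == 5
    print(repr(v), repr(data))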
|
from format_input_string import main as format_input_string
def test_input_output_pair(input_str, output_str):
assert format_input_string(input_str) == output_str
print(f"Check OK: formatted '{input_str}' = '{output_str}'")
def main(input_output_pairs):
assert type(input_output_pairs) == list, f"Input should be a list"
for e in input_output_pairs:
assert type(e) == tuple, f"elements in list should be tuples; found '{e}'"
test_input_output_pair(e[0], e[1])
print(f"{len(input_output_pairs)} tests passed successfully")
if __name__ == "__main__":
pairs = [
("51 01 00 04 8a b4 62 61 74 74", "51 01 00 04 8a b4 62 61 74 74"),
(
"0x51 0x01 0x00 0x04 0x8a 0xb4 0x62 0x61 0x74 0x74",
"51 01 00 04 8a b4 62 61 74 74"
),
("51, 01, 00, 04, 8a, b4, 62, 61, 74, 74", "51 01 00 04 8a b4 62 61 74 74"),
("51 01 00 04 8A B4 62 61 74 74", "51 01 00 04 8a b4 62 61 74 74"),
("51 1 0 4 8a b4 62 61 74 74", "51 01 00 04 8a b4 62 61 74 74"),
(
"0x51,01,0x0 4,0x8A,0xB4,0x62, 0x61, 74, 0x74",
"51 01 00 04 8a b4 62 61 74 74"
),
]
main(pairs)
|
from __future__ import division
from datetime import datetime
def timestamp_from_dt(dt, epoch=datetime(1970, 1, 1)):
"""
Convert a datetime to a timestamp.
https://stackoverflow.com/a/8778548/141395
"""
delta = dt - epoch
# return delta.total_seconds()
return delta.seconds + delta.days * 86400
def convert_datetimes_to_timestamps(data, datetime_attrs):
"""
Given a dictionary of data, and a dictionary of datetime attributes,
return a new dictionary that converts any datetime attributes that may
be present to their timestamped equivalent.
"""
if not data:
return data
new_data = {}
for key, value in data.items():
if key in datetime_attrs and isinstance(value, datetime):
new_key = datetime_attrs[key]
new_data[new_key] = timestamp_from_dt(value)
else:
new_data[key] = value
return new_data
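# ---------------------------------------------------------------------------
# Quick sanity check (illustrative, not part of the original module; the
# attribute mapping below is made up for the example).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dt = datetime(2020, 1, 1)
    assert timestamp_from_dt(dt) == 1577836800  # 18262 days * 86400 s/day
    converted = convert_datetimes_to_timestamps(
        {'created_at': dt, 'name': 'example'},
        datetime_attrs={'created_at': 'created_at_ts'})
    print(converted)  # -> {'created_at_ts': 1577836800, 'name': 'example'}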
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from flask import Flask
import confusionflow
from confusionflow.server.blueprints import api, web
from confusionflow.utils import check_folderpath, get_logdir_from_env
def create_app(logdir=None):
app = Flask(__name__)
if logdir is None:
logdir = get_logdir_from_env()
static_file_path = os.path.join(
os.path.dirname(os.path.realpath(confusionflow.__file__)), "static"
)
# setup api
app.config["LOGDIR"] = check_folderpath(logdir)
app.register_blueprint(api.bp, url_prefix="/api")
# setup web
# only setup web if not in development mode
if app.config["ENV"] != "development":
app.config["STATIC_FILE_PATH"] = check_folderpath(static_file_path)
app.register_blueprint(web.bp)
app.add_url_rule("/", endpoint="index")
return app
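# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module; the log
# directory path and port are placeholders):
#
#   app = create_app(logdir="path/to/logdir")
#   app.run(host="0.0.0.0", port=8080)
# ---------------------------------------------------------------------------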
|
from djongo import models
from inventory_item.models import Item, ItemFields
from audit_template.models import AuditTemplate
USER_MODEL = 'user_account.CustomUser'
PENDING = 'Pending'
COMPLETE = 'Complete'
class Audit(models.Model):
audit_id = models.AutoField(primary_key=True)
organization = models.ForeignKey(to='organization.Organization',
on_delete=models.CASCADE,
blank=False,
null=False,
default=0)
inventory_items = models.ManyToManyField(Item, blank=True, default=0)
initiated_by = models.ForeignKey(
to=USER_MODEL,
on_delete=models.CASCADE,
related_name='initiated_by')
initiated_on = models.DateTimeField(auto_now_add=True) # Auto set when object is first created
last_modified_on = models.DateTimeField(auto_now=True) # Auto set every time object is saved
assigned_sk = models.ManyToManyField(to=USER_MODEL, blank=True, default=0)
template_id = models.ForeignKey(AuditTemplate, on_delete=models.CASCADE, blank=True, null=True)
accuracy = models.FloatField(default=0.0)
ACTIVE = 'Active'
AUDIT_STATUS = [(
PENDING, 'Pending'),
(COMPLETE, 'Complete'),
(ACTIVE, 'Active')
]
status = models.CharField(max_length=12, choices=AUDIT_STATUS, default=PENDING)
class Assignment(models.Model):
audit=models.ForeignKey(Audit, on_delete=models.CASCADE)
assigned_sk = models.ForeignKey(to=USER_MODEL, on_delete=models.CASCADE)
seen=models.BooleanField(default=False)
seen_on=models.DateTimeField(auto_now=True)
class BinToSK(models.Model):
bin_id = models.AutoField(primary_key=True)
Bin = models.CharField(max_length=256, blank=False, null=False)
init_audit = models.ForeignKey(Audit, on_delete=models.CASCADE)
customuser = models.ForeignKey(to=USER_MODEL, on_delete=models.CASCADE)
item_ids = models.JSONField(blank=True, null=True)
accuracy = models.FloatField(default=0.0)
BIN_STATUS = [(
PENDING, 'Pending'),
(COMPLETE, 'Complete'),
]
status = models.CharField(max_length=12, choices=BIN_STATUS, default=PENDING)
class Record(ItemFields):
record_id = models.AutoField(primary_key=True)
item_id = models.CharField(max_length=256, null=False)
audit = models.ForeignKey(Audit, on_delete=models.CASCADE)
bin_to_sk = models.ForeignKey(BinToSK, on_delete=models.CASCADE)
comment = models.TextField(blank=True)
flagged = models.BooleanField(default=False)
first_verified_on = models.DateTimeField(auto_now_add=True)
last_verified_on = models.DateTimeField(auto_now=True)
PROVIDED = 'Provided'
MISSING = 'Missing'
NEW = 'New'
RECORD_STATUS = [
(PENDING, 'Pending'),
(PROVIDED, 'Provided'),
(MISSING, 'Missing'),
(NEW, 'New')
]
status = models.CharField(max_length=8, choices=RECORD_STATUS, default=PENDING)
class Comment(models.Model):
ref_audit = models.IntegerField(null=False)
org_id = models.IntegerField(null=False)
content = models.TextField(blank=True)
author = models.CharField(max_length=256, null=False)
created_timestamp = models.DateTimeField(auto_now_add=True)
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
# The barcode detection pipeline is as follows:
# 1. Compute the Scharr gradient magnitude representation in the x and y directions
# 2. Subtract the y-gradient from the x-gradient to reveal the barcode region
# 3. Blur and binarize the image
# 4. Apply a closing kernel to the binarized image
# 5. Perform a series of erosions and dilations
# 6. Find the largest contour in the image, which is most likely the barcode
# Note: this method makes assumptions about the image gradient representation, so it only works for horizontal barcodes.
def detect_bar(image):
    # Convert the input image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Compute the image gradient in the x and y directions
gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradY = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=0, dy=1, ksize=-1)
    # Subtract the y-gradient from the x-gradient; this leaves regions with high horizontal and low vertical gradients
gradient = cv2.subtract(gradX, gradY)
gradient = cv2.convertScaleAbs(gradient)
    # Reduce noise so we focus on the barcode region: blur the gradient image with a 9x9 averaging kernel,
    # which smooths high-frequency noise in the gradient representation, then binarize the result
blurred = cv2.blur(gradient, (9, 9))
(_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
    # Apply morphological operations to the binarized image to close the gaps between the barcode's vertical bars
    # Use cv2.getStructuringElement to build a rectangular kernel. The kernel is wider than it is tall,
    # so it closes the gaps between the vertical bars of the barcode.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
#
    # # The image still contains some small blobs, so use erosion and dilation to remove them:
    # # erosion eats away at the white pixels, removing the small blobs,
    # # while dilation grows the remaining white pixels back out.
closed = cv2.erode(closed, None, iterations=4)
closed = cv2.dilate(closed, None, iterations=4)
#
    # # Finally, find the contours of the barcode in the image
_,cnts, _ = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for cnt in cnts:
# print(cv2.contourArea(cnt))
    # # Sort the contours by area; the largest one is the outermost (barcode) contour
c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
#
    # # # Compute the bounding box of the largest contour
rect = cv2.minAreaRect(c)
box = np.int0(cv2.boxPoints(rect))
# #
    # # # Draw the box on the original image; this marks the detected barcode
cv2.drawContours(image, [box], -1, (0, 255, 0), 3)
return image
if __name__ == '__main__':
image = cv2.imread("../Images/1497676486031.jpg")
bar_image = detect_bar(image)
cv2.imshow("bar", bar_image)
#cv2.imwrite("../Images/bar_image.jpg", bar_image)
cv2.waitKey(0)
    # Capture video from the webcam
# cap = cv2.VideoCapture(0)
# ret, frame = cap.read()
# isVideoCapture = True
# if not ret:
    # print('Could not read video from the camera')
# isVideoCapture = False
# cap.release()
#
# while (isVideoCapture):
    # # Grab each frame
# ret, frame = cap.read()
# # bar_image = detect_bar(frame)
# bar_image = frame
# bar_image = cv2.cvtColor(bar_image, cv2.COLOR_BGR2GRAY)
# cv2.imshow("bar", bar_image)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
|
print("GUESSING GAME")
# Terminal-based number guessing game: 5 guesses over 30 numbers gives roughly a 5/30 = 1/6 chance of winning. This is the hard level.
while(True):
x= str(input("Shall we start the game: \n"))
y=x.lower()
import random
n= random.randint(1,30)
if y=="yes":
name = str(input("Enter your name:"))
print("Hello " + name + ",\n In this game I will choose a number between 1 and 30 and you have to guess it")
a1=int(input("Your guess is:"))
print("Great your guess is right \n Wow, you won ;-)") if a1==n else print("Oh no, you must try again \n Try again you have 4 more chances to guess")
if a1==n:
print("Thanks for playing")
else:
a2=int(input("Your second guess is:"))
print("Great your guess is right \n Wow, you won ;-)") if a2==n else print("Oh no, you was close \n Try again you have 3 more chances to guess")
if a2==n:
print("Thanks for playing")
else:
a3= int(input("Your third guess is:"))
print("Great your guess is right \n Wow, you won ;-)") if a3==n else print("Oh no, you was too close \n Try again you have 2 more chances to guess")
if a3==n:
print("Thanks for playing")
else:
a4= int(input("Your fourth guess is:"))
print("Great your guess is right \n Wow, you won ;-)") if a4==n else print("Oh no, you was almost there \n Try again you have 1 more last chance to guess")
if a4==n:
print("Thanks for playing")
else:
a5= int(input("Your last guess is:"))
print("Great your guess is right \n Wow, you won ;-)") if a5==n else print(f"Better luck next time and answer was {n}.\nGAME OVER")
else:
print("Hope you will come again")
|
"""Project loader for reading BUILD and BUILD.conf files."""
import importlib
import sys
import traceback
import cobble.env
def load(root, build_dir):
"""Loads a Project, given the paths to the project root and build output
directory."""
# Create a key registry initialized with keys defined internally to Cobble.
kr = cobble.env.KeyRegistry()
for k in cobble.target.KEYS: kr.define(k)
# Create working data structures.
project = cobble.project.Project(root, build_dir)
packages_to_visit = []
installed_modules = {}
# Function that will be exposed to BUILD.conf files as 'seed()'
def _build_conf_seed(*paths):
nonlocal packages_to_visit
packages_to_visit += paths
# Function that will be exposed to BUILD.conf files as 'install()'
def _build_conf_install(module_name):
nonlocal kr
module = importlib.import_module(module_name)
if hasattr(module, 'KEYS'):
for k in module.KEYS:
kr.define(k)
installed_modules[module.__name__] = module
# Function that will be exposed to BUILD.conf files as 'environment()'
def _build_conf_environment(name, base = None, contents = {}):
assert name not in project.named_envs, \
"More than one environment named %r" % name
if base:
assert base in project.named_envs, \
"Base environment %r does not exist (must appear before)" \
% base
base_env = project.named_envs[base]
else:
base_env = cobble.env.Env(kr, {})
env = base_env.derive(cobble.env.prepare_delta(contents))
project.named_envs[name] = env
# Function that will be exposed to BUILD.conf files as 'define_key()'
def _build_conf_define_key(name, *, type):
if type == 'string':
key = cobble.env.overrideable_string_key(name)
elif type == 'bool':
key = cobble.env.overrideable_bool_key(name)
else:
raise Exception('Unknown key type: %r' % type)
kr.define(key)
# Function that will be exposed to BUILD.conf files as 'plugin_path()'
def _build_conf_plugin_path(*paths):
sys.path += [project.inpath(p) for p in paths]
# Read in BUILD.conf and eval it for its side effects
_compile_and_exec(
path = project.inpath('BUILD.conf'),
kind = 'BUILD.conf file',
globals = {
# Block access to builtins. TODO: this might be too aggressive.
'__builtins__': {},
'seed': _build_conf_seed,
'install': _build_conf_install,
'environment': _build_conf_environment,
'define_key': _build_conf_define_key,
'plugin_path': _build_conf_plugin_path,
'ROOT': project.root,
'BUILD': project.build_dir,
},
)
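    # For orientation, a hypothetical BUILD.conf built from the functions exposed
    # above might look like the following (module, key, and package names are
    # made up for the example):
    #
    #   plugin_path('site_tools')
    #   install('cobble.target.c')
    #   define_key('opt_level', type = 'string')
    #   environment('default', contents = {'opt_level': '2'})
    #   environment('debug', base = 'default', contents = {'opt_level': '0'})
    #   seed('//app')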
# Process the package worklist. We're also extending the worklist in this
# algorithm, treating it like a stack (rather than a queue). This means the
# order of package processing is a little hard to predict. Because packages
# can define keys that have effects on other packages, this should probably
# get fixed (TODO).
while packages_to_visit:
ident = packages_to_visit.pop()
# Check if we've done this one.
relpath = _get_relpath(ident)
if relpath in project.packages:
continue
package = cobble.project.Package(project, relpath)
# Prepare the global environment for eval-ing the package. We provide
# a few variables by default:
pkg_env = {
# Block access to builtins. TODO: this might be too aggressive.
'__builtins__': {},
# Easy access to the path from the build dir to the package
'PKG': package.inpath(),
# Easy access to the path from the build dir to the project
'ROOT': project.root,
# Location of the build dir
'BUILD': project.build_dir,
'define_key': _build_conf_define_key,
}
# The rest of the variables are provided by items registered in
# plugins.
for mod in installed_modules.values():
if hasattr(mod, 'package_verbs'):
for name, fn in mod.package_verbs.items():
pkg_env[name] = _wrap_verb(package, fn, packages_to_visit)
if hasattr(mod, 'global_functions'):
for name, fn in mod.global_functions.items():
pkg_env[name] = fn
# And now, the evaluation!
_compile_and_exec(
path = package.inpath('BUILD'),
kind = 'BUILD file',
globals = pkg_env,
)
# Register all plugins' ninja rules. We could probably do this earlier, but
# hey.
for mod in installed_modules.values():
if hasattr(mod, 'ninja_rules'):
project.add_ninja_rules(mod.ninja_rules)
return project
def _wrap_verb(package, verb, packages_to_visit):
"""Instruments a package-verb function 'verb' from 'package' with code to
register the resulting target and scan deps to discover new packages.
'packages_to_visit' is a reference to a (mutable) list containing relpaths
we should visit. The function returned from '_wrap_verb' will append
relpaths of deps to that list. Some of them will be redundant; the worklist
processing code is expected to deal with this.
"""
def verb_wrapper(*pos, **kw):
nonlocal packages_to_visit
tgt = verb(package, *pos, **kw)
if tgt:
package.add_target(tgt)
# TODO this is where we'd return for extend_when
packages_to_visit += tgt.deps
return verb_wrapper
def _get_relpath(ident):
"""Extracts the relative path from the project root to the directory
containing the BUILD file defining a target named by an ident."""
assert ident.startswith('//'), "bogus ident got in: %r" % ident
return ident[2:].split(':')[0]
class BuildError(Exception):
"""Exception raised if processing of a BUILD/BUILD.conf file fails."""
def __init__(self, exc_info, kind, path, limit):
"""Creates a BuildError.
'exc_info' is the information on the exception as received from
'sys.exc_info()`.
'kind' is a human-readable str description of what we were processing.
'path' is a path to the file being processed.
'limit' is the depth of the traceback that is relevant to the user
error, i.e. does not include Cobble stack frames.
"""
self.exc_info = exc_info
self.kind = kind
self.path = path
self.limit = limit
def _compile_and_exec(path, kind, globals):
"""Implementation factor of BUILD and BUILD.conf evaluation. Loads the file
at 'path' and execs it in an environment of 'globals', reporting the
failure as 'kind' if it occurs."""
with open(path, 'r') as f:
try:
mod = compile(
source = f.read(),
filename = path,
mode = 'exec',
dont_inherit = 1,
)
exec(mod, globals)
except:
exc_info = sys.exc_info()
limit = len(traceback.extract_tb(exc_info[2])) - 1
raise BuildError(
exc_info = exc_info,
limit = limit,
kind = kind,
path = path) from exc_info[1]
|
import json, shutil
def run(json_path, train_json_path, test_json_path):
with open("/home/jitesh/jg/openimages-personcar-localize_and_classify/trainval/annotations/bbox-annotations_val.json", 'rt', encoding='UTF-8') as annotations:
data = json.load(annotations)
count = 0
for image in data["images"]:
file_name = image["file_name"]
print(image["id"], file_name)
shutil.copyfile(f'trainval/images/{file_name}', f'trainval/images_val/{file_name}')
count +=1
print(count)
if __name__ == "__main__":
json_path = "/home/jitesh/jg/eagleview/trainval/annotations/bbox-annotations.json"
json_path_split = json_path.split(".")
train_json_path = ".".join(json_path_split[:-1]) + "_train.json"
val_json_path = ".".join(json_path_split[:-1]) + "_val.json"
print(train_json_path)
run(json_path, train_json_path, val_json_path)
import os
l = os.listdir("/home/jitesh/jg/openimages-personcar-localize_and_classify/trainval/images_val")
print(len(l))
|
import re
from livestreamer.compat import urlparse
from livestreamer.plugin import Plugin
from livestreamer.plugin.api import http, validate
from livestreamer.stream import RTMPStream
AJAX_HEADERS = {
"Referer": "http://www.filmon.com",
"X-Requested-With": "XMLHttpRequest",
"User-Agent": "Mozilla/5.0"
}
CHINFO_URL = "http://www.filmon.com/ajax/getChannelInfo"
VODINFO_URL = "http://www.filmon.com/vod/info/{0}"
QUALITY_WEIGHTS = {
"high": 720,
"low": 480
}
SWF_URL = "http://www.filmon.com/tv/modules/FilmOnTV/files/flashapp/filmon/FilmonPlayer.swf"
_url_re = re.compile("http(s)?://(\w+\.)?filmon.com/(channel|tv|vod)/")
_channel_id_re = re.compile("/channels/(\d+)/extra_big_logo.png")
_vod_id_re = re.compile("movie_id=(\d+)")
_channel_schema = validate.Schema({
"streams": [{
"name": validate.text,
"quality": validate.text,
"url": validate.url(scheme="rtmp")
}]
})
_vod_schema = validate.Schema(
{
"data": {
"streams": {
validate.text: {
"name": validate.text,
"url": validate.url(scheme="rtmp")
}
}
}
},
validate.get("data")
)
class Filmon(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, key):
weight = QUALITY_WEIGHTS.get(key)
if weight:
return weight, "filmon"
return Plugin.stream_weight(key)
def _get_rtmp_app(self, rtmp):
parsed = urlparse(rtmp)
if parsed.query:
app = "{0}?{1}".format(parsed.path[1:], parsed.query)
else:
app = parsed.path[1:]
return app
def _get_live_streams(self, channel_id):
params = dict(channel_id=channel_id)
res = http.post(CHINFO_URL, data=params, headers=AJAX_HEADERS)
channel = http.json(res, schema=_channel_schema)
streams = {}
for stream in channel["streams"]:
name = stream["quality"]
rtmp = stream["url"]
playpath = stream["name"]
app = self._get_rtmp_app(rtmp)
stream = RTMPStream(self.session, {
"rtmp": rtmp,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"playpath": playpath,
"app": app,
"live": True
})
streams[name] = stream
return streams
def _get_vod_streams(self, movie_id):
res = http.get(VODINFO_URL.format(movie_id), headers=AJAX_HEADERS)
vod = http.json(res, schema=_vod_schema)
streams = {}
for name, stream_info in vod["streams"].items():
rtmp = stream_info["url"]
app = self._get_rtmp_app(rtmp)
playpath = stream_info["name"]
if playpath.endswith(".mp4"):
playpath = "mp4:" + playpath
stream = RTMPStream(self.session, {
"rtmp": rtmp,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"playpath": playpath,
"app": app,
})
streams[name] = stream
return streams
def _get_streams(self):
res = http.get(self.url)
match = _vod_id_re.search(res.text)
if match:
return self._get_vod_streams(match.group(1))
match = _channel_id_re.search(res.text)
if match:
return self._get_live_streams(match.group(1))
__plugin__ = Filmon
|
""" OpenMDAO class definition for ParamComp"""
import collections
from six import string_types
from openmdao.core.component import Component
class ParamComp(Component):
"""A Component that provides an output to connect to a parameter."""
def __init__(self, name, val=None, **kwargs):
super(ParamComp, self).__init__()
if isinstance(name, string_types):
if val is None:
raise ValueError('ParamComp init: a value must be provided as the second arg.')
self.add_output(name, val, **kwargs)
elif isinstance(name, collections.Iterable):
for tup in name:
badtup = None
if isinstance(tup, tuple):
if len(tup) == 3:
n, v, kw = tup
elif len(tup) == 2:
n, v = tup
kw = {}
else:
badtup = tup
else:
badtup = tup
if badtup:
if isinstance(badtup, string_types):
badtup = name
raise ValueError("ParamComp init: arg %s must be a tuple of the form "
"(name, value) or (name, value, keyword_dict)." %
str(badtup))
self.add_output(n, v, **kw)
else:
raise ValueError("first argument to ParamComp init must be either of type "
"`str` or an iterable of tuples of the form (name, value) or "
"(name, value, keyword_dict).")
def apply_linear(self, mode, ls_inputs=None, vois=(None, )):
"""For `ParamComp`, just pass on the incoming values.
Args
----
mode : string
Derivative mode, can be 'fwd' or 'rev'.
ls_inputs : dict
We can only solve derivatives for the inputs the instigating
system has access to. (Not used here.)
vois: list of strings
List of all quantities of interest to key into the mats.
"""
if mode == 'fwd':
sol_vec, rhs_vec = self.dumat, self.drmat
else:
sol_vec, rhs_vec = self.drmat, self.dumat
for voi in vois:
rhs_vec[voi].vec[:] += sol_vec[voi].vec[:]
def solve_nonlinear(self, params, unknowns, resids):
""" Performs no operation. """
pass
|
from werkzeug.exceptions import HTTPException
from flask_script import Manager, Server
from app import create_app
from app.common.libs.error import APIException
from app.common.libs.error_code import ServerError
from app.common.libs.response_code import Const
app = create_app()
manager = Manager(app)
manager.add_command("runserver",
Server(host='0.0.0.0', port=app.config['SERVER_PORT'], use_debugger=True, use_reloader=True))
@app.errorhandler(Exception)
def framework_error(e):
if isinstance(e, APIException):
return e
if isinstance(e, HTTPException):
code = e.code
msg = e.description
error_code = Const.HTTP_ERROR
return APIException(msg, code, error_code)
else:
        # debug mode
# log
if not app.config['DEBUG']:
return ServerError()
else:
raise e
def main():
manager.run( )
if __name__ == '__main__':
# app.run(debug=True)
try:
import sys
sys.exit(main() )
except Exception as e:
import traceback
traceback.print_exc()
|
""" ./examples/label/suite/custom_suite.rst """
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.label import has_suite, has_parent_suite, has_sub_suite
def test_custom_suite(executed_docstring_path):
assert_that(executed_docstring_path.allure_report,
has_test_case("test_custom_suite",
has_suite("suite name"),
has_parent_suite("parent suite name"),
has_sub_suite("sub suite name")
)
)
|
import os
import shutil
root_folder = "audio"
all_audio_files = os.listdir(root_folder)
def subject_wise(all_files):
for f in all_files:
sub_id = f.split("_")[0]
sub_folder = f"vadtestsubject_{sub_id}"
audio_fpath = os.path.join(root_folder, f)
if not os.path.exists(sub_folder): os.mkdir(sub_folder)
shutil.copy(audio_fpath, sub_folder)
# subject_wise(all_audio_files)
def rename_files(rn_list_file):
with open(rn_list_file, "r") as rf:
for fn in rf.readlines():
fn = fn.replace("\n", "")
n_fn = fn.replace("_Exhale_Wheeze_Inhale_", "_Breath_")
print(fn, "to", n_fn)
shutil.move(fn, n_fn)
rename_files("to_rename.txt")
|
#!/usr/bin/env python3
"""Socket Perf Test.
"""
import argparse
import sys
from pathlib import Path
sys.path.append(str(Path(".").parent.absolute().joinpath("tacview_client")))
from tacview_client import serve_file # type: ignore
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--filename",
required=True,
type=Path,
help="Path to a taview acmi file that should be served locally.",
)
parser.add_argument(
"--port",
required=False,
type=int,
default=5555,
help="Port on which the data should be served.",
)
args = parser.parse_args()
serve_file.main(filename=args.filename, port=args.port)
|
from .kd import KDDistiller
from .hint import *
from .attention import AttentionDistiller
from .nst import NSTDistiller
from .sp import SPDistiller
from .rkd import RKDDistiller
from .pkt import PKTDistiller
from .svd import SVDDistiller
from .cc import *
from .vid import *
from . import data_free
|
import sys
import itertools
from math import sqrt
from operator import add
from os.path import join, isfile, dirname
import numpy as np
from pyspark import SparkConf, SparkContext
from pyspark.mllib.recommendation import ALS
from pyspark.mllib.clustering import KMeans, KMeansModel
def parseRating(line):
"""
Parses a rating record in MovieLens format userId::movieId::rating::timestamp .
"""
fields = line.strip().split("::")
return long(fields[3]) % 10, (int(fields[0]), int(fields[1]), float(fields[2]))
def parseMovie(line):
"""
Parses a movie record in MovieLens format movieId::movieTitle .
"""
fields = line.strip().split("::")
return int(fields[0]), fields[1]
def loadRatings(ratingsFile):
"""
Load ratings from file.
"""
if not isfile(ratingsFile):
print ("File %s does not exist." % ratingsFile)
sys.exit(1)
f = open(ratingsFile, 'r')
ratings = filter(lambda r: r[2] > 0, [parseRating(line)[1] for line in f])
f.close()
if not ratings:
print ("No ratings provided.")
sys.exit(1)
else:
return ratings
# set up environment
conf = SparkConf() \
.setAppName("MovieLensALS") \
.set("spark.executor.memory", "2g")
sc = SparkContext(conf=conf)
f= open("/vagrant/files/HW04/kmeans_output.txt","w")
# load personal ratings
myRatings = loadRatings(sys.argv[2])
myRatingsRDD = sc.parallelize(myRatings, 1)
# load ratings and movie titles
movieLensHomeDir = sys.argv[1]
# ratings is an RDD of (last digit of timestamp, (userId, movieId, rating))
ratings = sc.textFile(join(movieLensHomeDir, "ratings.dat")).map(parseRating)
movies = sc.textFile(join(movieLensHomeDir, "movies.dat")).cache()
numRatings = ratings.count()
numUsers = ratings.values().map(lambda r: r[0]).distinct().count()
numMovies = ratings.values().map(lambda r: r[1]).distinct().count()
f.write( "Got %d ratings from %d users on %d movies.\n" % (numRatings, numUsers, numMovies) )
numPartitions = 4
training = ratings.filter(lambda x: x[0] < 8) \
.values() \
.union(myRatingsRDD) \
.repartition(numPartitions) \
.cache()
test = ratings.filter(lambda x: x[0] >= 8).values().cache()
numTraining = training.count()
numTest = test.count()
f.write( "Training: %d, test: %d\n\n" % (numTraining, numTest))
# get movie types
def parseMovieType(line):
typePart= (line.strip().split("::"))[2].strip().split("|")
for i in typePart:
if i not in types:
types.append(i)
return types
types=[]
collectTypes= movies.map(parseMovieType).collect()
movieTypes = collectTypes[-1]
# add types column
def addMovieTypes(line):
fields=line.strip().split("::")
typePart =fields[2].strip().split("|")
for i in range(typeNum):
if movieTypes[i] in typePart:
fields.append(1)
else:
fields.append(0)
fields[0]= int(fields[0])
fields.pop(2)
return fields
typeNum=len(movieTypes)
movieComplete= np.array (movies.map(addMovieTypes).collect())
movieTypeInfo= movieComplete[:,2:-1].astype(np.int)
movieTypeInfo=sc.parallelize(movieTypeInfo)
# get movie clusters Model
def clusterMovies(movieTypeInfo):
i = 2
def getWcv(point,kModel):
center = kModel.centers[kModel.predict(point)]
return sum([x**2 for x in (point - center)])
def getBcv(point, kModel ):
center = kModel.centers[kModel.predict(point)]
return sum([x**2 for x in (center - xAve)])
pointsNum = len(movieComplete)
xAve=( sum(movieTypeInfo.collect()) .astype(np.float))/pointsNum
bestCH= 0
bestKModel= None
K=i
records=[]
while i<=20:
movieCluster = KMeans.train(movieTypeInfo, i, maxIterations=100, initializationMode="random")
        # Because the within-cluster variation (WCV) always decreases as K increases,
        # WCV alone is not a good criterion for judging the quality of a K-Means clustering.
        # Therefore, the CH index is used to find the best K.
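        # Calinski-Harabasz index: CH(k) = (BCV / (k - 1)) / (WCV / (n - k)),
        # i.e. between-cluster variance per extra cluster divided by
        # within-cluster variance per remaining degree of freedom; higher is better.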
wcv= movieTypeInfo.map(lambda point: getWcv(point,movieCluster)).reduce(lambda x, y: x + y)
bcv= movieTypeInfo.map(lambda point: getBcv(point,movieCluster)).reduce(lambda x, y: x + y)
chIndex = ( bcv/(i-1) )/ (wcv/(pointsNum-i))
# Find the highest CH Index
if bestCH< chIndex:
bestKModel = movieCluster
bestCH=chIndex
K=i
records.append((chIndex, wcv,bcv))
f.write("When K = {}, the value of CH index is {}.\n".format(i, chIndex))
i += 1
    return bestKModel, K, bestCH, records  # best model, its K, its CH index, and per-K (CH, WCV, BCV) records
movieCluster = clusterMovies(movieTypeInfo)
kMeansModel= movieCluster[0]
f.write("We get the best K-Means Model when K = {}, the value of CH index is {}.\n".format(movieCluster[1], movieCluster[2] ))
# combine movie with Cluster
tempClusters= movieTypeInfo.map(lambda movie: kMeansModel.predict(movie) ).collect()
length = len(tempClusters)
movieWithCluster=[]
for i in range(length):
temp=[int(movieComplete[i,0]), movieComplete[i,1], tempClusters[i]]
movieWithCluster.append(temp)
# A Matrix of [movieID, movieTitle, movieCluster]
movieWithCluster= np.array(movieWithCluster)
# get cluster num to each rating row
def getClusterRating(line):
movieID = str(line[1])
movieScore = line[2]
tempIndex = np.argwhere(movieWithCluster[:,0]==movieID)[0][0]
clusterInd = int( movieWithCluster[tempIndex, 2])
return clusterInd
# a vector with cluster nums
trainingClusters= training.map(lambda movie: getClusterRating(movie) ).collect()
# get each cluster score for each user
def getUserClusterRating(line):
userID = int( line[0])
movieID = str(line[1])
movieScore = line[2]
tempIndex = np.argwhere(movieWithCluster[:,0]==movieID)[0][0]
clusterInd = int( movieWithCluster[tempIndex, 2])
return (userID*100+clusterInd, movieScore)
userClusterRatings = training.map(getUserClusterRating).groupByKey().mapValues(lambda x: sum(x)/len(x)).collect()
tempList=[]
for cluRating in userClusterRatings:
userID = cluRating[0]//100
clusterInd = cluRating[0]%100
score = round(cluRating[1],4)
tempList.append([userID, clusterInd, score])
# A matrix [userID, clusterID, clusterAveScore ]
userClusterRatings= np.array(tempList).astype(np.float)
# return the selected user's cluster score
def getClusterAveRating(userID):
userIndex = np.argwhere(userClusterRatings[:,0]==userID)
clusterAveRating=[]
for i in userIndex:
clusterAveRating.append(userClusterRatings[i[0],1:])
return clusterAveRating
# here got the info for user 0
clusterAveRating = np.array(getClusterAveRating(0))
# compute the RMSE for K-Means model
def computeKmeansRmse(line):
userID = int( line[0])
movieID = str(line[1])
movieScore = line[2]
clusterAveRating = getClusterAveRating(userID)
predictScore = getScore(movieID)
error =(predictScore-movieScore)**2
return error
kmeansRmse = sqrt ( test.map(computeKmeansRmse).reduce(add) / numTest)
f.write("The best K-Means model was trained with K = {}, and its RMSE on the test set is {}.\n".format(movieCluster[1], round(kmeansRmse,4)) )
# recommandation
moviesDict = dict(movies.map(parseMovie).collect())
myRatedMovieIds = set([x[1] for x in myRatings])
candidates = sc.parallelize([m for m in moviesDict if m not in myRatedMovieIds])
def getScore(movieID):
movieIndex= np.argwhere(movieWithCluster[:,0]==str(movieID))[0][0]
clusterNum = movieWithCluster[movieIndex, 2]
clusterNum = int(clusterNum)
if clusterNum not in clusterAveRating[:,0]:
score =0
else:
clusterIndex = np.argwhere(clusterAveRating[:,0]== clusterNum)[0][0]
score = clusterAveRating[clusterIndex,1]
return score
predictScores = candidates.map(getScore).collect()
predictions= np.c_[candidates.collect(), predictScores]
recommendations = sorted(predictions, key=lambda x: x[1], reverse=True)[:50]
for i in xrange(len(recommendations)):
f.write ( ("%2d: %s\n" % (i + 1, moviesDict[recommendations[i][0]])).encode('ascii', 'ignore') )
f.close()
# clean up
sc.stop()
|
"""Create managedojects_schemas table
Revision ID: e83c5549560c
Revises: 0b7905a9ba5b
Create Date: 2018-02-03 23:20:13.704000
"""
from alembic import op
import sqlalchemy as sa
import datetime
# revision identifiers, used by Alembic.
revision = 'e83c5549560c'
down_revision = '0b7905a9ba5b'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'managedobjects_schemas',
sa.Column('pk', sa.Integer, primary_key=True),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('notes', sa.Text),
sa.Column('tech_pk', sa.Integer),
sa.Column('vendor_pk', sa.Integer),
sa.Column('modified_by', sa.Integer),
sa.Column('added_by', sa.Integer),
sa.Column('date_added', sa.TIMESTAMP, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
sa.Column('date_modified', sa.TIMESTAMP, default=datetime.datetime.utcnow)
)
op.execute('ALTER SEQUENCE managedobjects_schemas_pk_seq RENAME TO seq_managedobjects_schemas_pk')
managedobjects_schemas = sa.sql.table(
'managedobjects_schemas',
sa.Column('pk', sa.Integer, sa.Sequence('seq_managedobjects_schemas_pk', ), primary_key=True, nullable=False),
sa.Column('name', sa.String(50), nullable=False),
sa.Column('notes', sa.Text),
sa.Column('tech_pk', sa.Integer),
sa.Column('vendor_pk', sa.Integer),
sa.Column('modified_by', sa.Integer),
sa.Column('added_by', sa.Integer),
sa.Column('date_added', sa.TIMESTAMP, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
sa.Column('date_modified', sa.TIMESTAMP, default=datetime.datetime.utcnow)
)
op.bulk_insert(managedobjects_schemas, [
{'name': 'ericsson_cm_2g', 'parent_pk': 0, 'vendor_pk': 1, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'ericsson_cm_3g', 'parent_pk': 0, 'vendor_pk': 1, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'ericsson_cm_4g', 'parent_pk': 0, 'vendor_pk': 1, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
{'name': 'huawei_cm_2g', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'huawei_cm_3g', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'huawei_cm_4g', 'parent_pk': 0, 'vendor_pk': 2, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
{'name': 'zte_cm_2g', 'parent_pk': 0, 'vendor_pk': 3, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'zte_cm_3g', 'parent_pk': 0, 'vendor_pk': 3, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'zte_cm_4g', 'parent_pk': 0, 'vendor_pk': 3, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
{'name': 'nokia_cm_2g', 'parent_pk': 0, 'vendor_pk': 4, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'nokia_cm_3g', 'parent_pk': 0, 'vendor_pk': 4, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'nokia_cm_4g', 'parent_pk': 0, 'vendor_pk': 4, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
{'name': 'samsung_cm_2g', 'parent_pk': 0, 'vendor_pk': 5, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'samsung_cm_3g', 'parent_pk': 0, 'vendor_pk': 5, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'samsung_cm_4g', 'parent_pk': 0, 'vendor_pk': 5, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
{'name': 'alcatel_cm_2g', 'parent_pk': 0, 'vendor_pk': 6, 'tech_pk': 1, 'modified_by': 0, 'added_by': 0},
{'name': 'alcatel_cm_3g', 'parent_pk': 0, 'vendor_pk': 6, 'tech_pk': 2, 'modified_by': 0, 'added_by': 0},
{'name': 'alcatel_cm_4g', 'parent_pk': 0, 'vendor_pk': 6, 'tech_pk': 3, 'modified_by': 0, 'added_by': 0},
])
# Create cm loads table
op.create_table(
'cm_loads',
sa.Column('pk', sa.Integer, primary_key=True),
sa.Column('load_status', sa.CHAR(length=250)), # RUNNING,FAILED,SUCCEEDED
sa.Column('is_current_load', sa.Boolean, default=False), #
sa.Column('load_start', sa.TIMESTAMP, default=datetime.datetime.utcnow, onupdate=datetime.datetime.utcnow),
sa.Column('load_end', sa.TIMESTAMP, default=datetime.datetime.utcnow)
)
op.execute('ALTER SEQUENCE cm_loads_pk_seq RENAME TO seq_cm_loads_pk')
def downgrade():
op.drop_table('managedobjects_schemas')
op.drop_table('cm_loads')
|
import importlib
import numpy as np
from supersuit import dtype_v0
# TODO: We will eventually want to provide visualization support for the MPE, but not needed yet
def petting_zoo_mpe(env_config, spec_only=False):
env_config = env_config.copy()
env_name = env_config.pop("scenario")
# Load appropriate PettingZoo class
env_module = importlib.import_module("pettingzoo.mpe." + env_name)
# Build PettingZoo environment
env = env_module.parallel_env(**env_config) # NOTE: The parallel env API is closer to the gym api than PettingZoo's AEC API
return dtype_v0(env, dtype=np.float32) # NOTE: Make sure the obs tensors are the right type
class PettingZooMPE:
"""
DEPRECATED: We can just return raw environments for now, as the interfaces are compatible
"""
def __init__(self, env_config, spec_only=False):
env_config = env_config.copy()
env_name = env_config.pop("scenario")
# Load appropriate PettingZoo class
env_module = importlib.import_module("pettingzoo.mpe." + env_name)
# Build PettingZoo environment
        env = env_module.parallel_env(**env_config) # NOTE: The parallel env API is closer to the gym api than PettingZoo's AEC API
self.env = dtype_v0(env, dtype=np.float32) # NOTE: Not sure if this typecasting is still necessary
@property
def observation_space(self):
return self.env.observation_spaces # TODO: Double-check that these are actually dictionaries
@property
def action_space(self): # NOTE: Do we actually use these?
return self.env.action_spaces
def reset(self):
return self.env.reset()
def step(self, actions):
return self.env.step(actions)
def render(self, mode="human"):
        return self.env.render(mode)
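# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative; the scenario name and max_cycles kwarg
# are assumptions about the installed PettingZoo version, not part of this
# module):
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    env = petting_zoo_mpe({"scenario": "simple_spread_v2", "max_cycles": 25})
    observations = env.reset()
    print(type(observations))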
|
from flask import session, abort, flash, url_for, make_response, request, \
render_template, redirect, g, jsonify
from sqlalchemy.exc import IntegrityError
from flask.ext.login import login_user, logout_user, login_required, \
current_user
import uuid
import json
from models import Hunt, Participant, Item, Admin, Setting
from forms import HuntForm, AdminForm, AdminLoginForm, ParticipantForm, \
ItemForm, SettingForm
from hunt import app, login_manager, db, bcrypt
from utils import get_admin, get_settings, get_item, \
get_participant, item_path, validate_participant, get_intended_url, \
get_items, initialize_hunt, create_new_participant, \
valid_login, finished_setting, participant_registered,\
num_items_remaining, hunt_requirements_completed, found_ids_list
from xapi import WaxCommunicator
import logging
logger = logging.getLogger(__name__)
login_manager.login_view = "login"
def get_db():
return db
@app.errorhandler(500)
def internal_error(error):
logger.error("Problem!", error)
return "500 error"
@app.before_request
def before_request():
g.db = get_db()
@login_manager.user_loader
def load_user(userid):
return Admin.query.get(userid)
#################### ADMIN ROUTES ####################
@app.route('/login', methods=['GET', 'POST'])
def login():
errors = None
form = AdminLoginForm(request.form)
if request.method == 'POST' and form.validate():
admin = get_admin(g.db, form.email.data)
if valid_login(admin, form.email.data, form.password.data):
login_user(admin)
return redirect(url_for('hunts'))
flash('Invalid email and password combination')
else:
errors = form.errors
return make_response(render_template(
'homepage.html', form=form, display_login_link=True))
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('login'))
@app.route('/')
def root():
return login()
# create or list admins who can create hunts
@app.route('/admins', methods=['POST'])
def admins():
if request.method == 'POST':
form = AdminForm(request.form)
if form.validate():
admin = Admin()
form.populate_obj(admin)
admin.pw_hash = bcrypt.generate_password_hash(form.password.data)
g.db.session.add(admin)
g.db.session.commit()
login_user(get_admin(g.db, admin.email))
flash('Welcome to xAPI Scavenger Hunt', 'success')
logger.info(
'Admin registration form was submitted successfully for %s',
admin.email)
return make_response(render_template(
'settings.html', form=SettingForm()))
logger.info(
'Admin registration form was submitted with'
' invalid information. Errors: %s', form.errors)
flash(
'There was an error creating your admin profile.'
' Please try again.', 'warning')
return render_template(
'homepage.html', form=form, display_login_link=True)
return login()
# settings page primarily for connecting to Wax LRS
@app.route('/settings', methods=['GET', 'POST'])
@login_required
def settings():
errors = None
admin_settings = get_settings(
g.db, admin_id=current_user.admin_id) or Setting()
form = SettingForm(request.form)
if request.method == 'POST':
if form.validate():
already_completed = finished_setting(admin_settings)
form.populate_obj(admin_settings)
admin_settings.admin_id = current_user.admin_id
g.db.session.add(admin_settings)
g.db.session.commit()
url = 'hunts' if already_completed else 'new_hunt'
flash('Settings have been updated successfully', 'success')
return make_response(redirect(url_for(url)))
else:
logger.info(
'%s attempted to submit settings information'
' resulting in errors: %s', current_user.email, form.errors)
return make_response(render_template(
'settings.html', login=admin_settings.login, form=form,
password=admin_settings.password, wax_site=admin_settings.wax_site
))
# list hunts
@app.route('/hunts', methods=['GET'])
@login_required
def hunts():
hunts = Hunt.list_for_admin_id(g.db, current_user.admin_id)
return render_template('hunts.html', hunts=hunts)
# form to create new hunt
@app.route('/new_hunt', methods=['GET', 'POST'])
@login_required
def new_hunt():
setting = get_settings(g.db, admin_id=current_user.admin_id)
if not finished_setting(setting):
flash('You must complete your settings information before'
' creating a hunt', 'warning')
return redirect(url_for('settings'))
hunt = Hunt()
form = HuntForm(request.form)
if request.method == 'POST':
if form.validate():
hunt = initialize_hunt(form, hunt, current_user.admin_id, request)
try:
g.db.session.add(hunt)
g.db.session.commit()
except IntegrityError as e:
logger.warning(
'Exception found while creating hunt with an existing '
' name: %s\n Form data: %s ', e, form.data)
return jsonify(
{'hunt name': [{'name': ['hunt name already taken']}]}), 400
else:
flash('New scavenger hunt added', 'success')
logger.info('hunt, %s, created for admin with id, %s',
hunt.name, hunt.admin_id)
saved_hunt = g.db.session.query(Hunt).order_by(
Hunt.hunt_id.desc()).first()
return jsonify({'hunt_id': saved_hunt.hunt_id})
else:
logger.warning('Error creating hunt.\nForm errors: %s\nForm data: '
'%s ', form.errors, form.data)
return jsonify(form.errors), 400
domain = current_user.email.split('@')[-1]
return make_response(
render_template('new_hunt.html', form=form, domain=domain))
# page to view hunt
@app.route('/hunts/<int:hunt_id>', methods=['GET'])
@login_required
def hunt(hunt_id):
hunt = Hunt.find_by_id(g.db, hunt_id)
if hunt:
registered = []
unregistered = []
for participant in hunt.participants:
if participant.registered:
registered.append(participant)
else:
unregistered.append(participant)
return render_template(
'show_hunt.html', hunt=hunt, registered_participants=registered,
unregistered_participants=unregistered)
logger.info('Someone attempted to visit a hunt with id, %s, but it '
'does not exist', hunt_id)
abort(404)
# check googlecharts infographics api in April 2015 when they may or may
# not change the qrcode api
def get_qr_codes_response(hunt_id, item_id, condition):
hunt = Hunt.find_by_id(g.db, hunt_id)
if hunt:
item_paths = [
{'name': item.name, 'path': item_path(hunt_id, item.item_id)}
for item in hunt.items if condition(item, item_id)
]
return make_response(render_template(
'qrcodes.html', item_paths=item_paths))
abort(404)
@app.route('/hunts/<int:hunt_id>/qrcodes')
@login_required
def show_item_codes(hunt_id):
return get_qr_codes_response(hunt_id, '', lambda x, y: True)
@app.route('/hunts/<int:hunt_id>/items/<int:item_id>/qrcode', methods=['GET'])
@login_required
def show_item_code(hunt_id, item_id):
return get_qr_codes_response(
hunt_id, item_id, lambda item, item_id: item.item_id == item_id)
@app.route('/hunts/<int:hunt_id>/delete')
@login_required
def delete_hunt(hunt_id):
hunt = Hunt.find_by_id(g.db, hunt_id)
if hunt and hunt.admin_id == current_user.admin_id:
logger.info(
'preparing to delete hunt with hunt_id, {}'.format(hunt_id))
g.db.session.delete(hunt)
g.db.session.commit()
flash('Successfully deleted hunt: {}'.format(hunt.name), 'success')
hunts = Hunt.list_for_admin_id(g.db, current_user.admin_id)
return make_response(render_template('hunts.html', hunts=hunts))
abort(404)
################ SCAVENGER HUNT PARTICIPANT ROUTES ####################
# maybe just get rid of this
# form for scavenger hunt participant to enter email and name
@app.route('/get_started/hunts/<int:hunt_id>', methods=['GET'])
def get_started(hunt_id):
# todo: track duration
hunt = Hunt.find_by_id(g.db, hunt_id)
logger.info("Rendering getting started form for hunt, '%s'.", hunt.name)
return render_template('get_started.html', form=ParticipantForm(),
hunt_id=hunt_id, hunt=hunt)
# validate and register participant before redirecting back to hunt
@app.route('/register_participant', methods=['POST'])
def register_participant():
hunt_id = request.args['hunt_id']
hunt = Hunt.find_by_id(g.db, hunt_id)
if hunt:
form = ParticipantForm(request.form)
if form.validate():
email = form.email.data
logger.info(
'Participant registration form validated for hunt, "%s", and'
' email, %s.\nPreparing to validate participant against hunt'
' participation rules.', hunt.name, email)
participant_valid, err_msg = validate_participant(
g.db, email, hunt_id, hunt.participant_rule)
if participant_valid:
logger.info('The registering participant, %s, has been'
' validated against the hunt participation rules.'
' Preparing to find email in participant database'
' table.', email)
if not get_participant(g.db, email, hunt_id):
logger.info(
'Preparing to save new participant with email, %s,'
' to hunt, %s', email, hunt.name)
create_new_participant(g.db, form, hunt_id)
scavenger_info = {'email': email, 'name': form.name.data}
session.update(scavenger_info)
admin_settings = get_settings(g.db, hunt_id=hunt_id)
logger.info(
"Retrieved settings associated with hunt with id, %s: %s",
hunt_id, admin_settings)
try:
lrs = WaxCommunicator(
admin_settings, request.host_url, hunt, None,
scavenger_info=scavenger_info)
except Exception as e:
logger.exception(
"Error instantiating WaxCommunicator while registering"
" participant: %s", e)
raise e
try:
lrs.send_began_hunt_statement()
except Exception as e:
logger.exception(
"Error sending began hunt statement: %s", e)
raise e
logger.info(
"name and email set to %s, and %s\n"
"preparing requested item information.",
session['name'], email)
redirect_url = get_intended_url(session, hunt_id)
return make_response(redirect(redirect_url))
else:
logger.info('participant attempted to register for'
' hunt with invalid form information.\n'
'Error message: %s\n. Form data: %s',
err_msg, request.form)
return err_msg
else:
# i don't think this can happen ever in the app
logger.warning('A user attempted to register for hunt with id, %s,'
' but the hunt could not be found. Form data: %s',
hunt_id, request.form)
abort(400)
# list of items for scavengers to scavenge
@app.route('/hunts/<int:hunt_id>/items', methods=['GET'])
def index_items(hunt_id):
hunt = Hunt.find_by_id(g.db, hunt_id)
if hunt:
email = session.get('email')
if email:
admin_settings = get_settings(g.db, hunt_id=hunt_id)
lrs = WaxCommunicator(
admin_settings, request.host_url, hunt, None,
{'email': email, 'name': session.get('name')})
state = lrs.get_state()
logger.info(
'preparing to render items from hunt_id, %s, for user, %s',
hunt_id, email)
return make_response(render_template(
'items.html', state=state, hunt=hunt,
num_remaining=num_items_remaining(state, hunt.items)))
session['intended_url'] = '/hunts/{}/items'.format(hunt_id)
return make_response(
render_template('welcome.html', hunt=hunt,
welcome=hunt.welcome_message,
action_url="/get_started/hunts/{}".format(
hunt_id)))
logger.info('Someone attempted to visit the items list for hunt with id, '
'%s, but this hunt does not exist', hunt_id)
abort(404)
# information about one item for scavenger to read
@app.route('/hunts/<int:hunt_id>/items/<int:item_id>', methods=['GET'])
def find_item(hunt_id, item_id):
logger.info(
'Participant is visiting route: /hunts/%s/items/%s', hunt_id, item_id)
admin_settings = get_settings(g.db, hunt_id=hunt_id)
# admin_settings found through hunt_id means hunt exists
logger.info("Settings retrieved for hunt with id, %s", hunt_id)
if finished_setting(admin_settings):
logger.info(
"Settings are complete. Preparing to retrieve item with id, %s",
item_id)
item = get_item(g.db, item_id, hunt_id)
if item:
logger.info(
"Item found. Preparing to retrieve hunt with id, %s ", hunt_id)
hunt = Hunt.find_by_id(g.db, hunt_id)
if participant_registered(g.db, session.get('email'), hunt_id):
logger.info(
"Participant, %s, has registered. Preparing to"
" retrieve data from the state api.", session.get('email'))
lrs = WaxCommunicator(
admin_settings, request.host_url, hunt, item,
scavenger_info={
'email': session.get('email'),
'name': session.get('name')
})
state = lrs.get_state()
found_again = str(item_id) in state
lrs.send_found_item_statement(found_again=found_again)
updated_state = {str(item.item_id): True}
hunt_previously_completed = state.get('hunt_completed')
# TODO: Don't send the whole state object, as discussed
state.update(updated_state)
if hunt_requirements_completed(state, hunt):
logger.info(
'Requirements for hunt, "%s", have been completed.',
hunt.name)
if not hunt_previously_completed:
lrs.send_completed_hunt_statement()
updated_state['hunt_completed'] = True
state.update(updated_state)
lrs.update_state_api_doc(updated_state)
found_ids = found_ids_list(state)
return make_response(render_template(
'items.html', item=item, hunt=hunt,
username=session.get('name'), found_ids=found_ids,
hunt_now_completed=state.get('hunt_completed'),
num_found=len(found_ids), num_items=len(hunt.items),
num_remaining=num_items_remaining(state, hunt.items),
found_again=found_again,
previously_completed=hunt_previously_completed))
else:
logger.info(
"Page visitor is not yet registered for this hunt."
" Preparing to redirect to the getting started page.")
session['intended_url'] = '/hunts/{}/items/{}'.format(
hunt_id, item_id)
return make_response(render_template(
'welcome.html', hunt=hunt, welcome=hunt.welcome_message,
action_url="/get_started/hunts/{}".format(hunt_id)))
abort(404)
@app.route('/oops')
def oops():
session.clear()
return make_response(render_template('goodbye.html'))
@app.route('/failblog')
def failblog():
try:
return doesnotexistsowillerror
except Exception as e:
logger.exception("Error for the failblog: %s", e)
raise e
|
# Undergraduate Student: Arturo Burgos
# Professor: João Rodrigo Andrade
# Federal University of Uberlândia - UFU, Fluid Mechanics Laboratory - MFLab, Block 5P, Uberlândia, MG, Brazil
# Fourth exercise: Solving a Linear System --> ax = b
# Here I first set conditions
import numpy as np
from numpy import linalg as La
import matplotlib.pyplot as plt
import time
np.seterr(divide='ignore', invalid='ignore')
print('\n')
n = 9
K = np.sqrt(n)
k = int(K)
#################
# aX = b
#################
# Here I set the Matrix
a = np.zeros((n,n))
for i in range(n):
for j in range(n):
if i == j:
a[i,j] = -4
elif i == j-k or i ==j+k:
a[i,j] = 1
elif ((i+1) % k != 0 and i == j-1) or ((i+1) % k != 1 and i == j+1): # (i+1) because in Python we start from 0
a[i,j] = 1
print('The coefficient Matrix is:')
print(a)
print('\n')
b = np.zeros(n)
for i in range(k):
if i < k-1: # (k-1) because in Python we start from 0
b[i] = -50
else:
b[i] = -150
for i in range(k,n-k):
if (i+1)%k != 0: # (i+1) because in Python we start from 0
b[i] = 0
else:
b[i] = -100
for i in range(n-k,n):
if i < n-1: # (n-1) because in Python we start from 0
b[i] = -50
else:
b[i] = -150
print('The right-hand side vector is:')
print(b)
print('\n')
def linearsystem(coeff,resul,size):
# Initial x_k and x_k1 value
x_k = np.zeros(size)
x_k1 = np.ones(size)
# Here I set the tolerance
tolerance = 1e-9
# Here I set the iterations
ite = 0
# Here I set the error based on the infinity norm
erro = La.norm((x_k1 - x_k),np.inf)
#erro = (x_k1 - x_k)/x_k1
while (erro > tolerance): #
for i in range(0,size):
x_k1[i] = resul[i]
for j in range(0,size):
if j!=i:
x_k1[i] = x_k1[i] - coeff[i,j]*x_k[j]
x_k1[i] = x_k1[i]/ coeff[i,i]
#erro = (x_k1 - x_k)/x_k1
erro = La.norm((x_k1 - x_k),np.inf)
x_k = x_k1.copy()
#x_k[:] = x_k1[:] # -> the same as above
ite = ite + 1
print('The number of iterations is: ')
print(ite)
print('\n')
print('Note that the error is now a scalar (the infinity norm), not an array:')
print(erro)
print('\n')
return x_k1
t_initial = time.time()
res = linearsystem(a,b,n)
t_final = time.time()
print('The solution is:')
print(res)
print('\n')
print("\n\n--- %s seconds ---\n" % (t_final - t_initial))
# PLOT OF THE MATRIX
def samplemat(dims,bb):
aa = np.zeros(dims)
#print(bb)
aa = np.reshape(bb,(dims))
return aa
# Display matrix
plt.matshow(samplemat((k, k),res))
plt.colorbar()
plt.show()
|
n1 = float(input("Quantos reais você tem? R$"))
print("R$",n1,"reais")
d = n1*5.15
print("{} reais dá para comprar {:.2f} dolares".format(n1, d))
|
# Generated by Django 2.2 on 2019-04-24 20:41
from django.db import migrations
def create_initial_products(apps, schema_editor):
Product = apps.get_model('catalog', 'Product')
Product(name='Salame', description='Salame Toscano', price=12).save()
Product(name='Olio Balsamico',
description='Olio balsamico di Modena', price=10).save()
Product(name='Parmigiano', description='Parmigiano Reggiano',
price=8.50).save()
Product(name='Olio', description='Olio Oliva Toscano', price=13).save()
Product(name='Porchetta',
description='Porchetta toscana cotta a legna', price=7.50).save()
Product(name='Cantucci', description='Cantucci di Prato', price=4).save()
Product(name='Vino Rosso',
description='Vino Rosso del Chianti', price=9.50).save()
Product(name='Brigidini', description='Brigidini di Lamporecchio',
price=3.50).save()
class Migration(migrations.Migration):
dependencies = [
('catalog', '0001_initial'),
]
operations = [
migrations.RunPython(create_initial_products),
]
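# Note (added aside): as written, this data migration has no reverse operation. If a
# reverse no-op is acceptable, RunPython also takes a reverse callable, e.g.:
#     migrations.RunPython(create_initial_products, migrations.RunPython.noop)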
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-07 11:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('markets', '0019_auto_20160907_1129'),
]
operations = [
migrations.AlterField(
model_name='market',
name='deposit_amount',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='market',
name='membership_fees',
field=models.FloatField(blank=True, default=0, help_text='Converted to GBP', null=True,
verbose_name='Pricing/Fees - Membership fees'),
),
migrations.AlterField(
model_name='market',
name='web_traffic',
field=models.FloatField(blank=True, default=0, help_text='in millions', null=True,
verbose_name='Number of registered users'),
),
]
|
from discord import ButtonStyle, Interaction, SelectOption, ui
from utils import BlooContext
class Select(ui.Select):
def __init__(self, versions):
super().__init__(custom_id="Some identifier", placeholder="Select a version...", min_values=1, max_values=1,
options=[SelectOption(label=version) for version in versions])
self.value = None
async def callback(self, interaction: Interaction):
self.value = interaction.data
self.view.stop()
class FirmwareDropdown(ui.View):
def __init__(self, firmware_list):
super().__init__()
self.ctx = None
self.pagination_index = 0
self.max_index = len(
firmware_list) // 25 if len(firmware_list) % 25 == 0 else (len(firmware_list) // 25) + 1
self.firmware_list = firmware_list
self.current_dropdown = Select(firmware_list[:25])
async def start(self, ctx: BlooContext):
self.ctx = ctx
self.add_item(self.current_dropdown)
await ctx.respond_or_edit(content="Choose a firmware for your device", view=self, ephemeral=True)
await self.wait()
value = self.current_dropdown.value
return value.get('values')[0] if value and value.get('values') else None
@ui.button(label='Older firmwares', style=ButtonStyle.secondary, row=1)
async def older(self, button: ui.Button, interaction: Interaction):
if interaction.user == self.ctx.author and self.pagination_index + 1 < self.max_index:
self.pagination_index += 1
await self.refresh_current_dropdown(interaction)
@ui.button(label='Newer firmwares', style=ButtonStyle.secondary, disabled=True, row=1)
async def newer(self, button: ui.Button, interaction: Interaction):
if interaction.user == self.ctx.author and self.pagination_index > 0:
self.pagination_index -= 1
await self.refresh_current_dropdown(interaction)
async def refresh_current_dropdown(self, interaction):
self.remove_item(self.current_dropdown)
self.current_dropdown = Select(
self.firmware_list[self.pagination_index*25:(self.pagination_index+1)*25])
for child in self.children:
if child.label == "Older firmwares":
child.disabled = self.pagination_index + 1 == self.max_index
elif child.label == "Newer firmwares":
child.disabled = self.pagination_index == 0
self.add_item(self.current_dropdown)
await interaction.response.edit_message(content="Choose a firmware for your device", view=self)
class Confirm(ui.View):
def __init__(self, ctx: BlooContext, true_response, false_response):
super().__init__()
self.ctx = ctx
self.value = None
self.true_response = true_response
self.false_response = false_response
# When the confirm button is pressed, set the inner value to `True` and
# stop the View from listening to more input.
# We also send the user an ephemeral message that we're confirming their choice.
@ui.button(label='Yes', style=ButtonStyle.success)
async def confirm(self, button: ui.Button, interaction: Interaction):
if interaction.user == self.ctx.author:
self.value = True
self.stop()
# This one is similar to the confirmation button except sets the inner value to `False`
@ui.button(label='No', style=ButtonStyle.grey)
async def cancel(self, button: ui.Button, interaction: Interaction):
if interaction.user == self.ctx.author:
await self.ctx.send_warning(description=self.false_response)
self.value = False
self.stop()
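# Hypothetical usage sketch (assumes a command handler with a BlooContext `ctx`;
# names below are illustrative, not part of this module):
#     view = Confirm(ctx, true_response="Confirmed.", false_response="Cancelled.")
#     await ctx.respond_or_edit(content="Are you sure?", view=view, ephemeral=True)
#     await view.wait()
#     if view.value:
#         ...  # proceed with the confirmed action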
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 12:41:42 2019
@author: Chaobo
"""
import tensorflow as tf
import numpy as np
import os
import cv2
import yolo.config as cfg
import time
import pickle as cPickle
import skimage.draw
from yolo.yolo3_net_pos import YOLONet
from utils.voc_eval_mask import voc_eval
class MAP(object):
def __init__(self, test_path, evaluation=True):
self.num_class = len(cfg.CLASSES)
self.classid = [i for i in range(self.num_class)]
self.class_to_ind = dict(zip(cfg.CLASSES, range(self.num_class)))
self.test_path = test_path
self.imagesetfile = os.path.join(self.test_path, 'cache', 'test.txt')
if evaluation:
self.groundtruth = self.get_groundtruth()
def get_groundtruth(self):
cache_path = os.path.join(self.test_path, 'cache')
test_labels_cache = os.path.join(cache_path, 'gt_labels_' + 'test' + '.pkl')
if os.path.isfile(test_labels_cache):
print('Loading testing labels from: ' + test_labels_cache)
with open(test_labels_cache, 'rb') as f:
recs = cPickle.load(f)
print('Number of testing data: ' +str(len(recs[0])))
return recs
ground_truth_cache = os.path.join(cache_path, 'ground_truth_cache.pkl')
print('Processing testing labels from: ' + ground_truth_cache)
with open(ground_truth_cache, 'rb') as f:
annotations = cPickle.load(f)
# Skip images that have no annotations.
annotations = [a for a in annotations if a['regions']]
with open(self.imagesetfile, 'r') as f:
test_index = [x.strip() for x in f.readlines()]
assert len(test_index)==len(annotations)
recs_mask = {}
recs_mergemask = {}
recs_size = {}
for i, index in enumerate(test_index):
a = annotations[i]
filename = os.path.splitext(a['filename'])[0]
assert filename == index
polygons = [r['shape_attributes'] for r in a['regions'].values()]
class_names = [r['region_attributes'] for r in a['regions'].values()]
# return a list[{'imageid':filename, 'classid':classid, 'difficult':int(0), 'mask':bool[image_h, image_w]]}]
image_h, image_w = a['size']
mask_label, merged_mask = self.load_masklabel(filename, image_h, image_w, polygons, class_names)
recs_mask[index] = mask_label
recs_mergemask[index] = merged_mask
recs_size[index] = [image_h, image_w]
recs = [recs_mask, recs_mergemask, recs_size, test_index]
print('Saving testing labels to: ' + test_labels_cache)
with open(test_labels_cache, 'wb') as f:
cPickle.dump(recs, f)
print('Number of testing data: ' +str(len(recs_mask)))
return recs
def load_masklabel(self, imname, image_h, image_w, polygons, class_names):
mask = np.zeros([len(polygons), image_h, image_w], dtype=np.bool)
merged_annotatemask = np.zeros((image_h, image_w), dtype=np.uint8)
for i, each_instance in enumerate(polygons):
each_mask = np.zeros([image_h, image_w], dtype=np.bool)
for each_poly in each_instance:
subtype = each_poly['type']
x_points = each_poly['all_points_x']
y_points = each_poly['all_points_y']
rr, cc = skimage.draw.polygon(y_points, x_points)
if subtype == 'out':
each_mask[rr, cc] = True
each_mask[np.array(y_points), np.array(x_points)] = True
else:
each_mask[rr, cc] = False
each_mask[np.array(y_points), np.array(x_points)] = True
mask[i, :,:] = each_mask
# generate merged mask for computing mIoU
if class_names[i] == 'crack':
merged_annotatemask[mask[i,...]==True] = 1
elif class_names[i] == 'spall':
merged_annotatemask[mask[i,...]==True] = 2
elif class_names[i] == 'rebar':
merged_annotatemask[mask[i,...]==True] = 3
# generate masklabel for computing mask-level mAP
mask_index = np.where(np.any(mask, axis= (1,2)))[0]
assert len(mask_index)==len(class_names)
masklabel = []
for index in mask_index:
eachclass = class_names[index]
classid = self.class_to_ind[eachclass]
eachmask = mask[index,...]
masklabel.append({'imageid': imname, 'classid': classid, 'difficult': int(0), 'mask': eachmask})
return masklabel, merged_annotatemask
def correct_yolo_boxes(self, x1, y1, x2, y2, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)//image_w
else:
new_h = net_h
new_w = (image_w*net_h)//image_h
x_offset, x_scale = float((net_w - new_w)//2)/net_w, float(new_w)/net_w
y_offset, y_scale = float((net_h - new_h)//2)/net_h, float(new_h)/net_h
x1 = max(min(np.around((x1 - x_offset) / x_scale * image_w).astype(np.int32), image_w), 0)
x2 = max(min(np.around((x2 - x_offset) / x_scale * image_w).astype(np.int32), image_w), 0)
y1 = max(min(np.around((y1 - y_offset) / y_scale * image_h).astype(np.int32), image_h), 0)
y2 = max(min(np.around((y2 - y_offset) / y_scale * image_h).astype(np.int32), image_h), 0)
return x1, y1, x2, y2
# def sigmoid(self, x):
# return 1. / (1. + np.exp(-x))
def sigmoid(self, x):
a = -1. * x
a = np.clip(a, -50., 50.)
a = 1. / (1. + np.exp(a))
return a
def image_read(image_rgb, image_size):
window = np.array([0., 0., 1., 1.], dtype=np.float32)
imgh, imgw, _ = image_rgb.shape
if (float(image_size)/imgw) < (float(image_size)/imgh):
imgh = (imgh * image_size)//imgw
imgw = image_size
else:
imgw = (imgw * image_size)//imgh
imgh = image_size
image = image_rgb.astype(np.float32)
image = cv2.resize(image, (imgw, imgh),interpolation = cv2.INTER_LINEAR)
# prepare the window for clip_boxes in testing mode
top = (image_size - imgh)//2
left = (image_size - imgw)//2
window[0] = top / image_size
window[1] = left / image_size
window[2] = (imgh + top) / image_size
window[3] = (imgw + left) / image_size
# embed the image into standard letter box
new_image = np.ones((image_size, image_size, 3)) * 127.
new_image[(image_size - imgh)//2:(image_size + imgh)//2,
(image_size - imgw)//2:(image_size + imgw)//2, :]= image
new_image = new_image / 255.0
return new_image, window
''' Computing mask-level mAP and mIoU '''
def evaluate(weights_file, test_path, net, eval_map):
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.restore(sess, weights_file)
txtname = os.path.join(test_path, 'cache', 'test.txt')
with open(txtname, 'r') as f:
image_index = [x.strip() for x in f.readlines()]
val_mask = eval_map.groundtruth[0]
val_mergemask = eval_map.groundtruth[1]
val_index = eval_map.groundtruth[3]
t_prediction = 0
t_crop_assemble = 0
det_masks = {}
detfile = {}
cracklist = []
spalllist = []
rebarlist = []
for i, index in enumerate(image_index):
print(index)
assert index==val_index[i]
imname = os.path.join(test_path, 'images', index + '.jpg')
image_rgb = cv2.cvtColor(cv2.imread(imname), cv2.COLOR_BGR2RGB)
image_h, image_w, _ = image_rgb.shape
input_image, input_window = image_read(image_rgb, cfg.TEST_SIZE)
image_array = np.expand_dims(input_image, 0)
window_array = np.expand_dims(input_window, 0)
feed_val = {net.is_training: False, net.det_thresh: [np.float32(cfg.OBJ_THRESHOLD)],
net.clip_window: window_array, net.images: image_array}
t = time.time()
det_box, det_mask = sess.run(net.evaluation, feed_dict=feed_val)
t_prediction += (time.time() - t)
if np.sum(det_mask[0]) == 0.0:
merged_detectmask = np.zeros((image_h, image_w), dtype=np.uint8)
det_masks[index] = merged_detectmask
continue
proposals = det_box[0][:, :4]
classids = (det_box[0][:, 4]).astype(int)
class_confs = det_box[0][:, 5]
mask_out = det_mask[0]
merged_detectmask = np.zeros((image_h, image_w), dtype=np.uint8)
# correct the boxes and masks into original image size
for k in range(len(classids)):
classid = classids[k]
score = class_confs[k]
pred_mask = mask_out[k]
# correct boxes
y1_norm, x1_norm, y2_norm, x2_norm = proposals[k,:]
x1, y1, x2, y2 = eval_map.correct_yolo_boxes(x1_norm, y1_norm, x2_norm, y2_norm, image_h, image_w, cfg.TEST_SIZE, cfg.TEST_SIZE)
if (y2-y1)*(x2-x1) <= 0:
continue
# correct masks
t = time.time()
size = pred_mask.shape[0]
y1_norm = np.around(y1_norm * size).astype(np.int32)
x1_norm = np.around(x1_norm * size).astype(np.int32)
y2_norm = np.around(y2_norm * size).astype(np.int32)
x2_norm = np.around(x2_norm * size).astype(np.int32)
crop_mask = pred_mask[y1_norm:y2_norm, x1_norm:x2_norm]
mask = cv2.resize(crop_mask, (x2 - x1, y2 - y1), interpolation = cv2.INTER_LINEAR)
mask = np.where(mask > 0.5, 1, 0).astype(np.bool)
full_mask = np.zeros([image_h, image_w], dtype=np.bool)
full_mask[y1:y2, x1:x2] = mask
t_crop_assemble += (time.time() - t)
if classid==0:
cracklist.append({'imageid': index, 'score': score, 'mask': full_mask})
merged_detectmask[full_mask==True] = 1
elif classid==1:
spalllist.append({'imageid': index, 'score': score, 'mask': full_mask})
merged_detectmask[full_mask==True] = 2
elif classid==2:
rebarlist.append({'imageid': index, 'score': score, 'mask': full_mask})
merged_detectmask[full_mask==True] = 3
det_masks[index] = merged_detectmask
detfile['0']=cracklist
detfile['1']=spalllist
detfile['2']=rebarlist
# compute mask-level AP and mAP
thresh = 0.5
thresh_out = []
res = []
pres = []
aps = []
for i, clsid in enumerate(eval_map.classid):
if not detfile[str(clsid)]:
recall = 0.
precision = 0.
ap = 0.
res += [recall]
pres += [precision]
aps += [ap]
continue
recall, precision, ap = voc_eval(detfile[str(clsid)], val_mask, txtname,
clsid, ovthresh= thresh, use_07_metric = False)
res += [recall]
pres += [precision]
aps += [ap]
mean_rec = np.mean(res)
mean_prec = np.mean(pres)
mean_ap = np.mean(aps)
thresh_out.append({'thresh': thresh, 'AP': aps, 'mAP': [mean_rec, mean_prec, mean_ap]})
t_prediction = t_prediction + t_crop_assemble
print("Prediction time: {}. Average {}/image".format(t_prediction, t_prediction / len(image_index)))
# compute semantic segmentation accuracy mIoU
p_bg = [0, 0, 0, 0]
p_crack = [0, 0, 0, 0]
p_spall = [0, 0, 0, 0]
p_rebar = [0, 0, 0, 0]
num_all_true_pixels = 0
for index in val_index:
true_mask = val_mergemask[index]
pred_mask = det_masks[index]
assert true_mask.shape == pred_mask.shape
num_all_true_pixels = num_all_true_pixels + int(true_mask.shape[0] * true_mask.shape[1])
# prediction = background(bg)
p_bg[0] = p_bg[0] + np.sum((true_mask==0) * (pred_mask==0))
p_crack[0] = p_crack[0] + np.sum((true_mask==1) * (pred_mask==0))
p_spall[0] = p_spall[0] + np.sum((true_mask==2) * (pred_mask==0))
p_rebar[0] = p_rebar[0] + np.sum((true_mask==3) * (pred_mask==0))
# prediction = crack
p_bg[1] = p_bg[1] + np.sum((true_mask==0) * (pred_mask==1))
p_crack[1] = p_crack[1] + np.sum((true_mask==1) * (pred_mask==1))
p_spall[1] = p_spall[1] + np.sum((true_mask==2) * (pred_mask==1))
p_rebar[1] = p_rebar[1] + np.sum((true_mask==3) * (pred_mask==1))
# prediction = spall
p_bg[2] = p_bg[2] + np.sum((true_mask==0) * (pred_mask==2))
p_crack[2] = p_crack[2] + np.sum((true_mask==1) * (pred_mask==2))
p_spall[2] = p_spall[2] + np.sum((true_mask==2) * (pred_mask==2))
p_rebar[2] = p_rebar[2] + np.sum((true_mask==3) * (pred_mask==2))
# prediction = rebar
p_bg[3] = p_bg[3] + np.sum((true_mask==0) * (pred_mask==3))
p_crack[3] = p_crack[3] + np.sum((true_mask==1) * (pred_mask==3))
p_spall[3] = p_spall[3] + np.sum((true_mask==2) * (pred_mask==3))
p_rebar[3] = p_rebar[3] + np.sum((true_mask==3) * (pred_mask==3))
bg_iou = p_bg[0] / (np.sum(p_bg) + p_bg[0] + p_crack[0] + p_spall[0] + p_rebar[0] - p_bg[0])
crack_iou = p_crack[1] / (np.sum(p_crack) + p_bg[1] + p_crack[1] + p_spall[1] + p_rebar[1] - p_crack[1])
spall_iou = p_spall[2] / (np.sum(p_spall) + p_bg[2] + p_crack[2] + p_spall[2] + p_rebar[2] - p_spall[2])
rebar_iou = p_rebar[3] / (np.sum(p_rebar) + p_bg[3] + p_crack[3] + p_spall[3] + p_rebar[3] - p_rebar[3])
miou = np.mean([bg_iou, crack_iou, spall_iou, rebar_iou])
mask_acc = [bg_iou, crack_iou, spall_iou, rebar_iou, miou]
return thresh_out, mask_acc
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
cfg.BATCH_SIZE = 1
yolo = YOLONet(False)
test_path = os.path.join(cfg.DATASET, "test")
test_weight = os.path.join(cfg.OUTPUT_DIR, "TRAINED MODEL")
eval_map = MAP(test_path, evaluation=True)
thresh_out, mask_acc = evaluate(test_weight, test_path, yolo, eval_map)
print('AP of each class: ' + ' crack ' + str(format(thresh_out[0]['AP'][0], '.3f'))
+ ' spall ' + str(format(thresh_out[0]['AP'][1], '.3f'))
+ ' rebar ' + str(format(thresh_out[0]['AP'][2], '.3f')))
print('mAP: ' + ' recall ' + str(format(thresh_out[0]['mAP'][0], '.3f'))
+ ' precision ' + str(format(thresh_out[0]['mAP'][1], '.3f'))
+ ' mAP ' + str(format(thresh_out[0]['mAP'][2], '.3f')))
|
# *args and **kwargs tutorial
# (the names args/kwargs are only a convention; *vars and **kvars would work the same way)
def function_1(name, age, rollno):
print("This name of the student is ", name,
"and age is ", age, "and rollno is ", rollno)
function_1("Mahi", 22, 4532)
def argsfunction(*args):
if (len(args)==2):
print("This name of the student is ", args[0],
"and age is ", args[1])
else:
print("This name of the student is ", args[0],"and age is ", args[1], "and rollno is ", args[2])
argsfunction("Shivam", 22, 5432)
parchi = ["Harry", 22]
argsfunction(*parchi)
# KWARGS
# kwargs behaves like a dictionary that can hold any number of key-value pairs
def printmarks(**kwargs):
print(type(kwargs))
for name, their_marks in kwargs.items():
print(name,their_marks)
marklist={"Mahi": 23, "Shicam ": 23,"sfsfa":22, "jdsajf": 22, "dsgaswe": 23, "Arohi": 25}
printmarks(**marklist)
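# A function can accept both forms at once: extra positional arguments land in *args
# and extra keyword arguments land in **kwargs (illustrative addition).
def student_info(*args, **kwargs):
    print("positional:", args)
    print("keyword:", kwargs)
student_info("Mahi", 22, rollno=4532)
# positional: ('Mahi', 22)
# keyword: {'rollno': 4532}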
|
class AsupContentType(basestring):
"""
Type of AutoSupport content
Possible values:
<ul>
<li> "basic" - ASUP will contain minimal set of
data for this subsystem,
<li> "troubleshooting" - ASUP will contain detailed
collection of data for this subsystem
</ul>
"""
@staticmethod
def get_api_name():
return "asup-content-type"
|
from cryptography.fernet import Fernet
message = input("Enter text to be encrypted: ")
key = Fernet.generate_key()
fernet = Fernet(key)
enrpt_message = fernet.encrypt(message.encode())
print(enrpt_message)
decrpt_message = fernet.decrypt(enrpt_message).decode()
print(decrpt_message)
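# Note (added sketch): decryption requires the same key, so in practice the key is
# persisted somewhere safe; the file name below is just an example.
with open("fernet.key", "wb") as key_file:
    key_file.write(key)
with open("fernet.key", "rb") as key_file:
    fernet_from_disk = Fernet(key_file.read())
print(fernet_from_disk.decrypt(enrpt_message).decode())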
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2016-11-29 10:11:27
# @Author : Arms (526923945@qq.com)
# @Link : https://armszhou.github.io
# @Version : $Id$
import os
# Identifiers and keywords
# a leading character + continuation characters
# leading character: a letter, an underscore (_), or most non-English letters
# continuation characters: any non-whitespace character
# identifiers are case-sensitive
# Integers
# binary: 0b or 0B
# octal: 0o or 0O
# hexadecimal: 0x or 0X
print(bin(1980))
# 0b11110111100
print(oct(1980))
# 0o3674
print(hex(1980))
# 0x7bc
print(int('7bc', 16)) # int(s, base) converts the string s to an integer; base is the radix
# 1980
print(int('3674', 8))
# 1980
print(int('11110111100', 2))
# 1980
# Dividing two integers with / yields a float; use // for integer (floor) division.
# Boolean type
# True
# False
# Floating-point types
# Equality comparisons between floats are unreliable
# float
# complex
# decimal.Decimal is slower to compute with, but more precise than the two above; suited to financial calculations
print(float())
# 0.0
# How to compare floats for equality
import sys
def equal_float(a, b):
return abs(a - b) <= sys.float_info.epsilon
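# Usage example (added for illustration): 0.1 + 0.2 is not exactly 0.3, but the two
# are equal to within machine epsilon.
print(0.1 + 0.2 == 0.3)             # False
print(equal_float(0.1 + 0.2, 0.3))  # True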
# Hexadecimal conversion
s = 14.25.hex()
print(s) # 0x1.c800000000000p+3
f = float.fromhex(s)
print(f) # 14.25
t = f.hex()
print(t) # 0x1.c800000000000p+3
# The exponent is written with p, because in hexadecimal e is a valid hex digit
# Complex numbers
# Hold a pair of floats, one for the real part and one for the imaginary part.
# Both parts are exposed as attributes, named real and imag.
# Mathematicians use the letter i for the square root of -1, but Python follows the engineering convention and uses j.
z = -89.5 + 2.125j
print(z.real, z.imag)
# Exact decimal numbers
# Using Decimal
import decimal
a = decimal.Decimal(9876)
b = decimal.Decimal('50000.0123456789876543210')
print(a + b) # 59876.0123456789876543210
# decimal.Decimal() accepts an integer or a string, not a float, because floats are inexact.
# decimal.Decimal.from_float() converts a float to the closest Decimal value.
print(decimal.Decimal.from_float(0.12345))
# 0.123450000000000004174438572590588591992855072021484375
# Strings
print(str())
# ' '
a = 'a'
# " "
b = "b"
# """ """ 三引号直接使用换行而无需使用 \n 转义
c = """abcdefg
higklmn
opqrst
uvwxyz
"""
print(c)
# r"str" 引导的字符串无需转义
import re
phoneRegex = re.compile(r"^((?:[0\d+[]])?\s*\d+(?:.-\d+)?)$")
# To write a long string that spans two lines without triple quotes, either of the following works
t = "abcedfghijklmnopqrst" + \
"uvwxyz"
# abcedfghijklmnopqrstuvwxyz
s = ("abcedfghijklmnopqrst"
"uvwxyz")
# abcedfghijklmnopqrstuvwxyz
# ord() returns the integer code point of a character
print(ord('A'))
# 65
# Slicing and step
# []
# seq[start]
# seq[start:end] includes start but not end
# seq[start:end:step] includes start but not end
# seq can be any sequence, such as a list, string, or tuple.
s = 'abcdef'
print(s[0])
print(s[-1])
print(s[0:5])
print(s[:5]) # start defaults to 0 when omitted
print(s[0:]) # end defaults to len(s) when omitted
print(s[:]) # omitting both is equivalent to s[0:len(s)]
print(s[0:len(s)])
# a
# f
# abcde
# abcde
# abcdef
# abcdef
# abcdef
# When a step is given, it cannot be 0
# Omitting start gives 0 unless step is negative; likewise omitting end gives len(s) unless step is negative. step == -1 reverses the sequence.
s = 'he ate camel food'
print(s[::-1])
print(s[::-2])
# doof lemac eta eh
# do ea t h
# Common methods
# join
t = ['a', 'b', 'c']
print(' '.join(t))
print('-'.join(t))
# a b c
# a-b-c
# reversed
s = 'abc'
print("".join(reversed(s)))
print(s[::-1])
# cba
# cba
# the * operator repeats a sequence
s = '=' * 5
print(s)
# =====
s *= 10
print(s)
# ==================================================
s = 'it is a <tag> in this string'
# find(t, start, end) returns the leftmost position of t in s, or -1 if t is not found
print(s.find('tag'))
print(s.find('notfound'))
# 9
# -1
# index(t, start, end) returns the leftmost position of t in s, raising ValueError if t is not found
print(s.index('tag'))
# print(s.index('notfound'))
# 9
# Traceback (most recent call last):
# File "test_2.py", line 170, in <module>
# print(s.index('notfound'))
# ValueError: substring not found
# Consider why index() can be preferable to find().
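# (One answer, added as an aside: index() fails loudly with ValueError, while find()
#  returns -1, which is easy to mistake for a valid position.)
try:
    pos = s.index('notfound')
except ValueError:
    pos = None
print(pos)
# None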
# count(t, start, end) returns the number of occurrences of the substring t in s
print(s.count('is', 2, len(s)))
# 2
# equivalent to
print(s[2:].count('is'))
# 2
# partition(t) returns a 3-tuple (the part of s to the left of t, t, the part of s to the right of t), or (s, "", "") if t is not found
s = 'ab/cd'
print(s.partition('/'))
print(s.partition('-'))
# ('ab', '/', 'cd')
# ('ab/cd', '', '')
# split(t, n) splits s on t at most n times and returns a list. If t is omitted, s is split on whitespace; if n is omitted, it splits as many times as possible.
s = '1980-11-28'
print(s.split('-'))
print(s.split())
s = '1980 11 28'
print(s.split('-'))
print(s.split())
# ['1980', '11', '28']
# ['1980-11-28']
# ['1980 11 28']
# ['1980', '11', '28']
# String formatting with the str.format() method
print('The novel {0} was published in {1}'.format('Hard Times', 1970))
# The novel Hard Times was published in 1970
# To include literal braces in a format string, double them
print('{{{0}}} {1}'.format("i'm in braces", "i'm not"))
# {i'm in braces} i'm not
# Formatting arguments by field name
print('{who} turned {age} this year'.format(who='She', age=88))
# She turned 88 this year
print('The {who} won {0} last week'.format(12, who='boy'))
# The boy won 12 last week
# Note that keyword arguments always come after positional arguments
# Field names can index into collection data types
stock = ['paper', 'envelopes', 'notepads', 'pens', 'paper clips']
print('We have {0[1]} and {0[2]} in stock'.format(stock))
# We have envelopes and notepads in stock
# Here 0 refers to the first positional argument; with several positional arguments they are numbered in order
stock0 = ['paper', 'envelopes', 'notepads']
stock1 = ['pens', 'paper clips']
print('We have {0[0]} and {1[0]} in stock'.format(stock0, stock1))
# We have paper and pens in stock
|
import argparse
import numpy as np
import pickle
from gen_mem import gen_mem
# train code from http://iamtrask.github.io/2015/07/12/basic-python-network/
# sigmoid function
def sigmoid(x,deriv=False):
if(deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
def relu(X):
map_func = np.vectorize(lambda x: max(x, 0))
return map_func(X)
def train(path, lpath):
X = np.load(path)
y = np.load(lpath)
print 'X: {}'.format(X)
print 'y: {}'.format(y)
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = np.random.random((X.shape[-1], y.shape[-1]))
nonlin = sigmoid
for iter in xrange(10000):
# forward propagation
l0 = X
l1 = nonlin(np.dot(l0,syn0))
# how much did we miss?
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1, deriv=True)
# update weights
syn0 += np.dot(l0.T,l1_delta)
print 'syn0: {}'.format(syn0)
print 'l1: {}'.format(l1)
with open('simple_nn_gt', 'wb') as f:
pickle.dump((l1, syn0), f)
syn0 = float2byte(syn0)
gen_mem('simple_nn_weight_dram', syn0)
args = None
def float2byte(mat):
pos_mat = np.vectorize(lambda x: np.abs(x))(mat)
max_w = np.amax(pos_mat)
mat = np.vectorize(lambda x: (127 * x/max_w).astype(np.int8))(mat)
return mat.reshape(1, 8, 8)
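# Sanity note (added): the scaling above maps the largest-magnitude weight to +/-127 and
# truncates the fractional part via astype(np.int8); e.g. a weight of 0.5 with max 1.0
# becomes 63. The reshape to (1, 8, 8) assumes an 8x8 weight matrix from train().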
def parse_args():
global args
parser = argparse.ArgumentParser()
parser.add_argument('--path', action='store',
help='path to dataset file.')
parser.add_argument('--label', action='store',
help='path to the label file.')
parser.add_argument('--debug', action='store_true',
help='switch debug prints.')
args = parser.parse_args()
if __name__ == '__main__':
parse_args()
train(args.path, args.label)
|
import argparse
import pathlib
import json
def parse_args():
"""Argument parsing function
:return: namespace containing user provided arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(
description="Setup a directory from a template."
)
parser.add_argument(dest="template", default=None,
help="Path to json file that contains template.")
parser.add_argument(dest="target", default=None,
help="Path to the target directory. If it does not exist, it will be created. \
If it is not empty and the -F flag is not provided, the program will stop.")
parser.add_argument("-F", "--force", dest="force", action="store_true",
help="Flag that forces the directory to be setup, regardless of whether the directory is empty or not.\
Also, if this flag is provided, non-existing parents of the target dir will be created. Use wisely.")
args = parser.parse_args()
args.template = pathlib.Path(args.template)
args.target = pathlib.Path(args.target)
return args
def read_template(args):
"""Reads JSON template into a python dictionary
:param args: Namespace of user provided arguments
:type args: argparse.Namespace
:return: Template dictionary from json file
:rtype: dict
"""
with open(args.template, "r") as f:
template = json.load(f)
return template
def check_target_dir(args):
"""Checks the target directory to see if it exists, is it a directory or a file, and considers the force flag.
Will raise FileExistsErrors if the directory exits, is not empty, and the force flag is not provided or if the target exists and is a file.
:param args: Namespace of user provided arguments
:type args: argparse.Namespace
:raises FileExistsError: If the target directory is not empty and the force flag is not provided.
:raises FileExistsError: If the target directory exists as a file
:return: True if the program should proceed
:rtype: bool
"""
target = args.target
if target.exists():
if target.is_dir():
if len(list(target.iterdir())) == 0 or args.force:
return True
else:
raise FileExistsError(
f"Target directory provided ['{target}'] is not empty.")
else:
raise FileExistsError(f"Target directory provided ['{target}'] is a file.")
else:
target.mkdir(parents=args.force)
return True
def setup_dir(args):
"""Creates the directory structure and basic files.
:param args: Namespace of user provided arguments.
:type args: argparse.Namespace
"""
def make_dir(key, value):
if isinstance(value, dict):
for key1, value1 in value.items():
make_dir(key/key1, value1)
else:
key.mkdir(parents=True)
for file in value:
(key/file).touch()
for key, value in args.template.items():
if key == ".":
for file in value:
(args.target/file).touch()
else:
cur_dir = (args.target / key)
make_dir(cur_dir, value)
if __name__ == "__main__":
from IPython import embed as II
args = parse_args()
args.template = read_template(args)
check_target_dir(args)
setup_dir(args)
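# Example template for illustration (hypothetical file template.json): "." lists files
# created directly in the target directory; nested objects become subdirectories.
#     {
#         ".": ["README.md"],
#         "src": ["__init__.py", "main.py"],
#         "data": {"raw": [], "processed": []}
#     }
# Usage sketch (script name assumed): python setup_from_template.py template.json ./new_project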
|
from dataclasses import dataclass
import glob
import os
import re
from pele_platform.Utilities.Parameters import parameters
from pele_platform.Utilities.Helpers import helpers
from pele_platform.Adaptive import simulation
from pele_platform.analysis import analysis
from frag_pele.Covalent import pdb_corrector
@dataclass
class CovalentDocking:
env: parameters.Parameters
original_dir: str = os.path.abspath(os.getcwd())
refinement_dir: str = None
working_folder: str = None
job1: parameters.Parameters = None
job2: parameters.Parameters = None
def run(self):
"""
Runs the whole covalent docking pipeline.
Returns
-------
A tuple of EnviroBuilder objects with job variables for both simulations.
"""
self.env.residue_type = self.get_residue_type()
self.correct_system()
self.set_general_perturbation_params()
self.job1 = simulation.run_adaptive(self.env)
if not self.env.debug:
self.choose_refinement_input()
self.set_refinement_perturbation_params()
self.job2 = simulation.run_adaptive(self.env)
else:
self.job2 = None
return self.job1, self.job2
def set_general_perturbation_params(self):
"""
Sets parameters for the initial side chain perturbation, making sure we set the correct working folder and
ignore refinement distance for now.
"""
self.env.perturbation = False
self.env._refinement_angle = self.env.refinement_angle
self.env.refinement_angle = None
self.set_top_level_directory()
self.env.folder = os.path.join(self.working_folder, "1_covalent_docking")
if isinstance(self.env.skip_ligand_prep, list):
self.env.skip_ligand_prep.append(self.env.residue)
else:
self.env.skip_ligand_prep = [self.env.residue]
def correct_system(self):
"""
Moves the covalent ligand to the other residues, replaces HETATM with ATOM, then assigns the resulting PDB
as the new system. Then adds original CONECT lines back to the extracted LIG.pdb.
"""
corrected_system = os.path.join(
self.original_dir,
os.path.basename(self.env.system.replace(".pdb", "_corrected.pdb")),
)
chain, residue_number = self.env.covalent_residue.split(":")
pdb_corrector.run(
self.env.system,
chain,
int(residue_number),
corrected_system,
ligand_resname=self.env.residue,
ligand_chain=self.env.chain,
)
self.retrieve_ligand_conects()
self.env.system = corrected_system
def set_top_level_directory(self):
"""
Sets top level working folder to contain all simulation steps.
"""
working_folder = os.path.abspath("{}_Pele".format(self.env.residue))
if not self.env.folder:
self.working_folder = (
helpers.get_next_peledir(working_folder)
if not self.env.adaptive_restart
else helpers.get_next_peledir(working_folder)
)
else:
self.working_folder = os.path.abspath(self.env.folder)
def choose_refinement_input(self):
"""
Clusters the docking output based on ligand heavy atom coordinates using mean shift, and selects a representative
from each cluster (by local non-bonding energy) as input for the refinement simulation.
"""
self.refinement_dir = os.path.join(self.working_folder, "refinement_input")
n_inputs = int(self.job1.cpus / 6)
max_top_clusters = n_inputs if n_inputs > 1 else 1 # tests only have 5 CPUs
output_path = os.path.join(self.job1.pele_dir, self.job1.output)
analysis_object = analysis.Analysis(
simulation_output=output_path,
resname=self.job1.residue,
chain=self.job1.chain,
traj=self.job1.traj_name,
topology=self.job1.topology,
cpus=1,
skip_initial_structures=False,
)
analysis_object.generate_clusters(
self.refinement_dir,
clustering_type="meanshift",
representatives_criterion="local_nonbonding_energy",
max_top_clusters=max_top_clusters,
)
def set_refinement_perturbation_params(self):
"""
Sets parameters for the refinement side chain perturbation, including restoring the refinement angle.
"""
self.env.refinement_angle = self.env._refinement_angle
self.env.folder = os.path.join(self.working_folder, "2_refinement")
self.env.system = os.path.join(self.refinement_dir, "cluster*.pdb")
self.env.no_ppp = True
self.env.covalent_docking_refinement = True
self.recover_templates_from_job1()
def recover_templates_from_job1(self):
"""
Sets templates created in the first part of the simulation as external templates and ensures
the parametrization of those ligands is skipped during refinement.
"""
templates = glob.glob(
os.path.join(self.job1.pele_dir, "DataLocal/Templates/OPLS2005/Protein/*")
) + glob.glob(
os.path.join(
self.job1.pele_dir, "DataLocal/Templates/OPLS2005/HeteroAtoms/*"
)
)
self.env.template = [
template
for template in templates
if os.path.basename(template) != "templates_generated"
]
self.env.rotamers = glob.glob(
os.path.join(self.job1.pele_dir, "DataLocal/LigandRotamerLibs/*")
)
for template in self.env.rotamers:
self.env.skip_ligand_prep.append(
os.path.basename(template).replace(".rot.assign", "")  # drop the ".rot.assign" suffix
)
self.env.skip_ligand_prep = list(set(self.env.skip_ligand_prep))
def get_residue_type(self):
"""
Extracts name of the residue the covalent ligand is bound to before correcting the system.
"""
chain, residue_number = self.env.covalent_residue.split(":")
residue_type = helpers.get_residue_name(self.env.system, chain, residue_number)
return residue_type
def retrieve_ligand_conects(self):
"""
Maps atom numbers from the original system PDB and adds modified CONECT lines to the extracted ligand PDB.
"""
original_atom_numbers = list()
original_conects = list()
covalent_chain, covalent_resnum = self.env.covalent_residue.split(":")
extracted_ligand = os.path.join(os.getcwd(), f"{self.env.residue}.pdb")
# Load original PDB
with open(self.env.system, "r") as system_file:
system_lines = [
line
for line in system_file.readlines()
if line.startswith("ATOM")
or line.startswith("HETATM")
or line.startswith("CONECT")
]
# Find ligand covalent residue lines in the original PDB and extract atom numbers
for line in system_lines:
if line[17:20].strip() == self.env.residue:
original_atom_numbers.append(line[7:11].strip())
if line[22:26].strip() == covalent_resnum and line[21] == covalent_chain:
original_atom_numbers.append(line[7:11].strip())
# Extract CONECT lines containing relevant atom numbers (residue and ligand)
for line in system_lines:
if line.startswith("CONECT") and any(
number in line for number in original_atom_numbers
):
original_conects.append(line)
original_conects = "".join(original_conects)
# Extract new ligand atom numbers
with open(extracted_ligand, "r") as ligand_file:
ligand_lines = ligand_file.readlines()
new_atom_numbers = [line[7:11].strip() for line in ligand_lines]
# Schrodinger needs 4 spaces, otherwise it makes a mess
new_atom_numbers = [f" {number}" for number in new_atom_numbers]
# Go over CONECT lines and update atom numbers
for old, new in zip(original_atom_numbers, new_atom_numbers):
original_conects = re.sub(rf"\b{old}\b", new, original_conects)
# Append mapped CONECT lines to the extracted LIG.pdb
with open(extracted_ligand, "a") as new_ligand:
new_ligand.write(original_conects)
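# Toy illustration (added, not part of the pipeline) of the word-boundary renumbering
# above: \b keeps "12" from also matching inside "112".
#     >>> re.sub(r"\b12\b", " 99", "CONECT   12  112")
#     'CONECT    99  112'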
|
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import collections
import os
import platform
import sys # noqa
from concurrent.futures import ProcessPoolExecutor
from decimal import Decimal
from textwrap import dedent
import pytest
from path import Path
from pytablewriter import dumps_tabledata
from tabledata import TableData
from typepy import Integer, RealNumber, String
import pytablereader as ptr
from pytablereader import InvalidTableNameError
from pytablereader.interface import AbstractTableReader
from ._common import TYPE_HINT_RULES, fifo_writer
Data = collections.namedtuple("Data", "value expected")
test_data_00 = Data(
"\n".join(
[
'"attr_a","attr_b","attr_c"',
'1,4,"a"',
'2,2.1,"bb"',
'3,120.9,"ccc"',
]
),
[
TableData(
"tmp",
["attr_a", "attr_b", "attr_c"],
[
[1, 4, "a"],
[2, Decimal("2.1"), "bb"],
[3, Decimal("120.9"), "ccc"],
],
)
],
)
test_data_01 = Data(
"\n".join(
[
'"attr_a","attr_b","attr_c"',
' 1,4,"a"',
'2, 2.1,"bb"',
'3,120.9, "ccc"',
]
),
[
TableData(
"foo_bar",
["attr_a", "attr_b", "attr_c"],
[
["attr_a", "attr_b", "attr_c"],
[1, 4, "a"],
[2, Decimal("2.1"), "bb"],
[3, Decimal("120.9"), "ccc"],
],
)
],
)
test_data_02 = Data(
"\n".join(['3,120.9,"ccc"']),
[TableData("foo_bar", ["attr_a", "attr_b", "attr_c"], [[3, "120.9", "ccc"]])],
)
test_data_03 = Data(
"\n".join(['"attr_a","attr_b","attr_c"', '1,4,"a"', '2,2.1,"bb"', '3,120.9,"ccc"', "", ""]),
[
TableData(
"tmp",
["attr_a", "attr_b", "attr_c"],
[[1, 4, "a"], [2, Decimal("2.1"), "bb"], [3, Decimal("120.9"), "ccc"]],
)
],
)
test_data_04 = Data(
dedent(
"""\
"attr_a","attr_b","attr_c"
1,4,"a"
2,2.1,"bb"
3,120.9,"ccc"
"""
),
[
TableData(
"tmp",
["attr_a", "attr_b", "attr_c"],
[[1, 4, "a"], [2, "2.1", "bb"], [3, "120.9", "ccc"]],
)
],
)
test_data_05 = Data(
dedent(
"""\
"姓","名","生年月日","郵便番号","住所","電話番号"
"山田","太郎","2001/1/1","100-0002","東京都千代田区皇居外苑","03-1234-5678"
"山田","次郎","2001/1/2","251-0036","神奈川県藤沢市江の島1丁目","03-9999-9999"
"""
),
[
TableData(
"tmp",
["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
)
],
)
test_data_06 = Data(
dedent(
"""\
smokey,Linux 3.0-ARCH,x86
12345678901,12345 1234567890123,123
12345678901,1234567890123456789,12345
11 bytes,19 bytes,5 byt
test line:,"Some \"\"comma, quote\"\"",foo
skylight,Linux 3.0-ARCH,x86
polaris,Linux 3.0-ARCH,amd64
asgard,Windows 6.1.7600,amd64
galileo,Windows 6.2.8102,x86
kepler,Windows 6.2.8123,amd64
wrfbox,Windows 6.2.8133,amd64
"""
),
[
TableData(
"tmp",
["smokey", "Linux 3.0-ARCH", "x86"],
[
["12345678901", "12345 1234567890123", "123"],
["12345678901", "1234567890123456789", "12345"],
["11 bytes", "19 bytes", "5 byt"],
["test line:", 'Some "comma, quote"', "foo"],
["skylight", "Linux 3.0-ARCH", "x86"],
["polaris", "Linux 3.0-ARCH", "amd64"],
["asgard", "Windows 6.1.7600", "amd64"],
["galileo", "Windows 6.2.8102", "x86"],
["kepler", "Windows 6.2.8123", "amd64"],
["wrfbox", "Windows 6.2.8133", "amd64"],
],
)
],
)
test_data_multibyte = Data(
dedent(
"""\
"姓","名","生年月日","郵便番号","住所","電話番号"
"山田","太郎","2001/1/1","100-0002","東京都千代田区皇居外苑","03-1234-5678"
"山田","次郎","2001/1/2","251-0036","神奈川県藤沢市江の島1丁目","03-9999-9999"
"""
),
[
TableData(
"multibyte",
["姓", "名", "生年月日", "郵便番号", "住所", "電話番号"],
[
["山田", "太郎", "2001/1/1", "100-0002", "東京都千代田区皇居外苑", "03-1234-5678"],
["山田", "次郎", "2001/1/2", "251-0036", "神奈川県藤沢市江の島1丁目", "03-9999-9999"],
],
)
],
)
class Test_CsvTableFileLoader_make_table_name:
def setup_method(self, method):
AbstractTableReader.clear_table_count()
@pytest.mark.parametrize(
["value", "source", "expected"],
[
["%(default)s", "/path/to/data.csv", "data"],
["%(filename)s", "/path/to/data.csv", "data"],
["prefix_%(filename)s", "/path/to/data.csv", "prefix_data"],
["%(filename)s_suffix", "/path/to/data.csv", "data_suffix"],
["prefix_%(filename)s_suffix", "/path/to/data.csv", "prefix_data_suffix"],
["%(filename)s%(filename)s", "/path/to/data.csv", "datadata"],
["%(format_name)s%(format_id)s_%(filename)s", "/path/to/data.csv", "csv0_data"],
["%(%(filename)s)", "/path/to/data.csv", "%(data)"],
],
)
def test_normal(self, value, source, expected):
loader = ptr.CsvTableFileLoader(source)
loader.table_name = value
assert loader.make_table_name() == expected
@pytest.mark.parametrize(
["value", "source", "expected"],
[
[None, "/path/to/data.csv", ValueError],
["", "/path/to/data.csv", ValueError],
["%(filename)s", None, InvalidTableNameError],
["%(filename)s", "", InvalidTableNameError],
],
)
def test_exception(self, value, source, expected):
loader = ptr.CsvTableFileLoader(source)
loader.table_name = value
with pytest.raises(expected):
loader.make_table_name()
class Test_CsvTableFileLoader_load:
def setup_method(self, method):
AbstractTableReader.clear_table_count()
@pytest.mark.parametrize(
["test_id", "table_text", "filename", "headers", "type_hints", "expected"],
[
[0, test_data_00.value, "tmp.csv", [], [], test_data_00.expected],
[
1,
test_data_01.value,
"hoge/foo_bar.csv",
["attr_a", "attr_b", "attr_c"],
[Integer, RealNumber, String],
test_data_01.expected,
],
[
2,
test_data_02.value,
"hoge/foo_bar.csv",
["attr_a", "attr_b", "attr_c"],
[Integer, RealNumber, String],
test_data_02.expected,
],
[3, test_data_03.value, "tmp.csv", [], [], test_data_03.expected],
[4, test_data_04.value, "tmp.csv", [], [], test_data_04.expected],
[5, test_data_05.value, "tmp.csv", [], [], test_data_05.expected],
[6, test_data_06.value, "tmp.csv", [], [], test_data_06.expected],
],
)
def test_normal(self, tmpdir, test_id, table_text, filename, headers, type_hints, expected):
file_path = Path(str(tmpdir.join(filename)))
file_path.parent.makedirs_p()
with open(file_path, "w", encoding="utf-8") as f:
f.write(table_text)
loader = ptr.CsvTableFileLoader(file_path, type_hints=type_hints)
loader.headers = headers
for tabledata in loader.load():
print(f"test-id={test_id}")
print(dumps_tabledata(tabledata))
assert tabledata.in_tabledata_list(expected)
@pytest.mark.parametrize(
["test_id", "table_text", "filename", "encoding", "headers", "expected"],
[
[
7,
test_data_multibyte.value,
"multibyte.csv",
"utf16",
[],
test_data_multibyte.expected,
]
],
)
def test_normal_multibyte(
self, tmpdir, test_id, table_text, filename, encoding, headers, expected
):
file_path = Path(str(tmpdir.join(filename)))
file_path.parent.makedirs_p()
with open(file_path, "w", encoding=encoding) as f:
f.write(table_text)
loader = ptr.CsvTableFileLoader(file_path)
loader.headers = headers
for tabledata in loader.load():
print(f"test-id={test_id}")
print(dumps_tabledata(tabledata))
assert tabledata.in_tabledata_list(expected)
@pytest.mark.skipif(platform.system() == "Windows", reason="platform dependent tests")
@pytest.mark.parametrize(
["table_text", "fifo_name", "expected"],
[[test_data_06.value, "tmp", test_data_06.expected]],
)
def test_normal_fifo(self, tmpdir, table_text, fifo_name, expected):
namedpipe = str(tmpdir.join(fifo_name))
os.mkfifo(namedpipe)
loader = ptr.CsvTableFileLoader(namedpipe)
with ProcessPoolExecutor() as executor:
executor.submit(fifo_writer, namedpipe, table_text)
for tabledata in loader.load():
print(dumps_tabledata(tabledata))
assert tabledata.in_tabledata_list(expected)
@pytest.mark.parametrize(
["table_text", "filename", "headers", "expected"],
[
["", "hoge.csv", [], ptr.DataError],
["\n".join(['"attr_a","attr_b","attr_c"']), "hoge.csv", [], ptr.DataError],
["\n".join([]), "hoge.csv", ["attr_a", "attr_b", "attr_c"], ptr.DataError],
],
)
def test_exception(self, tmpdir, table_text, filename, headers, expected):
p_csv = tmpdir.join(filename)
with open(str(p_csv), "w", encoding="utf8") as f:
f.write(table_text)
loader = ptr.CsvTableFileLoader(str(p_csv))
loader.headers = headers
with pytest.raises(expected):
for _tabletuple in loader.load():
pass
@pytest.mark.parametrize(
["filename", "headers", "expected"],
[["", [], ptr.InvalidFilePathError], [None, [], ptr.InvalidFilePathError]],
)
def test_null(self, tmpdir, filename, headers, expected):
loader = ptr.CsvTableFileLoader(filename)
loader.headers = headers
with pytest.raises(expected):
for _tabletuple in loader.load():
pass
class Test_CsvTableTextLoader_make_table_name:
def setup_method(self, method):
AbstractTableReader.clear_table_count()
@pytest.mark.parametrize(
["value", "expected"],
[["%(format_name)s%(format_id)s", "csv0"], ["tablename", "tablename"]],
)
def test_normal(self, value, expected):
loader = ptr.CsvTableTextLoader("dummy")
loader.table_name = value
assert loader.make_table_name() == expected
@pytest.mark.parametrize(
["value", "source", "expected"],
[[None, "tablename", ValueError], ["", "tablename", ValueError]],
)
def test_exception(self, value, source, expected):
loader = ptr.CsvTableFileLoader(source)
loader.table_name = value
with pytest.raises(expected):
loader.make_table_name()
class Test_CsvTableTextLoader_load:
def setup_method(self, method):
AbstractTableReader.clear_table_count()
@pytest.mark.parametrize(
["table_text", "table_name", "headers", "type_hints", "expected"],
[
[test_data_00.value, "tmp", [], [], test_data_00.expected],
[
test_data_01.value,
"foo_bar",
["attr_a", "attr_b", "attr_c"],
[Integer, RealNumber, String],
test_data_01.expected,
],
[
test_data_02.value,
"foo_bar",
["attr_a", "attr_b", "attr_c"],
[Integer, RealNumber, String],
test_data_02.expected,
],
[test_data_03.value, "tmp", [], [], test_data_03.expected],
],
)
def test_normal(self, table_text, table_name, headers, type_hints, expected):
loader = ptr.CsvTableTextLoader(table_text, type_hints=type_hints)
loader.table_name = table_name
loader.headers = headers
for tabledata in loader.load():
print(dumps_tabledata(tabledata))
for e in expected:
print(dumps_tabledata(e))
assert tabledata.in_tabledata_list(expected)
def test_normal_type_hint_rules(self):
table_text = dedent(
"""\
"a text","b integer","c real"
01,"01","1.1"
20,"20","1.2"
030,"030","1.3"
"""
)
loader = ptr.CsvTableTextLoader(table_text)
loader.table_name = "type hint rules"
loader.type_hint_rules = TYPE_HINT_RULES
for tbldata in loader.load():
assert tbldata.headers == ["a text", "b integer", "c real"]
assert tbldata.value_matrix == [
["01", 1, Decimal("1.1")],
["20", 20, Decimal("1.2")],
["030", 30, Decimal("1.3")],
]
@pytest.mark.parametrize(
["table_text", "table_name", "headers", "expected"],
[
["", "hoge", [], ValueError],
["\n".join(['"attr_a","attr_b","attr_c"']), "hoge", [], ptr.DataError],
["\n".join([]), "hoge", ["attr_a", "attr_b", "attr_c"], ValueError],
],
)
def test_exception_insufficient_data(self, table_text, table_name, headers, expected):
loader = ptr.CsvTableTextLoader(table_text)
loader.table_name = table_name
loader.headers = headers
with pytest.raises(expected):
for _tabletuple in loader.load():
pass
def test_exception_invalid_csv(self):
table_text = dedent(
"""\
nan = float("nan")
inf = float("inf")
TEST_TABLE_NAME = "test_table"
TEST_DB_NAME = "test_db"
NOT_EXIT_FILE_PATH = "/not/existing/file/__path__"
NamedTuple = namedtuple("NamedTuple", "attr_a attr_b")
NamedTupleEx = namedtuple("NamedTupleEx", "attr_a attr_b attr_c")
"""
)
loader = ptr.CsvTableTextLoader(table_text)
loader.table_name = "dummy"
with pytest.raises(ptr.DataError):
for _tabletuple in loader.load():
pass
@pytest.mark.parametrize(
["table_name", "headers", "expected"], [["", [], ValueError], [None, [], ValueError]]
)
def test_null(self, table_name, headers, expected):
loader = ptr.CsvTableTextLoader("dummy")
loader.table_name = table_name
loader.headers = headers
with pytest.raises(expected):
for _tabletuple in loader.load():
pass
|
#!/usr/bin/env python
import rospy, cv2, cv_bridge, numpy
from tf.transformations import decompose_matrix, euler_from_quaternion
from sensor_msgs.msg import Image
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from ros_numpy import numpify
import numpy as np
from kobuki_msgs.msg import Led
from kobuki_msgs.msg import Sound
import smach
import smach_ros
integral = 0
previous_error = 0
cur_pos = [0, 0]
cur_heading = 0
current_twist = Twist()
# Three flags that track whether the robot is touching a line
stop_line_flag = False
flag_line_flag = False
backing_flag = False
twist_pub = rospy.Publisher("/cmd_vel_mux/input/teleop", Twist, queue_size=1)
led_pub_1 = rospy.Publisher('/mobile_base/commands/led1', Led, queue_size=1)
led_pub_2 = rospy.Publisher('/mobile_base/commands/led2', Led, queue_size=1)
sound_pub = rospy.Publisher('/mobile_base/commands/sound', Sound, queue_size=1)
max_rotate_vel = 0.5
max_linear_vel = 0.25
degree_ninty = 4.0 / 2
counter_loc1 = 0
counter_loc2 = 0
location_index = 1
#
TRIANGLE = 1
CIRCLE = 2
RECTANGLE = 3
object_type = CIRCLE
current_type = 0
# whether the robot is currently checking an object
isChecking = False
# whether the robot is moving back from location 2
is_loc2_backing = False
# number of stops remaining at location 3
loc3_stop_time = 2
# index of the object currently being checked at location 3
loc3_step_index = 1
# used to change the window size so the vertical red line can be seen
is_finishing_loc2 = False
time_after_stop = 2
# flag for moving forward after a stop
moving_after_stop_flag = False
# some of the waypoints
center_waypoints = [(), ()]
square_waypoints = [(), ()]
# location 1 states
class moving_forward(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'flag_line', 'moving'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'stop_flag', 'flag_flag'])
def execute(self, userdata):
global stop_line_flag, flag_line_flag, moving_after_stop_flag
        if stop_line_flag:
            stop_line_flag = False
            userdata.cur_time = rospy.Time.now()
            twist_pub.publish(Twist())
            flag_line_flag = False
            return 'stop'
        elif flag_line_flag:
flag_line_flag = False
userdata.cur_time = rospy.Time.now()
return 'flag_line'
# if userdata.cur_time + rospy.Duration(1) < rospy.Time.now():
#
# userdata.cur_time = rospy.Time.now()
# userdata.flag_line_flag = cur_heading
# temp_twist = Twist()
# temp_twist.linear.x = 0
# temp_twist.angular.z = max_rotate_vel
# twist_pub.publish(temp_twist)
# return 'flag_line'
# else:
# twist_pub.publish(current_twist)
# return 'moving'
else:
twist_pub.publish(current_twist)
return 'moving'
class stop(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['keep', 'recover'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
def execute(self, userdata):
global flag_line_flag, loc3_stop_time
if userdata.cur_time + rospy.Duration(3) > rospy.Time.now():
# userdata.cur_time = rospy.Time.now()
twist_pub.publish(Twist())
flag_line_flag = False
return 'keep'
else:
twist_pub.publish(current_twist)
userdata.cur_time = rospy.Time.now()
flag_line_flag = False
if location_index == 3:
loc3_stop_time -= 1
return 'recover'
class moving_after_stop(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['moving', 'stop'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
def execute(self, userdata):
global stop_line_flag, flag_line_flag
if userdata.cur_time + rospy.Duration(2.0) > rospy.Time.now():
twist_pub.publish(current_twist)
stop_line_flag = False
flag_line_flag = False
return 'moving'
else:
twist_pub.publish(current_twist)
userdata.cur_time = rospy.Time.now()
stop_line_flag = False
flag_line_flag = False
return 'stop'
class turning_left(smach.State):
def __init__(self):
smach.State.__init__(self,
outcomes=['moving_a_bit', 'left_turning', 'stop_turning_loc1', 'stop_turning_loc2',
'stop_turning_loc3'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
def execute(self, userdata):
global flag_line_flag, isChecking
if userdata.cur_time + rospy.Duration(1.0) > rospy.Time.now():
twist_pub.publish(current_twist)
flag_line_flag = False
return 'moving_a_bit'
elif userdata.cur_time + rospy.Duration(degree_ninty / max_rotate_vel + 1.0) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = max_rotate_vel
twist_pub.publish(temp_twist)
flag_line_flag = False
return 'left_turning'
else:
if location_index == 1:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
return 'stop_turning_loc1'
elif location_index == 2:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
isChecking = True
return 'stop_turning_loc2'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
isChecking = True
return 'stop_turning_loc3'
class checking_object_loc1(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['overtime', 'get_sth', 'not_get_sth'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global counter_loc1, isChecking
if userdata.cur_time + rospy.Duration(10) < rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
isChecking = False
return 'overtime'
elif counter_loc1 > 2:
print(counter_loc1)
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
led_pub_1.publish(Led.GREEN)
led_pub_2.publish(Led.GREEN)
for i in range(3):
sound_pub.publish(0)
rospy.sleep(1)
counter_loc1 = 0
isChecking = False
return 'get_sth'
elif counter_loc1 == 2:
print(counter_loc1)
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
led_pub_1.publish(Led.GREEN)
sound_pub.publish(0)
rospy.sleep(1)
sound_pub.publish(0)
counter_loc1 = 0
isChecking = False
return 'get_sth'
elif counter_loc1 == 1:
print(counter_loc1)
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
led_pub_2.publish(Led.GREEN)
sound_pub.publish(0)
counter_loc1 = 0
isChecking = False
return 'get_sth'
else:
isChecking = True
return 'not_get_sth'
class turning_back(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['back_turning', 'stop_back'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading'])
def execute(self, userdata):
global location_index, isChecking, stop_line_flag, flag_line_flag
if userdata.cur_time + rospy.Duration((degree_ninty + 0.2) / max_rotate_vel) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
isChecking = False
return 'back_turning'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
location_index += 1
# led_pub_1.publish(Led.BLACK)
# led_pub_2.publish(Led.BLACK)
isChecking = False
stop_line_flag = False
flag_line_flag = False
return 'stop_back'
# location 2 states
class moving_loc2(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'moving'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global isChecking
if counter_loc2 > 0:
if counter_loc2 == 1:
led_pub_2.publish(Led.ORANGE)
sound_pub.publish(0)
elif counter_loc2 == 2:
led_pub_1.publish(Led.ORANGE)
sound_pub.publish(0)
sound_pub.publish(0)
else:
led_pub_1.publish(Led.ORANGE)
led_pub_2.publish(Led.ORANGE)
sound_pub.publish(0)
sound_pub.publish(0)
sound_pub.publish(0)
if userdata.cur_time + rospy.Duration(0.5) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = max_linear_vel
temp_twist.angular.z = 0
twist_pub.publish(temp_twist)
return 'moving'
else:
twist_pub.publish(Twist())
isChecking = False
userdata.cur_time = rospy.Time.now()
return 'stop'
else:
twist_pub.publish(current_twist)
userdata.cur_time = rospy.Time.now()
return 'moving'
class back_dirction(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'rotating'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global is_loc2_backing, is_finishing_loc2
if userdata.cur_time + rospy.Duration((degree_ninty * 2 - 0.8) / max_rotate_vel) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = max_rotate_vel
twist_pub.publish(temp_twist)
return 'rotating'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
is_loc2_backing = True
is_finishing_loc2 = True
return 'stop'
class moving_back_loc2(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'moving'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global backing_flag, is_loc2_backing, is_finishing_loc2
if backing_flag:
if userdata.cur_time + rospy.Duration(2.15) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = max_linear_vel
temp_twist.angular.z = 0
twist_pub.publish(temp_twist)
is_finishing_loc2 = False
return 'moving'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
backing_flag = False
is_loc2_backing = False
is_finishing_loc2 = False
return 'stop'
else:
twist_pub.publish(current_twist)
userdata.cur_time = rospy.Time.now()
return 'moving'
class finish_loc2(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'rotating'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global location_index, flag_line_flag, loc3_stop_time
if userdata.cur_time + rospy.Duration((degree_ninty - 0.5) / max_rotate_vel) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = max_rotate_vel
twist_pub.publish(temp_twist)
return 'rotating'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
location_index = 3
flag_line_flag = False
loc3_stop_time = 2
# led_pub_1.publish(Led.BLACK)
# led_pub_2.publish(Led.BLACK)
return 'stop'
# location 4 states
# Move to the center of the location 4
class moving_center(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['right_shape', 'wrong_shape', 'stay'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
pass
# location 3 states
class checking_object_loc3(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['right_shape', 'wrong_shape', 'stay'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global counter_loc1, isChecking, loc3_step_index
if userdata.cur_time + rospy.Duration(3) > rospy.Time.now():
twist_pub.publish(Twist())
return 'stay'
elif object_type != current_type:
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
isChecking = False
loc3_step_index += 1
return 'wrong_shape'
else:
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
userdata.cur_time = rospy.Time.now()
sound_pub.publish(0)
led_pub_1.publish(Led.BLACK)
led_pub_2.publish(Led.RED)
isChecking = False
return 'right_shape'
class right_turning_back(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'rotating'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
global stop_line_flag
if userdata.cur_time + rospy.Duration(degree_ninty / max_rotate_vel) > rospy.Time.now():
temp_twist = Twist()
temp_twist.linear.x = 0
temp_twist.angular.z = -max_rotate_vel
twist_pub.publish(temp_twist)
return 'rotating'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
stop_line_flag = False
return 'stop'
class moving_terminate(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['stop', 'moving'],
input_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'],
output_keys=['cur_time', 'cur_pose', 'cur_loc', 'cur_heading', 'coun_loc1'])
def execute(self, userdata):
if not stop_line_flag:
twist_pub.publish(current_twist)
return 'moving'
else:
twist_pub.publish(Twist())
userdata.cur_time = rospy.Time.now()
sound_pub.publish(1)
return 'stop'
class main_controller():
def __init__(self):
rospy.init_node('following_line')
self.sm = smach.StateMachine(outcomes=['success'])
self.sis = smach_ros.IntrospectionServer('server_name', self.sm, '/SM_ROOT')
self.sis.start()
self.bridge = cv_bridge.CvBridge()
self.integral = 0
self.previous_error = 0
self.Kp = - 1 / 200.0
self.Kd = 1 / 3000.0
self.Ki = 0.0
rospy.Subscriber('usb_cam/image_raw', Image, self.usb_image_callback)
rospy.Subscriber('camera/rgb/image_raw', Image, self.kinect_image_callback)
self.sm.userdata.current_time = rospy.Time.now()
self.sm.userdata.current_pose = cur_pos
self.sm.userdata.current_heading = cur_heading
self.sm.userdata.current_loc = 1
self.sm.userdata.stop_flag = stop_line_flag
self.sm.userdata.flag_flag = flag_line_flag
def kinect_image_callback(self, msg):
global stop_line_flag, flag_line_flag, counter_loc1, counter_loc2, object_type, backing_flag, current_type, max_linear_vel, time_after_stop
image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# kinect red
lower_red = numpy.array([0, 200, 50])
upper_red = numpy.array([360, 256, 256])
if loc3_step_index > 1 and location_index == 3:
max_linear_vel = 0.2
if isChecking:
if location_index == 1:
mask = cv2.inRange(hsv, lower_red, upper_red)
im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for item in contours:
area = cv2.contourArea(item)
if area > 1000:
# peri = cv2.arcLength(item, True)
# approx = cv2.approxPolyDP(item, 0.04 * peri, True)
# if len(approx) == 4:
counter_loc1 += 1
elif location_index == 2:
lower_green = numpy.array([65, 60, 60])
upper_green = numpy.array([170, 256, 256])
# lower_red = numpy.array([160, 100, 100])
# upper_red = numpy.array([360, 256, 256])
red_mask = cv2.inRange(hsv, lower_red, upper_red)
green_mask = cv2.inRange(hsv, lower_green, upper_green)
im2, red_contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
im2, green_contours, hierarchy = cv2.findContours(green_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for item in green_contours:
if cv2.contourArea(item) > 1000:
peri = cv2.arcLength(item, True)
approx = cv2.approxPolyDP(item, 0.04 * peri, True)
if len(approx) == 3:
object_type = TRIANGLE
elif len(approx) == 4:
object_type = RECTANGLE
else:
object_type = CIRCLE
counter_loc2 = 1
for item in red_contours:
if cv2.contourArea(item) > 1000:
counter_loc2 += 1
break
elif location_index == 3:
red_mask = cv2.inRange(hsv, lower_red, upper_red)
if loc3_step_index == 2:
h, w, d = image.shape
red_mask = red_mask[0:h, 0:(w / 4 * 3)]
im2, red_contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for item in red_contours:
if cv2.contourArea(item) > 100:
cv2.imwrite("test.png", red_mask)
peri = cv2.arcLength(item, True)
approx = cv2.approxPolyDP(item, 0.04 * peri, True)
if len(approx) == 3:
current_type = TRIANGLE
elif len(approx) == 4:
current_type = RECTANGLE
else:
current_type = CIRCLE
if current_type == object_type:
break
else:
pass
red_mask = cv2.inRange(hsv, lower_red, upper_red)
cv2.imshow("refer", red_mask)
cv2.waitKey(3)
def usb_image_callback(self, msg):
global stop_line_flag, flag_line_flag, counter_loc1, counter_loc2, object_type, backing_flag, current_type, max_linear_vel, time_after_stop
image = self.bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# white color
lower_white = numpy.array([0, 0, 170])
upper_white = numpy.array([360, 30, 255])
mask = cv2.inRange(hsv, lower_white, upper_white)
h, w, d = image.shape
search_top = 3 * h / 4 + 20
search_bot = 3 * h / 4 + 30
mask[0:search_top, 0:w] = 0
mask[search_bot:h, 0:w] = 0
M = cv2.moments(mask)
if M['m00'] > 0:
self.cx_white = int(M['m10'] / M['m00'])
self.cy_white = int(M['m01'] / M['m00'])
cv2.circle(image, (self.cx_white, self.cy_white), 20, (0, 0, 255), -1)
# BEGIN CONTROL
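            # PID control on the horizontal offset of the white-line centroid from the
            # image centre; 0.05 is presumably the per-frame control period used for the
            # integral and derivative terms.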
err = self.cx_white - w / 2
current_twist.linear.x = max_linear_vel # and <= 1.7
self.integral = self.integral + err * 0.05
self.derivative = (err - self.previous_error) / 0.05
current_twist.angular.z = float(err) * self.Kp + (self.Ki * float(self.integral)) + (
self.Kd * float(self.derivative))
self.previous_error = err
# usb red
lower_red = numpy.array([0, 100, 100])
upper_red = numpy.array([360, 256, 256])
# if loc3_stop_time == 0:
# lower_red = numpy.array([0, 150, 50])
# upper_red = numpy.array([360, 256, 256])
mask = cv2.inRange(hsv, lower_red, upper_red)
h, w, d = image.shape
if location_index == 3:
search_top = h - 50
search_bot = h - 1
else:
search_top = h - 40
search_bot = h - 1
if is_finishing_loc2:
search_top = h - 150
search_bot = h - 1
mask[0:search_top, 0:w] = 0
mask[search_bot:h, 0:w] = 0
im2, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if loc3_step_index > 1 and location_index == 3:
max_linear_vel = 0.2
if len(contours) > 0:
for item in contours:
area = cv2.contourArea(item)
if area > 5000:
M = cv2.moments(item)
self.cx_red = int(M['m10'] / M['m00'])
self.cy_red = int(M['m01'] / M['m00'])
(x, y), radius = cv2.minEnclosingCircle(item)
center = (int(x), int(y))
radius = int(radius)
cv2.circle(image, center, radius, (0, 255, 0), 2)
if center[0] + radius > self.cx_white + 30 and center[1] > h - 25:
stop_line_flag = True
elif center[0] + radius < self.cx_white + 10 and center[1] > h - 25:
if loc3_stop_time > 0 and location_index == 3:
stop_line_flag = True
flag_line_flag = False
else:
flag_line_flag = True
else:
pass
elif area > 1000:
M = cv2.moments(item)
self.cx_red = int(M['m10'] / M['m00'])
self.cy_red = int(M['m01'] / M['m00'])
(x, y), radius = cv2.minEnclosingCircle(item)
center = (int(x), int(y))
radius = int(radius)
cv2.circle(image, center, radius, (0, 255, 0), 2)
if center[0] + radius < self.cx_white + 20 and center[1] > h - 25:
if loc3_stop_time > 0 and location_index == 3:
stop_line_flag = True
flag_line_flag = False
else:
flag_line_flag = True
if is_loc2_backing:
backing_flag = True
elif area > 1000 and is_loc2_backing:
backing_flag = True
# red_mask = cv2.inRange(hsv, lower_red, upper_red)
# cv2.imshow("refer", image)
# cv2.waitKey(3)
def odom_callback(self, msg):
global cur_pos, cur_heading
pose_msg = msg.pose.pose
pose = numpify(pose_msg)
__, __, angles, translate, __ = decompose_matrix(pose)
cur_pos = translate[0:2]
cur_heading = angles[2]
def do_controll(self):
# Open the container
with self.sm:
# location 1
smach.StateMachine.add('moving_forward', moving_forward(),
transitions={'stop': 'stop',
'flag_line': 'turning_left',
'moving': 'moving_forward'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'stop_flag': 'stop_line_flag',
'flag_flag': 'flag_line_flag'})
smach.StateMachine.add('stop', stop(),
transitions={'keep': 'stop',
'recover': 'moving_after_stop'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading'})
smach.StateMachine.add('moving_after_stop', moving_after_stop(),
transitions={'moving': 'moving_after_stop',
'stop': 'moving_forward'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading'})
smach.StateMachine.add('turning_left', turning_left(),
transitions={'moving_a_bit': 'turning_left',
'left_turning': 'turning_left',
'stop_turning_loc1': 'checking_object_loc1',
'stop_turning_loc2': 'moving_loc2',
'stop_turning_loc3': 'checking_object_loc3'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading'})
smach.StateMachine.add('checking_object_loc1', checking_object_loc1(),
transitions={'overtime': 'turning_back',
'get_sth': 'turning_back',
'not_get_sth': 'checking_object_loc1'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('turning_back', turning_back(),
transitions={'back_turning': 'turning_back',
'stop_back': 'moving_forward'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading'})
# location 2
smach.StateMachine.add('moving_loc2', moving_loc2(),
transitions={'stop': 'back_dirction',
'moving': 'moving_loc2'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('back_dirction', back_dirction(),
transitions={'stop': 'moving_back_loc2',
'rotating': 'back_dirction'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('moving_back_loc2', moving_back_loc2(),
transitions={'stop': 'finish_loc2',
'moving': 'moving_back_loc2'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('finish_loc2', finish_loc2(),
transitions={'stop': 'moving_forward',
'rotating': 'finish_loc2'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
# location 4
# location 3
smach.StateMachine.add('checking_object_loc3', checking_object_loc3(),
transitions={'stay': 'checking_object_loc3',
'right_shape': 'right_turning_back',
'wrong_shape': 'turning_back'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('right_turning_back', right_turning_back(),
transitions={'stop': 'moving_terminate',
'rotating': 'right_turning_back'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
smach.StateMachine.add('moving_terminate', moving_terminate(),
transitions={'stop': 'success',
'moving': 'moving_terminate'},
remapping={'cur_time': 'current_time',
'cur_pose': 'current_pose',
'cur_loc': 'current_loc',
'cur_heading': 'current_heading',
'coun_loc1': 'counter_loc1'})
# Execute SMACH plan
outcome = self.sm.execute()
# rate.sleep()
rospy.spin()
self.sis.stop()
if __name__ == "__main__":
mc = main_controller()
mc.do_controll()
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# List of contributors:
# Jordi Esteve <jesteve@zikzakmedia.com>
# Ignacio Ibeas <ignacio@acysos.com>
# Dpto. Consultoría Grupo Opentia <consultoria@opentia.es>
# Pedro M. Baeza <pedro.baeza@tecnativa.com>
# Carlos Liébana <carlos.liebana@factorlibre.com>
# Hugo Santos <hugo.santos@factorlibre.com>
# Albert Cabedo <albert@gafic.com>
# Olivier Colson <oco@odoo.com>
# Roberto Lizana <robertolizana@trey.es>
{
"name" : "Spain - Accounting (PGCE 2008)",
"version" : "4.0",
"author" : "Spanish Localization Team",
'category': 'Localization',
"description": """
Spanish charts of accounts (PGCE 2008).
========================================
* Defines the following chart of account templates:
* Spanish general chart of accounts 2008
* Spanish general chart of accounts 2008 for small and medium companies
* Spanish general chart of accounts 2008 for associations
* Defines templates for sale and purchase VAT
* Defines tax templates
    * Defines fiscal positions for Spanish fiscal legislation
* Defines tax reports mod 111, 115 and 303
""",
"depends" : [
"account",
"base_iban",
"base_vat",
],
"data" : [
'data/account_group.xml',
'data/account_chart_template_data.xml',
'data/account.account.template-common.csv',
'data/account.account.template-pymes.csv',
'data/account.account.template-assoc.csv',
'data/account.account.template-full.csv',
'data/account_chart_template_account_account_link.xml',
'data/account_data.xml',
'data/account_tax_data.xml',
'data/account_fiscal_position_template_data.xml',
'data/account_chart_template_configure_data.xml',
],
}
|
from typing import List, Optional
from pydantic import *
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.schemas.reservation import (
HospReservationCreate,
ShopReservationCreate,
ShopReservationUpdate,
)
from app.models.reservation import HospReservation, ShopReservation
def read_multi_hosp(
db: Session, hospital_id: int, skip: int, limit: int
) -> List[HospReservation]:
return (
db.query(HospReservation)
.filter(HospReservation.hosp_id == hospital_id)
.offset(skip)
.limit(limit)
.all()
)
def read_hosp(
db: Session,
hospital_id: Optional[int] = None,
reservation_id: Optional[int] = None,
) -> Optional[HospReservation]:
if hospital_id is not None and reservation_id is not None:
return (
db.query(HospReservation)
.filter(HospReservation.hosp_id == hospital_id)
.filter(HospReservation.id == reservation_id)
.first()
)
elif hospital_id is not None:
return (
db.query(HospReservation)
.filter(HospReservation.hosp_id == hospital_id)
.first()
)
elif reservation_id is not None:
return (
db.query(HospReservation)
.filter(HospReservation.id == reservation_id)
.first()
)
else:
return db.query(HospReservation).first()
def create_hosp(
db: Session, hospital_id: int, rsrv_in: HospReservationCreate
) -> HospReservation:
db_reservation = HospReservation(
user_id=rsrv_in.user_id, hosp_id=hospital_id, date=rsrv_in.date
)
db.add(db_reservation)
db.commit()
db.refresh(db_reservation)
return db_reservation
def delete_hosp(db: Session, rsrv: HospReservation) -> Optional[HospReservation]:
rsrv_id = rsrv.id
db.delete(rsrv)
db.commit()
return db.query(HospReservation).filter(HospReservation.id == rsrv_id).first()
def read_multi_shop(
db: Session, shop_id: int, skip: int, limit: int,
) -> List[ShopReservation]:
return (
db.query(ShopReservation)
.filter(ShopReservation.shop_id == shop_id)
.offset(skip)
.limit(limit)
.all()
)
def read_shop(
db: Session, shop_id: int, reservation_id: int
) -> Optional[ShopReservation]:
return (
db.query(ShopReservation)
.filter(ShopReservation.shop_id == shop_id)
.filter(ShopReservation.id == reservation_id)
.first()
)
def create_shop(
db: Session, shop_id: int, rsrv_in: ShopReservationCreate
) -> ShopReservation:
db_reservation = ShopReservation(
user_id=rsrv_in.user_id,
shop_id=shop_id,
date=rsrv_in.date,
prescription_id=rsrv_in.prescription_id,
)
db.add(db_reservation)
db.commit()
db.refresh(db_reservation)
return db_reservation
def update_shop(
db: Session, rsrv: ShopReservation, rsrv_in: ShopReservationUpdate
) -> ShopReservation:
reservation_data = jsonable_encoder(rsrv)
update_data = rsrv_in.dict(skip_defaults=True)
for field in reservation_data:
if field in update_data:
setattr(rsrv, field, update_data[field])
db.add(rsrv)
db.commit()
db.refresh(rsrv)
return rsrv
|
print("I'm spam")
def hello(name):
print('Hello %s' % name)
|
import random
import time
from django.contrib.auth.hashers import make_password, check_password
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
# Create your views here.
from app.models import UserModel
from userauth.models import UserLogin
def login(request):
if request.method == 'GET':
return render(request, 'user/user_login.html')
if request.method == 'POST':
user = request.POST.get('username')
password = request.POST.get('password')
users = UserModel.objects.filter(username=user, is_delete=0).exists()
if users:
User = UserModel.objects.get(username=user)
if check_password(password, User.password):
s = '1234567890qwertyuiopasdfghjklzxcvbnm'
ticket = ''
for i in range(15):
ticket += random.choice(s)
ticket += str(int(time.time()))
outtime = int(time.time()) + 3000
isexists = UserLogin.objects.filter(user=User.id).exists()
if isexists:
userlogin = UserLogin.objects.get(user=User)
userlogin.ticket = ticket
userlogin.out_time = outtime
userlogin.save()
else:
UserLogin.objects.create(
user=User,
ticket=ticket,
out_time=outtime,
)
path = request.GET.get('path')
                if path is None:
path = 'cart'
response = HttpResponseRedirect('/app/%s' % path)
                # response.set_cookie('ticket', ticket, expires='<expiration date>')
response.set_cookie('ticket', ticket, max_age=3000)
return response
else:
return render(request, 'user/user_login.html')
else:
return render(request, 'user/user_login.html')
def regist(request):
if request.method == 'GET':
return render(request, 'user/user_register.html')
if request.method == 'POST':
user = request.POST.get('username')
password = request.POST.get('password')
email = request.POST.get('email')
icon = request.FILES.get('icon')
password = make_password(password)
UserModel.objects.create(
username=user,
password=password,
email=email,
icon=icon,
)
return HttpResponseRedirect('/auth/login/')
def logout(request):
ticket = request.COOKIES.get('ticket')
response = HttpResponseRedirect('/app/mine')
response.delete_cookie('ticket')
request.user = ''
UserLogin.objects.get(ticket=ticket).delete()
return response
|
from asyncio import sleep
from typing import List
from cozmo.objects import EvtObjectTapped
from cozmo.objects import LightCubeIDs
from .cube import NoteCubes
from .cube_mat import CubeMat
from .song_robot import SongRobot
from .sound_effects import play_collect_point_sound
class OptionPrompter:
"""A class to help the user select an option from three different choices."""
def __init__(self, song_robot: SongRobot):
self._song_robot = song_robot
async def get_option(self, prompt: str, options: List[str]) -> int:
"""Prompts the user to select from three different options by tapping a cube.
1. Cozmo will prompt the user with ``prompt``.
2. Cozmo will point to each cube saying the corresponding ``option``.
3. The light chaser effect will start signaling the game is awaiting user input.
4. Upon successful tap ``collect-point.wav`` is played and the cube flashes green.
:param prompt: The prompt for Cozmo to say.
:param options: A list of options associated with each cube.
        :return: The mat position of the tapped cube, mapped from its cube_id via :meth:`CubeMat.cube_id_to_position`.
"""
assert len(options) == 3
await self._song_robot.say_text(prompt).wait_for_completed()
        await sleep(1)  # asyncio.sleep must be awaited to actually pause
for i, cube_id in enumerate(LightCubeIDs):
prompt = options[i]
await self._song_robot.say_text(prompt).wait_for_completed()
mat_position = CubeMat.cube_id_to_position(cube_id)
action = await self._song_robot.tap_cube(mat_position)
await action.wait_for_completed()
note_cubes = NoteCubes.of(self._song_robot)
note_cubes.start_light_chasers()
event = await self._song_robot.world.wait_for(EvtObjectTapped)
cube_id = event.obj.cube_id
note_cubes.stop_light_chasers()
play_collect_point_sound()
await note_cubes.flash_single_cube_green(cube_id)
await sleep(1)
return CubeMat.cube_id_to_position(cube_id)
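# Usage sketch (illustrative only, not part of the original module): the docstring of
# get_option() above describes the prompt/tap protocol, and this hypothetical helper
# shows how it might be driven. The prompt and option strings are placeholders, and
# `song_robot` is assumed to be an already-initialised SongRobot.
async def _example_pick_option(song_robot: SongRobot) -> int:
    prompter = OptionPrompter(song_robot)
    # get_option() asserts exactly three options, one per light cube.
    options = ["first choice", "second choice", "third choice"]
    # Returns the mat position of the tapped cube (via CubeMat.cube_id_to_position).
    return await prompter.get_option("Please tap a cube to choose", options)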
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-12 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('history', '0002_auto_20160811_1946'),
]
operations = [
migrations.AlterField(
model_name='paramhistory',
name='health_states',
field=models.ManyToManyField(blank=True, related_name='history_health_states', to='dictionaries.Dictionary', verbose_name='Состояние здоровья'),
),
migrations.AlterField(
model_name='paramhistory',
name='parents_status',
field=models.ManyToManyField(blank=True, related_name='history_parents_status', to='dictionaries.Dictionary', verbose_name='Статус семьи'),
),
]
|
"""
__name__ = accuracy.py
__author__ = Yash Patel
__description__ = Visualizes the output dumps provided by gem5, specifically
focusing on the differences in accuracy resulting from running the various
branch predictors, i.e. accuracy relative to the same input programs. Also
highlights the latency/time in prediction
"""
import settings as s
import os
import numpy as np
from plotly.graph_objs import Bar, Figure, Layout
from plotly.offline import plot
def visualize_bps(isa, executable):
"""
Given int corresponding to the executable, takes all the outputs saved
in the output directory (specified in settings) and plots/saves
parallel bar graphs of the outputs
@param executable The integer corresponding to which executable to run
@return void
"""
files = os.listdir("{}/{}".format(s.OUTPUT_DIR, isa))
exec_name = s.EXEC_NAMES[executable]
to_visualize = [f for f in files if exec_name in f]
data = []
for f in to_visualize:
props = open("{}/{}/{}".format(s.OUTPUT_DIR, isa, f), "r").readlines()
prop_values = [float(prop.split(":")[1].strip()) for prop in props]
prop_labels = ["Conditional", "Indirect", "Latency"]
data.append(Bar(x=prop_labels, y=prop_values, name=f.split("_")[0]))
layout = Layout(
barmode='group'
)
fig = Figure(data=data, layout=layout)
plot(fig, filename="{}/{}.html".format(
s.FIGURE_DIR.format(isa), exec_name), auto_open=False)
def create_table(isa, pred):
"""
    Given the target ISA and the int corresponding to the branch predictor (from
    settings), creates the HTML-formatted table rows used to update the website page
    @param isa The ISA whose output directory is read
    @param pred The integer corresponding to which BP to analyze
    @return str The HTML table rows joined by newlines
"""
template = """
<tr>
<td>{}</td>
<td>{}</td>
<td>{}</td>
<td>{}</td>
</tr>"""
    files = os.listdir("{}/{}".format(s.OUTPUT_DIR, isa))
bp_name = s.BP_NAMES[pred]
to_tabulate = [f for f in files if bp_name in f]
full_table = []
for f in to_tabulate:
props = open("{}/{}/{}".format(s.OUTPUT_DIR, isa, f), "r").readlines()
print(props)
full_table.append(template.format(
f.split("_")[1],
props[0].split(":")[1].strip(),
props[1].split(":")[1].strip(),
props[2].split(":")[1].strip()))
return "\n".join(full_table)
def analyze_executable(isa, executable):
"""
Given int corresponding to the executable to test on, runs the
simulations for all the branch predictors, outputting results to
the gem5/m5cached directory, as both a figure and text output
@param executable The integer corresponding to which executable to run
@return void
"""
command = "build/{}/gem5.opt configs/branch/predict.py --exec {} --pred {}"
cond_incorrects = {}
indirect_incorrect = {}
for i in range(6):
os.system(command.format(isa, executable, i))
name = s.BP_NAMES[i]
dump = open(s.INPUT_FILE, "r").readlines()
attributes = {"conditional" : "condIncorrect",
"indirect" : "branchPredindirectMispredicted",
"latency" : "host_seconds"}
attribute_values = [[l.strip() for l in dump
if attribute in l][0].split()[1] for attribute in attributes.values()]
print("===============================================")
print("Completed {}".format(name))
print("===============================================")
with open("{}/{}/{}_{}.txt".format(s.OUTPUT_DIR, isa,
name, s.EXEC_NAMES[executable]), "w") as f:
for attribute, value in zip(attributes, attribute_values):
f.write("{} : {}\n".format(attribute, value))
if __name__ == "__main__":
for executable in range(len(s.EXEC_NAMES)): # range(, 13):
# analyze_executable("X86", executable)
analyze_executable("ARM", executable)
# visualize_bps("ARM", executable)
"""
for bp in range(len(s.BP_NAMES)):
name = s.BP_NAMES[bp]
table = create_table(bp)
with open("{}/{}_table.txt".format(s.TABLE_DIR, name) as f):
f.write(table)
"""
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
import cusignal
import pytest
from cusignal.test.utils import array_equal
from scipy import signal
cusignal.precompile_kernels()
# Missing
# vectorstrength
class BenchSpectral:
@pytest.mark.benchmark(group="LombScargle")
@pytest.mark.parametrize("num_in_samps", [2 ** 10])
@pytest.mark.parametrize("num_out_samps", [2 ** 16, 2 ** 18])
@pytest.mark.parametrize("precenter", [True, False])
@pytest.mark.parametrize("normalize", [True, False])
class BenchLombScargle:
def cpu_version(self, x, y, f, precenter, normalize):
return signal.lombscargle(x, y, f, precenter, normalize)
def bench_lombscargle_cpu(
self,
lombscargle_gen,
benchmark,
num_in_samps,
num_out_samps,
precenter,
normalize,
):
cpu_x, cpu_y, cpu_f, _, _, _ = lombscargle_gen(
num_in_samps, num_out_samps
)
benchmark(
self.cpu_version, cpu_x, cpu_y, cpu_f, precenter, normalize
)
def bench_lombscargle_gpu(
self,
lombscargle_gen,
benchmark,
num_in_samps,
num_out_samps,
precenter,
normalize,
):
cpu_x, cpu_y, cpu_f, gpu_x, gpu_y, gpu_f = lombscargle_gen(
num_in_samps, num_out_samps
)
output = benchmark(
cusignal.lombscargle,
gpu_x,
gpu_y,
gpu_f,
precenter,
normalize,
)
key = self.cpu_version(cpu_x, cpu_y, cpu_f, precenter, normalize)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Periodogram")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("window", ["flattop", "nuttall"])
@pytest.mark.parametrize("scaling", ["spectrum", "density"])
class BenchPeriodogram:
def cpu_version(self, cpu_sig, fs, window, scaling):
return signal.periodogram(
cpu_sig, fs, window=window, scaling=scaling
)
def bench_periodogram_cpu(
self, rand_data_gen, benchmark, num_samps, fs, window, scaling
):
cpu_sig, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, window, scaling)
def bench_periodogram_gpu(
self, rand_data_gen, benchmark, num_samps, fs, window, scaling
):
cpu_sig, gpu_sig = rand_data_gen(num_samps)
_, output = benchmark(
cusignal.periodogram,
gpu_sig,
fs,
window=window,
scaling=scaling,
)
_, key = self.cpu_version(cpu_sig, fs, window, scaling)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="PeriodogramComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("window", ["flattop", "nuttall"])
@pytest.mark.parametrize("scaling", ["spectrum", "density"])
class BenchPeriodogramComplex:
def cpu_version(self, cpu_sig, fs, window, scaling):
return signal.periodogram(
cpu_sig, fs, window=window, scaling=scaling
)
def bench_periodogram_complex_cpu(
self,
rand_complex_data_gen,
benchmark,
num_samps,
fs,
window,
scaling,
):
cpu_sig, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, window, scaling)
def bench_periodogram_complex_gpu(
self,
rand_complex_data_gen,
benchmark,
num_samps,
fs,
window,
scaling,
):
cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
_, output = benchmark(
cusignal.periodogram,
gpu_sig,
fs,
window=window,
scaling=scaling,
)
_, key = self.cpu_version(cpu_sig, fs, window, scaling)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Welch")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchWelch:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.welch(cpu_sig, fs, nperseg=nperseg)
def bench_welch_cpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_welch_gpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_data_gen(num_samps)
_, output = benchmark(cusignal.welch, gpu_sig, fs, nperseg=nperseg)
_, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="WelchComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchWelchComplex:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.welch(cpu_sig, fs, nperseg=nperseg)
def bench_welch_complex_cpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_welch_complex_gpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
_, output = benchmark(cusignal.welch, gpu_sig, fs, nperseg=nperseg)
_, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="CSD")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCSD:
def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
return signal.csd(cpu_x, cpu_y, fs, nperseg=nperseg)
def bench_csd_cpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, _ = rand_data_gen(num_samps)
cpu_y, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
def bench_csd_gpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, gpu_x = rand_data_gen(num_samps)
cpu_y, gpu_y = rand_data_gen(num_samps)
_, output = benchmark(
cusignal.csd, gpu_x, gpu_y, fs, nperseg=nperseg
)
_, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="CSDComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCSDComplex:
def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
return signal.csd(cpu_x, cpu_y, fs, nperseg=nperseg)
def bench_csd_complex_cpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, _ = rand_complex_data_gen(num_samps)
cpu_y, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
def bench_csd_complex_gpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, gpu_x = rand_complex_data_gen(num_samps)
cpu_y, gpu_y = rand_complex_data_gen(num_samps)
_, output = benchmark(
cusignal.csd, gpu_x, gpu_y, fs, nperseg=nperseg
)
_, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Spectrogram")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSpectrogram:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.spectrogram(cpu_sig, fs, nperseg=nperseg)
def bench_spectrogram_cpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_spectrogram_gpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_data_gen(num_samps)
_, _, output = benchmark(
cusignal.spectrogram, gpu_sig, fs, nperseg=nperseg
)
_, _, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="SpectrogramComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSpectrogramComplex:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.spectrogram(cpu_sig, fs, nperseg=nperseg)
def bench_spectrogram_complex_cpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_spectrogram_complex_gpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
_, _, output = benchmark(
cusignal.spectrogram, gpu_sig, fs, nperseg=nperseg
)
_, _, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="STFT")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSTFT:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.stft(cpu_sig, fs, nperseg=nperseg)
def bench_stft_cpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_stft_gpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_data_gen(num_samps)
_, _, output = benchmark(
cusignal.stft, gpu_sig, fs, nperseg=nperseg
)
_, _, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="STFTComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchSTFTComplex:
def cpu_version(self, cpu_sig, fs, nperseg):
return signal.stft(cpu_sig, fs, nperseg=nperseg)
def bench_stft_complex_cpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_sig, fs, nperseg)
def bench_stft_complex_gpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_sig, gpu_sig = rand_complex_data_gen(num_samps)
_, _, output = benchmark(
cusignal.stft, gpu_sig, fs, nperseg=nperseg
)
_, _, key = self.cpu_version(cpu_sig, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="Coherence")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCoherence:
def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
return signal.coherence(cpu_x, cpu_y, fs, nperseg=nperseg)
def bench_coherence_cpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, _ = rand_data_gen(num_samps)
cpu_y, _ = rand_data_gen(num_samps)
benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
def bench_coherence_gpu(
self, rand_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, gpu_x = rand_data_gen(num_samps)
cpu_y, gpu_y = rand_data_gen(num_samps)
_, output = benchmark(
cusignal.coherence, gpu_x, gpu_y, fs, nperseg=nperseg
)
_, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
@pytest.mark.benchmark(group="CoherenceComplex")
@pytest.mark.parametrize("num_samps", [2 ** 14])
@pytest.mark.parametrize("fs", [1.0, 1e6])
@pytest.mark.parametrize("nperseg", [1024, 2048])
class BenchCoherenceComplex:
def cpu_version(self, cpu_x, cpu_y, fs, nperseg):
return signal.coherence(cpu_x, cpu_y, fs, nperseg=nperseg)
def bench_coherence_complex_cpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, _ = rand_complex_data_gen(num_samps)
cpu_y, _ = rand_complex_data_gen(num_samps)
benchmark(self.cpu_version, cpu_x, cpu_y, fs, nperseg)
def bench_coherence_complex_gpu(
self, rand_complex_data_gen, benchmark, num_samps, fs, nperseg
):
cpu_x, gpu_x = rand_complex_data_gen(num_samps)
cpu_y, gpu_y = rand_complex_data_gen(num_samps)
_, output = benchmark(
cusignal.coherence, gpu_x, gpu_y, fs, nperseg=nperseg
)
_, key = self.cpu_version(cpu_x, cpu_y, fs, nperseg)
assert array_equal(cp.asnumpy(output), key)
# @pytest.mark.benchmark(group="Vectorstrength")
# class BenchVectorstrength:
# def cpu_version(self, cpu_sig):
# return signal.vectorstrength(cpu_sig)
# def bench_vectorstrength_cpu(self, benchmark):
# benchmark(self.cpu_version, cpu_sig)
# def bench_vectorstrength_gpu(self, benchmark):
# output = benchmark(cusignal.vectorstrength, gpu_sig)
# key = self.cpu_version(cpu_sig)
# assert array_equal(cp.asnumpy(output), key)
|
import os
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw
from sklearn.neighbors import NearestNeighbors
from settings import FILES_DIR, VGG_19_CHECKPOINT_FILENAME, VGG_19_CODE_LAYER, IMAGE_DATASET_PATH, IMAGE_SIZE
from vgg import vgg_19
from prepare import rescale_image
def get_images_codes(images, images_placeholder, end_points):
batch_size = 4
saver = tf.train.Saver(tf.get_collection('model_variables'))
with tf.Session() as sess:
saver.restore(sess, VGG_19_CHECKPOINT_FILENAME)
codes = None
for i in range(0, images.shape[0], batch_size):
batch_images = images[i:i + batch_size, ...]
batch_codes = sess.run(end_points[VGG_19_CODE_LAYER], feed_dict={images_placeholder: batch_images})
if codes is None:
codes = batch_codes
else:
codes = np.concatenate((codes, batch_codes))
return np.squeeze(codes, axis=(1, 2))
def get_dataset_image_codes(images_placeholder, end_points):
files = [os.path.join(IMAGE_DATASET_PATH, f) for f in os.listdir(IMAGE_DATASET_PATH)]
images = np.stack([np.asarray(Image.open(f)) for f in files])
image_codes = get_images_codes(images, images_placeholder, end_points)
return image_codes, files
def get_query_image_code(filenames, images_placeholder, end_points):
images = np.stack([np.asarray(rescale_image(Image.open(f))) for f in filenames])
image_codes = get_images_codes(images, images_placeholder, end_points)
return image_codes
def main():
images_placeholder = tf.placeholder(tf.float32, shape=(None, IMAGE_SIZE, IMAGE_SIZE, 3))
_, end_points = vgg_19(images_placeholder, num_classes=None, is_training=False)
dataset_image_codes, dataset_image_files = get_dataset_image_codes(images_placeholder, end_points)
print(dataset_image_codes.shape)
images = [os.path.join(FILES_DIR, f'image_{i}.jpg') for i in range(1, 5)]
query_image_codes = get_query_image_code(images, images_placeholder, end_points)
print(query_image_codes.shape)
neighbors_count = 2
nearest_neighbors = NearestNeighbors(n_neighbors=neighbors_count, metric='cosine').fit(dataset_image_codes)
_, indices = nearest_neighbors.kneighbors(query_image_codes)
space = 10
result_image_size = (
(neighbors_count + 1) * (IMAGE_SIZE + space) - space,
len(images) * (IMAGE_SIZE + space) - space
)
result_image = Image.new('RGB', result_image_size, 'white')
for i, filename in enumerate(images):
query_image = rescale_image(Image.open(filename))
draw = ImageDraw.Draw(query_image)
draw.line(
(
0, 0,
query_image.width - 1, 0,
query_image.width - 1, query_image.height - 1,
0, query_image.height - 1,
0, 0
),
fill='red', width=1)
result_image.paste(query_image, (0, i * (IMAGE_SIZE + space)))
for j in range(neighbors_count):
neighbor_image = Image.open(dataset_image_files[indices[i][j]])
result_image.paste(neighbor_image, ((j + 1) * (IMAGE_SIZE + space), i * (IMAGE_SIZE + space)))
result_image.show()
result_image.save(os.path.join(FILES_DIR, 'result.jpg'))
if __name__ == '__main__':
main()
|
import random
import re
from datetime import datetime
import types
import enum
from inspect import signature
from .scenario import *
def _if_dict(new_answer, nosleep=False):
    if 'message' not in new_answer:
        new_answer['message'] = ''
    if 'sleep' not in new_answer:
        if nosleep:
            new_answer['sleep'] = 0.0
        else:
            new_answer['sleep'] = 1 + 5 * round(random.random(), 3)
    if 'attach' not in new_answer:
        new_answer['attach'] = None
    if 'sticker' not in new_answer:
        new_answer['sticker'] = None
def run_scen(scen):
new_answers = scen.respond()
if not scen.answer:
if not new_answers:
new_answers = [m('')]
elif isinstance(new_answers, str):
new_answers = [m(new_answers)]
elif isinstance(new_answers, dict):
_if_dict(new_answers)
new_answers = [new_answers]
elif isinstance(new_answers, list):
for i in range(0, len(new_answers)):
if isinstance(new_answers[i], str):
new_answers[i] = m(new_answers[i])
elif isinstance(new_answers[i], list):
try:
new_answers[i] = m(*new_answers[i])
except Exception:
new_answers[i] = m('')
elif isinstance(new_answers[i], dict):
_if_dict(new_answers[i])
else:
raise Exception('Incorrect answer type from scenario ' +
str(type(scen)) + ' : ' + str(new_answers))
else:
raise Exception('Incorrect answer type from scenario ' +
str(type(scen)) + ' : ' + str(new_answers))
if scen.message:
scen.messages_history.append(scen.message)
else:
if not new_answers:
new_answers = [m(scen.answer)]
elif isinstance(new_answers, str):
if new_answers == '':
new_answers = [m(scen.answer)]
else:
new_answers = [m(new_answers)]
elif isinstance(new_answers, dict):
_if_dict(new_answers, nosleep=True)
if new_answers['message'] == '':
new_answers['message'] = scen.answer
new_answers = [new_answers]
elif isinstance(new_answers, list):
for i in range(0, len(new_answers)):
if isinstance(new_answers[i], str):
if new_answers[i] == '':
new_answers[i] = m(scen.answer)
else:
new_answers[i] = m(new_answers[i])
elif isinstance(new_answers[i], list):
try:
new_answers[i] = m(*new_answers[i])
except Exception:
new_answers[i] = m(scen.answer)
elif isinstance(new_answers[i], dict):
_if_dict(new_answers[i], nosleep=True)
if new_answers[i]['message'] == '':
new_answers[i]['message'] = scen.answer
else:
raise Exception('Incorrect answer type from scenario ' +
str(type(scen)) + ' : ' + str(new_answers))
else:
raise Exception('Incorrect answer type from scenario ' +
str(type(scen)) + ' : ' + str(new_answers))
return new_answers
def _wrap_respond(func):
if isinstance(func, str):
resp_string = func
func = lambda: resp_string
def wrap(self):
sign = signature(func)
if len(sign.parameters) == 0:
ret = func()
elif len(sign.parameters) == 1:
ret = func(self)
else:
raise Exception('Handler must take 0 or 1 parameter (self), no more.')
self.respond = None
return ret
return wrap
def _wrap_suitable(func):
def wrap(message, i_sender, interlocutors, is_personal, name, answer):
if isinstance(func, str):
if func[0] == '<' and func[-1] == '>':
words = func[1:-1].split('|')
return True if find_element(words, lambda w: w in message) else False
else:
words = func.split('|')
return True if message in words else False
else:
try:
sign = signature(func)
acceptable_args = {
'message': message,
'i_sender': i_sender,
'interlocutors': interlocutors,
'is_personal': is_personal,
'name': name,
'answer': answer
}
actual_args = []
for arg in sign.parameters:
if arg not in acceptable_args:
raise ValueError(
f"Argument '{arg}' is not allowed in suitable scenario function. " +
f"Acceptable args is: {acceptable_args}."
)
actual_args.append(acceptable_args[arg])
ret = func(*actual_args)
except TypeError:
ret = func()
return True if ret else False
return staticmethod(wrap)
def make_scen(scen, handler, suitable):
if not scen:
valid_types = (
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
types.BuiltinMethodType,
str
)
if handler:
if not isinstance(handler, valid_types):
raise TypeError('Handler must be function or str value')
else:
raise ValueError('Handler must be declared')
if suitable:
if not isinstance(suitable, valid_types):
raise TypeError('Suitable must be function or str value')
else:
suitable = lambda: True
scen = type(
'Scenario' + str(id(handler)),
(Scenario,),
{
'respond': _wrap_respond(handler),
'suitable': _wrap_suitable(suitable)
}
)
elif not isinstance(scen, Scenario):
# duct tape 2 or not
if not hasattr(scen, 'respond'):
raise ValueError('Handler must be declared')
if not hasattr(scen, 'suitable'):
scen.suitable = _wrap_suitable(lambda: True)
if not hasattr(scen, 'max_replicas'):
scen.max_replicas = DEFAULT_MAX_REPLICAS
if not hasattr(scen, 'max_idle_time'):
scen.max_idle_time = DEFAULT_MAX_IDLE_TIME
if not hasattr(scen, 'with_all'):
scen.with_all = DEFAULT_WITH_ALL_MODE
if not hasattr(scen, 'description'):
scen.description = 'Auto-generated scenario without description'
return scen
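# Usage sketch (illustrative only, not from the original source): make_scen() can turn a
# bare handler plus a "suitable" rule into a Scenario subclass. The reply text and the
# '<hi|hello>' trigger below are placeholder values using the string syntax understood
# by _wrap_suitable (angle brackets = substring match on any listed word).
def _example_greeting_scenario_class():
    return make_scen(
        scen=None,
        handler=lambda: 'Hello there!',  # wrapped into the generated respond() method
        suitable='<hi|hello>',           # fires when the message contains "hi" or "hello"
    )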
def left_seconds(from_datetime):
return (datetime.now() - from_datetime).seconds / 60
class Interlocutor(object):
def __init__(self, id, first_name, last_name):
self._id = id
self._first_name = first_name
self._last_name = last_name
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def first_name(self):
return self._first_name
@first_name.setter
def first_name(self, value):
self._first_name = value
@property
def last_name(self):
return self._last_name
@last_name.setter
def last_name(self, value):
self._last_name = value
class Envelope(object):
def __init__(self, event, sender_id, message):
self._message = message
self._sender_id = sender_id
self._event = event
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
@property
def sender_id(self):
return self._sender_id
@sender_id.setter
def sender_id(self, value):
self._sender_id = value
@property
def event(self):
return self._event
@event.setter
def event(self, value):
self._event = value
class ToSend(object):
def __init__(self, id, message, sleep=0.0,
attachment=None, sticker=None):
self.id = id
if sleep == 0.0:
sleep = 1.0 + 5 * round(random.random(), 3)
self.sleep = sleep
self.message = message
self.attach = attachment
self.sticker = sticker
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
@property
def sleep(self):
return self._sleep
@sleep.setter
def sleep(self, value):
self._sleep = value
@property
def attach(self):
return self._attach
@attach.setter
def attach(self, value):
self._attach = value
@property
def sticker(self):
return self._sticker
@sticker.setter
def sticker(self, value):
self._sticker = value
class Wrap(object):
def __init__(self, value):
self.val = value
@property
def val(self):
return self._val
@val.setter
def val(self, value):
self._val = value
|
from eggdriver.news.app import *
from eggdriver.news.config import *
from eggdriver.news.news import *
|