# content stringlengths 5 1.05M |  (dataset-export artifact, not Python)
# |---|
import os
import unittest
import tempfile
import struct
from pyoram.storage.block_storage import \
BlockStorageTypeFactory
from pyoram.encrypted_storage.encrypted_block_storage import \
EncryptedBlockStorage
from pyoram.crypto.aes import AES
from six.moves import xrange
thisdir = os.path.dirname(os.path.abspath(__file__))
class _TestEncryptedBlockStorage(object):
    # Abstract test mixin: concrete subclasses pick the storage backend,
    # the AES mode, and exactly one of an explicit key or a key size.
    _type_name = None      # block storage backend name ('file', 'mmap', ...)
    _aes_mode = None       # AES mode string ('ctr' or 'gcm')
    _test_key = None       # explicit key bytes, or None
    _test_key_size = None  # key size for a generated key, or None
    @classmethod
    def setUpClass(cls):
        """Create one shared encrypted storage file, initialized so block i
        holds block_size copies of byte i, and remember the key/blocks."""
        assert cls._type_name is not None
        assert cls._aes_mode is not None
        # at most one of _test_key / _test_key_size may be set by a subclass
        assert not ((cls._test_key is not None) and \
                    (cls._test_key_size is not None))
        # reserve a filename that is guaranteed NOT to exist on disk
        fd, cls._dummy_name = tempfile.mkstemp()
        os.close(fd)
        try:
            os.remove(cls._dummy_name)
        except OSError:  # pragma: no cover
            pass  # pragma: no cover
        cls._block_size = 25
        cls._block_count = 5
        cls._testfname = cls.__name__ + "_testfile.bin"
        cls._blocks = []
        f = EncryptedBlockStorage.setup(
            cls._testfname,
            cls._block_size,
            cls._block_count,
            key_size=cls._test_key_size,
            key=cls._test_key,
            storage_type=cls._type_name,
            aes_mode=cls._aes_mode,
            initialize=lambda i: bytes(bytearray([i])*cls._block_size),
            ignore_existing=True)
        f.close()
        # the key attribute is still readable after close()
        cls._key = f.key
        # expected plaintext for every block, used by the read/write tests
        for i in range(cls._block_count):
            data = bytearray([i])*cls._block_size
            cls._blocks.append(data)
@classmethod
def tearDownClass(cls):
try:
os.remove(cls._testfname)
except OSError: # pragma: no cover
pass # pragma: no cover
try:
os.remove(cls._dummy_name)
except OSError: # pragma: no cover
pass # pragma: no cover
    def test_setup_fails(self):
        """setup() must reject bad arguments and refuse to clobber an
        existing file; the dummy file must never be created."""
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # existing file, ignore_existing left at its default
        with self.assertRaises(IOError):
            EncryptedBlockStorage.setup(
                os.path.join(thisdir,
                             "baselines",
                             "exists.empty"),
                block_size=10,
                block_count=10,
                key=self._test_key,
                key_size=self._test_key_size,
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # existing file, ignore_existing explicitly False
        with self.assertRaises(IOError):
            EncryptedBlockStorage.setup(
                os.path.join(thisdir,
                             "baselines",
                             "exists.empty"),
                block_size=10,
                block_count=10,
                key=self._test_key,
                key_size=self._test_key_size,
                storage_type=self._type_name,
                aes_mode=self._aes_mode,
                ignore_existing=False)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # non-positive block_size
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=0,
                block_count=1,
                key=self._test_key,
                key_size=self._test_key_size,
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # non-positive block_count
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=0,
                key=self._test_key,
                key_size=self._test_key_size,
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # header_data must be a bytes-like object, not an int
        with self.assertRaises(TypeError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key=self._test_key,
                key_size=self._test_key_size,
                aes_mode=self._aes_mode,
                storage_type=self._type_name,
                header_data=2)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # aes_mode may not be None
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key=self._test_key,
                key_size=self._test_key_size,
                aes_mode=None,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # invalid (negative) key_size
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key_size=-1,
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # key must be bytes, not an int
        with self.assertRaises(TypeError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key=-1,
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # key and key_size are mutually exclusive
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key=AES.KeyGen(AES.key_sizes[0]),
                key_size=AES.key_sizes[0],
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
        self.assertEqual(os.path.exists(self._dummy_name), False)
        # key of an unsupported length
        with self.assertRaises(ValueError):
            EncryptedBlockStorage.setup(
                self._dummy_name,
                block_size=1,
                block_count=1,
                key=os.urandom(AES.key_sizes[0]+100),
                aes_mode=self._aes_mode,
                storage_type=self._type_name)
    def test_setup(self):
        """setup() creates a file of exactly the computed size with an
        empty header, and tampering with the plaintext index is detected."""
        # unique filename derived from the test id
        fname = ".".join(self.id().split(".")[1:])
        fname += ".bin"
        fname = os.path.join(thisdir, fname)
        if os.path.exists(fname):
            os.remove(fname)  # pragma: no cover
        bsize = 10
        bcount = 11
        fsetup = EncryptedBlockStorage.setup(
            fname,
            bsize,
            bcount,
            key=self._test_key,
            key_size=self._test_key_size,
            aes_mode=self._aes_mode,
            storage_type=self._type_name)
        fsetup.close()
        self.assertEqual(type(fsetup.raw_storage),
                         BlockStorageTypeFactory(self._type_name))
        with open(fname, 'rb') as f:
            flen = len(f.read())
            # on-disk size matches the computed total size exactly
            self.assertEqual(
                flen,
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name))
            # ...and strictly exceeds the header-less payload size
            self.assertEqual(
                flen >
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name,
                    ignore_header=True),
                True)
        with EncryptedBlockStorage(fname,
                                   key=fsetup.key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.header_data, bytes())
            self.assertEqual(fsetup.header_data, bytes())
            self.assertEqual(f.key, fsetup.key)
            self.assertEqual(f.block_size, bsize)
            self.assertEqual(fsetup.block_size, bsize)
            self.assertEqual(f.block_count, bcount)
            self.assertEqual(fsetup.block_count, bcount)
            self.assertEqual(f.storage_name, fname)
            self.assertEqual(fsetup.storage_name, fname)
        # tamper with the plaintext index
        with open(fname, 'r+b') as f:
            f.seek(0)
            f.write(struct.pack("!L",0))
        with self.assertRaises(ValueError):
            with EncryptedBlockStorage(fname,
                                       key=fsetup.key,
                                       storage_type=self._type_name) as f:
                pass  # pragma: no cover
        os.remove(fname)
    def test_setup_withdata(self):
        """setup() with header_data stores the header and enlarges the
        file by the corresponding amount."""
        fname = ".".join(self.id().split(".")[1:])
        fname += ".bin"
        fname = os.path.join(thisdir, fname)
        if os.path.exists(fname):
            os.remove(fname)  # pragma: no cover
        bsize = 10
        bcount = 11
        header_data = bytes(bytearray([0,1,2]))
        fsetup = EncryptedBlockStorage.setup(
            fname,
            block_size=bsize,
            block_count=bcount,
            key=self._test_key,
            key_size=self._test_key_size,
            aes_mode=self._aes_mode,
            storage_type=self._type_name,
            header_data=header_data)
        fsetup.close()
        self.assertEqual(type(fsetup.raw_storage),
                         BlockStorageTypeFactory(self._type_name))
        with open(fname, 'rb') as f:
            flen = len(f.read())
            self.assertEqual(
                flen,
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name,
                    header_data=header_data))
            self.assertTrue(len(header_data) > 0)
            # a non-empty header strictly increases the computed size
            self.assertEqual(
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name) <
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name,
                    header_data=header_data),
                True)
            self.assertEqual(
                flen >
                EncryptedBlockStorage.compute_storage_size(
                    bsize,
                    bcount,
                    aes_mode=self._aes_mode,
                    storage_type=self._type_name,
                    header_data=header_data,
                    ignore_header=True),
                True)
        with EncryptedBlockStorage(fname,
                                   key=fsetup.key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.header_data, header_data)
            self.assertEqual(fsetup.header_data, header_data)
            self.assertEqual(f.key, fsetup.key)
            self.assertEqual(f.block_size, bsize)
            self.assertEqual(fsetup.block_size, bsize)
            self.assertEqual(f.block_count, bcount)
            self.assertEqual(fsetup.block_count, bcount)
            self.assertEqual(f.storage_name, fname)
            self.assertEqual(fsetup.storage_name, fname)
        os.remove(fname)
    def test_init_noexists(self):
        """Opening a non-existent storage file raises IOError."""
        self.assertEqual(os.path.exists(self._dummy_name), False)
        with self.assertRaises(IOError):
            with EncryptedBlockStorage(
                    self._dummy_name,
                    key=self._key,
                    storage_type=self._type_name) as f:
                pass  # pragma: no cover
    def test_init_exists(self):
        """Opening an existing file requires a key, rejects an already-open
        raw storage object, exposes setup metadata, and is read-only on
        the underlying bytes."""
        self.assertEqual(os.path.exists(self._testfname), True)
        with open(self._testfname, 'rb') as f:
            databefore = f.read()
        # a key is required
        with self.assertRaises(ValueError):
            with EncryptedBlockStorage(self._testfname,
                                       storage_type=self._type_name) as f:
                pass  # pragma: no cover
        # wrapping an already-open raw storage object is rejected
        with self.assertRaises(ValueError):
            with BlockStorageTypeFactory(self._type_name)(self._testfname) as fb:
                with EncryptedBlockStorage(fb,
                                           key=self._key,
                                           storage_type=self._type_name) as f:
                    pass  # pragma: no cover
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.key, self._key)
            self.assertEqual(f.block_size, self._block_size)
            self.assertEqual(f.block_count, self._block_count)
            self.assertEqual(f.storage_name, self._testfname)
            self.assertEqual(f.header_data, bytes())
        self.assertEqual(os.path.exists(self._testfname), True)
        # open/close must not have modified the file contents
        with open(self._testfname, 'rb') as f:
            dataafter = f.read()
        self.assertEqual(databefore, dataafter)
    def test_read_block(self):
        """read_block() returns the expected plaintext; bytes_received
        grows by one ciphertext block per read and bytes_sent stays 0."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            # read every block twice forward, then twice backward
            for i, data in enumerate(self._blocks):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in enumerate(self._blocks):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in reversed(list(enumerate(self._blocks))):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            for i, data in reversed(list(enumerate(self._blocks))):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(self._blocks[i]))
            self.assertEqual(f.bytes_sent, 0)
            # 4 full passes => 4 * block_count ciphertext blocks received
            self.assertEqual(f.bytes_received,
                             self._block_count*f._storage.block_size*4)
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            # counters reset on a fresh open
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            self.assertEqual(list(bytearray(f.read_block(0))),
                             list(self._blocks[0]))
            self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
                             list(self._blocks[-1]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             f._storage.block_size*2)
    def test_write_block(self):
        """write_block() round-trips data; original blocks are restored so
        other tests see unchanged contents."""
        data = bytearray([self._block_count])*self._block_size
        self.assertEqual(len(data) > 0, True)
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            # no block currently holds the sentinel pattern
            for i in xrange(self._block_count):
                self.assertNotEqual(list(bytearray(f.read_block(i))),
                                    list(data))
            for i in xrange(self._block_count):
                f.write_block(i, bytes(data))
            for i in xrange(self._block_count):
                self.assertEqual(list(bytearray(f.read_block(i))),
                                 list(data))
            # restore the original contents for subsequent tests
            for i, block in enumerate(self._blocks):
                f.write_block(i, bytes(block))
            # 2 write passes and 2 read passes over all blocks
            self.assertEqual(f.bytes_sent,
                             self._block_count*f._storage.block_size*2)
            self.assertEqual(f.bytes_received,
                             self._block_count*f._storage.block_size*2)
    def test_read_blocks(self):
        """read_blocks() returns blocks in request order, supporting
        arbitrary orderings and repeated indices."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            data = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            data = f.read_blocks([0])
            self.assertEqual(len(data), 1)
            self.assertEqual(list(bytearray(data[0])),
                             list(self._blocks[0]))
            self.assertEqual(len(self._blocks) > 1, True)
            # rotated ordering: blocks 1..n-1 followed by 0
            data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data[:-1], 1):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            self.assertEqual(list(bytearray(data[-1])),
                             list(self._blocks[0]))
            self.assertEqual(f.bytes_sent, 0)
            # 2 full passes plus the single extra block read
            self.assertEqual(f.bytes_received,
                             (2*self._block_count+1)*f._storage.block_size)
    def test_yield_blocks(self):
        """yield_blocks() behaves like read_blocks() but as a generator."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            data = list(f.yield_blocks(list(xrange(self._block_count))))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            data = list(f.yield_blocks([0]))
            self.assertEqual(len(data), 1)
            self.assertEqual(list(bytearray(data[0])),
                             list(self._blocks[0]))
            self.assertEqual(len(self._blocks) > 1, True)
            # rotated ordering: blocks 1..n-1 followed by 0
            data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
            self.assertEqual(len(data), self._block_count)
            for i, block in enumerate(data[:-1], 1):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            self.assertEqual(list(bytearray(data[-1])),
                             list(self._blocks[0]))
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received,
                             (2*self._block_count+1)*f._storage.block_size)
    def test_write_blocks(self):
        """write_blocks() updates many blocks at once; originals are
        restored afterwards."""
        data = [bytearray([self._block_count])*self._block_size
                for i in xrange(self._block_count)]
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            self.assertEqual(f.bytes_sent, 0)
            self.assertEqual(f.bytes_received, 0)
            orig = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(orig), self._block_count)
            for i, block in enumerate(orig):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            f.write_blocks(list(xrange(self._block_count)),
                           [bytes(b) for b in data])
            new = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(new), self._block_count)
            for i, block in enumerate(new):
                self.assertEqual(list(bytearray(block)),
                                 list(data[i]))
            # restore the original contents
            f.write_blocks(list(xrange(self._block_count)),
                           [bytes(b) for b in self._blocks])
            orig = f.read_blocks(list(xrange(self._block_count)))
            self.assertEqual(len(orig), self._block_count)
            for i, block in enumerate(orig):
                self.assertEqual(list(bytearray(block)),
                                 list(self._blocks[i]))
            # 2 write passes, 3 read passes over all blocks
            self.assertEqual(f.bytes_sent,
                             self._block_count*f._storage.block_size*2)
            self.assertEqual(f.bytes_received,
                             self._block_count*f._storage.block_size*3)
def test_update_header_data(self):
fname = ".".join(self.id().split(".")[1:])
fname += ".bin"
fname = os.path.join(thisdir, fname)
if os.path.exists(fname):
os.remove(fname) # pragma: no cover
bsize = 10
bcount = 11
header_data = bytes(bytearray([0,1,2]))
fsetup = EncryptedBlockStorage.setup(
fname,
block_size=bsize,
block_count=bcount,
key=self._test_key,
key_size=self._test_key_size,
header_data=header_data)
fsetup.close()
new_header_data = bytes(bytearray([1,1,1]))
with EncryptedBlockStorage(fname,
key=fsetup.key,
storage_type=self._type_name) as f:
self.assertEqual(f.header_data, header_data)
f.update_header_data(new_header_data)
self.assertEqual(f.header_data, new_header_data)
with EncryptedBlockStorage(fname,
key=fsetup.key,
storage_type=self._type_name) as f:
self.assertEqual(f.header_data, new_header_data)
with self.assertRaises(ValueError):
with EncryptedBlockStorage(fname,
key=fsetup.key,
storage_type=self._type_name) as f:
f.update_header_data(bytes(bytearray([1,1])))
with self.assertRaises(ValueError):
with EncryptedBlockStorage(fname,
key=fsetup.key,
storage_type=self._type_name) as f:
f.update_header_data(bytes(bytearray([1,1,1,1])))
with EncryptedBlockStorage(fname,
key=fsetup.key,
storage_type=self._type_name) as f:
self.assertEqual(f.header_data, new_header_data)
os.remove(fname)
    def test_locked_flag(self):
        """A second concurrent open raises IOError unless ignore_lock=True;
        the lock is released when the first device closes."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            with self.assertRaises(IOError):
                with EncryptedBlockStorage(self._testfname,
                                           key=self._key,
                                           storage_type=self._type_name) as f1:
                    pass  # pragma: no cover
            with self.assertRaises(IOError):
                with EncryptedBlockStorage(self._testfname,
                                           key=self._key,
                                           storage_type=self._type_name) as f1:
                    pass  # pragma: no cover
            # ignore_lock bypasses the check...
            with EncryptedBlockStorage(self._testfname,
                                       key=self._key,
                                       storage_type=self._type_name,
                                       ignore_lock=True) as f1:
                pass
            # ...but does not clear the original lock
            with self.assertRaises(IOError):
                with EncryptedBlockStorage(self._testfname,
                                           key=self._key,
                                           storage_type=self._type_name) as f1:
                    pass  # pragma: no cover
            with EncryptedBlockStorage(self._testfname,
                                       key=self._key,
                                       storage_type=self._type_name,
                                       ignore_lock=True) as f1:
                pass
            with EncryptedBlockStorage(self._testfname,
                                       key=self._key,
                                       storage_type=self._type_name,
                                       ignore_lock=True) as f1:
                pass
        # lock released after the outer device closed
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as f:
            pass
    def test_read_block_cloned(self):
        """clone_device() yields an independent device; its traffic never
        counts against the original device's counters."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                # read every block twice forward, then twice backward
                for i, data in enumerate(self._blocks):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in enumerate(self._blocks):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in reversed(list(enumerate(self._blocks))):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                for i, data in reversed(list(enumerate(self._blocks))):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(self._blocks[i]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 self._block_count*f._storage.block_size*4)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                self.assertEqual(list(bytearray(f.read_block(0))),
                                 list(self._blocks[0]))
                self.assertEqual(list(bytearray(f.read_block(self._block_count-1))),
                                 list(self._blocks[-1]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 f._storage.block_size*2)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
    def test_write_block_cloned(self):
        """write_block() on a cloned device works and only the clone's
        traffic counters change."""
        data = bytearray([self._block_count])*self._block_size
        self.assertEqual(len(data) > 0, True)
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                for i in xrange(self._block_count):
                    self.assertNotEqual(list(bytearray(f.read_block(i))),
                                        list(data))
                for i in xrange(self._block_count):
                    f.write_block(i, bytes(data))
                for i in xrange(self._block_count):
                    self.assertEqual(list(bytearray(f.read_block(i))),
                                     list(data))
                # restore the original contents
                for i, block in enumerate(self._blocks):
                    f.write_block(i, bytes(block))
                self.assertEqual(f.bytes_sent,
                                 self._block_count*f._storage.block_size*2)
                self.assertEqual(f.bytes_received,
                                 self._block_count*f._storage.block_size*2)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
    def test_read_blocks_cloned(self):
        """read_blocks() on a cloned device mirrors the non-cloned test;
        the original device's counters stay at zero."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                data = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                data = f.read_blocks([0])
                self.assertEqual(len(data), 1)
                self.assertEqual(list(bytearray(data[0])),
                                 list(self._blocks[0]))
                self.assertEqual(len(self._blocks) > 1, True)
                data = f.read_blocks(list(xrange(1, self._block_count)) + [0])
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data[:-1], 1):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(list(bytearray(data[-1])),
                                 list(self._blocks[0]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 (2*self._block_count + 1)*f._storage.block_size)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
    def test_yield_blocks_cloned(self):
        """yield_blocks() on a cloned device mirrors the non-cloned test;
        the original device's counters stay at zero."""
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                data = list(f.yield_blocks(list(xrange(self._block_count))))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                data = list(f.yield_blocks([0]))
                self.assertEqual(len(data), 1)
                self.assertEqual(list(bytearray(data[0])),
                                 list(self._blocks[0]))
                self.assertEqual(len(self._blocks) > 1, True)
                data = list(f.yield_blocks(list(xrange(1, self._block_count)) + [0]))
                self.assertEqual(len(data), self._block_count)
                for i, block in enumerate(data[:-1], 1):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(list(bytearray(data[-1])),
                                 list(self._blocks[0]))
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received,
                                 (2*self._block_count + 1)*f._storage.block_size)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
    def test_write_blocks_cloned(self):
        """write_blocks() on a cloned device mirrors the non-cloned test;
        the original device's counters stay at zero."""
        data = [bytearray([self._block_count])*self._block_size
                for i in xrange(self._block_count)]
        with EncryptedBlockStorage(self._testfname,
                                   key=self._key,
                                   storage_type=self._type_name) as forig:
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
            with forig.clone_device() as f:
                self.assertEqual(forig.bytes_sent, 0)
                self.assertEqual(forig.bytes_received, 0)
                self.assertEqual(f.bytes_sent, 0)
                self.assertEqual(f.bytes_received, 0)
                orig = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(orig), self._block_count)
                for i, block in enumerate(orig):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                f.write_blocks(list(xrange(self._block_count)),
                               [bytes(b) for b in data])
                new = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(new), self._block_count)
                for i, block in enumerate(new):
                    self.assertEqual(list(bytearray(block)),
                                     list(data[i]))
                # restore the original contents
                f.write_blocks(list(xrange(self._block_count)),
                               [bytes(b) for b in self._blocks])
                orig = f.read_blocks(list(xrange(self._block_count)))
                self.assertEqual(len(orig), self._block_count)
                for i, block in enumerate(orig):
                    self.assertEqual(list(bytearray(block)),
                                     list(self._blocks[i]))
                self.assertEqual(f.bytes_sent,
                                 self._block_count*f._storage.block_size*2)
                self.assertEqual(f.bytes_received,
                                 self._block_count*f._storage.block_size*3)
            self.assertEqual(forig.bytes_sent, 0)
            self.assertEqual(forig.bytes_received, 0)
# Concrete parameterizations: {file, mmap} storage x {ctr, gcm} AES mode,
# each with either an explicit key or a key size (generated key).
class TestEncryptedBlockStorageFileCTRKey(_TestEncryptedBlockStorage,
                                          unittest.TestCase):
    _type_name = 'file'
    _aes_mode = 'ctr'
    _test_key = AES.KeyGen(16)
class TestEncryptedBlockStorageFileCTR32(_TestEncryptedBlockStorage,
                                         unittest.TestCase):
    _type_name = 'file'
    _aes_mode = 'ctr'
    _test_key_size = 16
class TestEncryptedBlockStorageFileGCMKey(_TestEncryptedBlockStorage,
                                          unittest.TestCase):
    _type_name = 'file'
    _aes_mode = 'gcm'
    _test_key = AES.KeyGen(24)
class TestEncryptedBlockStorageFileGCM32(_TestEncryptedBlockStorage,
                                         unittest.TestCase):
    _type_name = 'file'
    _aes_mode = 'gcm'
    _test_key_size = 24
class TestEncryptedBlockStorageMMapFileCTRKey(_TestEncryptedBlockStorage,
                                              unittest.TestCase):
    _type_name = 'mmap'
    _aes_mode = 'ctr'
    _test_key = AES.KeyGen(32)
class TestEncryptedBlockStorageMMapFileCTR32(_TestEncryptedBlockStorage,
                                             unittest.TestCase):
    _type_name = 'mmap'
    _aes_mode = 'ctr'
    _test_key_size = 32
class TestEncryptedBlockStorageMMapFileGCMKey(_TestEncryptedBlockStorage,
                                              unittest.TestCase):
    _type_name = 'mmap'
    _aes_mode = 'gcm'
    _test_key = AES.KeyGen(32)
class TestEncryptedBlockStorageMMapFileGCM32(_TestEncryptedBlockStorage,
                                             unittest.TestCase):
    _type_name = 'mmap'
    _aes_mode = 'gcm'
    _test_key_size = 32
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
# |  (dataset-export row separator between two concatenated source files)
import inspect
import os
import re
import types
import pymel.internal.apicache as apicache
import pymel.internal.parsers as parsers
import pymel.internal.startup
import pymel.util.arguments as arguments
import pymel.versions as versions
from pprint import pprint
from pymel.util.enum import Enum
from pymel.util.arguments import AddedKey, ChangedKey, RemovedKey
from future.utils import PY2
from past.builtins import basestring, unicode
# Resolve paths relative to this script's location inside the repo.
THIS_FILE = inspect.getsourcefile(lambda: None)
THIS_DIR = os.path.dirname(THIS_FILE)
repodir = os.path.dirname(THIS_DIR)
cachedir = os.path.join(repodir, 'pymel', 'cache')
# the two Maya API cache versions being compared
cacheversions = {
    'old': 2020,
    'new': 2021,
}
cachefiles = {key: 'mayaApi{}.py'.format(ver)
              for key, ver in cacheversions.items()}
# when True, strip skipped/no-op entries and write '.preprocessed' caches
DO_PREPROCESS = False
def preprocess(cache):
    """Strip entries from a raw api cache that the parsers deliberately skip.

    ``cache`` is the sequence returned by ApiCache.read(); its last element
    is apiClassInfo.  Removes classes rejected by ApiDocParser.shouldSkip,
    methods listed in XmlApiDocParser.SKIP_PARSING_METHODS, and empty or
    dangling typeQualifiers entries.  Mutates and returns ``cache``.
    """
    apiClassInfo = cache[-1]
    # remove skipped entries
    for clsname in list(apiClassInfo):
        if parsers.ApiDocParser.shouldSkip(clsname):
            apiClassInfo.pop(clsname, None)
    for clsname, methName in parsers.XmlApiDocParser.SKIP_PARSING_METHODS:
        # guard both lookups: the class may be absent entirely (or removed
        # just above); the original `.get(clsname, {})['methods']` raised
        # KeyError in that case, defeating the point of the .get default
        methods = apiClassInfo.get(clsname, {}).get('methods')
        if methods:
            methods.pop(methName, None)
    for clsInfo in apiClassInfo.values():
        for overloads in clsInfo['methods'].values():
            for methInfo in overloads:
                argInfo = methInfo['argInfo']
                quals = methInfo.get('typeQualifiers', {})
                # drop qualifiers for args that vanished or are empty
                for argName, argQuals in list(quals.items()):
                    if argName not in argInfo or not argQuals:
                        del quals[argName]
    return cache
# Read both cache versions (optionally preprocessing and re-writing them),
# keyed by 'old'/'new'.
caches = {}
for key, cachefile in cachefiles.items():
    cachepath = os.path.join(cachedir, cachefile)
    cache_globals = {}  # NOTE(review): appears unused
    cacheInst = apicache.ApiCache()
    data = cacheInst.read(path=cachepath)
    if DO_PREPROCESS:
        data = preprocess(data)
        cachepath_namebase, cachepath_ext = os.path.splitext(cachepath)
        preprocessed_path = cachepath_namebase + '.preprocessed' + cachepath_ext
        cacheInst.write(data, path=preprocessed_path)
    caches[key] = data
# we only care about the diffs of the classInfo
both, onlyOld, onlyNew, diffs = arguments.compareCascadingDicts(
    caches['old'][-1],
    caches['new'][-1],
    useAddedKeys=True, useChangedKeys=True)
################################################################################
# iteration utils
class AnyKey(object):
    """Marker used in a multiKey to mean 'iterate every key at this level'."""
    def __init__(self, comment):
        """Accepts (and ignores) a comment string for in-code annotation."""
        pass
class NoValue(object):
    """Unique sentinel distinguishing 'key missing' from a stored None."""
    pass
def iterDiffDictForKey(cascadingDict, multiKey, onlyDicts=False):
    """Walk cascadingDict following multiKey, yielding (keyPath, value).

    Each element of multiKey is a fixed key, a list of keys, or AnyKey
    (the class itself or an instance), which matches every key present at
    that level.  keyPath is the tuple of actual keys traversed.
    """
    if not multiKey:
        raise ValueError("multiKey must have at least one item")
    first = multiKey[0]
    rest = multiKey[1:]
    if first is AnyKey or isinstance(first, AnyKey):
        candidates = list(cascadingDict.keys())
    elif isinstance(first, list):
        candidates = first
    else:
        candidates = [first]
    for candidate in candidates:
        value = cascadingDict.get(candidate, NoValue)
        if value is NoValue:
            continue
        if rest:
            # more key levels remain: only dicts can be descended into
            if isinstance(value, dict):
                for subPath, subVal in iterDiffDictForKey(
                        value, rest, onlyDicts=onlyDicts):
                    yield (candidate,) + subPath, subVal
        elif not onlyDicts or isinstance(value, dict):
            yield (candidate,), value
# convenience iterator at the method-overload level
def iterOverloadDiffs(onlyDicts=False):
    """Yield (keyPath, diff) for every method-overload entry in ``diffs``."""
    levels = (
        AnyKey('classname'),
        'methods',
        AnyKey('methodname'),
        AnyKey('overloadIndex'),
    )
    for found in iterDiffDictForKey(diffs, levels, onlyDicts=onlyDicts):
        yield found
#eliminate known diffs
################################################################################
# Doc for 'className' method got more verbose, and it became static
#'className': {0: {'doc': ChangedKey('Class name.', 'Returns the name of this class.')
iterKey = (
    AnyKey('classname'),
    'methods',
    'className',  # this is potentially confusing - the methodName IS 'className'
    AnyKey('overloadIndex'),
)
for _, overloadDiff in iterDiffDictForKey(diffs, iterKey):
    # drop the known doc rewording (accepted in either direction)
    docDiff = overloadDiff.get('doc')
    if isinstance(docDiff, ChangedKey):
        if set([
            docDiff.oldVal.lower().rstrip('.'),
            docDiff.newVal.lower().rstrip('.'),
        ]) == set([
            'class name',
            'returns the name of this class',
        ]):
            del overloadDiff['doc']
    # drop the known non-static -> static transition
    staticDiff = overloadDiff.get('static')
    if (isinstance(staticDiff, ChangedKey)
            and not staticDiff.oldVal
            and staticDiff.newVal):
        del overloadDiff['static']
################################################################################
# It's ok if it didn't have a doc, and now it does
def hasNewDoc(arg):
    """True when a diff dict's 'doc' entry represents documentation that
    was newly added (AddedKey, or ChangedKey from an empty old value)."""
    if not isinstance(arg, dict):
        return False
    docDiff = arg.get('doc')
    if not docDiff:
        return False
    if isinstance(docDiff, AddedKey):
        return True
    return isinstance(docDiff, ChangedKey) and not docDiff.oldVal
def removeDocDiff(arg):
    """Drop the 'doc' entry from *arg* in place and return it.

    Raises KeyError if 'doc' is absent, matching the original ``del``.
    """
    arg.pop('doc')
    return arg
# discard diffs that merely add missing documentation
arguments.deepPatch(diffs, hasNewDoc, removeDocDiff)
################################################################################
# It's ok if the doc is now longer
# (as long as it doesn't now include "\param" or "\return" codes)
def hasLongerDoc(arg):
    """True when the 'doc' entry is a ChangedKey whose new value simply
    extends the old one, and the extension adds no \\param/\\return codes."""
    if not isinstance(arg, dict):
        return False
    docDiff = arg.get('doc')
    if not docDiff or not isinstance(docDiff, ChangedKey):
        return False
    if not docDiff.newVal.startswith(docDiff.oldVal):
        return False
    added = docDiff.newVal[len(docDiff.oldVal):]
    return '\\param' not in added and '\\return' not in added
# discard diffs where documentation merely grew
arguments.deepPatch(diffs, hasLongerDoc, removeDocDiff)
################################################################################
# It's ok if the doc is now shorter, if it seems to have been truncated at a
# sentence end.
def wasTrimmedToSentence(arg):
    """True when the 'doc' entry is a ChangedKey whose new value is the old
    value truncated exactly at a sentence boundary ('.' followed by ' ')."""
    if not isinstance(arg, dict):
        return False
    docDiff = arg.get('doc')
    if not docDiff or not isinstance(docDiff, ChangedKey):
        return False
    longer, shorter = docDiff.oldVal, docDiff.newVal
    if not longer.startswith(shorter):
        return False
    if not shorter.endswith('.'):
        return False
    # the cut point must have been followed by a space in the old doc
    return longer[len(shorter)] == ' '
# discard diffs where documentation was trimmed at a sentence boundary
arguments.deepPatch(diffs, wasTrimmedToSentence, removeDocDiff)
################################################################################
# It's ok if the doc changed for a deprecated function
# drop doc changes (overload-level and per-argument) on deprecated overloads
for multiKey, overloadDiff in iterOverloadDiffs(onlyDicts=True):
    overloadData = arguments.getCascadingDictItem(caches['new'][-1],
                                                  multiKey)
    if not overloadData.get('deprecated'):
        continue
    overloadDiff.pop('doc', None)
    # check for changed docs for params
    argInfoDiff = overloadDiff.get('argInfo')
    if isinstance(argInfoDiff, dict):
        for argDiffs in argInfoDiff.values():
            if not isinstance(argDiffs, dict):
                continue
            argDiffs.pop('doc', None)
################################################################################
# ignore changes in only capitalization or punctuation
# ...also strip out any "\\li " or <b>/</b> items
# ...or whitespace length...
# characters treated as insignificant when comparing doc strings
ASCII_PUNCTUATION = """;-'"`,."""
UNICODE_PUNCTUATION = (unicode(ASCII_PUNCTUATION) \
    # single left/right quote
    + u'\u2018\u2019')
# str.translate mapping: punctuation code points -> delete
PUNCTUATION_TABLE = {ord(x): None for x in UNICODE_PUNCTUATION}
def strip_punctuation(input):
    """Remove punctuation from a (unicode or byte) string."""
    # PY2 byte strings use the (table, deletechars) translate signature;
    # PY2 unicode and PY3 str both take a mapping table
    if PY2:
        if isinstance(input, str):
            return input.translate(None, ASCII_PUNCTUATION)
    return input.translate(PUNCTUATION_TABLE)
# collapse any whitespace run into one space; raw string avoids the
# invalid-escape DeprecationWarning that the non-raw '\s+' triggers
MULTI_SPACE_RE = re.compile(r'\s+')
def normalize_str(input):
    """Normalize a doc string for fuzzy comparison: lowercase, strip
    punctuation, drop ``\\li`` bullets and ``<b>``/``</b>`` markup, remove
    newlines, and collapse whitespace runs."""
    result = strip_punctuation(input.lower())
    result = result.replace(' \\li ', ' ')
    result = result.replace('<b>', '')
    result = result.replace('</b>', '')
    result = result.replace('\n', '')
    result = MULTI_SPACE_RE.sub(' ', result)
    return result
def same_after_normalize(input):
    """True for a ChangedKey whose old/new strings differ only by
    formatting (per normalize_str)."""
    if not isinstance(input, ChangedKey):
        return False
    oldVal = input.oldVal
    newVal = input.newVal
    if not (isinstance(oldVal, basestring) and isinstance(newVal, basestring)):
        return False
    return normalize_str(oldVal) == normalize_str(newVal)
def returnNone(input):
    """deepPatch replacement callback: substitute None for any match."""
    return None
arguments.deepPatch(diffs, same_after_normalize, returnNone)
################################################################################
# enums are now recorded in a way where there's no documentation for values...
# {'enums': {'ColorTable': {'valueDocs': {'activeColors': RemovedKey('Colors for active objects.'),
#                                         'backgroundColor': RemovedKey('Colors for background color.'),
#                                         'dormantColors': RemovedKey('Colors for dormant objects.'),
#                                         'kActiveColors': RemovedKey('Colors for active objects.'),
#                                         'kBackgroundColor': RemovedKey('Colors for background color.'),
#                                         'kDormantColors': RemovedKey('Colors for dormant objects.'),
#                                         'kTemplateColor': RemovedKey('Colors for templated objects.'),
#                                         'templateColor': RemovedKey('Colors for templated objects.')}},
iterKey = (
    AnyKey('classname'),
    'enums',
    AnyKey('enumname'),
)
for _, enumDiffs in iterDiffDictForKey(diffs, iterKey, onlyDicts=True):
    valueDocs = enumDiffs.get('valueDocs')
    if not isinstance(valueDocs, dict):
        continue
    # only ignore when EVERY value doc was removed (the wholesale format
    # change described above) - a partial removal is still a real diff
    if all(isinstance(val, arguments.RemovedKey) for val in valueDocs.values()):
        del enumDiffs['valueDocs']
################################################################################
# Enums that have new values added are ok
def enums_with_new_values(input):
    """Matcher for ChangedKey diffs between two Enum objects where the new
    Enum is exactly the old Enum plus some extra keys (same name, same docs
    for shared keys)."""
    if not isinstance(input, ChangedKey):
        return False
    oldVal = input.oldVal
    newVal = input.newVal
    if not (isinstance(oldVal, Enum) and isinstance(newVal, Enum)):
        return False
    if oldVal.name != newVal.name:
        return False
    oldKeys = set(oldVal._keys)
    newKeys = set(newVal._keys)
    if not newKeys.issuperset(oldKeys):
        return False
    # strip the added keys out of copies of the new enum's key/doc dicts;
    # what remains must equal the old enum's dicts exactly
    onlyNewKeys = newKeys - oldKeys
    prunedNewKeyDict = dict(newVal._keys)
    prunedNewDocDict = dict(newVal._docs)
    for k in onlyNewKeys:
        del prunedNewKeyDict[k]
        prunedNewDocDict.pop(k, None)
    if not prunedNewKeyDict == oldVal._keys:
        return False
    if not prunedNewDocDict == oldVal._docs:
        return False
    return True
arguments.deepPatch(diffs, enums_with_new_values, returnNone)
################################################################################
# new enums are ok
iterKey = (
    AnyKey('classname'),
    ['enums', 'pymelEnums'],
)
for _, enums in iterDiffDictForKey(diffs, iterKey, onlyDicts=True):
    # list() so we can delete from `enums` while iterating it
    for enumName, enumDiff in list(enums.items()):
        if isinstance(enumDiff, AddedKey):
            del enums[enumName]
################################################################################
# new methods are ok
iterKey = (
    AnyKey('classname'),
    'methods',
)
for multiKey, methods in iterDiffDictForKey(diffs, iterKey, onlyDicts=True):
    # collect names of entirely-new methods so we can also clean up the
    # invertibles / pymelMethods diffs they caused
    newMethods = []
    for methodName, methodDiff in list(methods.items()):
        if isinstance(methodDiff, AddedKey):
            del methods[methodName]
            newMethods.append(methodName)
        # may not be an entirely new method, but maybe there's new overloads?
        elif isinstance(methodDiff, dict):
            for key, overloadDiff in list(methodDiff.items()):
                if isinstance(overloadDiff, AddedKey):
                    del methodDiff[key]
    if not newMethods:
        continue
    clsname = multiKey[0]
    clsDiffs = diffs[clsname]
    # check if the new methods were invertibles, and clear up diffs due to that
    if len(newMethods) >= 2:
        invertibleDiffs = clsDiffs.get('invertibles')
        if not isinstance(invertibleDiffs, dict):
            continue
        # build up a set of all the invertibles in the new and old cache.
        # Then, from the set of new invertibles, subtract out all new methods.
        # If what's left over is the same as the oldInvertibles, we can ignore
        # the changes to the invertibles
        allInvertibles = {'old': set(), 'new': set()}
        for oldNew in ('old', 'new'):
            invertibles = caches[oldNew][-1][clsname]['invertibles']
            for setGet in invertibles:
                allInvertibles[oldNew].update(setGet)
        newInvertMinusNewMethods = allInvertibles['new'].difference(newMethods)
        if newInvertMinusNewMethods == allInvertibles['old']:
            del clsDiffs['invertibles']
    # a brand-new method also shows up as an AddedKey under pymelMethods;
    # those entries are implied by the new method and can be dropped too
    pymelMethodDiffs = clsDiffs.get('pymelMethods')
    if not isinstance(pymelMethodDiffs, dict):
        continue
    for newMethod in newMethods:
        if isinstance(pymelMethodDiffs.get(newMethod), AddedKey):
            del pymelMethodDiffs[newMethod]
################################################################################
# new args are ok
for multiKey, overloadDiff in iterOverloadDiffs(onlyDicts=True):
    # check to see if the ONLY change to args is AddedKeys..
    args = overloadDiff.get('args')
    if not isinstance(args, dict):
        continue
    if not all(isinstance(x, AddedKey) for x in args.values()):
        continue
    # Ok, args only had added keys - get a list of the names...
    # (each args AddedKey's newVal is a tuple whose first item is the name)
    newArgs = set(x.newVal[0] for x in args.values())
    # the args MUST also appear as AddedKeys in argInfo
    argInfo = overloadDiff.get('argInfo')
    if not isinstance(argInfo, dict):
        continue
    if not all(isinstance(argInfo.get(x), AddedKey) for x in newArgs):
        continue
    # ok, everything seems to check out - start deleting
    # we confirmed that all the diffs in args are AddedKey, remove them all!
    del overloadDiff['args']
    # remove newArgs from 'argInfo'
    for newArg in newArgs:
        del argInfo[newArg]
    # remove newArgs from defaults, types, typeQualifiers - these all key on
    # argName
    for subItemName in ('defaults', 'types', 'typeQualifiers'):
        subDict = overloadDiff.get(subItemName)
        if isinstance(subDict, AddedKey):
            # the whole sub-dict is new: drop it only if it covers exactly
            # the new args
            subDict = subDict.newVal
            if set(subDict) == newArgs:
                del overloadDiff[subItemName]
        elif isinstance(subDict, dict):
            for newArg in newArgs:
                argDiff = subDict.get(newArg)
                if isinstance(argDiff, AddedKey):
                    del subDict[newArg]
    # remove newArgs from inArgs / outArgs - these are lists, and so key on
    # arbitrary indices
    for subItemName in ('inArgs', 'outArgs'):
        subDict = overloadDiff.get(subItemName)
        if isinstance(subDict, AddedKey):
            subList = subDict.newVal
            # fixed: compare against the already-extracted subList instead of
            # re-reading subDict.newVal (the local was previously unused)
            if set(subList) == newArgs:
                del overloadDiff[subItemName]
        elif isinstance(subDict, dict):
            for key, val in list(subDict.items()):
                if isinstance(val, AddedKey) and val.newVal in newArgs:
                    del subDict[key]
################################################################################
# new classes are ok
# a whole class that is an AddedKey at the top level is a brand-new class;
# those are expected and not a regression
for clsname, clsDiffs in list(diffs.items()):
    if isinstance(clsDiffs, AddedKey):
        del diffs[clsname]
################################################################################
# Lost docs
# these params or methods no longer have documentation in the xml... not great,
# but nothing the xml parser can do about that
# LOST_ALL_DETAIL_DOCS = {
# ('MColor', 'methods', 'get', 1,),
# ('MColor', 'methods', 'get', 2,),
# }
#
# for multiKey in LOST_ALL_DETAIL_DOCS:
# try:
# overloadInfo = arguments.getCascadingDictItem(diffs, multiKey)
# except KeyError:
# continue
# if not isinstance(overloadInfo, dict):
# continue
#
# # deal with missing returnInfo doc
# returnInfo = overloadInfo.get('returnInfo')
# if isinstance(returnInfo, dict):
# doc = returnInfo.get('doc')
# if (isinstance(doc, arguments.RemovedKey)
# or (isinstance(doc, ChangedKey)
# and not doc.newVal)):
# del returnInfo['doc']
#
# # deal with missing param docs
# argInfo = overloadInfo.get('argInfo')
# if not isinstance(argInfo, dict):
# continue
# for argName, argDiff in argInfo.items():
# if not isinstance(argDiff, dict):
# continue
# doc = argDiff.get('doc')
# if (isinstance(doc, arguments.RemovedKey)
# or (isinstance(doc, ChangedKey)
# and not doc.newVal)):
# del argDiff['doc']
# Temp - ignore all doc deletion diffs
# a doc counts as "deleted" when the diff is a RemovedKey, or a ChangedKey
# whose new value is empty/falsey
for _, overloadDiff in iterOverloadDiffs(onlyDicts=True):
    # ignore method doc removal
    doc = overloadDiff.get('doc')
    if (isinstance(doc, arguments.RemovedKey)
            or (isinstance(doc, ChangedKey)
                and not doc.newVal)):
        del overloadDiff['doc']
    # ignore returnInfo doc removal
    returnInfo = overloadDiff.get('returnInfo')
    if isinstance(returnInfo, dict):
        doc = returnInfo.get('doc')
        if (isinstance(doc, arguments.RemovedKey)
                or (isinstance(doc, ChangedKey)
                    and not doc.newVal)):
            del returnInfo['doc']
    # ignore param doc removal
    for _, argDiff in iterDiffDictForKey(overloadDiff,
                                         ('argInfo', AnyKey('argname')),
                                         onlyDicts=True):
        doc = argDiff.get('doc')
        if (isinstance(doc, arguments.RemovedKey)
                or (isinstance(doc, ChangedKey)
                    and not doc.newVal)):
            del argDiff['doc']
################################################################################
# Can ignore
def delDiff(multiKey, diffsDict=None):
    """Delete the entry at the nested key-path `multiKey`, then prune any
    parent dicts that became empty as a result.

    Operates on the module-level `diffs` unless `diffsDict` is given.
    If the path cannot be fully traversed, nothing is changed.
    """
    node = diffs if diffsDict is None else diffsDict
    trail = []
    for key in multiKey:
        trail.append((node, key))
        try:
            node = node[key]
        except Exception:
            # path missing (or a non-container along the way) - nothing to do
            return
    # delete from the innermost container outward, stopping at the first
    # parent that still has other entries
    for parent, key in reversed(trail):
        del parent[key]
        if parent:
            break
# key-paths whose diffs are always expected and safe to discard
KNOWN_IGNORABLE = [
    # MFn.Type has a bunch of changes each year...
    ('MFn', 'enums', 'Type'),
    ('MFn', 'pymelEnums', 'Type'),
]
for multiKey in KNOWN_IGNORABLE:
    delDiff(multiKey)
################################################################################
# MFnDependencyNode.isNameLocked/setNameLocked haven't existed on the node
# since 2017 (though they still appeared in the xml in 2019). They never
# seem to have been in the official docs...
mfnDepDiffs = diffs.get('MFnDependencyNode', {})
methodDiffs = mfnDepDiffs.get('methods', {})
for methName in ('isNameLocked', 'setNameLocked'):
    methDiff = methodDiffs.get(methName)
    if isinstance(methDiff, arguments.RemovedKey):
        del methodDiffs[methName]
# removing those two phantom methods shifts the invertibles list by one
# entry; recognize exactly that shift (indices 4 and 5) and discard it
invertDiffs = mfnDepDiffs.get('invertibles', {})
if (invertDiffs.get(4) == {0: ChangedKey('setNameLocked', 'setUuid'),
                           1: ChangedKey('isNameLocked', 'uuid')}
        and invertDiffs.get(5) == RemovedKey(('setUuid', 'uuid'))):
    del invertDiffs[4]
    del invertDiffs[5]
################################################################################
# New subclasses
# mapping from an old value to the list of acceptable replacement values
OK_NEW_VALS = {
    'MCreatorFunction': ['MCustomEvaluatorCreatorFunction',
                         'MTopologyEvaluatorCreatorFunction'],
    'nullptr': [None],
}
def isOkChange(val):
    """True if `val` is a ChangedKey replacing a value with one of its
    whitelisted successors in OK_NEW_VALS."""
    if not isinstance(val, ChangedKey):
        return False
    if val.oldVal not in OK_NEW_VALS:
        return False
    return val.newVal in OK_NEW_VALS[val.oldVal]
arguments.deepPatch(diffs, isOkChange, returnNone)
################################################################################
# CAN IGNORE - 2021
# one-off diffs manually reviewed and deemed acceptable for the 2021 caches
CAN_IGNORE_2021 = [
    # The docstring for MFnAirfield got messed up
    ('MFnAirField', 'methods', 'setSpeed', 0, 'argInfo', 'value', 'doc'),
    # docstring got messed up
    ('MFnMesh', 'methods', 'create', 5, 'argInfo', 'edgeFaceDesc', 'doc'),
    ('MFnMesh', 'methods', 'create', 6, 'argInfo', 'edgeFaceDesc', 'doc'),
    # docstring changed
    ('MFnSubdNames', 'methods', 'baseFaceIndexFromId', 0, 'doc'),
    # This is a valid fix - MFnIkJoint::getPreferedAngle (the mispelled,
    # obsolete one) formerly had 'rotation' improperly marked as an in arg
    ('MFnIkJoint', 'methods', 'getPreferedAngle', 0, 'args', 0, 2),
    ('MFnIkJoint', 'methods', 'getPreferedAngle', 0, 'inArgs', 0),
    ('MFnIkJoint', 'methods', 'getPreferedAngle', 0, 'outArgs', 0),
    # A valid fix - 'const unsigned short' was formerly parsed (in the xml)
    # as a type of "const unsigned" and a name of "short"
    ('MFloatPoint', 'methods', '__imul__', 4, 'argInfo', 'factor', 'type'),
    ('MFloatPoint', 'methods', '__imul__', 4, 'args', 0, 1),
    ('MFloatPoint', 'methods', '__imul__', 4, 'types', 'factor'),
    ('MPoint', 'methods', '__imul__', 4, 'argInfo', 'factor', 'type'),
    ('MPoint', 'methods', '__imul__', 4, 'args', 0, 1),
    ('MPoint', 'methods', '__imul__', 4, 'types', 'factor'),
]
# only prune these when the 'new' cache matches the running app version
if versions.current() // 10000 == cacheversions['new']:
    for multiKey in CAN_IGNORE_2021:
        delDiff(multiKey)
################################################################################
# KNOWN PROBLEMS
# place to temporarily put issues that need fixing, but you want to filter
KNOWN_PROBLEMS_2021 = [
]
# same version guard as the CAN_IGNORE list: only applies to current version
if versions.current() // 10000 == cacheversions['new']:
    for multiKey in KNOWN_PROBLEMS_2021:
        delDiff(multiKey)
################################################################################
# clean up any diff dicts that are now empty
def pruneEmpty(diffs):
    """Repeatedly strip empty children (empty containers or None) out of a
    nested diff structure until a full pass removes nothing.

    Returns the pruned structure (pruning may replace the top object).
    """
    def isempty(arg):
        # type(None) instead of types.NoneType: types.NoneType does not
        # exist on Python 3 before 3.10, so the original raised
        # AttributeError there
        return isinstance(arg, (dict, list, tuple, set, type(None))) and not arg
    def hasEmptyChildren(arg):
        # matcher: a dict with at least one empty child value
        if not isinstance(arg, dict):
            return False
        return any(isempty(child) for child in arg.values())
    def pruneEmptyChildren(arg):
        # patcher: remove the empty child entries in place
        keysToDel = []
        for key, val in arg.items():
            if isempty(val):
                keysToDel.append(key)
        for key in keysToDel:
            del arg[key]
        return arg
    # pruning can empty a parent dict, so iterate until nothing changes
    altered = True
    while altered:
        diffs, altered = arguments.deepPatchAltered(diffs, hasEmptyChildren, pruneEmptyChildren)
    return diffs
# afterPrune = pruneEmpty({'foo': 7, 'bar': {5:None, 8:None}})
# print(afterPrune)
diffs = pruneEmpty(diffs)
# summary report of whatever diffs survived all the filtering above
diff_classes = sorted(diffs)
print('###########')
print("Num diffs: {}".format(len(diffs)))
print('###########')
print("diff_classes:")
for cls in diff_classes:
    print(" " + str(cls))
print('###########')
# show the first remaining class diff in full as a starting point for triage
if len(diffs):
    print("first class diff:")
    print(diff_classes[0])
    pprint(diffs[diff_classes[0]])
else:
    print("no diffs left! hooray!")
print('###########')
|
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
# MIT License
#
# Copyright (c) 2021 Nathan Juraj Michlo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~=~
import logging
from typing import final
import numpy as np
import torch
from disent.nn.modules import DisentModule
log = logging.getLogger(__name__)
# ========================================================================= #
# Custom Base Module Involving Inputs & Representations #
# ========================================================================= #
# TODO: vae models should be implemented here!
# models should potentially output distributions!
# - but how do we handle mixmatch of stuff in frameworks?
# - can the framework detect if the output is a distribution or not and handle accordingly?
class DisentLatentsModule(DisentModule):
    """Base module that records its input shape and latent dimensions.

    The full latent vector has length ``z_size * z_multiplier`` (see
    ``z_total``); how the multiplier is used is up to subclasses.
    """

    def __init__(self, x_shape=(3, 64, 64), z_size=6, z_multiplier=1):
        super().__init__()
        # normalise everything to plain python types up front
        self._x_shape = tuple(x_shape)
        self._x_size = int(np.prod(x_shape))
        self._z_size = int(z_size)
        self._z_multiplier = int(z_multiplier)

    def forward(self, *args, **kwargs):
        # abstract - concrete subclasses define their own forward pass
        raise NotImplementedError

    # -- read-only accessors -- #

    @property
    def x_shape(self):
        """Shape of a single input element (no batch dimension)."""
        return self._x_shape

    @property
    def x_size(self):
        """Total number of scalars in one input element."""
        return self._x_size

    @property
    def z_size(self):
        """Size of a single latent representation."""
        return self._z_size

    @property
    def z_multiplier(self):
        """How many z_size-length chunks the raw latent output contains."""
        return self._z_multiplier

    @property
    def z_total(self):
        """Length of the full (un-chunked) latent vector."""
        return self.z_size * self.z_multiplier
# ========================================================================= #
# Base Encoder & Base Decoder #
# ========================================================================= #
class DisentEncoder(DisentLatentsModule):
    """Abstract encoder: subclasses implement `encode`, while `forward`
    wraps it with input/output shape validation and optional chunking."""

    @final
    def forward(self, x, chunk=True) -> torch.Tensor:
        """same as self.encode but with size checks"""
        # validate the input batch
        assert x.ndim == 4, f'ndim mismatch: 4 (required) != {x.ndim} (given) [shape={x.shape}]'
        assert x.shape[1:] == self.x_shape, f'x_shape mismatch: {self.x_shape} (required) != {x.shape[1:]} (batch)'
        # encode | p(z|x)
        # for a gaussian encoder, we treat z as concat(z_mean, z_logvar) where z_mean.shape == z_logvar.shape
        # ie. the first half of z is z_mean, the second half of z is z_logvar
        latent = self.encode(x)
        # the raw latent must be (batch, z_size * z_multiplier)
        assert latent.shape == (x.size(0), self.z_total)
        if not chunk:
            return latent
        # split the flat latent into z_multiplier chunks of z_size each
        parts = latent.split(self.z_size, dim=-1)
        assert all(part.size(1) == self.z_size for part in parts)
        return parts

    def encode(self, x) -> torch.Tensor:
        # abstract - implemented by concrete encoders
        raise NotImplementedError
class DisentDecoder(DisentLatentsModule):
    """Abstract decoder: subclasses implement `decode`, while `forward`
    wraps it with latent/output shape validation."""

    def __init__(self, x_shape=(3, 64, 64), z_size=6, z_multiplier=1):
        # decoding consumes a single z_size latent, so no multiplier allowed
        assert z_multiplier == 1, 'decoder does not support z_multiplier != 1'
        super().__init__(x_shape=x_shape, z_size=z_size, z_multiplier=z_multiplier)

    @final
    def forward(self, z):
        """same as self.decode but with size checks"""
        # the latent batch must be (batch, z_size)
        assert z.ndim == 2
        assert z.size(1) == self.z_size
        # decode | p(x|z)
        recon = self.decode(z)
        # one x-shaped reconstruction per latent in the batch
        assert recon.shape == (z.size(0), *self.x_shape)
        return recon

    def decode(self, z) -> torch.Tensor:
        # abstract - implemented by concrete decoders
        raise NotImplementedError
# ========================================================================= #
# Auto-Encoder Wrapper #
# ========================================================================= #
class AutoEncoder(DisentLatentsModule):
    """Pairs a DisentEncoder with a DisentDecoder after checking that their
    input/latent sizes agree. `forward` is deliberately disabled - call
    `encode` / `decode` explicitly."""

    def __init__(self, encoder: DisentEncoder, decoder: DisentDecoder):
        assert isinstance(encoder, DisentEncoder)
        assert isinstance(decoder, DisentDecoder)
        # check sizes
        assert encoder.x_shape == decoder.x_shape, 'x_shape mismatch'
        assert encoder.x_size == decoder.x_size, 'x_size mismatch - this should never happen if x_shape matches'
        assert encoder.z_size == decoder.z_size, 'z_size mismatch'
        # initialise
        super().__init__(x_shape=decoder.x_shape, z_size=decoder.z_size, z_multiplier=encoder.z_multiplier)
        # assign
        self._encoder = encoder
        self._decoder = decoder

    def forward(self, x):
        raise RuntimeError(f'{self.__class__.__name__}.forward(...) has been disabled')

    def encode(self, x, chunk=False):
        # note: chunk defaults to False here, unlike DisentEncoder.forward
        return self._encoder(x, chunk=chunk)

    def decode(self, z: torch.Tensor) -> torch.Tensor:
        """
        decode the given representation.
        the returned tensor does not have an activation applied to it!
        """
        return self._decoder(z)
# ========================================================================= #
# END #
# ========================================================================= #
|
import logging
from autobahn.twisted.websocket import WebSocketClientFactory
from twisted.internet.protocol import ReconnectingClientFactory
from coinlendingbot.websocket.WsConfig import WsConfig
class ExchangeWsClientFactory(ReconnectingClientFactory, WebSocketClientFactory):
    """Websocket client factory that reconnects with back-off and replays all
    active lendingbook/ticker subscriptions after every reconnect."""

    def __init__(self, exchange, data_processing):
        self.exchange = exchange
        WebSocketClientFactory.__init__(self, WsConfig[self.exchange]["ws_url"])
        self.logging = logging.getLogger(__name__)
        self.data_processing = data_processing
        # currently-connected protocol instance, or None while disconnected
        self.proto = None
        # subscriptions to restore after a reconnect
        self.lendingbook_list = []
        self.ticker_list = []

    def clientConnectionLost(self, connector, reason):
        # Logger.warn is a deprecated alias for Logger.warning
        self.logging.warning('Lost connection. Reason: {}'.format(reason))
        self.retry(connector)

    def clientConnectionFailed(self, connector, reason):
        self.logging.warning('Connection failed. Reason: {}'.format(reason))
        self.retry(connector)

    def websocket_opened(self, protocol):
        """Called by the protocol once the websocket handshake completes."""
        self.proto = protocol
        self.resetDelay()
        self._resubscribe_lendingbook()
        self._resubscribe_ticker()

    def websocket_closed(self):
        self.proto = None

    def startedConnecting(self, connector):
        self.logging.debug('startedConnecting')

    def subscribe_lendingbook(self, currency):
        if self.proto:
            self.proto.subscribe_lendingbook(currency)
            if currency not in self.lendingbook_list:
                self.lendingbook_list.append(currency)
        else:
            # not connected yet - retry once a second until we are
            self.reactor.callLater(1, self.subscribe_lendingbook, currency)

    def unsubscribe_lendingbook(self, currency):
        if self.proto:
            self.proto.unsubscribe_lendingbook(currency)
            # guard the remove: a double-unsubscribe previously raised
            # ValueError inside the reactor callback
            if currency in self.lendingbook_list:
                self.lendingbook_list.remove(currency)
        else:
            self.reactor.callLater(1, self.unsubscribe_lendingbook, currency)

    def _resubscribe_lendingbook(self):
        for currency in self.lendingbook_list:
            self.subscribe_lendingbook(currency)

    def subscribe_ticker(self, pair):
        if self.proto:
            self.proto.subscribe_ticker(pair)
            if pair not in self.ticker_list:
                self.ticker_list.append(pair)
        else:
            self.reactor.callLater(1, self.subscribe_ticker, pair)

    def unsubscribe_ticker(self, pair):
        if self.proto:
            self.proto.unsubscribe_ticker(pair)
            # same double-unsubscribe guard as for the lendingbook
            if pair in self.ticker_list:
                self.ticker_list.remove(pair)
        else:
            self.reactor.callLater(1, self.unsubscribe_ticker, pair)

    def _resubscribe_ticker(self):
        for pair in self.ticker_list:
            self.subscribe_ticker(pair)
|
#!/usr/bin/python
class Token(object):
    """Search token holding a state plus actual and heuristic scores.

    `heur` drives ordering/printing; in Dijkstra-style search score == heur.
    `parent` is a back-pointer for path reconstruction.
    """

    def __init__(self, state, score=0.0, heur=0.0, parent=None, label=None):
        self.score = float(score)  # actual score
        self.heur = float(heur)    # heuristic score in Dijkstra: score==heur
        self.state = state
        self.label = label
        self.parent = parent

    def __repr__(self):
        return str(self.heur)

    def __cmp__(self, other):
        """Python 2 three-way comparison on the heuristic score."""
        if self.heur > other.heur:
            return 1
        elif self.heur == other.heur:
            return 0
        elif self.heur < other.heur:
            return -1

    def __lt__(self, other):
        # __cmp__ is ignored on Python 3, so sorting and `<` need __lt__;
        # on Python 2 sort also prefers __lt__, giving identical ordering
        return self.heur < other.heur
if __name__ == "__main__":
    # smoke test: build tokens with random scores and sort them.
    # print()/range() instead of the Python-2-only `print x`/xrange so the
    # script runs under both Python 2 and 3 (unused sys/argparse imports
    # removed)
    from random import random
    tokens = []
    for i in range(65, 75):
        # NOTE(review): this passes the value as `score`; `heur` (which
        # drives sorting and repr) stays 0.0 for every token - confirm
        # whether `heur=i*random()` was intended
        tok = Token(0, i * random())
        tok.label = chr(i)
        tokens.append(tok)
    print(tokens)
    tokens.sort()
    print(tokens)
|
"""
LaunchQTAppCommand.py
=====================
Python module for launching and restarting a QT App
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2021 by Patrick Rainsberry.
:license: MIT, see LICENSE for more details.
"""
import adsk.core
from ..apper import apper
# This command is only active if the option to start the thread in config.py is False
# Manually start the Fusion 360 Client Connection to the Standalone QT App Listener
class LaunchReceiverCommand(apper.Fusion360CommandBase):
    """Manually start the Fusion 360 client connection thread."""

    def on_execute(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, args, input_values):
        # events[0] holds the custom thread registered at add-in startup
        event_thread = self.fusion_app.events[0]
        event_thread.start_thread()
# Manually restart the Fusion 360 Client Connection to the Standalone QT App Listener
# Required if you restart the QT App or start a new one.
# This command kills the current Client Thread and starts a new one
class RestartReceiverCommand(apper.Fusion360CommandBase):
    """Kill the current client thread and start a new one (needed after the
    standalone QT app is restarted)."""

    def on_execute(self, command: adsk.core.Command, inputs: adsk.core.CommandInputs, args, input_values):
        # events[0] holds the custom thread registered at add-in startup
        event_thread = self.fusion_app.events[0]
        event_thread.restart_thread()
|
"""
(c) 2020 Daniel Companeetz
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next paragraph) shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
https://opensource.org/licenses/MIT
# SPDX-License-Identifier: MIT
For information on SDPX, https://spdx.org/licenses/MIT.html
Input: a file name
Output, a table with the names defined in the program, and the line numbers where they were used.
Change Log
Date (YMD) Name What
-------- ------------------ ------------------------
20200319 Daniel Companeetz Initial commit
"""
import json
from sys import exit
from urllib3 import disable_warnings
from urllib3.exceptions import NewConnectionError, MaxRetryError, InsecureRequestWarning
from pprint import pprint
try:
    # prefer the updated fork when it is installed
    import controlm_py as controlm_client
except ImportError:
    # ImportError only: the previous bare `except:` would also have hidden
    # unrelated failures raised while importing the package
    # original version 9.0.19 at PyPi
    import controlm_client
class CtmConnection(object):
    """
    Implements persistent connectivity for the Control-M Automation API
    :property api_client Implements the connection to the Control-M AAPI endpoint
    """
    logged_in = False

    def __init__(self, host='', port='', endpoint='/automation-api',
                 user='', password='',
                 ssl=True, verify_ssl=False,
                 additional_login_header=None):
        """
        Initializes the CtmConnection object and provides the Automation API client.

        :param host: str: Control-M web server host name (preferred fqdn) serving the Automation API.
                          Could be a load balancer or API Gateway
        :param port: str: Control-M web server port serving the Automation API.
        :param endpoint: str: The serving point for the AAPI (default='/automation-api')
        :param ssl: bool: If the web server uses https (default=True)
        :param user: str: Login user
        :param password: str: Password for the login user
        :param verify_ssl: bool: If the web server uses self signed certificates (default=False)
        :param additional_login_header: dict: login headers to be added to the AAPI headers
               (default None - a `{}` default would be a shared mutable default argument)
        :return None
        """
        configuration = controlm_client.Configuration()
        if ssl:
            configuration.host = 'https://'
            # Only use verify_ssl = False if the cert is self-signed.
            configuration.verify_ssl = verify_ssl
            if not verify_ssl:
                # This urllib3 function disables warnings when certs are self-signed
                disable_warnings(InsecureRequestWarning)
        else:
            configuration.host = 'http://'
        configuration.host = configuration.host + host + ':' + port + endpoint
        self.api_client = controlm_client.api_client.ApiClient(configuration=configuration)
        self.session_api = controlm_client.api.session_api.SessionApi(api_client=self.api_client)
        credentials = controlm_client.models.LoginCredentials(username=user, password=password)
        if additional_login_header is not None:
            for header in additional_login_header.keys():
                self.api_client.set_default_header(header, additional_login_header[header])
        try:
            api_token = self.session_api.do_login(body=credentials)
            self.api_client.default_headers.setdefault('Authorization', 'Bearer ' + api_token.token)
            self.logged_in = True
        except (NewConnectionError, MaxRetryError, controlm_client.rest.ApiException) as aapi_error:
            print("Some connection error occurred: " + str(aapi_error))
            exit(42)

    def __del__(self):
        # best-effort logout; during interpreter shutdown the network stack
        # may already be gone, which surfaces as ImportError
        if self.session_api is not None:
            try:
                self.logout()
            except ImportError:
                print('Network access for Logout unavailable due to python shutdown.')
                print(' Program termination occurred before deleting ApiClient object,')
                print(' which performs logout.')
                print('SECURITY RISK: Token will still be available to continue operations.')
                exit(50)

    def logout(self):
        """End the AAPI session if one is active."""
        if self.logged_in:
            try:
                self.session_api.do_logout()
                self.logged_in = False
            except controlm_client.rest.ApiException as e:
                # was `raise("...")`, which raises TypeError because strings
                # are not exceptions; raise a real exception instead
                raise RuntimeError("Exception when calling SessionApi->do_logout: %s\n" % e)
class SaaSConnection(object):
    """
    Implements persistent connectivity for the Control-M Automation API
    using token-based (SaaS) authentication instead of a login session.
    :property api_client Implements the connection to the Control-M AAPI endpoint
    """
    logged_in = True

    def __init__(self, host='', port='', endpoint='/automation-api',
                 aapi_token='', ssl=True, verify_ssl=False,
                 additional_login_header=None):
        """
        Initializes the SaaSConnection object and provides the Automation API client.

        :param host: str: Control-M web server host name (preferred fqdn) serving the Automation API.
                          Could be a load balancer or API Gateway
        :param port: str: Control-M web server port serving the Automation API.
        :param endpoint: str: The serving point for the AAPI (default='/automation-api')
        :param aapi_token: str: API token, sent as the 'x-api-key' header
        :param ssl: bool: If the web server uses https (default=True)
        :param verify_ssl: bool: If the web server uses self signed certificates (default=False)
        :param additional_login_header: dict: login headers to be added to the AAPI headers
               (default None - a `{}` default would be a shared mutable default argument)
        :return None
        """
        configuration = controlm_client.Configuration()
        if ssl:
            configuration.host = 'https://'
            # Only use verify_ssl = False if the cert is self-signed.
            configuration.verify_ssl = verify_ssl
            if not verify_ssl:
                # This urllib3 function disables warnings when certs are self-signed
                disable_warnings(InsecureRequestWarning)
        else:
            configuration.host = 'http://'
        configuration.host = configuration.host + host + ':' + port + endpoint
        self.api_client = controlm_client.api_client.ApiClient(configuration=configuration)
        if additional_login_header is not None:
            for header in additional_login_header.keys():
                self.api_client.set_default_header(header, additional_login_header[header])
        try:
            # token-based auth: no login round-trip, just attach the key
            # header (leftover debug pprint of the client removed)
            self.api_client.default_headers.setdefault('x-api-key', aapi_token)
            self.logged_in = True
        except (NewConnectionError, MaxRetryError, controlm_client.rest.ApiException) as aapi_error:
            print("Some connection error occurred: " + str(aapi_error))
            exit(42)

    # NOTE: unlike CtmConnection there is no __del__/logout here - token
    # auth keeps no server-side session that needs tearing down
# Stream are lazy linked list
# A stream is a linked list, but the rest of the list is computed on demand
'''
Link- First element is anything
- second element is a Link instance or Link.empty
Stream -First element can be anything
- Second element is a zero-argument function that returns a Stream or Stream.empty
Once Created, the Link and Stream can be used interchangeable
namely first, rest
'''
class Stream:
    """A lazy linked list: `first` is stored eagerly, the tail is produced
    by a zero-argument thunk the first time `rest` is accessed."""

    class empty:
        def __repr__(self):
            return 'Stream.empty'
    empty = empty()  # shared singleton marking the end of every stream

    def __init__(self, first, compute_rest=lambda: Stream.empty):
        assert callable(compute_rest), 'compute_rest must be callable.'
        self.first = first
        self._compute_rest = compute_rest

    @property
    def rest(self):
        """Compute the tail on first access, then return the cached value."""
        thunk = self._compute_rest
        if thunk is not None:
            self._rest = thunk()
            # drop the thunk so it is never invoked twice
            self._compute_rest = None
        return self._rest

    def __repr__(self):
        return 'Stream({0}, <...>)'.format(repr(self.first))
# An integer stream is a stream of consecutive integers
# Start with first. Constructed from first and a function compute_rest that returns the integer stream starting at first+1
def integer_stream(first):
    """Stream of consecutive integers counting up from `first`."""
    return Stream(first, lambda: integer_stream(first + 1))
# same as
def int_stream(first):
    """Stream of consecutive integers from `first` (same as integer_stream)."""
    def rest():
        return int_stream(first + 1)
    return Stream(first, rest)
def first_k(s, k):
    """Return a list of (up to) the first k elements of stream s."""
    collected = []
    while k > 0 and s is not Stream.empty:
        collected.append(s.first)
        s = s.rest
        k -= 1
    return collected
def square_stream(s):
    """Stream of the squares of the elements of s."""
    def compute_rest():
        return square_stream(s.rest)
    return Stream(s.first * s.first, compute_rest)
def add_stream(s, t):
    """Elementwise sum of streams s and t."""
    def rest():
        return add_stream(s.rest, t.rest)
    return Stream(s.first + t.first, rest)
#same
def added_stream(s, t):
    """Elementwise sum of s and t (same contract as add_stream)."""
    return Stream(s.first + t.first, lambda: added_stream(s.rest, t.rest))
# the infinite stream 1, 1, 1, ... - its tail is itself
ones = Stream(1, lambda: ones)
# the positive integers: 1, then (ones + ints) = 2, 3, 4, ...
ints = Stream(1, lambda: add_stream(ones, ints))
# Mapping a function over a stream
'''
Mapping a function over a stream applies a function only to the first element right away,
the rest is computed lazily
'''
def map_stream(fn, s):
    """Map a function fn over the elements of a stream."""
    if s is Stream.empty:
        return s
    def compute_rest():
        # the tail is only mapped when it is actually demanded
        return map_stream(fn, s.rest)
    return Stream(fn(s.first), compute_rest)
# Filtering a Stream
# When filtering a stream, processing continues until a element is kept in the output
def filter_stream(fn, s):
    'Filter stream s with predicate function fn.'
    if s is Stream.empty:
        return s
    def rest_thunk():
        return filter_stream(fn, s.rest)
    # Keep the head if it satisfies the predicate; otherwise scan forward
    # eagerly until an element is kept (or the stream ends).
    return Stream(s.first, rest_thunk) if fn(s.first) else rest_thunk()
# Predicate selecting odd integers.
odds = lambda x: x % 2 == 1
# Infinite stream of odd positive integers.
odd = filter_stream(odds, ints)
def primes(s):
    'Return a stream of primes, given a stream of positive integers (>= 2).'
    head = s.first
    def sieve_rest():
        # Sieve of Eratosthenes: drop every multiple of the head, then recurse.
        def not_divisible(x):
            return x % head != 0
        return primes(filter_stream(not_divisible, s.rest))
    return Stream(head, sieve_rest)
p = primes(int_stream(2))
|
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testtools
from performa.modules import atop
def _read_sample():
    """Return the raw contents of the bundled atop sample fixture."""
    sample_path = 'performa/tests/atop_sample.txt'
    with open(sample_path) as sample_file:
        return sample_file.read()
class TestAtop(testtools.TestCase):
    """Tests for the atop output parser.

    Each test feeds the bundled two-second atop sample through
    ``atop.parse`` with a label filter and checks the parsed records.
    """

    def test_parse_cpu_total(self):
        # Aggregate-CPU ('CPU') records: one per sampled second.
        expected = [{'cpu_count': 4, 'date': '2016/02/26', 'guest': 0.0,
                     'host': 'host', 'idle': 3.92, 'interval': 1, 'irq': 0.0,
                     'label': 'CPU', 'nice': 0.0, 'softirq': 0.0, 'steal': 0.0,
                     'sys': 0.04, 'ticks_per_second': 100, 'time': '10:01:04',
                     'timestamp': 1456480864, 'user': 0.04, 'wait': 0.0},
                    {'cpu_count': 4, 'date': '2016/02/26', 'guest': 0.0,
                     'host': 'host', 'idle': 3.92, 'interval': 1, 'irq': 0.0,
                     'label': 'CPU', 'nice': 0.0, 'softirq': 0.0, 'steal': 0.0,
                     'sys': 0.04, 'ticks_per_second': 100, 'time': '10:01:05',
                     'timestamp': 1456480865, 'user': 0.04, 'wait': 0.0}]
        self.assertEqual(expected,
                         atop.parse(_read_sample(), dict(label=['CPU'])))

    def test_parse_cpu(self):
        # Per-core ('cpu') records include the core id; just check membership.
        needle = {'cpu_id': 2, 'date': '2016/02/26', 'guest': 0.0,
                  'host': 'host', 'idle': 0.94, 'interval': 1, 'irq': 0.0,
                  'label': 'cpu', 'nice': 0.0, 'softirq': 0.0, 'steal': 0.0,
                  'sys': 0.03, 'ticks_per_second': 100, 'time': '10:01:05',
                  'timestamp': 1456480865, 'user': 0.03, 'wait': 0.0}
        self.assertIn(needle,
                      atop.parse(_read_sample(), dict(label=['cpu'])))

    def test_parse_mem(self):
        # Memory ('MEM') records: sizes are in bytes (pages * page_size).
        expected = [
            {'buffer': 351428608, 'cache': 3317374976, 'date': '2016/02/26',
             'dirty': 0, 'free': 3659939840, 'host': 'host', 'interval': 1,
             'label': 'MEM', 'page_size': 4096, 'phys': 8373075968,
             'slab': 298115072, 'time': '10:01:04', 'timestamp': 1456480864},
            {'buffer': 351428608, 'cache': 3317387264, 'date': '2016/02/26',
             'dirty': 0, 'free': 3659939840, 'host': 'host', 'interval': 1,
             'label': 'MEM', 'page_size': 4096, 'phys': 8373075968,
             'slab': 298115072, 'time': '10:01:05', 'timestamp': 1456480865}]
        self.assertEqual(expected,
                         atop.parse(_read_sample(), dict(label=['MEM'])))

    def test_parse_net(self):
        # Network ('NET') records carry per-protocol packet counters.
        needle = {'date': '2016/02/26', 'host': 'host', 'interval': 1,
                  'ip_dx': 0, 'ip_fx': 0, 'ip_rx': 0, 'ip_tx': 0,
                  'label': 'NET', 'tcp_rx': 0, 'tcp_tx': 0, 'time': '10:01:04',
                  'timestamp': 1456480864, 'udp_rx': 0, 'udp_tx': 0}
        self.assertIn(needle,
                      atop.parse(_read_sample(), dict(label=['NET'])))

    def test_parse_prc(self):
        # Per-process CPU ('PRC') records, keyed by pid/name.
        needle = {'current_cpu': 2, 'date': '2016/02/26', 'host': 'host',
                  'interval': 1, 'label': 'PRC', 'name': 'dstat', 'nice': 0,
                  'pid': 11014, 'priority': 120, 'realtime_priority': 0,
                  'scheduling_policy': 0, 'sleep_avg': 0, 'state': 'S',
                  'sys': 0.02, 'ticks_per_second': 100, 'time': '10:01:04',
                  'timestamp': 1456480864, 'user': 0.01}
        self.assertIn(needle,
                      atop.parse(_read_sample(), dict(label=['PRC'])))

    def test_parse_prm(self):
        # Per-process memory ('PRM') records.
        needle = {'date': '2016/02/26', 'host': 'host', 'interval': 1,
                  'label': 'PRM', 'major_page_faults': 0,
                  'minor_page_faults': 751, 'name': 'atop', 'page_size': 4096,
                  'pid': 19929, 'resident': 2019328, 'resident_growth': 0,
                  'shared': 151552, 'state': 'R', 'time': '10:01:05',
                  'timestamp': 1456480865, 'virtual': 17412096,
                  'virtual_growth': 0}
        self.assertIn(needle,
                      atop.parse(_read_sample(), dict(label=['PRM'])))

    def test_parse_match_name_regex(self):
        # A 'name' entry in the filter narrows PRC records to one process.
        expected = [{'current_cpu': 2, 'date': '2016/02/26', 'host': 'host',
                     'interval': 1, 'label': 'PRC', 'name': 'dstat', 'nice': 0,
                     'pid': 11014, 'priority': 120, 'realtime_priority': 0,
                     'scheduling_policy': 0, 'sleep_avg': 0, 'state': 'S',
                     'sys': 0.02, 'ticks_per_second': 100, 'time': '10:01:04',
                     'timestamp': 1456480864, 'user': 0.01},
                    {'current_cpu': 2, 'date': '2016/02/26', 'host': 'host',
                     'interval': 1, 'label': 'PRC', 'name': 'dstat', 'nice': 0,
                     'pid': 11014, 'priority': 120, 'realtime_priority': 0,
                     'scheduling_policy': 0, 'sleep_avg': 0, 'state': 'S',
                     'sys': 0.0, 'ticks_per_second': 100, 'time': '10:01:05',
                     'timestamp': 1456480865, 'user': 0.02}]
        filter = {
            'name': 'dstat',
            'label': ['PRC'],
        }
        self.assertEqual(expected, atop.parse(_read_sample(), filter))

    def test_parse_no_filter(self):
        # An empty filter returns every record in the sample.
        self.assertEqual(43, len(atop.parse(_read_sample(), {})))
|
def normalize_str(x):
    """Lowercase *x* and collapse all whitespace runs to single spaces.

    None is passed through unchanged.
    """
    if x is None:
        return None
    tokens = x.replace("\n", " ").split()
    return " ".join(tokens).lower()
|
"""Implementation of the Noun Absurdity property."""
from collections import defaultdict
from functools import partial
import numpy as np
import os
import re
import sys
from scipy.spatial import distance
from pyhumour._utilities.pos_tag_bigram_frequency_matrix import POSTagBigramFrequencyMatrix
class NounAbsurdity:
    """Calculates the 'Noun Absurdity' value of a given text."""
    def __init__(self, frequency_matrix, embeddings_index):
        """
        Construct a :class:`NounAbsurdity` object.

        :param POSTagBigramFrequencyMatrix frequency_matrix: The adjective-noun frequency matrix
        :param embeddings_index: mapping from word to embedding vector
            (presumably ConceptNet Numberbatch — see get_embeddings_index)
        """
        self.adj_noun_dict = frequency_matrix
        self.embeddings_index = embeddings_index
    def calculate(self, pos_tags: list) -> float:
        """Return the 'Humourous Noun Absurdity' value of a given text.

        :param list pos_tags: List of pos_tags for the given text.
        """
        # Adjective tags followed by noun tags form the bigrams of interest.
        acceptable_types = ('JJ', 'JJR', 'JJS')
        second_type = ('NN', 'NNS', 'NNP', 'NNPS')
        noun_absurdity_average = 0
        noun_absurdity_positive = 0  # frequency-weighted sum of cosine distances
        noun_absurdity_count = 0     # total frequency weight (normalizer)
        number_of_pos_tags = len(pos_tags)
        for j in range(number_of_pos_tags - 1):
            if pos_tags[j][1] in acceptable_types and pos_tags[j + 1][1] in second_type:
                # Strip non-alphabetic characters and lowercase both words.
                adj = re.sub('[^A-Za-z]*', '', pos_tags[j][0])
                adj = adj.lower()
                noun = re.sub('[^A-Za-z]*', '', pos_tags[j + 1][0])
                noun = noun.lower()
                # Compare this noun against every noun seen with the same
                # adjective in the corpus, weighted by corpus frequency.
                for k in self.adj_noun_dict.get_row(adj): # gets list of nouns
                    noun_absurdity_positive += self.adj_noun_dict.cell_value(adj, k) * distance.cosine(
                        self.embeddings_index[noun], self.embeddings_index[k])
                    noun_absurdity_count += self.adj_noun_dict.cell_value(adj, k)
        if noun_absurdity_count > 0:
            noun_absurdity_average = noun_absurdity_positive / noun_absurdity_count
        # 0 when the text contains no known adjective-noun bigrams.
        return noun_absurdity_average
def async_get_embeddings_index(loop, executor):
    """Schedule get_embeddings_index on *executor*; returns an awaitable future."""
    future = loop.run_in_executor(executor, get_embeddings_index)
    return future
def get_embeddings_index():
    """Returns the ConceptNet Embeddings-Index matrix.

    Loads resources/numberbatch-en.txt; if the file is missing, downloads
    and unpacks the gzipped Numberbatch release first. Returns a
    defaultdict mapping word -> float32 numpy vector (unknown words yield
    empty arrays instead of raising).
    """
    embeddings_index = defaultdict(partial(np.ndarray, 0))
    resources_path = os.path.join(os.path.dirname(sys.modules['pyhumour'].__file__), 'resources')
    target_path = os.path.join(resources_path, 'numberbatch-en.txt')
    try:
        f = open(target_path, encoding='utf-8')
    except FileNotFoundError:
        # Imports deferred: only needed on the one-time download path.
        import requests
        import gzip
        import shutil
        url = 'https://conceptnet.s3.amazonaws.com/downloads/2019/numberbatch/numberbatch-en-19.08.txt.gz'
        download_path = os.path.join(resources_path, 'numberbatch-en-19.08.txt.gz')
        response = requests.get(url, stream=True)
        # NOTE(review): a non-200 response silently skips the download, and
        # gzip.open below then fails on a missing file — consider raising here.
        if response.status_code == 200:
            # NOTE(review): this 'f' shadows the outer read handle; it is
            # rebound to the extracted text file after decompression below.
            with open(download_path, 'wb') as f:
                total_length = response.headers.get('content-length')
                if total_length is None:
                    f.write(response.raw.read())
                else:
                    # Stream in 4 KiB chunks, drawing a 50-char progress bar.
                    cumulative_length = 0
                    total_length = int(total_length)
                    for data in response.iter_content(chunk_size=4096):
                        cumulative_length += len(data)
                        f.write(data)
                        progress = int(50 * cumulative_length / total_length)
                        sys.stdout.write("\r Downloading 'numberbatch-en-19.08.txt.gz': [%s%s]" % (
                            '=' * progress, ' ' * (50 - progress)))
                        sys.stdout.flush()
        # Decompress to the plain-text target, then drop the archive.
        with gzip.open(download_path) as f_in:
            with open(target_path, 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
        os.remove(download_path)  # removes the gz (zip) file
        f = open(target_path, encoding='utf-8')
    # Each line holds a word followed by its embedding coefficients.
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    return embeddings_index
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Carter Yagemann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import socket
import sys
from subprocess import check_output, Popen
from time import sleep
def default_gateway():
    """Return the Windows default gateway address, or None if not found.

    Parses ``ipconfig`` output; the gateway is the last token on the
    'Default Gateway' line.
    """
    # check_output returns bytes on Python 3; decode before text matching,
    # otherwise .split('\n') raises TypeError.
    res = check_output(['ipconfig']).decode(errors='replace').split('\n')
    gateway = None
    for line in res:
        if 'Default Gateway' in line:
            gateway = line.split(' ')[-1].strip()
            break
    # Return None (not the string 'None', which str(None) produced before
    # and which is truthy) so callers' `if not gateway` check works.
    return gateway or None
def main():
    """Fetch a sample binary from the host gateway and execute it.

    NOTE(review): this appears to be analysis-sandbox tooling — it runs
    whatever binary the server sends, by design.
    """
    sleep(10) # Allow time for VM to startup
    # Work relative to this script's own directory.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    gateway = default_gateway()
    if not gateway:
        sys.exit(1) # Failed to find default gateway
    # Connect to server
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(30)
    try:
        sock.connect((gateway, 52174))  # host-side server port
    except socket.timeout:
        sys.exit(2) # Failed to connect to server
    # Get sample from server; the server closing the socket ends the loop.
    with open('sample.exe', 'wb') as ofile:
        while True:
            data = sock.recv(4096)
            if not data: break
            ofile.write(data)
    # Run sample (fire-and-forget: no wait on the child process)
    Popen(['sample.exe'])
if __name__ == '__main__':
main()
|
# from torch.utils import data
import torch
from torchvision import datasets
import torchvision.transforms as transforms
import config
def tr_dataset(batch_size):
    """Build train/validation DataLoaders over config.TRAINING_FILE.

    A random 10% of the images is held out for validation. Two
    ImageFolder instances are created over the same directory so the two
    subsets could, in principle, carry different transforms.
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transform_data = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])
    training_data = datasets.ImageFolder(config.TRAINING_FILE,
                                         transform=transform_data)
    validation_data = datasets.ImageFolder(config.TRAINING_FILE,
                                           transform=transform_data)
    # Random 90/10 index split shared by both subsets.
    val_split = 0.10
    length = len(training_data)
    split = int(length * val_split)
    shuffled_indices = torch.randperm(length)
    train_subset = torch.utils.data.Subset(training_data, shuffled_indices[split:])
    val_subset = torch.utils.data.Subset(validation_data, shuffled_indices[:split])
    train_dataloader = torch.utils.data.DataLoader(dataset=train_subset,
                                                   batch_size=batch_size,
                                                   shuffle=True)
    val_dataloader = torch.utils.data.DataLoader(dataset=val_subset,
                                                 batch_size=batch_size,
                                                 shuffle=False)
    print('Classes : ', train_dataloader.dataset.dataset.class_to_idx)
    return train_dataloader, val_dataloader
# Copyright 2014-2016 OpenMarket Ltd
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from Synapse:
# https://github.com/matrix-org/synapse/blob/1016f303e58b1305ed5b3572fde002e1273e0fc0/synapse/crypto/context_factory.py#L77
import logging
import idna
from OpenSSL import SSL
from service_identity import VerificationError
from service_identity.pyopenssl import verify_hostname, verify_ip_address
from twisted.internet.abstract import isIPAddress, isIPv6Address
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
from twisted.internet.ssl import CertificateOptions, TLSVersion, platformTrust
from twisted.python.failure import Failure
from twisted.web.iweb import IPolicyForHTTPS
from zope.interface import implementer
logger = logging.getLogger(__name__)
@implementer(IPolicyForHTTPS)
class ClientTLSOptionsFactory(object):
    """Factory for Twisted SSLClientConnectionCreators that are used to make connections
    to remote servers for federation.

    Uses one of two OpenSSL context objects for all connections, depending on whether
    we should do SSL certificate verification.

    get_options decides whether we should do SSL certificate verification and
    constructs an SSLClientConnectionCreator factory accordingly.
    """
    def __init__(self):
        # Use CA root certs provided by OpenSSL
        trust_root = platformTrust()
        # "insecurelyLowerMinimumTo" is the argument that will go lower than
        # Twisted's default, which is why it is marked as "insecure" (since
        # Twisted's defaults are reasonably secure). But, since Twisted is
        # moving to TLS 1.2 by default, we want to respect the config option if
        # it is set to 1.0 (which the alternate option, raiseMinimumTo, will not
        # let us do).
        minTLS = TLSVersion.TLSv1_2
        self._verify_ssl = CertificateOptions(
            trustRoot=trust_root, insecurelyLowerMinimumTo=minTLS
        )
        # One shared context for every connection; the info callback hooks
        # our per-connection verifier into the handshake.
        self._verify_ssl_context = self._verify_ssl.getContext()
        self._verify_ssl_context.set_info_callback(self._context_info_cb)
    def get_options(self, host):
        # Return a connection creator that verifies certs for *host*.
        ssl_context = self._verify_ssl_context
        return SSLClientConnectionCreator(host, ssl_context)
    @staticmethod
    def _context_info_cb(ssl_connection, where, ret):
        """The 'information callback' for our openssl context object."""
        # we assume that the app_data on the connection object has been set to
        # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator)
        tls_protocol = ssl_connection.get_app_data()
        try:
            # ... we further assume that SSLClientConnectionCreator has set the
            # '_synapse_tls_verifier' attribute to a ConnectionVerifier object.
            tls_protocol._synapse_tls_verifier.verify_context_info_cb(
                ssl_connection, where
            )
        except:  # noqa: E722, taken from the twisted implementation
            logger.exception("Error during info_callback")
            f = Failure()
            tls_protocol.failVerification(f)
    def creatorForNetloc(self, hostname, port):
        """Implements the IPolicyForHTTPS interface so that this can be passed
        directly to agents.
        """
        return self.get_options(hostname)
@implementer(IOpenSSLClientConnectionCreator)
class SSLClientConnectionCreator(object):
    """Creates openssl connection objects for client connections.

    Replaces twisted.internet.ssl.ClientTLSOptions

    :param hostname: hostname (or IP literal) the certificate must match
    :param ctx: shared OpenSSL context whose info callback drives verification
    """
    def __init__(self, hostname, ctx):
        self._ctx = ctx
        self._verifier = ConnectionVerifier(hostname)
    def clientConnectionForTLS(self, tls_protocol):
        """Build the SSL.Connection for *tls_protocol* and wire up verification."""
        context = self._ctx
        connection = SSL.Connection(context, None)
        # as per twisted.internet.ssl.ClientTLSOptions, we set the application
        # data to our TLSMemoryBIOProtocol...
        connection.set_app_data(tls_protocol)
        # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the
        # tls_protocol so that the SSL context's info callback has something to
        # call to do the cert verification.
        setattr(tls_protocol, "_synapse_tls_verifier", self._verifier)
        return connection
class ConnectionVerifier(object):
    """Set the SNI, and do cert verification

    This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by
    the ssl context's info callback.
    """
    # This code is based on twisted.internet.ssl.ClientTLSOptions.
    def __init__(self, hostname):
        # IP literals get no SNI and are verified as IP addresses;
        # DNS names are IDNA-encoded and verified as hostnames.
        if isIPAddress(hostname) or isIPv6Address(hostname):
            self._hostnameBytes = hostname.encode("ascii")
            self._is_ip_address = True
        else:
            # twisted's ClientTLSOptions falls back to the stdlib impl here if
            # idna is not installed, but points out that lacks support for
            # IDNA2008 (http://bugs.python.org/issue17305).
            #
            # We can rely on having idna.
            self._hostnameBytes = idna.encode(hostname)
            self._is_ip_address = False
        self._hostnameASCII = self._hostnameBytes.decode("ascii")
    def verify_context_info_cb(self, ssl_connection, where):
        # At handshake start: send SNI (only meaningful for DNS names).
        if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address:
            ssl_connection.set_tlsext_host_name(self._hostnameBytes)
        # At handshake completion: verify the peer certificate; on failure,
        # fail the TLS protocol instead of raising through OpenSSL.
        if where & SSL.SSL_CB_HANDSHAKE_DONE:
            try:
                if self._is_ip_address:
                    verify_ip_address(ssl_connection, self._hostnameASCII)
                else:
                    verify_hostname(ssl_connection, self._hostnameASCII)
            except VerificationError:
                f = Failure()
                tls_protocol = ssl_connection.get_app_data()
                tls_protocol.failVerification(f)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 16 10:51:06 2020
@author: AzureDVBB
Test functions for dynamic lookups.
"""
def test1(a):
return str(a)
def test2():
    """Return a fixed marker string (dynamic-lookup smoke test)."""
    marker = "OK test2"
    return marker
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. $Id: punctuation.py 85352 2016-03-26 19:08:54Z carlos.sanchez $
"""
from __future__ import print_function, unicode_literals, absolute_import, division
__docformat__ = "restructuredtext en"
logger = __import__('logging').getLogger(__name__)
import re
from zope import interface
from nti.contentfragments.interfaces import IPunctuationMarkPattern
from nti.contentfragments.interfaces import IPunctuationMarkExpression
from nti.contentfragments.interfaces import IPunctuationMarkPatternPlus
from nti.contentfragments.interfaces import IPunctuationMarkExpressionPlus
# NOTE: we import unicode_literals
# One big regex character class matching a single punctuation mark
# (ASCII punctuation plus common unicode quote/currency characters).
# Inside a character class the '|' separators are literal and redundant,
# but harmless — they also make '|' itself match.
default_punk_mark_expression = (r'[\?|!|(|)|"|\''
                                '|\u2039|\u203a' # single angle quotes
                                '|\u2018|\u2019' # single curly quotes
                                '|\u201c|\u201d' # double curly quotes
                                '|\u00ab|\u00bb' # double angle quotes
                                r'|`|{|}|\[|\]|:|;|,|\.|\^|%|&|#|\*|@|'
                                '$|\u20ac' # dollar and euro
                                r'|&|+|\-|<|>|=|_|\~|\\|/|\|]')
# The "plus" variant also matches whitespace and zero-width characters;
# built by dropping the closing ']' and extending the class.
default_punk_mark_expression_plus = (default_punk_mark_expression[:-1] +
                                     r'|\s'
                                     r'|\u200b|\u2060]') # zero-width space, word joiner
# Compiled, case-insensitive patterns for the two expressions above.
default_punk_mark_pattern = re.compile(default_punk_mark_expression,
                                       re.I | re.MULTILINE | re.DOTALL | re.UNICODE)
default_punk_mark_pattern_plus = re.compile(default_punk_mark_expression_plus,
                                            re.I | re.MULTILINE | re.DOTALL | re.UNICODE)
@interface.implementer(IPunctuationMarkExpression)
def _default_punctuation_mark_expression():
    """Return the default punctuation-mark regex expression string."""
    return default_punk_mark_expression
@interface.implementer(IPunctuationMarkPattern)
def _default_punctuation_mark_pattern():
    """Return the compiled default punctuation-mark pattern."""
    return default_punk_mark_pattern
@interface.implementer(IPunctuationMarkExpressionPlus)
def _default_punctuation_mark_expression_plus():
    """Return the extended expression (punctuation plus whitespace/zero-width)."""
    return default_punk_mark_expression_plus
@interface.implementer(IPunctuationMarkPatternPlus)
def _default_punctuation_mark_pattern_plus():
    """Return the compiled extended punctuation pattern."""
    return default_punk_mark_pattern_plus
|
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file's directory."""
    path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the file handle is closed promptly
    # (the original leaked the handle until garbage collection).
    with open(path) as f:
        return f.read()
# Required dependencies
required = [
    'joblib',
    'numpy',
    'tqdm',
    'humanhash3',
    'python-dateutil',
    'dill'
]
# Package metadata; the long description comes straight from the README.
setup(
    name="smallab",
    version="1.8.2",
    url='https://github.com/octopuscabbage/smallab',
    packages=find_packages(),
    install_requires=required,
    license="BSD 2-Clause License",
    description="smallab (Small Lab) is an experiment framework designed " + \
                "to be easy to use with your experiment",
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['FrozenBatchNorm2d', 'FilterResponseNorm1d',
'FilterResponseNorm2d', 'FilterResponseNorm3d']
class FrozenBatchNorm2d(nn.Module):
    """BatchNorm2d with all statistics and affine parameters frozen.

    The weight, bias and running statistics are buffers fixed at
    construction; forward applies the resulting per-channel affine
    transform. running_var starts at 1 - eps so (var + eps) == 1, making
    a fresh module the identity.
    """

    def __init__(self, num_features, eps=1e-5):
        super(FrozenBatchNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.register_buffer('weight', torch.ones(num_features))
        self.register_buffer('bias', torch.zeros(num_features))
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer(
            'running_var', torch.ones(num_features) - eps)

    def forward(self, x):
        """Apply the frozen normalization channel-wise."""
        if not x.requires_grad:
            # F.batch_norm provides more optimization opportunities
            # when no gradients are needed.
            return F.batch_norm(
                x,
                self.running_mean,
                self.running_var,
                self.weight,
                self.bias,
                training=False,
                eps=self.eps)
        # Gradient path: fold normalization into a single scale/shift to
        # avoid F.batch_norm's extra memory for backprop.
        inv_std = (self.running_var + self.eps).rsqrt()
        scale = (self.weight * inv_std).reshape(1, -1, 1, 1)
        shift = (self.bias - self.running_mean * self.weight * inv_std).reshape(1, -1, 1, 1)
        return x * scale + shift

    def _load_from_state_dict(
            self, state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs):
        # Old checkpoints (version < 2) may lack running stats; fill in
        # identity defaults so loading does not fail.
        version = local_metadata.get('version', None)
        if version is None or version < 2:
            if prefix + 'running_mean' not in state_dict:
                state_dict[prefix + 'running_mean'] = \
                    torch.zeros_like(self.running_mean)
            if prefix + 'running_var' not in state_dict:
                state_dict[prefix + 'running_var'] = \
                    torch.ones_like(self.running_var)
        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def __repr__(self):
        return 'FrozenBatchNorm2d(num_features={n}, eps={e})'.format(
            n=self.num_features, e=self.eps)
class FilterResponseNormNd(nn.Module):
    """Filter Response Normalization with a learned thresholded activation.

    Normalizes each channel by the mean square over its spatial dims,
    then applies a learned affine transform followed by max(., tau).
    """

    def __init__(self, ndim, num_features, eps=1e-6,
                 learnable_eps=False):
        assert ndim in [3, 4, 5], \
            'FilterResponseNorm only supports 3d, 4d or 5d inputs.'
        super(FilterResponseNormNd, self).__init__()
        # One scalar per channel, broadcastable over the spatial dims.
        param_shape = (1, num_features) + (1,) * (ndim - 2)
        self.eps = nn.Parameter(torch.ones(*param_shape) * eps)
        if not learnable_eps:
            self.eps.requires_grad_(False)
        self.gamma = nn.Parameter(torch.Tensor(*param_shape))
        self.beta = nn.Parameter(torch.Tensor(*param_shape))
        self.tau = nn.Parameter(torch.Tensor(*param_shape))
        self.reset_parameters()

    def forward(self, x):
        spatial_dims = tuple(range(2, x.dim()))
        # Mean of squares per channel over the spatial dimensions.
        mean_sq = x.pow(2).mean(dim=spatial_dims, keepdim=True)
        normalized = x * torch.rsqrt(mean_sq + self.eps.abs())
        # Affine transform, then the learned threshold (TLU).
        return torch.max(self.gamma * normalized + self.beta, self.tau)

    def reset_parameters(self):
        # Identity affine; threshold starts at zero.
        nn.init.ones_(self.gamma)
        nn.init.zeros_(self.beta)
        nn.init.zeros_(self.tau)
class FilterResponseNorm1d(FilterResponseNormNd):
    """FRN for 3d inputs of shape (N, C, L)."""
    def __init__(self, num_features, eps=1e-6, learnable_eps=False):
        super(FilterResponseNorm1d, self).__init__(
            3, num_features, eps=eps, learnable_eps=learnable_eps)
class FilterResponseNorm2d(FilterResponseNormNd):
    """FRN for 4d inputs of shape (N, C, H, W)."""
    def __init__(self, num_features, eps=1e-6, learnable_eps=False):
        super(FilterResponseNorm2d, self).__init__(
            4, num_features, eps=eps, learnable_eps=learnable_eps)
class FilterResponseNorm3d(FilterResponseNormNd):
    """FRN for 5d inputs of shape (N, C, D, H, W)."""
    def __init__(self, num_features, eps=1e-6, learnable_eps=False):
        super(FilterResponseNorm3d, self).__init__(
            5, num_features, eps=eps, learnable_eps=learnable_eps)
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_utils import importutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.common import constants as n_const
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import common_db_mixin
from neutron.db import dns_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.plugins.common import constants
from neutron.quota import resource_registry
from neutron.services import service_base
class L3RouterPlugin(service_base.ServicePluginBase,
                     common_db_mixin.CommonDbMixin,
                     extraroute_db.ExtraRoute_db_mixin,
                     l3_hamode_db.L3_HA_NAT_db_mixin,
                     l3_gwmode_db.L3_NAT_db_mixin,
                     l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin,
                     dns_db.DNSDbMixin):
    """Implementation of the Neutron L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.
    All DB related work is implemented in classes
    l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin,
    l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin.
    """
    # API extensions this plugin advertises to Neutron.
    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
                                   "extraroute", "l3_agent_scheduler",
                                   "l3-ha", "router_availability_zone"]

    @resource_registry.tracked_resources(router=l3_db.Router,
                                         floatingip=l3_db.FloatingIP)
    def __init__(self):
        # Scheduler driver is configurable via router_scheduler_driver.
        self.router_scheduler = importutils.import_object(
            cfg.CONF.router_scheduler_driver)
        self.start_periodic_l3_agent_status_check()
        super(L3RouterPlugin, self).__init__()
        # Subscribe DVR/HA scheduler DB event hooks only when those
        # extensions are advertised above.
        if 'dvr' in self.supported_extension_aliases:
            l3_dvrscheduler_db.subscribe()
        if 'l3-ha' in self.supported_extension_aliases:
            l3_hascheduler_db.subscribe()
        l3_db.subscribe()
        self.start_rpc_listeners()

    @log_helpers.log_method_call
    def start_rpc_listeners(self):
        """Create the RPC connection and consumer for the L3 plugin topic."""
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = n_rpc.create_connection()
        self.agent_notifiers.update(
            {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
        self.endpoints = [l3_rpc.L3RpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        return self.conn.consume_in_threads()

    def get_plugin_type(self):
        # Service type constant used by Neutron's plugin registry.
        return constants.L3_ROUTER_NAT

    def get_plugin_description(self):
        """returns string description of the plugin."""
        return ("L3 Router Service Plugin for basic L3 forwarding"
                " between (L2) Neutron networks and access to external"
                " networks via a NAT gateway.")

    def create_floatingip(self, context, floatingip):
        """Create floating IP.

        :param context: Neutron request context
        :param floatingip: data for the floating IP being created
        :returns: A floating IP object on success

        As the l3 router plugin asynchronously creates floating IPs
        leveraging the l3 agent, the initial status for the floating
        IP object will be DOWN.
        """
        return super(L3RouterPlugin, self).create_floatingip(
            context, floatingip,
            initial_status=n_const.FLOATINGIP_STATUS_DOWN)
|
import os
import sys
import errno
import pprint
def safe_print(p_str):
    """Print *p_str*, exiting quietly if stdout's pipe has been closed.

    Piping into a tool like ``head`` closes the pipe early; that raises
    IOError/EPIPE, which we treat as a normal exit rather than a traceback.
    Any other IOError is re-raised.
    """
    try:
        print(p_str)
    except IOError as e:  # 'as' syntax works on Python 2.6+ and 3 (the old
        # 'except IOError, e' form is a SyntaxError on Python 3)
        if e.errno == errno.EPIPE:
            sys.exit(0)
        else:
            raise
def safe_pprint(p_str):
    """Pretty-print *p_str*, exiting quietly if stdout's pipe has closed.

    Same EPIPE handling as safe_print: a closed pipe means the consumer is
    done, so exit 0; any other IOError is re-raised.
    """
    try:
        pprint.pprint(p_str)
    except IOError as e:  # Python 3 compatible except syntax
        if e.errno == errno.EPIPE:
            sys.exit(0)
        else:
            raise
def load_cloudinitd_db(run_name):
    """Load connection settings for *run_name* from the cloudinit.d DB.

    :param run_name: name of the cloudinit.d run (database) to load
    :returns: dict with rabbitmq_* settings and, when a basenode service
        exists, gateway/system settings; optional attributes map to None
    :raises Exception: if the plan has neither rabbitmq nor basenode services
    """
    # doing imports within function because they are not needed elsewhere
    # and they are surprisingly expensive.
    # (and this is generally only called once)
    from cloudinitd.user_api import CloudInitD
    from cloudinitd.exceptions import APIUsageException, ConfigException

    settings = {}  # renamed from 'vars' to avoid shadowing the builtin
    home = os.environ['HOME']
    try:
        cid = CloudInitD(home + '/.cloudinitd', db_name=run_name, terminate=False, boot=False, ready=False)
    except APIUsageException as e:  # Python 3 compatible except/print syntax
        print("Problem loading records from cloudinit.d: %s" % str(e))
        raise

    svc_list = cid.get_all_services()
    services = dict((svc.name, svc) for svc in svc_list)
    rabbitmq = services.get('rabbitmq')
    basenode = services.get('basenode')
    if not rabbitmq and not basenode:
        raise Exception("cloudinit.d plan has neither rabbitmq or basenode services")

    def _optional(svc, attr):
        # Optional bag attributes come back as None instead of raising.
        try:
            return svc.get_attr_from_bag(attr)
        except ConfigException:
            return None

    if rabbitmq:
        settings['rabbitmq_host'] = rabbitmq.get_attr_from_bag("rabbitmq_host")
        settings['rabbitmq_username'] = rabbitmq.get_attr_from_bag("rabbitmq_username")
        settings['rabbitmq_password'] = rabbitmq.get_attr_from_bag("rabbitmq_password")
        settings['rabbitmq_exchange'] = _optional(rabbitmq, "rabbitmq_exchange")
    else:
        # Fall back to the basenode service for rabbitmq connectivity.
        settings['rabbitmq_host'] = basenode.get_attr_from_bag("hostname")
        settings['rabbitmq_username'] = basenode.get_attr_from_bag("rabbitmq_username")
        settings['rabbitmq_password'] = basenode.get_attr_from_bag("rabbitmq_password")
        settings['rabbitmq_exchange'] = _optional(basenode, "rabbitmq_exchange")

    if basenode:
        settings['coi_services_system_name'] = _optional(basenode, "coi_services_system_name")
        settings['gateway_port'] = _optional(basenode, "gateway_port")
        settings['gateway_host'] = _optional(basenode, "hostname")
        settings['dashi_sysname'] = _optional(basenode, "dashi_sysname")
    return settings
|
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader,random_split
import torchvision
from torchvision import datasets, models, transforms
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import PIL
import pickle
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Project root holding text_data/ and image_data/ (machine-specific path).
WORKING_PATH="C:/Users/D-blue/Desktop/Humen_Behaviour/project/"
# Maximum caption length in tokens — presumably; TODO confirm against the text model.
TEXT_LENGTH=75
# Hidden size of the text encoder — presumably; TODO confirm.
TEXT_HIDDEN=256
"""
read text file, find corresponding image path
"""
def load_data():
    """Index the train/test/valid caption files by image id.

    Only entries whose image file exists under image_data/ are kept.
    :returns: dict mapping int image id -> {"text": sentence, "group": label}
    """
    data_set = dict()

    def _register(image, sentence, group):
        # Keep the entry only when its image exists on disk.
        if os.path.isfile(os.path.join(WORKING_PATH, "image_data/", image + ".jpg")):
            data_set[int(image)] = {"text": sentence, "group": group}

    # NOTE(review): eval() on file lines executes arbitrary code; this is
    # safe only if the text files are trusted. ast.literal_eval would be
    # the safer choice if the lines are plain literals.
    for dataset in ["train"]:
        # 'with' ensures the file is closed (the original leaked handles).
        with open(os.path.join(WORKING_PATH, "text_data/", dataset + ".txt"), "rb") as file:
            for line in file:
                content = eval(line)
                # train format: group label sits at index 2
                _register(content[0], content[1], content[2])
    for dataset in ["test", "valid"]:
        with open(os.path.join(WORKING_PATH, "text_data/", dataset + ".txt"), "rb") as file:
            for line in file:
                content = eval(line)
                # test/valid format: group label sits at index 3, not 2
                _register(content[0], content[1], content[3])
    return data_set
# Build the id -> {text, group} index once at import time.
data_set=load_data()
"""
load image data
"""
# Output folder (under WORKING_PATH) for cached per-image features.
image_feature_folder="image_feature_data_temp"
# pretrain dataloader
class pretrain_data_set(Dataset):
def __init__(self, data):
self.data=data
self.image_ids=list(data.keys())
for id in data.keys():
self.data[id]["image_path"] = os.path.join(WORKING_PATH,"image_data/",str(id)+".jpg")
# load image
def __image_loader(self,id):
path=self.data[id]["image_path"]
img_pil = PIL.Image.open(path)
transform = transforms.Compose([transforms.Resize((448,448)),
transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
img_tensor = transform(img_pil)
return img_tensor
def __getitem__(self, index):
id=self.image_ids[index]
image=self.__image_loader(id)
return id,image
def __len__(self):
return len(self.image_ids)
# Each 448x448 image is later split into a 14x14 grid of 32x32 patches.
sub_image_size=32 #448/14
# Patches are upscaled to 256 and ImageNet-normalized before ResNet-50.
sub_graph_preprocess = transforms.Compose([
    transforms.ToPILImage(mode=None),
    transforms.Resize(256),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# Dataset over every image in data_set, built once at import time.
all_pretrain_dataset=pretrain_data_set(data_set)
"""
generate data
"""
class Identity(torch.nn.Module):
    """Pass-through module: forward returns its input unchanged.

    Used to strip the classifier head off a pretrained network so the
    penultimate features come out of forward().
    """

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        # No transformation at all.
        return x
def resnet50_predictor():
    """Extract per-patch ResNet50 features for every image and save them as .npy.

    Each 448x448 image is cut into a 14x14 grid of 32x32 patches; every patch
    is resized/normalized and fed through ResNet50 with its final fc layer
    replaced by Identity, so the output is the penultimate feature vector.
    One file per image is written to WORKING_PATH/image_feature_folder, named
    after the image id, shaped (196, feature_dim).

    NOTE(review): reads the module-level globals ``all_pretrain_loader`` and
    ``device``; the loader must exist before this is called.
    """
    # extract the input for last fc layer in resenet50
    resnet50=torchvision.models.resnet50(pretrained=True)
    # Freeze all weights -- inference only, no gradients needed.
    for param in resnet50.parameters():
        param.requires_grad = False
    resnet50.fc = Identity()
    resnet50 = resnet50.to(device)
    resnet50.eval()
    # save the output in .npy file
    resnet50_output_path=os.path.join(WORKING_PATH,image_feature_folder)
    if not os.path.exists(resnet50_output_path):
        os.makedirs(resnet50_output_path)
    with torch.no_grad():
        # Upper bound on image count; the last batch may be smaller.
        total=len(all_pretrain_loader)*all_pretrain_loader.batch_size
        count=0
        time_s=time.perf_counter()
        for img_index,img in all_pretrain_loader:
            # seperate img(448,448) into 14*14 images with size (32,32)
            # [0,1,2,3,4,5,6,7,8,9,10,11,12,13]
            # [14,15,16,17,18,................]
            # [28,...]
            # ...
            # [182,....,195]
            sub_img_output=list()
            for column in range(14):
                for row in range(14):
                    # resize image from (32,32) to (256,256)
                    sub_image_original=img[:,:,sub_image_size*row:sub_image_size*(row+1),sub_image_size*column:sub_image_size*(column+1)]
                    # Normalize each patch in the batch, then stack back together.
                    sub_image_normalized=torch.stack(list(map(lambda image:sub_graph_preprocess(image),sub_image_original)),dim=0)
                    output=resnet50(sub_image_normalized.to(device))
                    sub_img_output.append(output.to("cpu").numpy())
            # (196, batch, dim) -> (batch, 196, dim) so each image's patches are contiguous.
            sub_img_output=np.array(sub_img_output).transpose([1,0,2])
            # save averaged attribute to "resnet50_output", same name as the image
            for index,sub_img_index in enumerate(img_index):
                np.save(os.path.join(resnet50_output_path,str(sub_img_index.item())),sub_img_output[index])
            time_e=time.perf_counter()
            count+=all_pretrain_loader.batch_size
            total_time=time_e-time_s
            # Progress line with a crude h:m:s ETA based on average speed so far.
            print(f"Completed {count}/{total} time left={int((total-count)*total_time/count/60/60)}:{int((total-count)*total_time/count/60%60)}:{int((total-count)*total_time/count%60)} speed={round(total_time/count,3)}sec/image")
# 32 is the minimum batch size can achieve best performance
all_pretrain_loader = DataLoader(all_pretrain_dataset,batch_size=64)
# it will take really long time to run...
# NOTE: resnet50_predictor() reads all_pretrain_loader as a global, so the
# loader must be (and is) defined before this call.
resnet50_predictor()
"""
test the image split
"""
# if __name__ == "__main__":
# # can be used to create image feature data
# # resnet50_predictor()
# for img_index,img in all_loader:
# temp_img=img
# print(img[0].size())
# plt.imshow(img[0].permute(1,2,0))
# plt.show()
# print("======================================")
# # try to seperate
# for column in range(14):
# for row in range(14):
# sub_index=row*14+column
# sub_image_original=img[0][:,sub_image_size*row:sub_image_size*(row+1),sub_image_size*column:sub_image_size*(column+1)]
# sub_image_normalized=sub_graph_preprocess(sub_image_original)
# # show original sub image
# plt.imshow(sub_image_original.permute(1,2,0))
# plt.show()
# # show normalized sub image
# plt.imshow(sub_image_normalized.permute(1,2,0))
# plt.show()
# print(sub_index)
# print(sub_image_original.size())
# print(sub_image_normalized.size())
# break
# break
# break
|
# Range: sequence representing an arithmetic progression of integers.
# By default a range starts from 0.
a = range(3)
print(a)
# BUG FIX: the original printed the range object itself (`print(a)`) on
# every iteration instead of the current element.
for i in a:
    print(i)
# Iterating over a range
for i in range(5):
    print(i)
"""
Range signature
1. One argument: means argument stop value
    range(stop)
2. Two arguments: means argument contains start and stop values
    range(start,stop)
3. Three arguments: means argument contains start,stop and step values
    range(start,stop,step)
* Range does not support keyword arguments
"""
# Iterating over a list
b =[3423,23423,465,786,8132,6578]
for i in b:
    print(i)
# Enumerate
# Constructs an iterable of (index, value) tuples around an iterable object
print('Enumerate list 1: ')
l1 =[232,4456,567,879,980,1346,658]
for i in enumerate(l1):
    print(i)
# Enumerate using tuple unpacking
print('Enumerate list 1 using tuple unpacking: ')
for i,v in enumerate(l1):
    print(f"index= {i}, value= {v}")
|
from __future__ import print_function, absolute_import
import numpy as np
import vtk
from .vtk_camera_manipulator import vtkCameraManipulator
np_array = np.array
np_cross = np.cross
class vtkPVTrackballPan(vtkCameraManipulator):
    """Camera manipulator that pans the active camera with the mouse.

    Each mouse-move translates both the focal point and the camera
    position by the world-space delta between the previous and current
    event positions, computed at the focal plane's display depth.
    Holding the X / Y / Z key constrains motion to that axis.
    """

    # Per-axis motion masks for constrained panning (key -> factors).
    _AXIS_FACTORS = {
        'X': (1., 0., 0.),
        'Y': (0., 1., 0.),
        'Z': (0., 0., 1.),
    }

    def __init__(self):
        vtkCameraManipulator.__init__(self)

    def OnMouseMove(self, x, y, ren, iren):
        if ren is None:
            return
        camera = ren.GetActiveCamera()

        # Display-space depth of the focal point: panning happens in the
        # plane at this depth.
        focal = list(camera.GetFocalPoint())
        self.ComputeWorldToDisplay(ren, focal[0], focal[1], focal[2], focal)
        depth = focal[2]

        # World coordinates of the current and previous mouse positions.
        cur = iren.GetEventPosition()
        new_point = [0, 0, 0, 0]
        self.ComputeDisplayToWorld(ren, cur[0], cur[1], depth, new_point)
        prev = iren.GetLastEventPosition()
        old_point = [0, 0, 0, 0]
        self.ComputeDisplayToWorld(ren, prev[0], prev[1], depth, old_point)

        # Optional single-axis constraint from the pressed key (KeyCode may
        # not be a string, hence the AttributeError fallback).
        try:
            key = self.KeyCode.upper()
        except AttributeError:
            key = self.KeyCode
        factors = self._AXIS_FACTORS.get(key, (1., 1., 1.))

        motion = [(old_point[axis] - new_point[axis])
                  * self.TranslationFactor * factors[axis]
                  for axis in range(3)]

        # Shift focal point and camera position by the same vector.
        focal = camera.GetFocalPoint()
        position = camera.GetPosition()
        camera.SetFocalPoint(motion[0] + focal[0],
                             motion[1] + focal[1],
                             motion[2] + focal[2])
        camera.SetPosition(motion[0] + position[0],
                           motion[1] + position[1],
                           motion[2] + position[2])

        if iren.GetLightFollowCamera():
            ren.UpdateLightsGeometryToFollowCamera()
        iren.Render()
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/ContactPoint
Release: R5
Version: 4.5.0
Build ID: 0d95498
Last updated: 2021-04-03T00:34:11.075+00:00
"""
from pydantic import Field
from . import fhirtypes
from . import datatype
class ContactPoint(datatype.DataType):
    """Disclaimer: Any field name ends with ``__ext`` doesn't part of
    Resource StructureDefinition, instead used to enable Extensibility feature
    for FHIR Primitive Data Types.
    Details of a Technology mediated contact point (phone, fax, email, etc.).
    Details for all kinds of technology mediated contact points for a person or
    organization, including telephone, email, etc.
    """
    # Fixed discriminator for this FHIR datatype.
    resource_type = Field("ContactPoint", const=True)
    # Validity window during which this contact point was/is usable.
    period: fhirtypes.PeriodType = Field(
        None,
        alias="period",
        title="Time period when the contact point was/is in use",
        description=None,
        # if property is element of this resource.
        element_property=True,
    )
    # Preference order among a set of contacts; lower values are preferred.
    rank: fhirtypes.PositiveInt = Field(
        None,
        alias="rank",
        title="Specify preferred order of use (1 = highest)",
        description=(
            "Specifies a preferred order in which to use a set of contacts. "
            "ContactPoints with lower rank values are more preferred than those "
            "with higher rank values."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    rank__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_rank", title="Extension field for ``rank``."
    )
    # Communications system required to reach this contact point.
    system: fhirtypes.Code = Field(
        None,
        alias="system",
        title="phone | fax | email | pager | url | sms | other",
        description=(
            "Telecommunications form for contact point - what communications system"
            " is required to make use of the contact."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["phone", "fax", "email", "pager", "url", "sms", "other"],
    )
    system__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_system", title="Extension field for ``system``."
    )
    # Purpose of the contact point (home, work, ...).
    use: fhirtypes.Code = Field(
        None,
        alias="use",
        title="home | work | temp | old | mobile - purpose of this contact point",
        description="Identifies the purpose for the contact point.",
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["home", "work", "temp", "old", "mobile"],
    )
    use__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_use", title="Extension field for ``use``."
    )
    # The actual address/number in a form meaningful to the chosen system.
    value: fhirtypes.String = Field(
        None,
        alias="value",
        title="The actual contact point details",
        description=(
            "The actual contact point details, in a form that is meaningful to the "
            "designated communication system (i.e. phone number or email address)."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    value__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_value", title="Extension field for ``value``."
    )
    @classmethod
    def elements_sequence(cls):
        """returning all elements names from ``ContactPoint`` according specification,
        with preserving original sequence order.
        """
        return ["id", "extension", "system", "value", "use", "rank", "period"]
|
# -*- coding: utf-8 -*-
"""
@project: Sorting-Algorithms
@version: v1.0.0
@file: main.py
@brief: main file
@software: PyCharm
@author: Kai Sun
@email: autosunkai@gmail.com
@date: 2021/8/1 21:08
@updated: 2021/8/1 21:08
"""
from bubble_sort import *
from selection_sort import *
from insertion_sort import *
from shell_sort import *
from merge_sort import *
from quick_sort import *
from heap_sort import *
from bucket_sort import *
from counting_sort import *
from radix_sort import *
if __name__ == '__main__':
    # Pick the algorithm under test; every imported sorter shares the
    # same list-in / list-out interface.
    sort = radix_sort
    arr = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
    print('Raw:', arr)
    arr = sort(arr)
    print('Sorted:', arr)
|
from django.conf.urls import url
from django.urls import include, path
from django.contrib import admin
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Application routes, all mounted at the site root and distinguished
    # by their URL namespaces.
    path(r'', include('apps.web.urls', namespace='web')),
    path(r'', include('apps.products.urls', namespace='products')),
    path(r'', include('apps.users.urls', namespace='users')),
    path(r'', include('apps.carts.urls', namespace='carts')),
    path(r'', include('apps.auth.urls', namespace='auth')),
    # Third-party routes (WYSIWYG editor, social auth).
    # NOTE(review): django.conf.urls.url is deprecated in modern Django;
    # re_path/path are the replacements -- confirm target Django version.
    url(r'^froala_editor/', include('froala_editor.urls')),
    url('', include('social_django.urls', namespace='social')),
]
|
"""
Script to update AWS IAM user information
"""
import boto3
client = boto3.client('iam') |
"""
TRAN - a wrapper for traxy/tranback to transform a position (x,y) in pixels
on an input, distorted image to/from an output image.
There are two methods: tran.f for forward transforms and tran.b for
the reverse.
The syntax is:
tran.f(original_image,drizzled_image,x,y)
--or--
tran.f(original_image,drizzled_image,List='list')
and
tran.b(drizzled_image,original_image,x,y)
--or--
tran.b(drizzled_image,original_image,List='list')
In the 'list' case the list is a normal text file with two, free-
format columns of X,Y pixel positions.
All the information is extracted from the header by searching for
the drizzle header records. The coefficients file must be present and
have the same name. This is in "drizzle" format, as produced by PyDrizzle,
and not the IDCTAB.
Note - the 'original_image' name must match the string written to
the header by drizzle, exactly.
It is assumed that this script is invoked from Pyraf and that the
traxy and tranback IRAF tasks are available. They are in the dither
package of STSDAS.
Example:
--> import tran
--forwards---
--> tran.f('j8c0c1011_crj.fits[sci,1]','f606w_z61c1_drz.fits[1]',136.109,371.455)
Running Tran Version 0.11 (May 2004)
-Reading drizzle keywords from header...
-Image j8c0c1011_crj.fits[sci,1] was # 3
-to be drizzled onto output f606w_z61c1_drz.fits[1]
-Transforming position...
Xin,Yin: 136.109 371.455 Xout,Yout: 123.000 432.000
--backwards---
Running Tran Version 0.11 (May 2004)
--> tran.b('f606w_z61c1_drz.fits[1]','j8c0c1011_crj.fits[sci,1]',123,432)
-Reading drizzle keywords from header...
-Image j8c0c1011_crj.fits[sci,1] was # 3
-to be drizzled onto output f606w_z61c1_drz.fits[1]
-Transforming position...
Xin,Yin: 136.109 371.455 Xout,Yout: 123.000 432.000
Richard Hook, ST-ECF/STScI, April 2003
Added "List" feature and other small improvements, November 2003
Added trap for error messages in "list" form, May 2004.
Version 0.12 for STSDAS 3.3 release, October 2004
Added more robust handling of wavelengths and DGEO image support.
Version 0.20
PyDrizzle will be automatically run to generate coeffs files if not already
present for input image.
Version 0.21
Syntax for calling PyDrizzle updated for new 'bits' syntax.
Comments: rhook@eso.org
"""
from __future__ import division, print_function # confidence medium
from math import *
import iraf
import sys
import pydrizzle
from stsci.tools import fileutil
PY3K = sys.version_info[0] >= 3
# Some convenient definitions
yes=iraf.yes
no=iraf.no
# Upper bound on the number of DnnnDATA header entries scanned.
MaxImages=999
# Integer truth flags kept for IRAF-style comparisons below.
TRUE=1
FALSE=0
__version__ = '0.21 (Jan2006)'
# A class for drizzle geometrical parameters
class DrizGeoPars:
    """Drizzle geometrical parameters for one input image.

    With no arguments the attributes take the drizzle task defaults.
    Given ``image`` (the drizzled output) and ``inimage`` (the original
    input), the parameters are read from the DnnnXXXX drizzle keywords in
    the header of ``image`` for the entry matching ``inimage``.

    Raises
    ------
    Exception
        If no header entry matches ``inimage``, if the coefficients file
        cannot be found or regenerated, or if secondary parameters are
        present (unsupported).
    """
    # Constructor, set to drizzle default values
    def __init__(self,image=None,inimage=None):
        if image is None:  # fixed: was '== None'
            self.scale=1.0
            self.coeffs=None
            self.lam=555.0
            self.xsh=0.0
            self.ysh=0.0
            self.rot=0.0
            self.shft_un="input"
            self.shft_fr="input"
            self.align="center"
            self.xgeoim=""
            self.ygeoim=""
            self.d2xscale=0.0
            self.d2yscale=0.0
            self.d2xsh=0.0
            self.d2ysh=0.0
            self.d2rot=0.0
            self.d2shft_fr="output"
        else:
            # Read geometric parameters from a header using an image name as
            # the key
            found=FALSE
            # First search for the entry for this image
            i=1
            while i < MaxImages:
                datkey = 'D%3iDATA' % i
                datkey=datkey.replace(' ','0')
                iraf.keypar(image,datkey,silent='yes')
                # If we can't read this no point considering
                if iraf.keypar.value == '':
                    break
                # If we have a match set flag and leave
                if iraf.keypar.value == inimage:
                    found=TRUE
                    break
                i += 1
            if found:
                print("-Reading drizzle keywords from header...")
                print("-Image ",inimage," was #",i)
                print("-to be drizzled onto output ",image)
            else:
                # BUG FIX: the original used 'raise("...")', which raises a
                # bare string -- a TypeError at runtime in Python 3.
                raise Exception("Failed to get keyword information from header")
            # Now we know that the selected image is present we can
            # get all the other parameters - we don't check whether this
            # succeeds, if it doesn't let it crash
            stem=datkey[:4]
            iraf.keypar(image,stem+"SCAL",silent='yes')
            self.scale=float(iraf.keypar.value)
            iraf.keypar(image,stem+"COEF",silent='yes')
            self.coeffs=iraf.keypar.value
            # Check for existence
            if fileutil.findFile(self.coeffs) == FALSE:
                try:
                    print('\n-Coeffs file not found. Trying to reproduce them using PyDrizzle...')
                    # Try to generate the coeffs file automatically
                    indx = inimage.find('[')
                    p = pydrizzle.PyDrizzle(inimage[:indx],bits_single=None,bits_final=None)
                    del p
                except Exception:
                    # Narrowed from a bare 'except:' so SystemExit and
                    # KeyboardInterrupt are no longer swallowed.
                    print("! Cannot access coefficients file. (",self.coeffs,")")
                    # BUG FIX: string raise -> real exception (see above).
                    raise Exception("File missing or inaccessible.")
            iraf.keypar(image,stem+"LAM",silent='yes')
            if iraf.keypar.value != '':
                self.lam=float(iraf.keypar.value)
            else:
                # Default wavelength when the keyword is absent.
                self.lam=555.0
            iraf.keypar(image,stem+"XSH",silent='yes')
            self.xsh=float(iraf.keypar.value)
            iraf.keypar(image,stem+"YSH",silent='yes')
            self.ysh=float(iraf.keypar.value)
            iraf.keypar(image,stem+"ROT",silent='yes')
            self.rot=float(iraf.keypar.value)
            iraf.keypar(image,stem+"SFTU",silent='yes')
            self.shft_un=iraf.keypar.value
            iraf.keypar(image,stem+"SFTF",silent='yes')
            self.shft_fr=iraf.keypar.value
            iraf.keypar(image,stem+"XGIM",silent='yes')
            self.xgeoim=iraf.keypar.value
            indx = self.xgeoim.find('[')
            # Check for existence
            if fileutil.findFile(self.xgeoim[:indx]) == FALSE and self.xgeoim != '':
                print("! Warning, cannot access X distortion correction image")
                print(" continuing without it. (",self.xgeoim,")")
                self.xgeoim=''
            iraf.keypar(image,stem+"YGIM",silent='yes')
            self.ygeoim=iraf.keypar.value
            indx = self.ygeoim.find('[')
            # Check for existence
            if fileutil.findFile(self.ygeoim[:indx]) == FALSE and self.ygeoim != '':
                print("! Warning, cannot access Y distortion correction image")
                print(" continuing without it. (",self.ygeoim,")")
                self.ygeoim=''
            # The case of the "align" parameter is more tricky, we
            # have to deduce it from INXC keyword
            iraf.keypar(image,stem+"INXC",silent='yes')
            inxc=float(iraf.keypar.value)
            # Need the X and Y dimensions as well - both input and
            # output
            iraf.keypar(inimage,'i_naxis1',silent='yes')
            xdim=int(iraf.keypar.value)
            iraf.keypar(inimage,'i_naxis2',silent='yes')
            ydim=int(iraf.keypar.value)
            self.nxin=xdim
            self.nyin=ydim
            iraf.keypar(image,'i_naxis1',silent='yes')
            xdim=int(iraf.keypar.value)
            iraf.keypar(image,'i_naxis2',silent='yes')
            ydim=int(iraf.keypar.value)
            self.nxout=xdim
            self.nyout=ydim
            if abs(inxc-float(xdim/2)-0.5) < 1e-4:
                self.align='corner'
            else:
                self.align='center'
            # Check for the presence of secondary parameters
            iraf.keypar(image,stem+"SECP",silent='yes')
            if iraf.keypar.value == "yes":
                raise Exception("! Sorry, this version does NOT support secondary parameters")
            else:
                self.secp=FALSE
# Main TRAN methods - f for forward and b for back
#
# inimage - the input image which is to have its WCS updated
# drizimage - the reference image, assumed to contain the drizzle parameters
# in its header
#
# x,y - a single position for transformation
#
# List - a text file name containing x y pairs
#
def f(origimage,drizimage,x=None,y=None,List=None):
    """Forward transform: position(s) on origimage -> position on drizimage.

    Parameters
    ----------
    origimage : str
        Input (distorted) image name, exactly as written into the drizzle
        header keywords.
    drizimage : str
        Drizzled output image whose header holds the drizzle keywords.
    x, y : float, optional
        A single position to transform.
    List : str, optional
        Text file with two free-format columns of X,Y positions; when
        given it takes precedence over x/y.
    """
    # Get the parameters from the header
    GeoPar=DrizGeoPars(drizimage,origimage)
    # Use traxy, along with all the parameters specified above, to
    # transform to the output image
    iraf.traxy.nxin=GeoPar.nxin
    iraf.traxy.nyin=GeoPar.nyin
    iraf.traxy.nxout=GeoPar.nxout
    iraf.traxy.nyout=GeoPar.nyout
    iraf.traxy.scale=GeoPar.scale
    iraf.traxy.xsh=GeoPar.xsh
    iraf.traxy.ysh=GeoPar.ysh
    iraf.traxy.rot=GeoPar.rot
    iraf.traxy.coeffs=GeoPar.coeffs
    iraf.traxy.shft_un=GeoPar.shft_un
    iraf.traxy.shft_fr=GeoPar.shft_fr
    iraf.traxy.align=GeoPar.align
    iraf.traxy.lam=GeoPar.lam
    iraf.traxy.xgeoim=GeoPar.xgeoim
    iraf.traxy.ygeoim=GeoPar.ygeoim
    if List is not None:  # fixed: was '!= None'
        # Read all positions up front; 'with' closes the handle (the
        # original leaked it, and bound it to 'f', shadowing this function).
        with open(List) as posfile:
            lines=posfile.readlines()
        print(" Xin Yin Xout Yout")
        for line in lines:
            x=float(line.split()[0])
            y=float(line.split()[1])
            # renamed from 'str', which shadowed the builtin
            output=iraf.traxy(x,y,mode='h',Stdout=1)
            # Just show the lines of interest
            for outline in output:
                if outline[0:1] == '!':
                    # traxy error message: report and abort.
                    print(outline)
                    sys.exit()
                if outline[0:3] == ' Xi':
                    xin = float(outline.split()[1])
                    yin = float(outline.split()[2])
                    xout = float(outline.split()[4])
                    yout = float(outline.split()[5])
                    print("%10.3f %10.3f %10.3f %10.3f" % (xin,yin,xout,yout))
    else:
        # Transform and display the result
        print("-Transforming position...")
        output=iraf.traxy(x,y,mode='h',Stdout=1)
        # Just show the lines of interest
        for outline in output:
            if outline[0:1] == '!':
                print(outline)
            if outline[0:3] == ' Xi':
                print(outline)
def b(drizimage,origimage,x=None,y=None,List=None):
    """Backward transform: position(s) on drizimage -> position on origimage.

    Parameters
    ----------
    drizimage : str
        Drizzled output image whose header holds the drizzle keywords.
    origimage : str
        Input (distorted) image name, exactly as written into the drizzle
        header keywords.
    x, y : float, optional
        A single position to transform.
    List : str, optional
        Text file with two free-format columns of X,Y positions; when
        given it takes precedence over x/y.
    """
    # Get the parameters from the header
    GeoPar=DrizGeoPars(drizimage,origimage)
    # Use tranback, along with all the parameters specified above, to
    # transform to the output image
    iraf.tranback.nxin=GeoPar.nxin
    iraf.tranback.nyin=GeoPar.nyin
    iraf.tranback.nxout=GeoPar.nxout
    iraf.tranback.nyout=GeoPar.nyout
    iraf.tranback.scale=GeoPar.scale
    iraf.tranback.xsh=GeoPar.xsh
    iraf.tranback.ysh=GeoPar.ysh
    iraf.tranback.rot=GeoPar.rot
    iraf.tranback.coeffs=GeoPar.coeffs
    iraf.tranback.shft_un=GeoPar.shft_un
    iraf.tranback.shft_fr=GeoPar.shft_fr
    iraf.tranback.align=GeoPar.align
    iraf.tranback.lam=GeoPar.lam
    iraf.tranback.xgeoim=GeoPar.xgeoim
    iraf.tranback.ygeoim=GeoPar.ygeoim
    if List is not None:  # fixed: was '!= None'
        # Read all positions up front; 'with' closes the handle (the
        # original leaked it, and bound it to 'f', shadowing f() above).
        with open(List) as posfile:
            lines=posfile.readlines()
        print(" Xin Yin Xout Yout")
        for line in lines:
            x=float(line.split()[0])
            y=float(line.split()[1])
            # renamed from 'str', which shadowed the builtin
            output=iraf.tranback(x,y,mode='h',Stdout=1)
            # Just show the lines of interest
            for outline in output:
                if outline[0:1] == "!":
                    # tranback error message: report and abort.
                    print(outline)
                    sys.exit()
                if outline[0:3] == ' Xi':
                    xin = float(outline.split()[1])
                    yin = float(outline.split()[2])
                    xout = float(outline.split()[4])
                    yout = float(outline.split()[5])
                    print("%10.3f %10.3f %10.3f %10.3f" % (xin,yin,xout,yout))
    else:
        # Transform and display the result
        print("-Transforming position...")
        output=iraf.tranback(x,y,mode='h',Stdout=1)
        # Just show the lines of interest
        for outline in output:
            if outline[0:1] == '!':
                print(outline)
                # NOTE(review): unlike f(), the single-position branch here
                # exits on error; preserved as-is, but the asymmetry with f()
                # looks unintentional -- confirm before unifying.
                sys.exit()
            if outline[0:3] == ' Xi':
                print(outline)
|
import unittest
from unittest.mock import patch
import pygatt
from paulmann.paulmann import Paulmann
from paulmann.models import State, Info
from .mocks import MockAdapter, MockDevice
MAC = "AA:AA:AA:AA:AA:AA"
PWD = "1234"
class PaulmannTestCase(unittest.TestCase):
    """Integration tests for the Paulmann BLE light wrapper.

    NOTE(review): setUp() currently uses the real GATTTool backend, so these
    tests talk to a physical light at MAC; switch setUp() to setUpMock() for
    offline runs.
    """
    # Light under test, re-created for every test by setUp().
    _light: Paulmann = None
    def setUpReal(self):
        # Connects through the real pygatt GATTTool backend (needs hardware).
        self._light = Paulmann(MAC, PWD, pygatt.backends.GATTToolBackend())
    @patch('pygatt.backends.GATTToolBackend')
    def setUpMock(self, adapter):
        # Same wrapper, but connect() is rerouted to a MockDevice.
        self._light = Paulmann(MAC, PWD, adapter=adapter)
        def new_connect(mac:str):
            return MockDevice(adapter)
        adapter.connect = new_connect
    def setUp(self):
        self.setUpReal()
        #self.setUpMock()
    def tearDown(self):
        self._light.disconnect()
    @patch('pygatt.backends.GATTToolBackend')
    def test_connect_and_authenticate(self, adapter):
        # get_device() must start the adapter and connect to our MAC.
        p:Paulmann = Paulmann(MAC, PWD, adapter=adapter)
        d = p.get_device()
        adapter.start.assert_called()
        adapter.connect.assert_called_with(MAC)
        self.assertIsNot(d, None)
    def test_set_and_get_state(self):
        # Round-trip: what we set is what we read back.
        self._light.set_state(on=True, brightness=70, color=200)
        s:State = self._light.get_state()
        self.assertEqual(s.on, True)
        self.assertEqual(s.brightness, 70)
        self.assertEqual(s.color, 200)
    def test_switch(self):
        self._light.switch(False)
        self.assertFalse(self._light.is_on())
        self._light.switch(True)
        self.assertTrue(self._light.is_on())
    def test_toggle(self):
        self._light.switch(False)
        self._light.toggle()
        self.assertTrue(self._light.is_on())
        self._light.toggle()
        self.assertFalse(self._light.is_on())
    def test_brightness(self):
        # Out-of-range values are expected to clamp to [0, 100].
        self._light.brightness(40)
        self.assertEqual(self._light.get_brightness(), 40)
        self._light.brightness(-5)
        self.assertEqual(self._light.get_brightness(), 0)
        self._light.brightness(150)
        self.assertEqual(self._light.get_brightness(), 100)
    def test_color(self):
        # Expected clamping to the supported color range (presumably
        # 153-370 mireds -- confirm against the Paulmann implementation).
        self._light.color(250)
        self.assertEqual(self._light.get_color(), 250)
        self._light.color(100)
        self.assertEqual(self._light.get_color(), 153)
        self._light.color(400)
        self.assertEqual(self._light.get_color(), 370)
    def test_all(self):
        # Scratchpad for manual end-to-end runs; the body is intentionally
        # inert (commented code plus a bare string expression).
        #p = Paulmann(MAC, PWD)
        #info = p.get_info()
        #print(info)
        """state = p.get_state()
        print(state)
        p.switch(True)
        print("Light is " + str(p.is_on()))
        sleep(1)
        p.switch(False)
        print("Light is " + str(p.is_on()))
        sleep(1)
        p.toggle()
        print("Light is " + str(p.is_on()))
        sleep(1)
        p.switch(False)
        sleep(1)
        p.brightness(10)
        print("Dimm is " + str(p.get_brightness()))
        p.switch(True)
        sleep(1)
        p.brightness(30)
        print("Dimm is " + str(p.get_brightness()))
        sleep(1)
        p.brightness(50)
        print("Dimm is " + str(p.get_brightness()))
        sleep(1)
        p.brightness(70)
        print("Dimm is " + str(p.get_brightness()))
        sleep(1)
        p.brightness(100)
        print("Dimm is " + str(p.get_brightness()))
        sleep(1)
        print("Dimm is " + str(p.get_brightness()))
        sleep(1)
        p.color(2700)
        print("Color is " + str(p.get_color()))
        sleep(1)
        p.color(3500)
        print("Color is " + str(p.get_color()))
        sleep(1)
        p.color(5000)
        print("Color is " + str(p.get_color()))
        sleep(1)
        p.color(6500)
        print("Color is " + str(p.get_color()))
        sleep(1)
        """
# -*- coding: utf-8 -*-
"""
This overrides the Rubric with the class loaded from the
PUBLICATION_BACKBONE_FACET_MODEL setting if it exists.
"""
from django.conf import settings
from publication_backbone.utils.loader import load_class
#==============================================================================
# Extensibility
#==============================================================================
# Dotted path of the Facet implementation; overridable via Django settings.
FACET_MODEL = getattr(settings, 'PUBLICATION_BACKBONE_FACET_MODEL',
                'publication_backbone.models.defaults.rubricator.facet.Facet')
# Resolve the dotted path to the actual class. The second argument is
# presumably the setting name used in load_class error reporting -- confirm
# against load_class's signature.
Facet = load_class(FACET_MODEL, 'PUBLICATION_BACKBONE_FACET_MODEL')
|
"""
/flash/main.py
Initial setup and configuration
Should be called if there is no SD card inserted
Assumes variables are set in conf.py
Author: Howard Webb
Data: 11/13/2020
"""
import conf
from utime import sleep, ticks_ms, ticks_diff, localtime
import Environ
from pyb import RTC, LED
import Wifi
# Status LEDs: green is set up here but unused below; blue signals setup success.
led_G = LED(2)
led_B = LED(3)
# Get a wifi connection
# Set the real time clock (RTC)
# Shared connection handle, populated by init_now()/the module-level connect.
wifi = None
def set_time():
    """Set the clock from NTP; on any failure, log and carry on."""
    print("Set NTP time")
    try:
        # Imported lazily so boards without the module fail soft here.
        import ntptime
        ntptime.set_time()
    except Exception as err:
        print("Failure setting NTP time -", str(err))
def set_env():
    """Create/refresh the environment variables via the Environ module."""
    # Set the environmental variables
    Environ.create_env()
def set_timer():
    """Program the RTC wakeup so the board restarts every SAMPLE_MIN minutes."""
    # Set the wake-up timer for periodic starting
    tm = conf.SAMPLE_MIN * 60 * 1000 # multiply by seconds per minute and miliseconds
    RTC().wakeup(tm)
def init_now():
    """Run first-boot setup: connect wifi, set clock, env vars and wake timer.

    The blue LED is lit only after every step has completed; a failed wifi
    connection aborts setup (best-effort, no exception escapes).
    """
    # Perform the initialization work
    global wifi
    led_B.off()
    if wifi is None:
        try:
            wifi = Wifi.connect()
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # are no longer swallowed; still best-effort otherwise.
            print("Failure getting connection needed for setup, check SSID and PWD")
            return
    set_time()
    set_env()
    set_timer()
    # blue LED indicates successful setup
    led_B.on()
def init_later():
    """Wait until conf.START_TIME (minute-of-hour), then run init_now().

    BUG FIX: the original called ``utime.localtime()`` / ``utime.sleep()``,
    but only the bare names were imported (``from utime import sleep, ...,
    localtime``), so every call raised NameError. Also renames ``min`` to
    avoid shadowing the builtin.
    """
    # wait till reach start time
    led_B.off()
    tm = localtime()
    minute = tm[4]
    # Coarse wait, then poll once per second until the target minute.
    # NOTE(review): '(minute - 1) * 60' waits relative to the *current*
    # minute, not the distance to START_TIME -- preserved as written, but
    # confirm this is intended.
    wait = (minute - 1) * 60 # seconds to delay
    sleep(wait)
    # count down the last seconds
    while minute != conf.START_TIME:
        sleep(1)
        tm = localtime()
        minute = tm[4]
    init_now()
# Default to wifi connection for using WebREPL
# (runs at import time; failures are logged and otherwise ignored)
try:
    wifi = Wifi.connect() # Comment out this line if not using WebREPL
    pass
except Exception as e:
    print("Failure getting connection needed for setup:", str(e))
|
from .models import Player, Room
def get_player_from_request(request):
    """
    Get the player associated with a request, via its cookie.
    :param request: the request
    :return: the matching Player, or None when the request carries no
        (valid) player cookie
    """
    cookie_value = request.COOKIES.get(Player.PLAYER_COOKIE_NAME, None)
    return get_player_from_cookie(cookie_value)
def get_player_from_cookie(player_id: str):
    """
    Look up the Player matching a cookie value.
    :param player_id: the player id from the cookie (may be None)
    :return: the matching Player, or None when absent or unknown
    """
    # Guard clause instead of nesting: no cookie means no player.
    if player_id is None:
        return None
    try:
        return Player.objects.get(cookie=player_id)
    except Player.DoesNotExist:
        return None
def execute_data_minimisation(dry_run=False):
    """
    Remove all players that are offline and all rooms that only have offline players.
    :param dry_run: does not really remove data if True
    :return: two-element list: [rooms removed, players removed]
    """
    # A room is removable when none of its players are online.
    # NOTE(review): assumes room.players is directly iterable (e.g. a
    # property) rather than a bare related manager -- confirm on the model.
    deleted_rooms = [
        room for room in Room.objects.all()
        if not any(player.online for player in room.players)
    ]
    players_in_deleted_rooms = Player.objects.filter(room__in=deleted_rooms)
    # A player is removable when offline and either roomless or only in
    # rooms that are themselves being removed.
    deleted_players = [
        player for player in Player.objects.all()
        if not player.online
        and (player.room is None or player in players_in_deleted_rooms)
    ]
    if not dry_run:
        for room in deleted_rooms:
            room.delete()
        for player in deleted_players:
            player.delete()
    return [deleted_rooms, deleted_players]
|
# Canonical keypoint names, in model output order (PoseNet 17-keypoint layout).
PART_NAMES = [
    "nose", "leftEye", "rightEye", "leftEar", "rightEar", "leftShoulder",
    "rightShoulder", "leftElbow", "rightElbow", "leftWrist", "rightWrist",
    "leftHip", "rightHip", "leftKnee", "rightKnee", "leftAnkle", "rightAnkle"
]
NUM_KEYPOINTS = len(PART_NAMES)
# Reverse lookup: keypoint name -> index in PART_NAMES.
PART_IDS = {pn: pid for pid, pn in enumerate(PART_NAMES)}
# Keypoint pairs joined when drawing the skeleton.
CONNECTED_PART_NAMES = [
    ("leftHip", "leftShoulder"), ("leftElbow", "leftShoulder"),
    ("leftElbow", "leftWrist"), ("leftHip", "leftKnee"),
    ("leftKnee", "leftAnkle"), ("rightHip", "rightShoulder"),
    ("rightElbow", "rightShoulder"), ("rightElbow", "rightWrist"),
    ("rightHip", "rightKnee"), ("rightKnee", "rightAnkle"),
    ("leftShoulder", "rightShoulder"), ("leftHip", "rightHip")
]
CONNECTED_PART_INDICES = [(PART_IDS[a], PART_IDS[b]) for a, b in CONNECTED_PART_NAMES]
LOCAL_MAXIMUM_RADIUS = 1
# Parent -> child edges of the pose tree, rooted at the nose.
POSE_CHAIN = [
    ("nose", "leftEye"), ("leftEye", "leftEar"), ("nose", "rightEye"),
    ("rightEye", "rightEar"), ("nose", "leftShoulder"),
    ("leftShoulder", "leftElbow"), ("leftElbow", "leftWrist"),
    ("leftShoulder", "leftHip"), ("leftHip", "leftKnee"),
    ("leftKnee", "leftAnkle"), ("nose", "rightShoulder"),
    ("rightShoulder", "rightElbow"), ("rightElbow", "rightWrist"),
    ("rightShoulder", "rightHip"), ("rightHip", "rightKnee"),
    ("rightKnee", "rightAnkle")
]
# Limb-segment pairs treated as movable units; PIVOT_POINT[i] is presumably
# the rotation pivot for MOVABLE_PART[i] -- confirm against the consumer code.
MOVABLE_PART = [
    [("rightShoulder", "rightElbow"), ("rightElbow", "rightWrist")],
    [("leftShoulder", "leftElbow"), ("leftElbow", "leftWrist")],
    [("rightElbow", "rightShoulder"), ("rightShoulder", "rightHip")],
    [("leftElbow", "leftShoulder"), ("leftShoulder", "leftHip")],
    [("leftHip", "leftKnee"), ("leftKnee", "leftAnkle")],
    [("rightHip", "rightKnee"), ("rightKnee", "rightAnkle")]
]
PIVOT_POINT = [
    "rightElbow", "leftElbow", "rightShoulder", "leftShoulder", "leftKnee", "rightKnee"
]
PARENT_CHILD_TUPLES = [(PART_IDS[parent], PART_IDS[child]) for parent, child in POSE_CHAIN]
# Body-part segmentation channel names (BodyPix-style layout).
PART_CHANNELS = [
  'left_face',
  'right_face',
  'right_upper_leg_front',
  'right_lower_leg_back',
  'right_upper_leg_back',
  'left_lower_leg_front',
  'left_upper_leg_front',
  'left_upper_leg_back',
  'left_lower_leg_back',
  'right_feet',
  'right_lower_leg_front',
  'left_feet',
  'torso_front',
  'torso_back',
  'right_upper_arm_front',
  'right_upper_arm_back',
  'right_lower_arm_back',
  'left_lower_arm_front',
  'left_upper_arm_front',
  'left_upper_arm_back',
  'left_lower_arm_back',
  'right_hand',
  'right_lower_arm_front',
  'left_hand'
]
"""Render post.md to standalone HTML and PDF with pygments highlighting."""
import os
import subprocess

def _run_pandoc(args):
    # subprocess with an argument list avoids shell parsing and, unlike
    # os.system (whose return code the original ignored), surfaces a
    # non-zero pandoc exit status as an exception.
    subprocess.run(["pandoc"] + args, check=True)

_run_pandoc(["post.md", "-s", "--highlight-style", "pygments", "-o", "ex.html"])
# _run_pandoc(["-t", "beamer", "pres.md", "-o", "ex.pdf"])
_run_pandoc(["post.md", "-s", "--highlight-style", "pygments", "-o", "ex.pdf"])
|
"""
Title: utils.py
Purpose: Contains utility functions for yt answer tests
Notes:
"""
import functools
import hashlib
import inspect
import os
import numpy as np
import pytest
import yaml
from yt.config import ytcfg
from yt.convenience import load, simulation
from yt.data_objects.selection_data_containers import YTRegion
from yt.data_objects.static_output import Dataset
from yt.frontends.ytdata.api import save_as_dataset
from yt.units.yt_array import \
YTArray, \
YTQuantity
from yt.utilities.exceptions import \
YTOutputNotIdentified
import yt.visualization.particle_plots as particle_plots
import yt.visualization.plot_window as pw
import yt.visualization.profile_plotter as profile_plotter
from yt.visualization.volume_rendering.scene import Scene
def _streamline_for_io(params):
    r"""
    Convert test parameters into plain, io-friendly values.

    Keys and values that are functions are replaced by their names, nested
    iterables are flattened into strings, and Scene/YTRegion values are
    replaced by short tags, so that the resulting dict can be written to
    yaml without python-specific anchors.

    Parameters
    ----------
    params : dict
        The dictionary of test parameters in the form
        {param_name : param_value}.

    Returns
    -------
    cleaned : dict
        The parsed and converted {param_name : param_value} pairs.
    """
    cleaned = {}
    for key, value in params.items():
        # User-defined functions are represented by their names.
        if inspect.isfunction(key):
            key = key.__name__
        if inspect.isfunction(value):
            value = value.__name__
        # Keys and values may both be arbitrarily nested iterables, e.g.
        # [None, ('sphere', (center, (0.1, 'unitary')))], so the string
        # conversion recurses.
        if hasattr(key, '__iter__') and not isinstance(key, str):
            key = _iterable_to_string(key)
        if hasattr(value, '__iter__') and not isinstance(value, str):
            value = _iterable_to_string(value)
        # Scene/Region objects are not io-friendly at all; tag them.
        if isinstance(value, Scene):
            value = 'Scene'
        elif isinstance(value, YTRegion):
            value = 'Region'
        cleaned[key] = value
    return cleaned
def _iterable_to_string(iterable):
r"""
An extension of streamline_for_io that does the work of making an
iterable more io-friendly.
Parameters
----------
iterable : python iterable
The object to be parsed and converted.
Returns
-------
result : str
The io-friendly version of the passed iterable.
"""
result = iterable.__class__.__name__
for elem in iterable:
# Check for user-defined functions
if inspect.isfunction(elem):
result += '_' + elem.__name__
# Non-string iterables (e.g., lists, tuples, etc.)
elif not isinstance(elem, str) and hasattr(elem, '__iter__'):
result += '_' + _iterable_to_string(elem)
# Non-string non-iterables (ints, floats, etc.)
elif not isinstance(elem, str) and not hasattr(elem, '__iter__'):
result += '_' + str(elem)
# Strings
elif isinstance(elem, str):
result += '_' + elem
return result
def _hash_results(results):
r"""
Driver function for hashing the test result.
Parameters
----------
results : dict
Dictionary of {test_name : test_result} pairs.
Returns
-------
results : dict
Same as the passed results, but the test_results are now
hex digests of the hashed test_result.
"""
# Here, results should be comprised of only the tests, not the test
# parameters
for test_name, test_value in results.items():
# These tests have issues with python-specific anchors and so
# are already hashed
# (see their definitions in yt/utilites/answer_testing/answer_tests.py)
if test_name in ['projection_values', 'pixelized_projection_values', 'grid_values']:
continue
else:
results[test_name] = generate_hash(test_value)
return results
def _hash_dict(data):
r"""
Specifically handles hashing a dictionary object.
Parameters
----------
data : dict
The dictionary to be hashed.
Returns
-------
hd.hexdigest : str
The hex digest of the hashed dictionary.
"""
hd = None
for key, value in sorted(data.items()):
if hd is None:
hd = hashlib.md5(bytearray(key.encode('utf8')) + bytearray(value))
else:
hd.update(bytearray(key.encode('utf8')) + bytearray(value))
return hd.hexdigest()
def generate_hash(data):
    r"""
    Actually performs the hash operation.

    Parameters
    ----------
    data : python object
        Data to be hashed. Must be bytes-like, a contiguous ndarray,
        or a dict of hashable values.

    Returns
    -------
    hd : str
        Hex digest of the hashed data.

    Raises
    ------
    TypeError
        If the data is neither md5-hashable nor a dict.
    """
    # Sometimes md5 complains that the data is not contiguous, so we
    # make it so here
    if isinstance(data, np.ndarray):
        data = np.ascontiguousarray(data)
    # Try to hash. Some tests return hashable types (like ndarrays) and
    # others don't (such as dictionaries)
    try:
        hd = hashlib.md5(data).hexdigest()
    # Handle those tests that return non-hashable types. This is done
    # here instead of in the tests themselves to try and reduce boilerplate
    # and provide a central location where all of this is done in case it needs
    # to be changed
    except TypeError:
        if isinstance(data, dict):
            hd = _hash_dict(data)
        else:
            # Re-raise the original exception so the message and
            # traceback are preserved (``raise TypeError`` would
            # discard them).
            raise
    return hd
def _save_result(data, outputFile):
    r"""
    Saves the test results to the desired answer file.

    The file is opened in append mode so multiple result sets can be
    accumulated into the same answer file.

    Parameters
    ----------
    data : dict
        Test results to be saved.
    outputFile : str
        Name of file to save results to.
    """
    with open(outputFile, 'a') as handle:
        yaml.dump(data, handle)
def _compare_result(data, outputFile):
    r"""
    Compares the just-generated test results to those that are already
    saved.

    Parameters
    ----------
    data : dict
        Just-generated test results.
    outputFile : str
        Name of file where answers are already saved.
    """
    def _check_vals(new_vals, old_vals):
        # Recursively descend into nested dicts and assert equality of
        # every leaf value against the saved answer.
        for key, new_value in new_vals.items():
            if isinstance(new_value, dict):
                _check_vals(new_value, old_vals[key])
            else:
                assert new_value == old_vals[key]

    # Load the saved data, then compare
    with open(outputFile, 'r') as handle:
        saved_data = yaml.safe_load(handle)
    _check_vals(data, saved_data)
def _handle_hashes(save_dir_name, fname, hashes, answer_store):
    r"""
    Driver function for deciding whether to save the test results or
    compare them to already saved results.

    Parameters
    ----------
    save_dir_name : str
        Name of the directory to save results or where results are
        already saved.
    fname : str
        Name of the file to either save results to or where results
        are already saved.
    hashes : dict
        The just-generated test results.
    answer_store : bool
        If true, save the just-generated test results, otherwise,
        compare them to the previously saved results.
    """
    # The answer file lives inside the answer directory
    answer_file = os.path.join(save_dir_name, fname)
    if answer_store:
        # Generate-and-store mode
        _save_result(hashes, answer_file)
    else:
        # Comparison mode
        _compare_result(hashes, answer_file)
def _save_arrays(save_dir_name, fbasename, arrays, answer_store):
    r"""
    Driver routine for either saving the raw arrays resulting from the
    tests, or compare them to previously saved results.

    Parameters
    ----------
    save_dir_name : str
        Name of the directory to save results or where results are
        already saved.
    fbasename : str
        Base name (no extension) of the file to either save results
        to or where results are already saved.
    arrays : dict
        The raw arrays generated from the tests, with the test name
        as a key.
    answer_store : bool
        If true, save the just-generated test results, otherwise,
        compare them to the previously saved results.
    """
    # TODO: not implemented yet; currently a no-op placeholder.
    pass
def can_run_ds(ds_fn, file_check = False):
    r"""
    Validates whether or not a given input can be loaded and used as a
    Dataset object.
    """
    # An already-instantiated Dataset is trivially usable
    if isinstance(ds_fn, Dataset):
        return True
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        # Only verify that the file exists; skip the (slow) load attempt
        return os.path.isfile(os.path.join(path, ds_fn))
    # Fall back to attempting an actual load
    try:
        load(ds_fn)
    except YTOutputNotIdentified:
        return False
    return True
def can_run_sim(sim_fn, sim_type, file_check = False):
    r"""
    Validates whether or not a given input can be used as a simulation
    time-series object.
    """
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if file_check:
        # Only verify that the file exists; skip instantiating the sim
        return os.path.isfile(os.path.join(path, sim_fn))
    try:
        simulation(sim_fn, sim_type)
        return True
    except YTOutputNotIdentified:
        return False
def data_dir_load(ds_fn, cls = None, args = None, kwargs = None):
    r"""
    Loads a sample dataset from the designated test_data_dir for use in
    testing.
    """
    # Already a dataset; nothing to load
    if isinstance(ds_fn, Dataset):
        return ds_fn
    args = args or ()
    kwargs = kwargs or {}
    path = ytcfg.get("yt", "test_data_dir")
    if not os.path.isdir(path):
        return False
    if cls is None:
        ds = load(ds_fn, *args, **kwargs)
    else:
        ds = cls(os.path.join(path, ds_fn), *args, **kwargs)
    # Touch the index so it is instantiated before the dataset is used
    ds.index
    return ds
def requires_ds(ds_fn, file_check = False):
    r"""
    Meta-wrapper for specifying required data for a test and
    checking if said data exists.

    Returns a decorator: when the required dataset is unavailable the
    decorated test is skipped, otherwise the test body runs normally.
    """
    def ffalse(func):
        @functools.wraps(func)
        def skip(*args, **kwargs):
            # Skip rather than fail: missing sample data is an
            # environment issue, and the message already says "skipping".
            msg = "{} not found, skipping {}.".format(ds_fn, func.__name__)
            pytest.skip(msg)
        return skip
    def ftrue(func):
        @functools.wraps(func)
        def true_wrapper(*args, **kwargs):
            # Actually invoke the wrapped test. Returning ``func``
            # uncalled would make the test pass without ever running.
            return func(*args, **kwargs)
        return true_wrapper
    if not can_run_ds(ds_fn, file_check):
        return ffalse
    else:
        return ftrue
def requires_sim(sim_fn, sim_type, file_check = False):
    r"""
    Meta-wrapper for specifying a required simulation for a test and
    checking if said simulation exists.

    Returns a decorator: when the required simulation is unavailable the
    decorated test is skipped, otherwise the test body runs normally.
    """
    def ffalse(func):
        @functools.wraps(func)
        def skip(*args, **kwargs):
            # Skip rather than fail: missing sample data is an
            # environment issue, and the message already says "skipping".
            msg = "{} not found, skipping {}.".format(sim_fn, func.__name__)
            pytest.skip(msg)
        return skip
    def ftrue(func):
        @functools.wraps(func)
        def true_wrapper(*args, **kwargs):
            # Actually invoke the wrapped test. Returning ``func``
            # uncalled would make the test pass without ever running.
            return func(*args, **kwargs)
        return true_wrapper
    if not can_run_sim(sim_fn, sim_type, file_check):
        return ffalse
    else:
        return ftrue
def create_obj(ds, obj_type):
    """
    Instantiate a data object on the dataset.

    Parameters
    ----------
    ds : Dataset
        Dataset on which to create the object.
    obj_type : tuple or None
        Either ``None`` (return ``ds.all_data()``) or a tuple of
        ``(obj_name, (args))`` naming the object constructor on the
        dataset and its positional arguments.
    """
    if obj_type is None:
        return ds.all_data()
    obj_name, ctor_args = obj_type[0], obj_type[1]
    return getattr(ds, obj_name)(*ctor_args)
def compare_unit_attributes(ds1, ds2):
    r"""
    Checks to make sure that the length, mass, time, velocity, and
    magnetic units are the same for two different dataset objects.
    """
    for attr in ('length_unit', 'mass_unit', 'time_unit',
                 'velocity_unit', 'magnetic_unit'):
        # A unit missing on both datasets compares as None == None
        assert getattr(ds1, attr, None) == getattr(ds2, attr, None)
def fake_halo_catalog(data):
    """
    Write *data* out as a minimal halo-catalog dataset file and return
    the file name.
    """
    filename = "catalog.0.h5"
    # Every field lives at the top level ('.') of the output file
    ftypes = {field: '.' for field in data}
    extra_attrs = {"data_type": "halo_catalog",
                   "num_halos": data['particle_mass'].size}
    # Minimal cosmology/domain metadata required for a valid dataset
    ds = {'cosmological_simulation': 1,
          'omega_lambda': 0.7,
          'omega_matter': 0.3,
          'hubble_constant': 0.7,
          'current_redshift': 0,
          'current_time': YTQuantity(1, 'yr'),
          'domain_left_edge': YTArray(np.zeros(3), 'cm'),
          'domain_right_edge': YTArray(np.ones(3), 'cm')}
    save_as_dataset(ds, filename, data, field_types=ftypes,
                    extra_attrs=extra_attrs)
    return filename
def _create_plot_window_attribute_plot(ds, ptype, field, axis, pkwargs = None):
    r"""
    Convenience function used in plot_window_attribute_test.

    Parameters
    ----------
    ds : Dataset
        The Dataset object from which the plotting data is taken.
    ptype : string
        Type of plot to make (e.g., SlicePlot).
    field : yt field
        The field (e.g, density) to plot.
    axis : int
        The plot axis to plot or project along.
    pkwargs : dict, optional
        Any keywords to be passed when creating the plot.
    """
    if ptype is None:
        raise RuntimeError('Must explicitly request a plot type')
    if pkwargs is None:
        # ``**None`` would raise TypeError; default to no extra kwargs
        pkwargs = {}
    # Look the plot class up in plot_window first, then particle_plots
    cls = getattr(pw, ptype, None)
    if cls is None:
        cls = getattr(particle_plots, ptype)
    plot = cls(ds, axis, field, **pkwargs)
    return plot
def _create_phase_plot_attribute_plot(data_source, x_field, y_field, z_field,
                                      plot_type, plot_kwargs=None):
    r"""
    Convenience function used in phase_plot_attribute_test.

    Parameters
    ----------
    data_source : Dataset object
        The Dataset object from which the plotting data is taken.
    x_field : yt field
        Field to plot on x-axis.
    y_field : yt field
        Field to plot on y-axis.
    z_field : yt field
        Field to plot on z-axis.
    plot_type : string
        Type of plot to make (e.g., PhasePlot).
    plot_kwargs : dict, optional
        Any keywords to be passed when creating the plot.
    """
    if plot_type is None:
        raise RuntimeError('Must explicitly request a plot type')
    if plot_kwargs is None:
        # ``**None`` would raise TypeError; default to no extra kwargs
        plot_kwargs = {}
    # Look the plot class up in profile_plotter first, then particle_plots
    cls = getattr(profile_plotter, plot_type, None)
    if cls is None:
        cls = getattr(particle_plots, plot_type)
    plot = cls(data_source, x_field, y_field, z_field, **plot_kwargs)
    return plot
|
# URL of the SSO CAS server
CAS_SERVER = "https://websso.example.com"
# Path on the CAS server used to log the user out
CAS_LOGOUT_URL = "/cas/logout"
# The name of your cookie
CAS_COOKIE = "CAS"
# The top-level path under which the cookie is stored
COOKIE_PATH = '/'
# Duration (seconds) a cookie is valid before revalidating against the CAS server
TIMEOUT = 600
# Your cookie encryption key
# Change and uncomment this to remove startup errors
# SECRET = "SUPER_SECRET_PASSPHRASE"
# Allow storing sessions over non-HTTPS connections
# WARNING: this makes session hijacking trivially easy. Set this to False for production use.
ALLOW_HTTP = True
# Turn on debug messages in logfiles
DEBUG = 1
# Configure Beaker session type
# http://beaker.readthedocs.io/en/latest/configuration.html
# 'file' is the default so multiple beaker instances can share cached data
BEAKER_TYPE = 'file'
# Specifies Beaker data location
BEAKER_DATA_DIR = '/tmp/beaker/data'
# Specifies Beaker lock location
BEAKER_LOCK_DIR = '/tmp/beaker/lock'
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from ..kernels import KERNELS
# Sorted for a stable, deterministic parametrization order across runs
kernel_types = sorted(KERNELS.keys())
@pytest.mark.parametrize('kernel', kernel_types)
@pytest.mark.parametrize('sigma', [2, 3])
@pytest.mark.parametrize('n', [1000, 2000])
@pytest.mark.parametrize('m', [50, 100])
def test_kernel_fft(kernel, sigma, n, m):
    """Check that phi_hat matches the normalized FFT of phi on a grid."""
    kern = KERNELS[kernel]
    # Uniform grid over [-1/2, 1/2) and its centered frequency counterpart
    x = np.linspace(-0.5, 0.5, n, endpoint=False)
    k = -(n // 2) + np.arange(n)
    f = kern.phi(x, n, m, sigma)
    f_hat = kern.phi_hat(k, n, m, sigma)
    # Direct FFT of the sampled kernel, re-centered and normalized by n
    f_fft = np.fft.fftshift(np.fft.fft(np.fft.fftshift(f))) * (1. / n)
    assert_allclose(f_hat, f_fft, atol=1E-12)
@pytest.mark.parametrize('kernel', kernel_types)
@pytest.mark.parametrize('sigma', [2, 3])
def test_kernel_m_C(kernel, sigma):
    """Check that m_from_C inverts C (up to float->int rounding)."""
    kern = KERNELS[kernel]
    m = np.arange(1, 100)
    # Round-trip through C and back; atol=1 absorbs float->int rounding
    recovered = kern.m_from_C(kern.C(m, sigma), sigma).astype(int)
    assert_allclose(m, recovered, atol=1)
|
from decimal import Decimal
from deepdiff import DeepDiff
class TestDiffMath:
    def test_math_diff(self):
        """Testing for the correct setting and usage of epsilon."""
        # Pairs that must compare equal under the given epsilon
        close_cases = [
            ("3.5", "4", 0.5),
            ("2.5", "3", 0.5),
            ("2.5", "2", 0.5),
            ("7.175", "7.174", 0.1),
            ("7.175", "7.174", 0.01),
            ("7.175", "7.174", 0.001),
        ]
        for old, new, eps in close_cases:
            diff = DeepDiff({"a": Decimal(old)}, {"a": Decimal(new)},
                            math_epsilon=eps)
            assert diff == {}
        # With an epsilon smaller than the difference, the change is reported
        expected = {
            "values_changed": {
                "root['a']": {
                    "new_value": Decimal("7.174"),
                    "old_value": Decimal("7.175"),
                }
            }
        }
        diff = DeepDiff({"a": Decimal("7.175")}, {"a": Decimal("7.174")},
                        math_epsilon=0.0001)
        assert diff == expected

    def test_math_diff_special_case(self):
        """Testing epsilon on a special Decimal case.
        Even though the Decimal looks different, math will evaluate it for us."""
        d1 = {"a": Decimal("9.709999618320632")}
        d2 = {"a": Decimal("9.710000038146973")}
        # The values differ by ~4.2e-7, so any epsilon at or above 1e-6
        # treats them as equal
        for eps in (0.001, 0.0001, 0.00001, 0.000001):
            assert DeepDiff(d1, d2, math_epsilon=eps) == {}
        # Below the actual difference the change is reported
        expected = {
            "values_changed": {
                "root['a']": {
                    "new_value": Decimal("9.710000038146973"),
                    "old_value": Decimal("9.709999618320632"),
                }
            }
        }
        assert DeepDiff(d1, d2, math_epsilon=0.0000001) == expected

    def test_math_diff_ignore_order(self):
        """math_close will not work with ignore_order=true.
        Items are hashed if order is ignored, that will not work."""
        d1 = {"a": [Decimal("9.709999618320632"), Decimal("9.709999618320632")]}
        d2 = {"a": [Decimal("9.710000038146973"), Decimal("9.709999618320632")]}
        res = DeepDiff(d1, d2, ignore_order=False, math_epsilon=0.0001)
        assert res == {}

    def test_math_diff_ignore_order_warning(self, caplog):
        """math_close will not work with ignore_order=true.
        Items are hashed if order is ignored, that will not work."""
        d1 = {"a": [Decimal("9.709999618320632"), Decimal("9.709999618320632")]}
        d2 = {"a": [Decimal("9.710000038146973"), Decimal("9.709999618320632")]}
        res = DeepDiff(d1, d2, ignore_order=True, math_epsilon=0.0001)
        # Hashing (ignore_order) bypasses math_epsilon, so the
        # difference is reported as an added item
        expected = {
            "iterable_item_added": {"root['a'][0]": Decimal("9.710000038146973")}
        }
        assert res == expected
        # assert "math_epsilon will be ignored." in caplog.text
|
from leapp.exceptions import StopActorExecutionError
from leapp.libraries.common.rpms import has_package
from leapp.libraries.stdlib import api
from leapp.models import InstalledRedHatSignedRPM
# The main SSHD configuration file
SSHD_CONFIG = '/etc/ssh/sshd_config'
# The Include directive needed, taken from the RHEL9 sshd_config
INCLUDE = 'Include /etc/ssh/sshd_config.d/*.conf'
# Full block to prepend: the directive preceded by a leapp marker comment
INCLUDE_BLOCK = ''.join(('# Added by leapp during upgrade from RHEL8 to RHEL9\n', INCLUDE, '\n'))
def prepend_string_if_not_present(f, content, check_string):
    """
    Prepend ``content`` to the file unless ``check_string`` is present.

    The open file descriptor is read and each line is checked (after
    left-stripping) for a prefix match against ``check_string``. If no
    line matches, ``content`` is prepended to the original content and
    the result is written back.

    Note that this requires a file opened for both reading and writing,
    for example with::

        with open(path, 'r+') as f:
    """
    original_lines = f.readlines()
    if any(line.lstrip().startswith(check_string) for line in original_lines):
        # The directive is already present; nothing to do
        return
    # Rewrite the whole file with the new content first. The result is
    # always longer than the original, so no truncate is needed.
    f.seek(0)
    f.write(content + ''.join(original_lines))
def process(openssh_messages):
    """
    The main logic of the actor:

    * read the configuration file message
    * skip if no action is needed
      * package not installed
      * the configuration file was not modified
    * insert the include directive if it is not present yet
    """
    config = next(openssh_messages, None)
    # There should be exactly one OpenSshConfig message
    if list(openssh_messages):
        api.current_logger().warning('Unexpectedly received more than one OpenSshConfig message.')
    if not config:
        raise StopActorExecutionError(
            'Could not check openssh configuration', details={'details': 'No OpenSshConfig facts found.'}
        )
    # If the package is not installed, there is no need to do anything
    if not has_package(InstalledRedHatSignedRPM, 'openssh-server'):
        return
    # An unmodified configuration file will be replaced wholesale by the
    # rpm update, which already carries the new directive
    if not config.modified:
        return
    # Otherwise prepend the Include directive to the main sshd_config
    api.current_logger().debug('Adding the Include directive to {}.'
                               .format(SSHD_CONFIG))
    try:
        with open(SSHD_CONFIG, 'r+') as f:
            prepend_string_if_not_present(f, INCLUDE_BLOCK, INCLUDE)
    except (OSError, IOError) as error:
        api.current_logger().error('Failed to modify the file {}: {} '.format(SSHD_CONFIG, error))
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains evaluation plan for the Im2vox model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tensorflow import app
import model_ptn
flags = tf.app.flags
slim = tf.contrib.slim

# Input data location and dataset selection
flags.DEFINE_string('inp_dir',
                    '',
                    'Directory path containing the input data (tfrecords).')
flags.DEFINE_string(
    'dataset_name', 'shapenet_chair',
    'Dataset name that is to be used for training and evaluation.')
# Network architecture dimensions (latent, feature and fc layer sizes)
flags.DEFINE_integer('z_dim', 512, '')
flags.DEFINE_integer('f_dim', 64, '')
flags.DEFINE_integer('fc_dim', 1024, '')
flags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.')
flags.DEFINE_integer('image_size', 64,
                     'Input images dimension (pixels) - width & height.')
flags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')
flags.DEFINE_integer('step_size', 24, '')
flags.DEFINE_integer('batch_size', 1, 'Batch size while training.')
# Camera model parameters
flags.DEFINE_float('focal_length', 0.866, '')
flags.DEFINE_float('focal_range', 1.732, '')
# Names of the sub-networks composing the model
flags.DEFINE_string('encoder_name', 'ptn_encoder',
                    'Name of the encoder network being used.')
flags.DEFINE_string('decoder_name', 'ptn_vox_decoder',
                    'Name of the decoder network being used.')
flags.DEFINE_string('projector_name', 'ptn_projector',
                    'Name of the projector network being used.')
# Save options
flags.DEFINE_string('checkpoint_dir', '/tmp/ptn/eval/',
                    'Directory path for saving trained models and other data.')
flags.DEFINE_string('model_name', 'ptn_proj',
                    'Name of the model used in naming the TF job. Must be different for each run.')
flags.DEFINE_string('eval_set', 'val', 'Data partition to form evaluation on.')
# Optimization (declared here so the eval graph rebuilds consistently)
flags.DEFINE_float('proj_weight', 10, 'Weighting factor for projection loss.')
flags.DEFINE_float('volume_weight', 0, 'Weighting factor for volume loss.')
flags.DEFINE_float('viewpoint_weight', 1,
                   'Weighting factor for viewpoint loss.')
flags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')
flags.DEFINE_float('weight_decay', 0.001, '')
flags.DEFINE_float('clip_gradient_norm', 0, '')
# Summary
flags.DEFINE_integer('save_summaries_secs', 15, '')
flags.DEFINE_integer('eval_interval_secs', 60 * 5, '')
# Distribution
flags.DEFINE_string('master', '', '')

FLAGS = flags.FLAGS
def main(argv=()):
    """Build the evaluation graph for the Im2vox model and run slim's
    checkpointed evaluation loop on it."""
    del argv  # Unused.
    # Checkpoints are read from the training directory; eval logs and
    # summaries go to a per-eval-set directory next to it.
    eval_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train')
    log_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name,
                           'eval_%s' % FLAGS.eval_set)
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    g = tf.Graph()
    with g.as_default():
        # NOTE(review): this aliases (does not copy) FLAGS, so the two
        # assignments below mutate the global flags object — confirm
        # this is intended.
        eval_params = FLAGS
        eval_params.batch_size = 1
        eval_params.step_size = FLAGS.num_views
        ###########
        ## model ##
        ###########
        model = model_ptn.model_PTN(eval_params)
        ##########
        ## data ##
        ##########
        eval_data = model.get_inputs(
            FLAGS.inp_dir,
            FLAGS.dataset_name,
            eval_params.eval_set,
            eval_params.batch_size,
            eval_params.image_size,
            eval_params.vox_size,
            is_training=False)
        inputs = model.preprocess_with_all_views(eval_data)
        ##############
        ## model_fn ##
        ##############
        model_fn = model.get_model_fn(is_training=False, run_projection=False)
        outputs = model_fn(inputs)
        #############
        ## metrics ##
        #############
        names_to_values, names_to_updates = model.get_metrics(inputs, outputs)
        # Only the update ops are needed by the evaluation loop
        del names_to_values
        ################
        ## evaluation ##
        ################
        num_batches = eval_data['num_samples']
        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=eval_dir,
            logdir=log_dir,
            num_evals=num_batches,
            eval_op=names_to_updates.values(),
            eval_interval_secs=FLAGS.eval_interval_secs)


if __name__ == '__main__':
    app.run()
|
import numpy as np
import pandas as pd
import time
import networkx as nx
# NOTE(review): these module-level timers only measure how long each
# function *definition* takes to execute at import time, not a call.
start = time.time()
def average_cost(data_price, data_dist):
    """
    Compute the matrix of average prices per kilometre.

    .. warning:: The distance and price data must be mutually compatible
       for the results to be coherent. See the documentation for
       :ref:`Compatibilité`.

    :param DataFrame data_price: Table of prices.
    :param DataFrame data_dist: Table of distances.
    :return: * **M** (*array*) - The matrix of average prices.
    """
    prices = np.array(data_price)
    # Add ones on the diagonal of the distance matrix to avoid dividing
    # by zero (a station is at distance zero from itself).
    distances = np.array(data_dist) + np.diag(np.ones(len(data_price)))
    return prices / distances
end = time.time()
# NOTE(review): this reports the time to *define* average_cost, not to run it.
print("Temps passé pour exécuter average_cost: {0:.5f} s.".format(end - start))
start = time.time()
def average_cost_list(data_price, data_dist):
    """
    Compute the list of average prices per kilometre.

    .. warning:: The distance and price data must be mutually compatible
       for the results to be coherent. See the documentation for
       :ref:`Compatibilité`.

    :param DataFrame data_price: Table of prices.
    :param DataFrame data_dist: Table of distances.
    :return: * **L** (*list*) - The strictly upper-triangular part of the
       average-price matrix (equal to the lower part by symmetry), as a
       flat array.
    """
    n = len(data_price)
    # triu_indices with k=1 selects entries strictly above the diagonal,
    # so no division by zero can occur.
    upper = np.triu_indices(n, k=1)
    return np.array(data_price)[upper] / np.array(data_dist)[upper]
end = time.time()
# NOTE(review): this reports the time to *define* average_cost_list, not to run it.
print("Temps passé pour exécuter average_cost_list: {0:.5f} s.".format(end - start))
start = time.time()
def get_index(data, name):
    """
    Return the positional index of *name* within the index of *data*.

    :param DataFrame data: A data table.
    :param str or list name: Index name(s) of the data table.
    :return: * **I** (*int or list*) - The position(s) of *name* in the
       index of *data*. A missing name yields ``len(data)`` (one past
       the last valid position), matching the original linear scan.
    :raises TypeError: If *name* is neither a ``str`` nor a ``list``
       (previously the function silently returned ``None``).
    """
    def _scan(label):
        # Linear scan preserving the original semantics: position of the
        # first match, or len(data) when the label is absent.
        pos = 0
        while pos < len(data) and label != data.index[pos]:
            pos += 1
        return pos

    # isinstance instead of ``type(x) == str`` handles subclasses too
    if isinstance(name, str):
        return _scan(name)
    if isinstance(name, list):
        return [_scan(label) for label in name]
    raise TypeError("name must be a str or a list, got %s" % type(name).__name__)
end = time.time()
# NOTE(review): this reports the time to *define* get_index, not to run it.
print("Temps passé pour exécuter get_index: {0:.5f} s.".format(end - start))
start = time.time()
def get_way(data_dist, start, target):
    """
    Return the list of toll stations between start and target.

    The method relies on Kruskal's algorithm providing a spanning tree
    of the road network with edges only between successive stations;
    the station list is then obtained trivially with a shortest-path
    algorithm (Dijkstra here).

    :param DataFrame data_dist: Distance matrix between all the stations.
    :param str start: Departure station (must be an element of data_dist.columns).
    :param str target: Arrival station (must be an element of data_dist.columns).
    :return: * **L** (*list*) - Toll stations on the shortest motorway route from start to target.
    """
    graph = nx.Graph(incoming_graph_data=data_dist)
    spanning_tree = nx.minimum_spanning_tree(graph, weight='weight')
    return nx.shortest_path(spanning_tree, start, target, weight='weight')
end = time.time()
print("Temps passé pour exécuter get_way : {0:.5f} s.".format(end - start))
|
from os import environ as env
import requests, json, datetime
from telegram import ReplyKeyboardRemove, Update
from telegram.ext import ConversationHandler, CallbackContext
from telegram import KeyboardButton, ReplyKeyboardMarkup
import pymongo
from bot import reply_markups
from libs import utils
from bot.globals import *
# Flow for expense input begins here
def newExpense(update: Update, context: CallbackContext):
    """
    Initiate the expenses input flow
    """
    # Show the field-selection keyboard for a new expense entry
    context.bot.send_message(
        chat_id=update.message.chat_id,
        text="Select a field to fill in from below, once ready, tap Submit.",
        reply_markup=reply_markups.newExpenseMarkup)
    return CHOOSING
# Create new expense
# timestamp column: YYYY-MM-DD HH:MM:SS
# TODO: Use /done to navigate back to new()
# FIXME: deal with incorrect input
def timestamp(update: Update, context: CallbackContext):
    """Default the Timestamp field to the current local time and ask the
    user to confirm it or supply a relative time instead."""
    context.user_data['currentExpCat'] = "Timestamp"
    # Convert UTC "now" to local time via the configured offset
    utc_now = datetime.datetime.utcnow()
    local_datetime = (utc_now + datetime.timedelta(hours=UTC_OFFSET)).strftime("%Y-%m-%d %H:%M:%S")
    context.user_data['input'][context.user_data['currentExpCat']] = local_datetime
    prompt = ("Using '"+local_datetime+"' as your "+context.user_data['currentExpCat']+" value."
              +"\n"
              +"\nType /done to proceed "
              +"\nor type in how long ago the expense occured in the format"
              +"\n'x duration' for example, 1 hour, 6 days, 10 weeks."
              +"\nOr /cancel to choose other options "
              +"\nOr /home to return to Main Menu")
    context.bot.send_message(chat_id=update.message.chat_id,
                             text=prompt,
                             reply_markup=ReplyKeyboardRemove())
    return TYPING_REPLY
# Create new expense
# description column
# TODO: Add feature: set how many months back to look
# TODO: Add bot message just before the query to state how far back we are looking
# TODO: Add a check to see if there is sufficient data depending on number of descr to query
# TODO: For each of the top ten descriptions, attach the most common amount
def description(update: Update, context: CallbackContext):
    """Ask the user for the Description field, offering the top
    descriptions from the last 3 months (fetched from the backend) as
    keyboard shortcuts."""
    chat_ID = update.message.chat_id
    # get the local time
    dt0 = datetime.datetime.utcnow() + datetime.timedelta(hours=UTC_OFFSET)
    # get the date from 3 months back from now
    for _ in range(3):
        dt0 = utils.subtract_one_month(dt0)
    date = dt0.strftime("%Y-%m-%d %H:%M:%S")[:10]  # only the date part, not the time
    # date = '2019-02-01'
    utils.logger.debug("START DATE: "+date)
    top_descr = []
    # send a get request to obtain the top ten results as json
    try:
        context.bot.sendChatAction(chat_id=chat_ID, action='Typing')
        r = requests.get(env.get("URL_SORTDESC"),
                         params={'chat_id': chat_ID, 'date': date})
        response = r.json()
        if response['Success'] is not True:  # backend reported an error
            text = ("Failed!"
                    +"\nComment: " +response['Comment']
                    +"\nError: "+response['Error']+".")
            # Fix: reply_markup was previously unbound on this path,
            # raising NameError at send_message below.
            reply_markup = ReplyKeyboardRemove()
        else:  # no errors
            # build one keyboard button per top description
            top_descr = [[KeyboardButton(descr['Description'])] for descr in response['Data']]
            reply_markup = ReplyKeyboardMarkup(top_descr, resize_keyboard=True)
            text = ("Select a description from below or type in the description. Or /cancel to return to choose other options."
                    +"\nOr /home to return to Main Menu")
    except Exception as e:
        # Network/server failure: fall back to free-text input
        text = ("Something went wrong."
                +"\n"
                +"\nNo connection to the db server."
                +"\n"
                +"Type in the description. Or /cancel to return to choose other options."
                +"\nOr /home to return to Main Menu")
        utils.logger.error("failed to select description with error: "+repr(e))
        reply_markup = ReplyKeyboardRemove()
    context.user_data['currentExpCat'] = "Description"
    context.bot.send_message(chat_id=chat_ID,
                             text=text,
                             reply_markup=reply_markup)
    return TYPING_REPLY
# Create new expense
# category column
# TODO: Consider using userdata saved categories
def category(update: Update, context: CallbackContext):
    """Ask the user for the Category field, offering previously used
    categories (fetched from the backend) as keyboard shortcuts."""
    categories = []
    chat_ID = update.message.chat_id
    # get the categories
    try:
        context.bot.sendChatAction(chat_id=chat_ID, action='Typing')
        r = requests.get(env.get("URL_CATEGORIES"),
                         params={'chat_id': chat_ID})
        response = r.json()
        if response['Success'] is not True:  # backend reported an error
            text = ("Failed!"
                    +"\nComment: " +response['Comment']
                    +"\nError: "+response['Error']+".")
            # Fix: reply_markup was previously unbound on this path,
            # raising NameError at send_message below.
            reply_markup = ReplyKeyboardRemove()
        else:  # no errors
            # build one keyboard button per known category
            categories = [[KeyboardButton(category['Category'])] for category in response['Data']]
            reply_markup = ReplyKeyboardMarkup(categories, resize_keyboard=True)
            text = ("Select a category from below or type in the category. Or /cancel to return to choose other options."
                    +"\nOr /home to return to Main Menu")
    except Exception as e:
        # Network/server failure: fall back to free-text input
        text = ("Something went wrong."
                +"\n"
                +"\nNo connection to the db server."
                +"\n"
                +"Type in the category. Or /cancel to choose other options."
                +"\nOr /home to return to Main Menu")
        utils.logger.error("failed to select category with error: "+repr(e))
        reply_markup = ReplyKeyboardRemove()
    context.user_data['currentExpCat'] = "Category"  # track most recently selected field
    context.bot.send_message(chat_id=chat_ID,
                             text=text,
                             reply_markup=reply_markup)
    return TYPING_REPLY
# Create new expense
# proof column
# TODO: Accept image. Call API to upload in multiform
def proof(update: Update, context: CallbackContext):
    """Prompt the user to type in the Proof field."""
    context.user_data['currentExpCat'] = "Proof"
    prompt = ("Type in the proof. Or /cancel to choose other options."
              +"\nOr /home to return to Main Menu")
    context.bot.send_message(chat_id=update.message.chat_id,
                             text=prompt,
                             reply_markup=ReplyKeyboardRemove())
    return TYPING_REPLY
# Create new expense
# Amount column
# TODO: Add keys of most common amounts
def amount(update: Update, context: CallbackContext):
    """Prompt the user to type in the Amount field."""
    context.user_data['currentExpCat'] = "Amount"
    prompt = ("Type in the amount. Or /cancel to return to choose other options."
              +"\nOr /home to return to Main Menu")
    context.bot.send_message(chat_id=update.message.chat_id,
                             text=prompt,
                             reply_markup=ReplyKeyboardRemove())
    return TYPING_REPLY
# Create new expense
# confirmation of entered value
def verifyValue(update: Update, context: CallbackContext):
    """
    Verify various inputs to proceed
    """
    data = update.message.text  # grab the reply text
    # If the timestamp was just set, normalize the input
    if (context.user_data['currentExpCat'] == 'Timestamp'):
        try:
            # If this parses, the input is already an absolute
            # timestamp; otherwise fall through to relative parsing.
            datetime.datetime.strptime(data, "%Y-%m-%d %H:%M:%S")
        except ValueError:  # a relative "x duration" was given
            s = update.message.text.split()  # split on space
            X = int(s[0])
            s = s[1].lower()  # the keyword, lowercased for standardization
            # Current local time. Fix: use UTC_OFFSET for consistency
            # with timestamp() instead of a hard-coded +3 hours.
            dt0 = (datetime.datetime.utcnow() + datetime.timedelta(hours=UTC_OFFSET))
            # Go back in time; a plural 's' is stripped from the keyword.
            # NOTE(review): an unrecognized keyword leaves dt1 unbound and
            # raises UnboundLocalError below (see FIXME on bad input).
            if (s.replace('s', '') == 'hour'):
                dt1 = dt0 - datetime.timedelta(seconds=X*3600)
            if (s.replace('s', '') == 'day'):
                dt1 = dt0 - datetime.timedelta(days=X)
            if (s.replace('s', '') == 'week'):
                dt1 = dt0 - datetime.timedelta(days=X*7)
            data = dt1.strftime("%Y-%m-%d %H:%M:%S")  # get string format
    # Store into the most recently selected field
    context.user_data['input'][context.user_data['currentExpCat']] = data
    text = ("Received '"+data+"' as your "+context.user_data['currentExpCat']+" value."
            +"\n"
            +"\nType /done to proceed or type in a different value to change the "
            +context.user_data['currentExpCat']+" value."
            +"\nOr /cancel to choose other options ")
    markup = ReplyKeyboardRemove()
    # If amount was just entered, provide a summary of all values instead
    if (context.user_data['currentExpCat'] == 'Amount'):
        text = ("Received '"+data+"' as your "+context.user_data['currentExpCat']+" value."
                +"\nCurrent entries: "
                +"\n{}".format(utils.convertJson(context.user_data['input']))
                +"\n"
                +"\nType /submit to post or type in a different value to change the "
                +context.user_data['currentExpCat']+" value."
                +"\nOr /cancel to Choose other entries to change")
    context.bot.send_message(chat_id=update.message.chat_id,
                             text=text,
                             reply_markup=markup)
    return TYPING_REPLY
# Create new expense
# display and allow other field selection
def nextExpenseField(update: Update, context: CallbackContext):
    """
    Show the keyboard for picking the next input category to populate.
    """
    current_category = context.user_data['currentExpCat']
    # pick the reply markup associated with the category just handled
    keyboard = context.user_data['markups'][current_category]
    prompt = ("Great! Choose next option to populate."
              +"\nOr if done, tap Submit to post.")
    context.bot.send_message(chat_id=update.message.chat_id,
                             text=prompt,
                             reply_markup=keyboard)
    return CHOOSING
# Create new expense
# post values to provided endpoint to update the db
# TODO: On successful submit, for the relevant budget limit, display value and
# if no threshold set, ask if user wants to set the limits
def postExpense(update: Update, context: CallbackContext):
    """
    POST the collected expense fields to the backend API.

    Amount, Timestamp and Category must be non-empty. On success the input
    fields are cleared and the new expense id is reported; request or
    connection failures are reported to the user instead of propagating.
    Returns the CHOOSING conversation state.
    """
    chat_ID = update.message.chat_id
    # Check for empty fields. Timestamp, Amount, Category has to be filled always
    required_inputs = ['Amount', 'Timestamp', 'Category']
    if all([context.user_data['input'][key] for key in required_inputs]):
        # Initiate the POST. If successfull, you will get a primary key value
        # and a Success bool as True
        try:
            # BUGFIX: the Telegram Bot API only accepts lowercase chat
            # actions; 'Typing' (capitalized) is rejected by the server,
            # which made every submit fall into the except branch.
            context.bot.sendChatAction(chat_id=chat_ID, action='typing')
            r = requests.post(env.get("URL_POST_EXPENSE"),
                              json={"timestamp": context.user_data['input']['Timestamp'],
                                    "description": context.user_data['input']['Description'],
                                    "proof": context.user_data['input']['Proof'],
                                    "amount": context.user_data['input']['Amount'],
                                    "category": context.user_data['input']['Category']
                                    },
                              params={'chat_id': chat_ID})
            utils.logger.debug('request: %s', r.url)
            response = r.json()
            utils.logger.debug("POST response: "+repr(response))
            if response['Success'] is not True:  # some error
                text = ("Failed!"
                        +"\nComment: " +response['Comment']
                        +"\nError: "+response['Error']['Message']+".")
            else:  # no errors
                # empty the fields so the next expense starts clean
                context.user_data['input']['Timestamp'] = []
                context.user_data['input']['Description'] = []
                context.user_data['input']['Proof'] = []
                context.user_data['input']['Category'] = []
                context.user_data['input']['Amount'] = []
                text = ("Expense recorded! Expense id is: "+str(response['Data']['id'])
                        +"\nPlease select an option from below.")
        except Exception as e:
            # network failure, non-JSON response, or malformed payload
            text = ("Something went wrong."
                    +"\n"
                    +"\nNo connection to the server.")
            utils.logger.error("Post failed with error: "+repr(e))
    else:  # fields empty or amount empty
        text = ("Please complete filling in the fields.")
    context.bot.send_message(chat_id=chat_ID,
                             text=text,
                             reply_markup=reply_markups.newExpenseMarkup)
    return CHOOSING
# TODO: Add pipeline to accept proof as images and post to API |
from . import dcgan
from . import dqn
from . import mobilenetv2
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
class GlobalParams:
    # Global tunables shared across the tooling.
    PARALLELS = 5  # number of parallel workers; NOTE(review): confirm usage sites
    LOG_LEVEL = logging.INFO  # default logging verbosity
class GlobalPaths:
    # Well-known locations under the user's home directory ('~' is left
    # unexpanded here; presumably expanded by callers — confirm).
    MARO_GRASS_LIB = '~/.maro/lib/grass'
    MARO_K8S_LIB = '~/.maro/lib/k8s'
    MARO_CLUSTERS = '~/.maro/clusters'
    MARO_DATA = '~/.maro/data'
|
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, QueryDict
from django.views.decorators.http import require_http_methods
@login_required
@require_http_methods(['PUT'])
def set(request, target):
    """Sets user's goal.

    Arguments:
        target: target in ['week', 'month', 'year']

    Returns a JsonResponse with {'success': bool}.
    """
    # BUGFIX: Django does not populate a ``request.PUT`` attribute (only
    # GET and POST), so ``request.PUT.get`` raised AttributeError on every
    # call. The urlencoded PUT payload must be parsed from the raw body.
    payload = QueryDict(request.body)
    objective = payload.get('objective')
    value = payload.get('value')
    # Reject unknown targets explicitly instead of creating a bogus
    # attribute name via setattr below.
    if target not in ['week', 'month', 'year']:
        return JsonResponse({'success': False})
    if objective not in ['distance', 'time']:
        return JsonResponse({'success': False})
    try:
        goals = request.user.goals
        # goals model exposes fields named like 'weekly_distance'
        setattr(goals, '{}ly_{}'.format(target, objective), int(value))
        goals.save()
    except Exception:
        # int() failures and model errors are reported uniformly
        return JsonResponse({'success': False})
    return JsonResponse({'success': True})
|
import asyncio
import datetime
import logging
import subprocess
import json
from copy import deepcopy
from typing import Optional
from hashlib import sha1
from dateutil.tz import tzutc
from botocore import UNSIGNED
from botocore.config import Config
import botocore.compat
from botocore.credentials import EnvProvider, Credentials, RefreshableCredentials, \
ReadOnlyCredentials, ContainerProvider, ContainerMetadataFetcher, \
_parse_if_needed, InstanceMetadataProvider, _get_client_creator, \
ProfileProviderBuilder, ConfigProvider, SharedCredentialProvider, \
ProcessProvider, AssumeRoleWithWebIdentityProvider, _local_now, \
CachedCredentialFetcher, _serialize_if_needed, BaseAssumeRoleCredentialFetcher, \
AssumeRoleProvider, AssumeRoleCredentialFetcher, CredentialResolver, \
CanonicalNameCredentialSourcer, BotoProvider, OriginalEC2Provider, \
SSOProvider
from botocore.exceptions import UnauthorizedSSOTokenError
from botocore.exceptions import MetadataRetrievalError, CredentialRetrievalError, \
InvalidConfigError, PartialCredentialsError, RefreshWithMFAUnsupportedError, \
UnknownCredentialError
from botocore.compat import compat_shell_split
from botocore.utils import SSOTokenLoader
from aiobotocore.utils import AioContainerMetadataFetcher, AioInstanceMetadataFetcher
from aiobotocore.config import AioConfig
# Module-level logger; handlers use lazy %-style arguments throughout.
logger = logging.getLogger(__name__)
def create_credential_resolver(session, cache=None, region_name=None):
    """Create a default credential resolver.
    This creates a pre-configured credential resolver
    that includes the default lookup chain for
    credentials.

    :param session: the session to read configuration variables from
    :param cache: optional shared credential cache (dict-like); a fresh
        dict is used when omitted
    :param region_name: optional region for clients created during
        assume-role / web-identity resolution
    :return: an AioCredentialResolver over the default provider chain
    """
    profile_name = session.get_config_variable('profile') or 'default'
    metadata_timeout = session.get_config_variable('metadata_service_timeout')
    num_attempts = session.get_config_variable('metadata_service_num_attempts')
    # A profile set explicitly on the session (not via env var) disables
    # the env-var provider later on (see the block at the bottom).
    disable_env_vars = session.instance_variables().get('profile') is not None
    imds_config = {
        'ec2_metadata_service_endpoint': session.get_config_variable(
            'ec2_metadata_service_endpoint'),
        'imds_use_ipv6': session.get_config_variable('imds_use_ipv6')
    }
    if cache is None:
        cache = {}
    env_provider = AioEnvProvider()
    container_provider = AioContainerProvider()
    instance_metadata_provider = AioInstanceMetadataProvider(
        iam_role_fetcher=AioInstanceMetadataFetcher(
            timeout=metadata_timeout,
            num_attempts=num_attempts,
            user_agent=session.user_agent(),
            config=imds_config)
    )
    profile_provider_builder = AioProfileProviderBuilder(
        session, cache=cache, region_name=region_name)
    assume_role_provider = AioAssumeRoleProvider(
        load_config=lambda: session.full_config,
        client_creator=_get_client_creator(session, region_name),
        cache=cache,
        profile_name=profile_name,
        credential_sourcer=AioCanonicalNameCredentialSourcer([
            env_provider, container_provider, instance_metadata_provider
        ]),
        profile_provider_builder=profile_provider_builder,
    )
    # List order below defines credential precedence: env vars first,
    # then assume-role, then profile-based providers, then the fallbacks.
    pre_profile = [
        env_provider,
        assume_role_provider,
    ]
    profile_providers = profile_provider_builder.providers(
        profile_name=profile_name,
        disable_env_vars=disable_env_vars,
    )
    post_profile = [
        AioOriginalEC2Provider(),
        AioBotoProvider(),
        container_provider,
        instance_metadata_provider,
    ]
    providers = pre_profile + profile_providers + post_profile
    if disable_env_vars:
        # An explicitly provided profile will negate an EnvProvider.
        # We will defer to providers that understand the "profile"
        # concept to retrieve credentials.
        # The one edge case if is all three values are provided via
        # env vars:
        # export AWS_ACCESS_KEY_ID=foo
        # export AWS_SECRET_ACCESS_KEY=bar
        # export AWS_PROFILE=baz
        # Then, just like our client() calls, the explicit credentials
        # will take precedence.
        #
        # This precedence is enforced by leaving the EnvProvider in the chain.
        # This means that the only way a "profile" would win is if the
        # EnvProvider does not return credentials, which is what we want
        # in this scenario.
        providers.remove(env_provider)
        logger.debug('Skipping environment variable credential check'
                     ' because profile name was explicitly set.')
    resolver = AioCredentialResolver(providers=providers)
    return resolver
class AioProfileProviderBuilder(ProfileProviderBuilder):
    """ProfileProviderBuilder that emits the Aio* provider variants.

    Each factory mirrors the sync builder but substitutes the async
    implementation of the corresponding provider.
    """
    def _create_process_provider(self, profile_name):
        # credential_process providers run an external command.
        return AioProcessProvider(
            profile_name=profile_name,
            load_config=lambda: self._session.full_config,
        )
    def _create_shared_credential_provider(self, profile_name):
        # Reads ~/.aws/credentials (or the configured credentials file).
        credential_file = self._session.get_config_variable('credentials_file')
        return AioSharedCredentialProvider(
            profile_name=profile_name,
            creds_filename=credential_file,
        )
    def _create_config_provider(self, profile_name):
        # Reads ~/.aws/config (or the configured config file).
        config_file = self._session.get_config_variable('config_file')
        return AioConfigProvider(
            profile_name=profile_name,
            config_filename=config_file,
        )
    def _create_web_identity_provider(self, profile_name, disable_env_vars):
        return AioAssumeRoleWithWebIdentityProvider(
            load_config=lambda: self._session.full_config,
            client_creator=_get_client_creator(
                self._session, self._region_name),
            cache=self._cache,
            profile_name=profile_name,
            disable_env_vars=disable_env_vars,
        )
    def _create_sso_provider(self, profile_name):
        return AioSSOProvider(
            load_config=lambda: self._session.full_config,
            client_creator=self._session.create_client,
            profile_name=profile_name,
            cache=self._cache,
            token_cache=self._sso_token_cache,
        )
async def get_credentials(session):
    """Resolve credentials for *session* using the default provider chain."""
    return await create_credential_resolver(session).load_credentials()
def create_assume_role_refresher(client, params):
    """Build an async refresher that calls AssumeRole on *client*.

    The returned coroutine function yields a mapping in the shape
    expected by the refreshable-credentials machinery.
    """
    async def refresh():
        async with client as sts:
            result = await sts.assume_role(**params)
        fresh = result['Credentials']
        # Normalize AssumeRole response keys to the names expected
        # by the refresh machinery.
        return {
            'access_key': fresh['AccessKeyId'],
            'secret_key': fresh['SecretAccessKey'],
            'token': fresh['SessionToken'],
            'expiry_time': _serialize_if_needed(fresh['Expiration']),
        }
    return refresh
def create_aio_mfa_serial_refresher(actual_refresh):
    """Wrap *actual_refresh* so it may be invoked at most once.

    MFA-based temporary credentials cannot be refreshed without
    re-prompting the user, so any call after the first raises
    RefreshWithMFAUnsupportedError instead of silently retrying.
    """
    state = {'called': False}

    async def _refresh_once():
        if state['called']:
            # We can explore an option in the future to support
            # reprompting for MFA, but for now we just error out
            # when the temp creds expire.
            raise RefreshWithMFAUnsupportedError()
        state['called'] = True
        return await actual_refresh()

    return _refresh_once
class AioCredentials(Credentials):
    """Static credentials with an async get_frozen_credentials()."""
    async def get_frozen_credentials(self):
        # Static credentials never refresh; just snapshot current values.
        return ReadOnlyCredentials(self.access_key,
                                   self.secret_key,
                                   self.token)
    @classmethod
    def from_credentials(cls, obj: Optional[Credentials]):
        # Convert a sync Credentials object (or None) to the Aio variant.
        if obj is None:
            return None
        return cls(
            obj.access_key, obj.secret_key,
            obj.token, obj.method)
class AioRefreshableCredentials(RefreshableCredentials):
    """Refreshable credentials guarded by an asyncio lock.

    Unlike the sync base class, the key/token properties raise instead of
    transparently refreshing; callers must use get_frozen_credentials().
    """
    def __init__(self, *args, **kwargs):
        super(AioRefreshableCredentials, self).__init__(*args, **kwargs)
        # Serializes concurrent refresh attempts across coroutines.
        self._refresh_lock = asyncio.Lock()
    @classmethod
    def from_refreshable_credentials(cls, obj: Optional[RefreshableCredentials]):
        # Convert a sync RefreshableCredentials object (or None).
        if obj is None:
            return None
        return cls(  # Using internal values here to skip property calling .refresh()
            obj._access_key, obj._secret_key,
            obj._token, obj._expiry_time,
            obj._refresh_using, obj.method,
            obj._time_fetcher
        )
    # Redeclaring the properties so it doesn't call refresh
    # Have to redeclare setter as we're overriding the getter
    @property
    def access_key(self):
        # TODO: this needs to be resolved
        raise NotImplementedError("missing call to self._refresh. "
                                  "Use get_frozen_credentials instead")
        return self._access_key
    @access_key.setter
    def access_key(self, value):
        self._access_key = value
    @property
    def secret_key(self):
        # TODO: this needs to be resolved
        raise NotImplementedError("missing call to self._refresh. "
                                  "Use get_frozen_credentials instead")
        return self._secret_key
    @secret_key.setter
    def secret_key(self, value):
        self._secret_key = value
    @property
    def token(self):
        # TODO: this needs to be resolved
        raise NotImplementedError("missing call to self._refresh. "
                                  "Use get_frozen_credentials instead")
        return self._token
    @token.setter
    def token(self, value):
        self._token = value
    async def _refresh(self):
        # Fast path: nothing to do if we're not inside the advisory window.
        if not self.refresh_needed(self._advisory_refresh_timeout):
            return
        # By this point we need a refresh but its not critical
        if not self._refresh_lock.locked():
            async with self._refresh_lock:
                # Re-check under the lock: another task may have refreshed.
                if not self.refresh_needed(self._advisory_refresh_timeout):
                    return
                is_mandatory_refresh = self.refresh_needed(
                    self._mandatory_refresh_timeout)
                await self._protected_refresh(is_mandatory=is_mandatory_refresh)
                return
        elif self.refresh_needed(self._mandatory_refresh_timeout):
            # If we're here, we absolutely need a refresh and the
            # lock is held so wait for it
            async with self._refresh_lock:
                # Might have refreshed by now
                if not self.refresh_needed(self._mandatory_refresh_timeout):
                    return
                await self._protected_refresh(is_mandatory=True)
    async def _protected_refresh(self, is_mandatory):
        # Caller must hold self._refresh_lock.
        try:
            metadata = await self._refresh_using()
        except Exception:
            period_name = 'mandatory' if is_mandatory else 'advisory'
            logger.warning("Refreshing temporary credentials failed "
                           "during %s refresh period.",
                           period_name, exc_info=True)
            if is_mandatory:
                # If this is a mandatory refresh, then
                # all errors that occur when we attempt to refresh
                # credentials are propagated back to the user.
                raise
            # Otherwise we'll just return.
            # The end result will be that we'll use the current
            # set of temporary credentials we have.
            return
        self._set_from_data(metadata)
        self._frozen_credentials = ReadOnlyCredentials(
            self._access_key, self._secret_key, self._token)
        if self._is_expired():
            msg = ("Credentials were refreshed, but the "
                   "refreshed credentials are still expired.")
            logger.warning(msg)
            raise RuntimeError(msg)
    async def get_frozen_credentials(self):
        # Refresh if needed, then hand out an immutable snapshot.
        await self._refresh()
        return self._frozen_credentials
class AioDeferredRefreshableCredentials(AioRefreshableCredentials):
    """Refreshable credentials that start out empty and fetch lazily.

    No initial credentials are seeded; the first access triggers
    refresh_using, which lets callers delay e.g. an AssumeRole call
    until credentials are actually needed.
    """
    def __init__(self, refresh_using, method, time_fetcher=_local_now):
        # Deliberately skip the parent __init__: there are no initial
        # credentials, everything starts out unset.
        self.method = method
        self._refresh_using = refresh_using
        self._time_fetcher = time_fetcher
        self._access_key = None
        self._secret_key = None
        self._token = None
        self._expiry_time = None
        self._frozen_credentials = None
        self._refresh_lock = asyncio.Lock()

    def refresh_needed(self, refresh_in=None):
        # Nothing fetched yet -> always refresh.
        if self._frozen_credentials is None:
            return True
        return super().refresh_needed(refresh_in)
class AioCachedCredentialFetcher(CachedCredentialFetcher):
    """Async variant of CachedCredentialFetcher.

    Subclasses implement _get_credentials() as a coroutine returning a
    raw API response with a 'Credentials' mapping.
    """
    async def _get_credentials(self):
        raise NotImplementedError('_get_credentials()')

    async def fetch_credentials(self):
        return await self._get_cached_credentials()

    async def _get_cached_credentials(self):
        """Get up-to-date credentials.

        This will check the cache for up-to-date credentials, calling assume
        role if none are available.
        """
        cached = self._load_from_cache()
        if cached is not None:
            logger.debug("Credentials for role retrieved from cache.")
        else:
            cached = await self._get_credentials()
            self._write_to_cache(cached)
        role_creds = cached['Credentials']
        # Normalize the raw API response to the refresh-metadata shape.
        return {
            'access_key': role_creds['AccessKeyId'],
            'secret_key': role_creds['SecretAccessKey'],
            'token': role_creds['SessionToken'],
            'expiry_time': _serialize_if_needed(role_creds['Expiration'],
                                                iso=True),
        }
class AioBaseAssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher,
                                         AioCachedCredentialFetcher):
    # Combines the sync assume-role kwargs/cache-key helpers with the
    # async cached-fetch machinery; adds no behavior of its own.
    pass
class AioAssumeRoleCredentialFetcher(AssumeRoleCredentialFetcher,
                                     AioBaseAssumeRoleCredentialFetcher):
    """Fetch assume-role credentials using an async STS client."""
    async def _get_credentials(self):
        """Get credentials by calling assume role."""
        assume_kwargs = self._assume_role_kwargs()
        async with await self._create_client() as sts:
            return await sts.assume_role(**assume_kwargs)

    async def _create_client(self):
        """Create an STS client using the source credentials."""
        frozen = await self._source_credentials.get_frozen_credentials()
        return self._client_creator(
            'sts',
            aws_access_key_id=frozen.access_key,
            aws_secret_access_key=frozen.secret_key,
            aws_session_token=frozen.token,
        )
class AioAssumeRoleWithWebIdentityCredentialFetcher(
    AioBaseAssumeRoleCredentialFetcher
):
    """Fetch credentials via AssumeRoleWithWebIdentity (unsigned call)."""
    def __init__(self, client_creator, web_identity_token_loader, role_arn,
                 extra_args=None, cache=None, expiry_window_seconds=None):
        # Loader is stored first; the base __init__ handles the rest.
        self._web_identity_token_loader = web_identity_token_loader
        super().__init__(
            client_creator, role_arn, extra_args=extra_args,
            cache=cache, expiry_window_seconds=expiry_window_seconds
        )

    async def _get_credentials(self):
        """Get credentials by calling assume role."""
        call_kwargs = self._assume_role_kwargs()
        # Assume role with web identity does not require credentials other than
        # the token, explicitly configure the client to not sign requests.
        unsigned = AioConfig(signature_version=UNSIGNED)
        async with self._client_creator('sts', config=unsigned) as sts:
            return await sts.assume_role_with_web_identity(**call_kwargs)

    def _assume_role_kwargs(self):
        """Get the arguments for assume role based on current configuration."""
        call_kwargs = deepcopy(self._assume_kwargs)
        call_kwargs['WebIdentityToken'] = self._web_identity_token_loader()
        return call_kwargs
class AioProcessProvider(ProcessProvider):
    """Run an external credential_process asynchronously and parse its
    JSON output into credentials."""
    def __init__(self, *args, popen=asyncio.create_subprocess_exec, **kwargs):
        # Default to the asyncio subprocess spawner instead of the sync one.
        super(AioProcessProvider, self).__init__(*args, **kwargs, popen=popen)
    async def load(self):
        # Configured external command, or None if the profile has none.
        credential_process = self._credential_process
        if credential_process is None:
            return
        creds_dict = await self._retrieve_credentials_using(credential_process)
        if creds_dict.get('expiry_time') is not None:
            # Expiring credentials: re-run the process on each refresh.
            return AioRefreshableCredentials.create_from_metadata(
                creds_dict,
                lambda: self._retrieve_credentials_using(credential_process),
                self.METHOD
            )
        return AioCredentials(
            access_key=creds_dict['access_key'],
            secret_key=creds_dict['secret_key'],
            token=creds_dict.get('token'),
            method=self.METHOD
        )
    async def _retrieve_credentials_using(self, credential_process):
        # We're not using shell=True, so we need to pass the
        # command and all arguments as a list.
        process_list = compat_shell_split(credential_process)
        p = await self._popen(*process_list,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        stdout, stderr = await p.communicate()
        if p.returncode != 0:
            # Surface the process's stderr as the retrieval error.
            raise CredentialRetrievalError(
                provider=self.METHOD, error_msg=stderr.decode('utf-8'))
        parsed = botocore.compat.json.loads(stdout.decode('utf-8'))
        version = parsed.get('Version', '<Version key not provided>')
        if version != 1:
            # Only version 1 of the credential_process protocol is supported.
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg=("Unsupported version '%s' for credential process "
                           "provider, supported versions: 1" % version))
        try:
            return {
                'access_key': parsed['AccessKeyId'],
                'secret_key': parsed['SecretAccessKey'],
                'token': parsed.get('SessionToken'),
                'expiry_time': parsed.get('Expiration'),
            }
        except KeyError as e:
            raise CredentialRetrievalError(
                provider=self.METHOD,
                error_msg="Missing required key in response: %s" % e
            )
class AioInstanceMetadataProvider(InstanceMetadataProvider):
    """Load IAM role credentials from the instance metadata service."""
    async def load(self):
        fetcher = self._role_fetcher
        role_metadata = await fetcher.retrieve_iam_role_credentials()
        if not role_metadata:
            return None
        logger.debug('Found credentials from IAM Role: %s',
                     role_metadata['role_name'])
        # Refreshes re-query the metadata service via the same fetcher.
        return AioRefreshableCredentials.create_from_metadata(
            role_metadata,
            method=self.METHOD,
            refresh_using=fetcher.retrieve_iam_role_credentials,
        )
class AioEnvProvider(EnvProvider):
    """Environment-variable credentials, wrapped in the Aio variants."""
    async def load(self):
        # Env vars need no I/O, so the sync loader does the work;
        # we only convert its result to the async classes.
        creds = super().load()
        if isinstance(creds, RefreshableCredentials):
            return AioRefreshableCredentials.from_refreshable_credentials(creds)
        if isinstance(creds, Credentials):
            return AioCredentials.from_credentials(creds)
        return None
class AioOriginalEC2Provider(OriginalEC2Provider):
    """Wrap the sync EC2-boto provider result in AioCredentials."""
    async def load(self):
        creds = super().load()
        if isinstance(creds, Credentials):
            return AioCredentials.from_credentials(creds)
        return creds
class AioSharedCredentialProvider(SharedCredentialProvider):
    """Wrap the sync shared-credentials-file result in AioCredentials."""
    async def load(self):
        creds = super().load()
        if isinstance(creds, Credentials):
            return AioCredentials.from_credentials(creds)
        return creds
class AioConfigProvider(ConfigProvider):
    """Wrap the sync config-file provider result in AioCredentials."""
    async def load(self):
        creds = super().load()
        if isinstance(creds, Credentials):
            return AioCredentials.from_credentials(creds)
        return creds
class AioBotoProvider(BotoProvider):
    """Wrap the sync boto-config provider result in AioCredentials."""
    async def load(self):
        creds = super().load()
        if isinstance(creds, Credentials):
            return AioCredentials.from_credentials(creds)
        return creds
class AioAssumeRoleProvider(AssumeRoleProvider):
    """Async assume-role provider for profile-configured role chaining."""

    async def load(self):
        """Load credentials if the active profile configures assume-role."""
        self._loaded_config = self._load_config()
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles.get(self._profile_name, {})
        if self._has_assume_role_config_vars(profile):
            return await self._load_creds_via_assume_role(self._profile_name)

    async def _load_creds_via_assume_role(self, profile_name):
        """Build deferred refreshable credentials backed by AssumeRole."""
        role_config = self._get_role_config(profile_name)
        source_credentials = await self._resolve_source_credentials(
            role_config, profile_name
        )
        # Optional AssumeRole parameters, forwarded only when configured.
        extra_args = {}
        role_session_name = role_config.get('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name
        external_id = role_config.get('external_id')
        if external_id is not None:
            extra_args['ExternalId'] = external_id
        mfa_serial = role_config.get('mfa_serial')
        if mfa_serial is not None:
            extra_args['SerialNumber'] = mfa_serial
        duration_seconds = role_config.get('duration_seconds')
        if duration_seconds is not None:
            extra_args['DurationSeconds'] = duration_seconds
        fetcher = AioAssumeRoleCredentialFetcher(
            client_creator=self._client_creator,
            source_credentials=source_credentials,
            role_arn=role_config['role_arn'],
            extra_args=extra_args,
            mfa_prompter=self._prompter,
            cache=self.cache,
        )
        refresher = fetcher.fetch_credentials
        if mfa_serial is not None:
            # MFA credentials cannot be silently refreshed; wrap the
            # refresher so a second refresh attempt raises instead.
            refresher = create_aio_mfa_serial_refresher(refresher)
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return AioDeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=refresher,
            time_fetcher=_local_now
        )

    async def _resolve_source_credentials(self, role_config, profile_name):
        """Resolve the credentials used to sign the AssumeRole call."""
        credential_source = role_config.get('credential_source')
        if credential_source is not None:
            return await self._resolve_credentials_from_source(
                credential_source, profile_name
            )
        source_profile = role_config['source_profile']
        # Track visited profiles so circular source_profile chains are
        # detected by the base class's validation.
        self._visited_profiles.append(source_profile)
        return await self._resolve_credentials_from_profile(source_profile)

    async def _resolve_credentials_from_profile(self, profile_name):
        """Resolve credentials from a named profile (static, provider
        chain, or a further chained assume-role profile)."""
        profiles = self._loaded_config.get('profiles', {})
        profile = profiles[profile_name]
        if self._has_static_credentials(profile) and \
                not self._profile_provider_builder:
            return self._resolve_static_credentials_from_profile(profile)
        elif self._has_static_credentials(profile) or \
                not self._has_assume_role_config_vars(profile):
            profile_providers = self._profile_provider_builder.providers(
                profile_name=profile_name,
                disable_env_vars=True,
            )
            profile_chain = AioCredentialResolver(profile_providers)
            credentials = await profile_chain.load_credentials()
            if credentials is None:
                error_message = (
                    'The source profile "%s" must have credentials.'
                )
                raise InvalidConfigError(
                    error_msg=error_message % profile_name,
                )
            return credentials
        # BUGFIX: this coroutine must be awaited — previously the bare
        # coroutine object was returned for chained assume-role profiles,
        # so callers received a coroutine instead of credentials.
        return await self._load_creds_via_assume_role(profile_name)

    def _resolve_static_credentials_from_profile(self, profile):
        """Build AioCredentials from static keys in a profile."""
        try:
            return AioCredentials(
                access_key=profile['aws_access_key_id'],
                secret_key=profile['aws_secret_access_key'],
                token=profile.get('aws_session_token')
            )
        except KeyError as e:
            raise PartialCredentialsError(
                provider=self.METHOD, cred_var=str(e))

    async def _resolve_credentials_from_source(self, credential_source,
                                               profile_name):
        """Resolve credentials via a canonical credential_source name."""
        credentials = await self._credential_sourcer.source_credentials(
            credential_source)
        if credentials is None:
            raise CredentialRetrievalError(
                provider=credential_source,
                error_msg=(
                    'No credentials found in credential_source referenced '
                    'in profile %s' % profile_name
                )
            )
        return credentials
class AioAssumeRoleWithWebIdentityProvider(AssumeRoleWithWebIdentityProvider):
    """Async provider for AssumeRoleWithWebIdentity-configured profiles."""

    async def load(self):
        return await self._assume_role_with_web_identity()

    async def _assume_role_with_web_identity(self):
        """Build deferred credentials from a web-identity token file,
        or return None when the profile/environment is not configured
        for web identity."""
        token_path = self._get_config('web_identity_token_file')
        if not token_path:
            return None
        token_loader = self._token_loader_cls(token_path)
        role_arn = self._get_config('role_arn')
        if not role_arn:
            # BUGFIX: a space was missing between 'role_arn' and
            # 'configuration', producing the garbled word
            # "role_arnconfiguration" in the error message.
            error_msg = (
                'The provided profile or the current environment is '
                'configured to assume role with web identity but has no '
                'role ARN configured. Ensure that the profile has the '
                'role_arn configuration set or the AWS_ROLE_ARN env var '
                'is set.'
            )
            raise InvalidConfigError(error_msg=error_msg)
        extra_args = {}
        role_session_name = self._get_config('role_session_name')
        if role_session_name is not None:
            extra_args['RoleSessionName'] = role_session_name
        fetcher = AioAssumeRoleWithWebIdentityCredentialFetcher(
            client_creator=self._client_creator,
            web_identity_token_loader=token_loader,
            role_arn=role_arn,
            extra_args=extra_args,
            cache=self.cache,
        )
        # The initial credentials are empty and the expiration time is set
        # to now so that we can delay the call to assume role until it is
        # strictly needed.
        return AioDeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=fetcher.fetch_credentials,
        )
class AioCanonicalNameCredentialSourcer(CanonicalNameCredentialSourcer):
    """Resolve credential_source names to async providers and load them."""
    async def source_credentials(self, source_name):
        """Loads source credentials based on the provided configuration.
        :type source_name: str
        :param source_name: The value of credential_source in the config
            file. This is the canonical name of the credential provider.
        :rtype: Credentials
        """
        source = self._get_provider(source_name)
        # A resolver wraps multiple providers; load through its chain.
        if isinstance(source, AioCredentialResolver):
            return await source.load_credentials()
        return await source.load()
    def _get_provider(self, canonical_name):
        """Return a credential provider by its canonical name.
        :type canonical_name: str
        :param canonical_name: The canonical name of the provider.
        :raises UnknownCredentialError: Raised if no
            credential provider by the provided name
            is found.
        """
        provider = self._get_provider_by_canonical_name(canonical_name)
        # The AssumeRole provider should really be part of the SharedConfig
        # provider rather than being its own thing, but it is not. It is
        # effectively part of both the SharedConfig provider and the
        # SharedCredentials provider now due to the way it behaves.
        # Therefore if we want either of those providers we should return
        # the AssumeRole provider with it.
        if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']:
            assume_role_provider = self._get_provider_by_method('assume-role')
            if assume_role_provider is not None:
                # The SharedConfig or SharedCredentials provider may not be
                # present if it was removed for some reason, but the
                # AssumeRole provider could still be present. In that case,
                # return the assume role provider by itself.
                if provider is None:
                    return assume_role_provider
                # If both are present, return them both as a
                # CredentialResolver so that calling code can treat them as
                # a single entity.
                return AioCredentialResolver([assume_role_provider, provider])
        if provider is None:
            raise UnknownCredentialError(name=canonical_name)
        return provider
class AioContainerProvider(ContainerProvider):
    """Fetch credentials from the container (ECS) metadata endpoint."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The base class installs a sync fetcher by default; swap it for
        # the async implementation unless a custom fetcher was supplied.
        if isinstance(self._fetcher, ContainerMetadataFetcher):
            self._fetcher = AioContainerMetadataFetcher()

    async def load(self):
        env = self._environ
        # Only active when one of the container-credential env vars is set.
        if self.ENV_VAR in env or self.ENV_VAR_FULL in env:
            return await self._retrieve_or_fail()

    async def _retrieve_or_fail(self):
        if self._provided_relative_uri():
            full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR])
        else:
            full_uri = self._environ[self.ENV_VAR_FULL]
        fetch = self._create_fetcher(full_uri, self._build_headers())
        creds = await fetch()
        # The same fetcher coroutine is reused for later refreshes.
        return AioRefreshableCredentials(
            access_key=creds['access_key'],
            secret_key=creds['secret_key'],
            token=creds['token'],
            method=self.METHOD,
            expiry_time=_parse_if_needed(creds['expiry_time']),
            refresh_using=fetch,
        )

    def _create_fetcher(self, full_uri, headers):
        async def fetch_creds():
            try:
                response = await self._fetcher.retrieve_full_uri(
                    full_uri, headers=headers)
            except MetadataRetrievalError as e:
                logger.debug("Error retrieving container metadata: %s", e,
                             exc_info=True)
                raise CredentialRetrievalError(provider=self.METHOD,
                                               error_msg=str(e))
            # Normalize the metadata response to refresh-metadata keys.
            return {
                'access_key': response['AccessKeyId'],
                'secret_key': response['SecretAccessKey'],
                'token': response['Token'],
                'expiry_time': response['Expiration'],
            }
        return fetch_creds
class AioCredentialResolver(CredentialResolver):
    """Async credential chain: the first provider to yield wins."""
    async def load_credentials(self):
        """
        Goes through the credentials chain, returning the first ``Credentials``
        that could be loaded.
        """
        for candidate in self.providers:
            logger.debug("Looking for credentials via: %s", candidate.METHOD)
            found = await candidate.load()
            if found is not None:
                return found
        # Historically the chain signals "nothing found" with None
        # rather than an exception; callers depend on that.
        return None
class AioSSOCredentialFetcher(AioCachedCredentialFetcher):
    """Fetch role credentials from AWS SSO with an async client."""
    _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, start_url, sso_region, role_name, account_id,
                 client_creator, token_loader=None, cache=None,
                 expiry_window_seconds=None):
        self._client_creator = client_creator
        self._sso_region = sso_region
        self._role_name = role_name
        self._account_id = account_id
        self._start_url = start_url
        self._token_loader = token_loader
        super().__init__(cache, expiry_window_seconds)

    def _create_cache_key(self):
        # Hash the identifying arguments so cache keys are filename-safe
        # and stable across runs.
        identity = json.dumps(
            {
                'startUrl': self._start_url,
                'roleName': self._role_name,
                'accountId': self._account_id,
            },
            sort_keys=True,
            separators=(',', ':'),
        )
        digest = sha1(identity.encode('utf-8')).hexdigest()
        return self._make_file_safe(digest)

    def _parse_timestamp(self, timestamp_ms):
        # fromtimestamp expects seconds so: milliseconds / 1000 = seconds
        as_seconds = timestamp_ms / 1000.0
        moment = datetime.datetime.fromtimestamp(as_seconds, tzutc())
        return moment.strftime(self._UTC_DATE_FORMAT)

    async def _get_credentials(self):
        """Get credentials by calling SSO get role credentials."""
        config = Config(
            signature_version=UNSIGNED,
            region_name=self._sso_region,
        )
        async with self._client_creator('sso', config=config) as client:
            kwargs = {
                'roleName': self._role_name,
                'accountId': self._account_id,
                'accessToken': self._token_loader(self._start_url),
            }
            try:
                response = await client.get_role_credentials(**kwargs)
            except client.exceptions.UnauthorizedException:
                raise UnauthorizedSSOTokenError()
            role_creds = response['roleCredentials']
        # Reshape into the structure expected by the cached fetcher.
        return {
            'ProviderType': 'sso',
            'Credentials': {
                'AccessKeyId': role_creds['accessKeyId'],
                'SecretAccessKey': role_creds['secretAccessKey'],
                'SessionToken': role_creds['sessionToken'],
                'Expiration': self._parse_timestamp(role_creds['expiration']),
            }
        }
class AioSSOProvider(SSOProvider):
    """Load SSO-configured profile credentials via the async fetcher."""
    async def load(self):
        sso_config = self._load_sso_config()
        if not sso_config:
            return None
        fetcher = AioSSOCredentialFetcher(
            sso_config['sso_start_url'],
            sso_config['sso_region'],
            sso_config['sso_role_name'],
            sso_config['sso_account_id'],
            self._client_creator,
            token_loader=SSOTokenLoader(cache=self._token_cache),
            cache=self.cache,
        )
        # Defer the actual SSO call until credentials are first needed.
        return AioDeferredRefreshableCredentials(
            method=self.METHOD,
            refresh_using=fetcher.fetch_credentials,
        )
|
import asyncio
import json
from base64 import b64decode
from base64 import b64encode
from datetime import datetime
from time import perf_counter
from typing import Awaitable
from typing import Callable
from typing import Coroutine
from typing import Optional
from uuid import uuid4
import websockets
from aiohttp import ClientSession
from aiohttp import DummyCookieJar
from websockets import WebSocketClientProtocol
from websockets.exceptions import ConnectionClosed
from ttun.pubsub import PubSub
from ttun.types import Config
from ttun.types import RequestData
from ttun.types import ResponseData
class Client:
    """Tunnel client: receives serialized HTTP requests over a websocket,
    replays them against a local server, and sends the responses back.

    NOTE(review): the wire protocol (JSON messages, base64 bodies) is
    inferred from this file only; confirm against the tunnel server.
    """

    def __init__(self, port: int, server: str, subdomain: Optional[str] = None):
        # Local port the tunnelled requests are proxied to.
        self.port = port
        # Base websocket URL of the tunnel server.
        self.server = server
        # Requested subdomain; presumably the server assigns one when None.
        self.subdomain = subdomain
        # Tunnel configuration returned by the server after the handshake.
        self.config: Optional[Config] = None
        # Open websocket connection (None until connect() succeeds).
        self.connection: Optional[WebSocketClientProtocol] = None

    async def send(self, data: dict):
        """JSON-encode *data* and send it over the tunnel websocket."""
        await self.connection.send(json.dumps(data))

    async def receive(self) -> dict:
        """Receive one websocket message and JSON-decode it."""
        return json.loads(await self.connection.recv())

    @staticmethod
    def loop(sleep: int = None):
        """Return an async runner that repeatedly awaits *callback*.

        The runner exits when the websocket connection closes; between
        iterations it pauses *sleep* seconds (no pause when sleep is None).
        """
        async def wrapper(callback: Callable[[], Coroutine]):
            while True:
                try:
                    await callback()
                    if sleep is not None:
                        await asyncio.sleep(sleep)
                except ConnectionClosed:
                    break
        return wrapper

    async def connect(self) -> WebSocketClientProtocol:
        """Open the tunnel websocket and perform the initial handshake.

        Sends the requested subdomain, stores the returned config, and
        returns the connection if open (implicitly None otherwise).
        """
        self.connection = await websockets.connect(f"{self.server}/tunnel/")
        await self.send({"subdomain": self.subdomain})
        self.config = await self.receive()
        if self.connection.open:
            return self.connection

    async def handle_messages(self):
        """Main loop: proxy each incoming request until the tunnel closes."""
        while True:
            try:
                request: RequestData = await self.receive()
                await self.proxyRequest(
                    request=request, on_response=lambda response: self.send(response)
                )
            except ConnectionClosed:
                break

    async def proxyRequest(
        self,
        request: RequestData,
        on_response: Callable[[ResponseData], Awaitable] = None,
    ):
        """Replay *request* against the local server and report the result.

        Publishes matching request/response events on the internal PubSub
        (sharing one id and including the round-trip time) and, when
        given, awaits *on_response* with the collected response data.
        """
        # Fresh session per request; DummyCookieJar keeps it stateless.
        async with ClientSession(cookie_jar=DummyCookieJar()) as session:
            request_id = uuid4()
            await PubSub.publish(
                {
                    "type": "request",
                    "payload": {
                        "id": request_id.hex,
                        "timestamp": datetime.now().isoformat(),
                        **request,
                    },
                }
            )
            start = perf_counter()
            response = await session.request(
                method=request["method"],
                url=f'http://localhost:{self.port}{request["path"]}',
                headers=request["headers"],
                # The body travels base64-encoded over the tunnel.
                data=b64decode(request["body"].encode()),
                allow_redirects=False,
            )
            end = perf_counter()
            response_data = ResponseData(
                status=response.status,
                headers=[
                    (key, value)
                    for key, value in response.headers.items()
                    # These headers describe the original transport encoding;
                    # forwarding them would corrupt the reconstructed response.
                    if key.lower() not in ["transfer-encoding", "content-encoding"]
                ],
                body=b64encode(await response.read()).decode(),
            )
            if on_response is not None:
                await on_response(response_data)
            await PubSub.publish(
                {
                    "type": "response",
                    "payload": {
                        "id": request_id.hex,
                        "timing": end - start,
                        **response_data,
                    },
                }
            )
|
import unittest
import time
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from PageObjectModel.Pages.Buttons import Buttons
from PageObjectModel.Pages.Home import HomePage
class TestButtons(unittest.TestCase):
    """Smoke-test the Buttons page of the Formy demo site."""

    def setUp(self) -> None:
        # webdriver-manager resolves a chromedriver matching the local Chrome.
        self.driver = webdriver.Chrome(ChromeDriverManager().install())
        # self.driver = webdriver.Chrome("C:/Users/serbd/Desktop/GitHub/ITFProjects/Resources/chromedriver.exe")
        self.driver.get("http://formy-project.herokuapp.com/")

    def test_buttons(self):
        home = HomePage(self.driver)
        buttons = Buttons(self.driver)
        time.sleep(1)
        home.click_on_buttons()
        time.sleep(2)
        # Same click sequence and pacing as before, expressed as one loop.
        for click_action in (
            buttons.click_primary,
            buttons.click_success,
            buttons.click_middle,
            buttons.click_dropdown,
            buttons.click_drlink2,
        ):
            click_action()
            time.sleep(1)

    def tearDown(self) -> None:
        self.driver.quit()
|
# Generated by Django 3.2.5 on 2021-08-04 10:28
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Artist.seeking_talent to seeking_venue (no data change)."""

    dependencies = [
        ('artists', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='artist',
            old_name='seeking_talent',
            new_name='seeking_venue',
        ),
    ]
|
import numpy as np
from keras.utils import Sequence
import data_preprocessing
from PIL import Image
class BatchGenerator(Sequence):
    '''
    generate batch of normalized, randomly transformed images
    '''

    def __init__(self, data, config):
        '''
        Args:
            data : dictionary ("filename": list of rects)
            config : parameters for data generator as dictionary
        '''
        self.data = data
        self.filenames = list(self.data.keys())
        # Augmentation / batching settings with defaults.
        self.is_augment = config.get('is_augment', True)
        self.batch_size = config.get('batch_size', 32)
        # YOLO output grid resolution (cells across / down).
        self.grid_w = config.get('grid_w', 7)
        self.grid_h = config.get('grid_h', 7)
        self.img_w = config.get('img_w', 224) # width of the image for NN input
        self.img_h = config.get('img_h', 224)
        # Max random shift as a fraction of image width/height.
        self.shift_x = config.get('shift_x', 0.1)
        self.shift_y = config.get('shift_y', 0.1)
        self.flip = data_preprocessing.RandomHorizontalFlip()
        self.shift = data_preprocessing.RandomShift(self.shift_x, self.shift_y)
        if self.is_augment:
            print("data will be augmented with random flip and random left-right shift on " +
                  str(self.shift_x * 100.0) + " percents and top-bottom shift on " +
                  str(self.shift_y * 100.0) + " percents.")
        print("iteration_num = ", self.__len__())

    def filter_rects(self, rects_aug, width, height):
        '''
        filter removes rectangles which centers are outside the image
        and cut rectangles which go out the image

        Args:
            rects_aug : list of rects as (x, y, w, h) in pixels
            width, height : image size in pixels
        Returns:
            list of clipped rects whose centers lie inside the image
        '''
        # import pdb
        # pdb.set_trace()
        rects_aug_filtered = []
        for rect in rects_aug:
            x_c = rect[0] + rect[2] / 2.0
            y_c = rect[1] + rect[3] / 2.0
            x_max = rect[0] + rect[2]
            y_max = rect[1] + rect[3]
            w = width
            h = height
            # keep only rects whose center is inside the image
            # (i.e. roughly at least half of the rect remains visible)
            if x_c < w and x_c > 0 and y_c < h and y_c > 0: # at least half of image must be on image
                # clip the rect to the image borders
                x_min = np.maximum(0, rect[0])
                y_min = np.maximum(0, rect[1])
                x_max = np.minimum(w-1, x_max)
                y_max = np.minimum(h-1, y_max)
                rects_aug_filtered += [np.array([x_min, y_min, x_max - x_min, y_max - y_min])]
        return rects_aug_filtered

    # input : normalized rects (x_c, y_c, w, h), each value in [0,1]
    # output : labels for YOLO loss function, rect in each grid cell in relative to the cell coordinates
    def convert_GT_to_YOLO(self, rects):
        """Convert normalized ground-truth rects into a (grid_h, grid_w, 5)
        YOLO target tensor: [objectness, cx, cy, w, h] per cell.

        NOTE(review): when two rect centers fall in the same cell the
        later one overwrites the earlier — confirm this is acceptable.
        """
        y_YOLO = np.zeros((self.grid_h, self.grid_w, 4 + 1))
        for rect in rects:
            # Center in grid units; the owning cell is the integer part.
            center_x = rect[0] * self.grid_w
            center_y = rect[1] * self.grid_h
            grid_x = int(np.floor(center_x))
            grid_y = int(np.floor(center_y))
            # Center relative to the owning cell, in [0, 1).
            center_x -= grid_x
            center_y -= grid_y
            # Width/height expressed in grid-cell units.
            center_w = rect[2] * self.grid_w
            center_h = rect[3] * self.grid_h
            y_YOLO[grid_y, grid_x, :] = np.array([1, center_x, center_y, center_w, center_h])
        return y_YOLO

    def __len__(self):
        # Number of batches per epoch (the float() wrapper is redundant
        # under true division but harmless).
        return int(np.ceil(float(len(self.data)/self.batch_size)))

    def __getitem__(self, idx):
        # NOTE(review): `idx` is ignored — every call draws a fresh random
        # sample without replacement, which requires batch_size <= dataset
        # size and means batches are not epoch-partitioned. Confirm intent.
        indices = np.random.choice(len(self.data), self.batch_size, replace=False)
        return self.get_XY(indices)

    def get_XY(self, indices):
        """Build one (x_batch, y_batch) pair from the given dataset indices."""
        x_batch = np.zeros((len(indices), self.img_h, self.img_w, 3)) # input images
        y_batch = np.zeros((len(indices), self.grid_h, self.grid_w, 4+1)) # desired network output
        for i, index in enumerate(indices):
            image, rects = self.get_image_rects(index)
            # Resize/normalize the image and rects to network input space.
            image_norm, rects_norm = data_preprocessing.normalize_data(image, self.img_w, self.img_h, rects)
            x_batch[i] = image_norm
            y_YOLO = self.convert_GT_to_YOLO(rects_norm)
            y_batch[i] = y_YOLO
        return x_batch, y_batch

    def get_image_rects(self, index):
        """Load the image at *index* and its rects, optionally augmented."""
        filename = self.filenames[index]
        image = np.array(Image.open(filename))
        rects = self.data[filename]
        if self.is_augment:
            # augment input image and fix object's rectangle
            height, width = image.shape[:2]
            image, rects = self.flip(image, rects)
            image, rects = self.shift(image, rects)
            # drop/clip rects pushed (partially) off the image by the shift
            rects = self.filter_rects(rects, width, height)
        return image, rects

    def on_epoch_end(self):
        print("epoch is finished")
        # np.random.shuffle(self.data.keys)
|
#!/usr/bin/env python
# /* -*- indent-tabs-mode:t; tab-width: 8; c-basic-offset: 8 -*- */
# /*
# Copyright (c) 2014, Daniel M. Lofaro
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# */
# from ctypes import *
from ctypes import Structure,c_uint16,c_double,c_ubyte,c_uint32,c_int16
# import ach
# import sys
# Enumeration-style constants (meaning not shown here — presumably mode or
# channel selectors for the controller; confirm against the consumer code).
DEF_1 = 0
DEF_2 = 1
DEF_3 = 2
# Name of the channel carrying end-effector coordinate commands
# (presumably an ach channel — the ach import above is commented out).
CONTROLLER_REF_NAME = 'ike-coordinates-chan'
class CONTROLLER_REF(Structure):
    """C-compatible struct holding a 3D target coordinate (x, y, z)."""
    _pack_ = 1  # byte-aligned packing so the layout matches the C side exactly
    _fields_ = [("x", c_double),
                ("y", c_double),
                ("z", c_double)]
#class HUBO_REF(Structure):
# _pack_ = 1
# _fields_ = [("ref", c_double*HUBO_JOINT_COUNT),
# ("mode", c_int16*HUBO_JOINT_COUNT),
# ("comply", c_ubyte*HUBO_JOINT_COUNT)]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains functions and classes related with MotionBuilder takes
"""
from __future__ import print_function, division, absolute_import
import pyfbsdk
def get_current_anim_take_name():
    """
    Returns the name of the current take

    :return: str or None, name of the current take (None when no take is active)
    """

    current_take = pyfbsdk.FBSystem().CurrentTake
    # Reuse the take already retrieved instead of constructing a second
    # FBSystem() and querying CurrentTake again (redundant work, and the
    # current take could change between the two calls).
    return current_take.Name if current_take else None
|
import sms_sdk_renderer_python.lib.sms_sdk_renderer as SmsRenderer
# Sample payload for an informational message card.
info_data = {
    "title": 'Informaiton Title',  # NOTE(review): rendered verbatim; looks like a typo for "Information" — confirm before changing
    "content": 'This is a information message',
    "description": 'Information message description'
}

# Render the payload with the INFORMATION template and print the markup.
print(SmsRenderer.renderInBot(info_data, SmsRenderer.smsTypes['INFORMATION']))
|
#!/usr/bin/env python
import fileinput
import subprocess
import json
import os
import re
# Previously published artist/title, used to detect changes.
last = {
    'artist': False,
    'title': False
}
# Most recently parsed metadata.
data = {}
def clean(raw):
    """Strip bracketed/parenthesized annotations and collapse whitespace.

    Takes UTF-8 encoded bytes, removes "[...]" and "(...)" segments
    (e.g. "[Live]", "(Remix)"), squeezes whitespace runs to single
    spaces, trims the ends, and returns UTF-8 encoded bytes again.
    """
    # 'raw' replaces the original parameter name 'str', which shadowed the
    # builtin; patterns are raw strings to avoid invalid-escape warnings.
    text = raw.decode('utf-8')
    text = re.sub(r'\[.*?\]', '', text)
    text = re.sub(r'\(.*?\)', '', text)
    text = re.sub(r'\s+', ' ', text).strip()
    return text.encode('utf-8')
# Stream "Artist: ..." / "Title: ..." lines from the helper process and
# publish changed metadata as JSON for the web API.
process = subprocess.Popen(['/home/pi/bin/pipe_music_meta'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
while True:
    line = process.stdout.readline()
    # NOTE(review): under Python 3 readline() returns bytes, so this
    # comparison and the str methods below assume Python 2 — confirm.
    if line != '':
        #print "test:", line.rstrip()
        line = line.replace('"', '').rstrip()
        if line.startswith('Artist'):
            # [:-1] drops a trailing character left by the source format.
            data['artist'] = clean(line.replace('Artist: ', '')[:-1])
        if line.startswith('Title'):
            data['title'] = clean(line.replace('Title: ', '')[:-1])
        #print data
        if 'artist' in data and 'title' in data:
            # Only rewrite the JSON file when either field changed.
            if data['artist'] != last['artist'] or data['title'] != last['title']:
                #print "Data Changed"
                last['artist'] = data['artist']
                last['title'] = data['title']
                #print data
                f = open('/home/pi/api/html/music.json', 'w')
                f.write(json.dumps(data, indent=4, sort_keys=True))
                f.close()
    else:
        # EOF: the helper process closed its stdout.
        break
|
from datetime import timedelta
import pytest
from django.core.management import call_command
from django.utils import timezone
from magiclink.models import MagicLink
@pytest.mark.django_db
def test_magiclink_clear_logins():
    """magiclink_clear_logins keeps only enabled, freshly-expired links."""
    # Two links that just expired: these must survive the command.
    for _ in range(2):
        MagicLink.objects.create(
            email='test@example.com',
            token='fake',
            expiry=timezone.now(),
            redirect_url='',
        )
    # Two links that expired two weeks ago: these must be purged.
    stale_expiry = timezone.now() - timedelta(days=14)
    for _ in range(2):
        MagicLink.objects.create(
            email='test@example.com',
            token='fake',
            expiry=stale_expiry,
            redirect_url='',
        )
    # Two disabled links: these must be purged as well.
    for _ in range(2):
        link = MagicLink.objects.create(
            email='test@example.com',
            token='fake',
            expiry=timezone.now(),
            redirect_url='',
        )
        link.disable()

    call_command('magiclink_clear_logins')

    remaining = MagicLink.objects.all()
    assert remaining.count() == 2
    for link in remaining:
        assert not link.disabled
        assert link.expiry > timezone.now() - timedelta(days=6)
|
"""argon.py"""
# Constant-temperature constant-pressure MD simulation of Argon
from MMTK import *
from MMTK.ForceFields import LennardJonesForceField
from MMTK.Environment import NoseThermostat, AndersenBarostat
from MMTK.Trajectory import Trajectory, TrajectoryOutput, LogOutput
from MMTK.Dynamics import VelocityVerletIntegrator, Velocity Scaler, \
TranslationRemover, BarostatReset
import string
from Scientific.IO.TextFile import TextFile
# Open the config file and read box size
conf_file = TextFile('argon.conf.gz')
lx, ly, lz = map(string.atof, string.split(conf_file.readline()))
# Construct periodic universe using Lennard-Jones (noble gas) force field
# with cutoff of 15 Angstroms
universe = OrthorhombicPeriodicUniverse((lx*Units.Ang, ly*Units.Ang, lz*Units.Ang),
LennardJonesForceField(15.*Units.Ang))
# Read the atom positions and construct the atoms
while 1:
line = conf_file.readline()
if not line:
break
x, y, z, = map(string.atof, string.split(line))
universe.addObject(Atom('Ar', position=Vector(x*Units.Ang, y*Units.Ang, z*Units.Ang)))
# Define thermodynamic parameters
temperature = 94.4*Units.K
pressure = 1.*Units.atm
# Add thermostat and barostat
universe.thermostat = NoseThermostat(temperature)
universe.barostat = AndersenBarostat(pressure)
# Initialise velocities
universe.initializeVelocitiesToTemperature(temperature)
# Create trajectory and integrator
trajectory = Trajectory(universe, "argon_npt.nc", "w", "Argon NPT test")
integrator = VelocityVerletIntegrator(universe, delta_t=10*Units.fs)
# Periodical actions for trajectory output and text log output
output_actions = [TrajectoryOutput(trajectory, ('configuration', 'energy', 'thermodynamic',
'time', 'auxiliary'), 0, None, 20),
LogOutput("argon.log", ('time', 'energy'), 0, None, 100)]
# Do some equilibration steps, rescaling velocities and resetting barostat in regular intervals
integrator(steps = 2000, actions = [TranslationRemover(0, None, 100),
VelocityScaler(temperature, 0.1*temperature, 0, None, 100),
BarostatReset(100)] + output_actions)
# Do some "production" steps
integrator(steps = 2000, actions = [TranslationRemover(0, None, 100)] + output_actions)
# Close trajectory
trajectory.close() |
#!/usr/bin/env python3
import os
from jupyterhub_client import JupyterHubClient
import fire
from deploy_util import get_config
def _create_hub_client():
    """Build a JupyterHubClient pointed at the local hub REST API."""
    cfg = get_config()
    api_token = cfg["HUB_CLIENT_API_TOKEN"]
    # The hub API is only reachable from the host itself.
    api_url = "http://{}:{}/hub/api".format("127.0.0.1", cfg["HUB_API_PORT"])
    return JupyterHubClient(token=api_token, url=api_url)
# Module-level client so fire can expose its methods as CLI subcommands.
hub_client = _create_hub_client()

if __name__ == "__main__":
    # Every public method of the client becomes a command-line command.
    fire.Fire(hub_client)
|
import itertools
import algo_2_count
import d_SVM
import numpy as np
import funcs
import warnings
import pandas as pd
warnings.filterwarnings("ignore")
# 获取原始数据
data_raw = d_SVM.dataDigitize("data/adult_new.csv")
# 将特征fnlwgt离散为5个维度
data_raw['fnlwgt'] = pd.cut(data_raw['fnlwgt'], 5)
# data, labels, names = funcs.data_clean(data_raw)
adjustment_f = ['hours-per-week', 'education-num', 'fnlwgt', 'age']
data1 = data_raw[adjustment_f]
data2 = data_raw.drop(adjustment_f, axis=1)
best_f = list((data2.drop('lable', axis=1)).columns)
label = 'lable'
# Build candidate schemes by pairing up the adjustment features.
def partition_c(adjustment_features):
    """Return all 2-feature combinations, each with the label column appended."""
    candidates = []
    for pair in itertools.combinations(adjustment_features, 2):
        candidates.append(list(pair) + [label])
    return candidates
# Generate the candidate schemes c from the adjustment features.
backup_solutions = partition_c(adjustment_f)
# Information gain
def gain_info(feature_name):
    """Return the information gain of splitting data_raw on *feature_name*.

    Computed as the entropy of the whole dataset minus the weighted
    average entropy of the subsets induced by each feature value.
    """
    # Initial entropy of the merged dataset Dp.
    info_entropy_dp = cal_info_entropy(data_raw)
    # Occurrence count of each feature value.
    count_a_feature = data_raw[feature_name].value_counts()
    # One subset per feature value. Fixed: use the public Series `.index`
    # instead of the private `._index` attribute.
    subset = [data_raw.loc[data_raw[feature_name] == value] for value in count_a_feature.index]
    # Entropy of each subset.
    subset_entropy = [cal_info_entropy(x) for x in subset]
    # Probability of each feature value.
    feature_chance = np.array([x / np.sum(count_a_feature) for x in count_a_feature])
    # Gain = initial entropy minus probability-weighted subset entropy.
    return info_entropy_dp - np.matmul(feature_chance, subset_entropy)
# Shannon entropy of the label column
def cal_info_entropy(data, data_label="lable"):
    """Return the base-2 Shannon entropy of column *data_label* in *data*."""
    counts = data[data_label].value_counts()
    total = np.sum(counts)
    probabilities = [count / total for count in counts]
    # H = -sum(p * log2(p)) over the observed label values.
    return -np.sum(np.fromiter((p * np.log2(p) for p in probabilities), float))
# Dictionary of per-feature information gains.
def gain_info_initialize(adjustment_features):
    """Return {feature: gain_info(feature)} plus a zero entry for the label."""
    single_feature_gain_info = {feature: gain_info(feature) for feature in adjustment_features}
    # The label contributes no gain of its own.
    single_feature_gain_info[label] = 0
    return single_feature_gain_info


gain_info_dict = gain_info_initialize(adjustment_f)
def u1_initialize(c):
    """Precompute utility-1 (summed information gain) for every scheme in *c*.

    Returns a dict keyed by the tuple form of each scheme (sub-lists are
    converted to tuples because lists are unhashable).
    """
    tuple_c = tuple(tuple(x) for x in c)
    # A scheme's gain is the sum of its features' gains. Fixed: use the
    # builtin sum() — np.sum() over a generator is deprecated and falls
    # back to Python iteration anyway.
    sum_gain_info = []
    for x in c:
        sum_gain_info.append(sum(gain_info_dict[feature] for feature in x))
    return dict(zip(tuple_c, sum_gain_info))


u1_dict = u1_initialize(backup_solutions)
# Utility function 1: summed information gain via the precomputed table.
def utility_function1(feature_names):
    """Look up the precomputed utility-1 value for a scheme."""
    key = tuple(feature_names)
    return u1_dict[key]
# Mean correlation degree (MCD) across the sub-datasets.
def get_mcd(subdatas):
    """Return the mean 'CS_mean' correlation score over *subdatas*."""
    scores = [funcs.cs(part)['CS_mean'] for part in subdatas]
    return np.mean(scores)


mcd = get_mcd(funcs.split(data_raw, 3))
# Precompute the u2 lookup table.
def u2_initialize(c):
    """Map each scheme (as a tuple, since lists are unhashable) to its
    'CS_i' correlation score at threshold mcd."""
    return {tuple(x): funcs.cs(data_raw[x], mcd)['CS_i'] for x in c}


# Build the u2 table for all candidate schemes.
u2_dict = u2_initialize(backup_solutions)
# Utility function 2: mean correlation degree over the scheme's CS_i.
def utility_function2(feature_names):
    """Look up utility-2 for a scheme: mcd / precomputed CS_i."""
    key = tuple(feature_names)
    return mcd / u2_dict[key]
# Exponential mechanism score based on utility function 1.
def exponential_mechanism1(privacy_budget, feature_names, delta_u=1):
    """Return exp(eps * u1 / (2 * delta_u)) for the given scheme."""
    score = utility_function1(feature_names)
    return np.exp(privacy_budget * score / (2 * delta_u))
# Selection probability under exponential mechanism 1.
def chance_exp1(privacy_budget, c, ci):
    """Return the probability of selecting scheme *ci* among candidates *c*."""
    # Fixed: builtin sum() — np.sum() over a generator is deprecated.
    exp_sum = sum(exponential_mechanism1(privacy_budget, x) for x in c)
    return exponential_mechanism1(privacy_budget, ci) / exp_sum
# Exponential mechanism score based on utility function 2.
def exponential_mechanism2(privacy_budget, feature_names, delta_u=1):
    """Return exp(eps * u2 / (2 * delta_u)) for the given scheme."""
    score = utility_function2(feature_names)
    return np.exp(privacy_budget * score / (2 * delta_u))
# Selection probability under exponential mechanism 2.
def chance_exp2(privacy_budget, c, ci):
    """Return the probability of selecting scheme *ci* among candidates *c*."""
    # Fixed: builtin sum() — np.sum() over a generator is deprecated.
    exp_sum = sum(exponential_mechanism2(privacy_budget, x) for x in c)
    return exponential_mechanism2(privacy_budget, ci) / exp_sum
def select_ci1(privacy_budget, c, best_features):
    """Pick the scheme ranked highest by mechanism 1 and append its
    features to *best_features* (returned as a new list)."""
    ranked = sorted(
        zip(map(lambda x: chance_exp1(privacy_budget, c, x), c), c),
        reverse=True,
    )
    # ranked[0] is (probability, scheme); element [1] is the scheme list.
    return best_features + ranked[0][1]
def select_ci2(privacy_budget, c, best_features):
    """Pick the scheme ranked highest by mechanism 2 and append its
    features to *best_features* (returned as a new list)."""
    ranked = sorted(
        zip(map(lambda x: chance_exp2(privacy_budget, c, x), c), c),
        reverse=True,
    )
    # ranked[0] is (probability, scheme); element [1] is the scheme list.
    return best_features + ranked[0][1]
# Print the correlation-score report (requested output format).
def print_cs(name, data, threshold=0.5):
    """Print *data*'s columns and its funcs.cs(...) result at *threshold*."""
    print(name + ':(阈值为 %s )' % threshold)
    print(data.columns)
    print(funcs.cs(data, threshold))
def mae1(privacy_budget):
    """Mean absolute error of noisy counts using utility function u1.

    Compares three sensitivity thresholds on the selected feature set:
    our mcd-based CS_i, the default-0.5 CS_i, and the default-0.5 GS.
    Returns (u1, mae_cs, mae_gs).
    """
    # NOTE(review): the selection step uses a hard-coded budget of 1
    # rather than the `privacy_budget` argument — confirm this is intended.
    best_and_selected1 = select_ci1(1, backup_solutions, best_f)
    # print_cs('best+u1选出来的特征', data_raw[best_and_selected1])
    # print_cs('best+u1选出来的特征', data_raw[best_and_selected1], float(mcd))
    # Our algorithm: CS_i with the mcd threshold.
    u1 = algo_2_count.noise_count_error(data_raw[best_and_selected1],
                                        funcs.cs(data_raw[best_and_selected1], mcd)['CS_i'],
                                        privacy_budget)
    # CS_i with the default 0.5 threshold.
    mae_cs = algo_2_count.noise_count_error(data_raw[best_and_selected1],
                                            funcs.cs(data_raw[best_and_selected1])['CS_i'],
                                            privacy_budget)
    # GS with the default 0.5 threshold.
    mae_gs = algo_2_count.noise_count_error(data_raw[best_and_selected1],
                                            funcs.cs(data_raw[best_and_selected1])['GS'],
                                            privacy_budget)
    print('u1' + str(u1))
    print('u1cs' + str(mae_cs))
    print('u1gs' + str(mae_gs))
    return u1, mae_cs, mae_gs
def mae2(privacy_budget):
    """Mean absolute error of noisy counts using utility function u2.

    Same three-threshold comparison as mae1, but the feature set is
    selected with mechanism 2. Returns (u2, mae_cs, mae_gs).
    """
    # NOTE(review): the selection step uses a hard-coded budget of 1
    # rather than the `privacy_budget` argument — confirm this is intended.
    best_and_selected2 = select_ci2(1, backup_solutions, best_f)
    # print_cs('best+u2选出来的特征', data_raw[best_and_selected2])
    # print_cs('best+u2选出来的特征', data_raw[best_and_selected2], float(mcd))
    # Our algorithm: CS_i with the mcd threshold.
    u2 = algo_2_count.noise_count_error(data_raw[best_and_selected2],
                                        funcs.cs(data_raw[best_and_selected2], mcd)['CS_i'],
                                        privacy_budget)
    # CS_i with the default 0.5 threshold.
    mae_cs = algo_2_count.noise_count_error(data_raw[best_and_selected2],
                                            funcs.cs(data_raw[best_and_selected2])['CS_i'],
                                            privacy_budget)
    # GS with the default 0.5 threshold.
    mae_gs = algo_2_count.noise_count_error(data_raw[best_and_selected2],
                                            funcs.cs(data_raw[best_and_selected2])['GS'],
                                            privacy_budget)
    print('u2' + str(u2))
    print('u2cs' + str(mae_cs))
    print('u2gs' + str(mae_gs))
    return u2, mae_cs, mae_gs
if __name__ == '__main__':
    # print_cs('所有特征', data_raw)
    # print_cs('所有特征', data_raw, float(mcd))
    #
    # mae1(1)
    # mae2(1)
    # Only report the mean correlation degree for now.
    print(mcd)
|
import os
print("Enter Text in \"\"\" <Text> \"\"\" Format")
Text = input()
print("Enter File name (Do not Enter .txt)")
FileName = input()
FileName += ".txt"
os.chdir("G:\\Python DB")
FileLocation = os.path.join(os.path.abspath("."),FileName)
FileOBJ = open(FileLocation , "w")
FileOBJ.write(Text)
FileOBJ.close()
|
from podb import DB
from threading import Thread
from multiprocessing import Queue
import unittest
from time import time, sleep
from datetime import datetime
from copy import deepcopy
from tests import TestObject
db = DB("test")
class DBTestMethods(unittest.TestCase):
def test_insert(self):
print("test_insert")
to = TestObject.random()
b = db.size()
db.insert(to)
a = db.size()
self.assertGreater(a, b, "DB entry count did not increase")
def test_find(self):
print("test_find")
to = TestObject.random()
self.assertTrue(db.insert(to), "insert was not successful")
self.assertGreater(db.size(), 0, "database count is still 0")
ti = db.find_one({"name": to.name})
self.assertIsNotNone(ti, "find_one returned None")
self.assertEqual(ti.uuid, to.uuid, "Inserted and retrieved object uuids are not the same")
def test_find_by_uuid(self):
print("test_find_by_uuid")
to = TestObject.random()
db.insert(to)
ti = db.find_by_uuid(to.uuid)
self.assertIsNotNone(ti, "find_by_uuid returned None")
self.assertEqual(to.uuid, ti.uuid, "Inserted and retrieved object uuids are not the same")
def test_update(self):
print("test_update")
t0 = TestObject.random()
db.insert(t0)
t1 = deepcopy(t0)
t0.age *= 2
t0.size *= 2
self.assertGreater(t0.age, t1.age, "age is not greater than before")
self.assertGreater(t0.size, t1.size, "size is not greater than before")
self.assertTrue(db.update(t0), "update failed")
del t0
t0 = db.find_by_uuid(t1.uuid)
self.assertIsNotNone(t0, "find_by_uuid returned None")
self.assertGreater(t0.size, t1.size, "not greater than original objects age")
self.assertGreater(t0.age, t1.age, "not greater than original objects age")
def test_timings(self):
db.drop()
print("test_timings")
avg_queue = Queue()
def f(q: Queue, n=200):
avg = 0
for _ in range(n):
start = time()
db.insert(TestObject.random())
stop = time()
d = (stop - start)
avg += d
avg /= n
q.put(avg)
t = []
for _ in range(4):
t.append(Thread(target=f, args=[avg_queue]))
for _ in t:
_.start()
for _ in t:
_.join()
while not avg_queue.empty():
avg_result = avg_queue.get()
self.assertLess(avg_result, 0.2, "processing time is lesser than 0.2 seconds")
self.assertEqual(db.size(), 800, "db has size of {0} not 800".format(db.size()))
def test_get_after(self):
print("test_get_after")
now = datetime.now()
sleep(1)
t0 = TestObject.random()
t1 = TestObject.random()
db.insert_many([t0, t1])
r = db.find_after(now)
self.assertIsNotNone(r, "get_after returned something")
if __name__ == '__main__':
db.drop()
unittest.main()
|
"""Unit test package for platemate."""
|
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from apps.web.models import Competition, CompetitionParticipant, ParticipantStatus
from apps.teams.models import Team, TeamStatus, get_user_team, TeamMembership
User = get_user_model()
class CompetitionTeamsTests(TestCase):
    """Tests for team creation within a competition."""

    def setUp(self):
        # Competition owner who also acts as an approved participant.
        self.creator = User.objects.create(email='test@user.com', username='testuser')
        self.creator.set_password('test')
        self.creator.save()
        self.competition = Competition.objects.create(creator=self.creator, modified_by=self.creator)
        # Not sure why this status doesn't exist?
        self.part_status = ParticipantStatus.objects.create(
            name='Approved',
            codename=ParticipantStatus.APPROVED,
            description='Approved',
        )
        self.creator_part = CompetitionParticipant.objects.create(
            user=self.creator,
            competition=self.competition,
            status=self.part_status,
            reason="Creator"
        )

    def test_organizer_is_member_after_creating_team(self):
        self.client.login(username='testuser', password='test')
        resp = self.client.post(
            reverse('team_new', kwargs={
                'competition_pk': self.competition.pk
            }),
            {'name': "Test Team", 'description': "A team for automated tests!", 'allow_requests': False}
        )
        # Approve the freshly created team so membership lookups apply.
        new_team = Team.objects.first()
        new_team.status = TeamStatus.objects.get(codename=TeamStatus.APPROVED)
        new_team.save()
        assert resp.status_code == 302
        # NOTE(review): hard-codes team pk 1 and the test-server host —
        # brittle if the test database is not empty.
        assert resp.url == 'http://testserver/teams/1/'
        # Fixed: identity check (`is not None`) instead of `!= None` (PEP 8).
        assert get_user_team(self.creator_part, self.competition) is not None
|
# This macro provides an example to extract the joints of a program.
# The joints extracted take into account the rounding effect.
from robodk.robolink import * # API to communicate with RoboDK
import sys
import os
# Set to False to have a more customized output as dictated by this script
FAST_SAVE = False

# Start the RoboDK API:
RDK = Robolink()

# Get the robot and the program available in the open station:
#robot = RDK.Item('', ITEM_TYPE_ROBOT_ARM)

# Option one, retrieve joint list as a matrix (not through a file):
prog = RDK.ItemUserPick('Select a Program', ITEM_TYPE_PROGRAM)
if not prog.Valid():
    print("No program selected")
    quit()

# Robot linked to the selected program, and its degree-of-freedom count.
robot = prog.getLink(ITEM_TYPE_ROBOT)
ndofs = len(robot.Joints().list())

# Build a non-clobbering output path: <station>/<prog>-Table-<n>.csv
path_table = RDK.getParam("PATH_OPENSTATION") + "/" + prog.Name() + "-Table-"
path_table_csv = path_table + "1.csv"
count = 1
while os.path.isfile(path_table_csv) and count < 20:
    count = count + 1
    path_table_csv = path_table + str(count) + ".csv"

# Define the way we want to output the list of joints
Position = 1 # Only provide the joint position and XYZ values
Speed = 2 # Calculate speed (added to position)
SpeedAndAcceleration = 3 # Calculate speed and acceleration (added to position)
TimeBased = 4 # Make the calculation time-based (adds a time stamp added to the previous options)
TimeBasedFast = 5 # Make the calculation time-based and avoids calculating speeds and accelerations (adds a time stamp added to the previous options)

FLAGS = TimeBasedFast
STEP_MM = 0.01 # Step in mm
STEP_DEG = 0.01 # Step in deg
TIME_STEP = 0.01 # time step in seconds

if FAST_SAVE:
    # Let RoboDK write the CSV directly and stop here.
    FLAGS = TimeBased
    error_msg, joint_list, error_code = prog.InstructionListJoints(STEP_MM, STEP_DEG, path_table_csv, flags=FLAGS, time_step=TIME_STEP)
    print(error_msg)
    quit()

# If not FAST_SAVE
# Retrieve the joint list in memory and post-process it below.
FLAGS = TimeBasedFast
error_msg, joint_list, error_code = prog.InstructionListJoints(STEP_MM, STEP_DEG, None, flags=FLAGS, time_step=TIME_STEP)
print(error_msg)
def diff(j1, j2, dt, dofs):
    """Return the element-wise rate of change (j1 - j2) / dt.

    Falls back to a zero vector of length *dofs* when there is no
    previous sample (j2 is None) or the time step is not positive.
    (Fixed: the original docstring claimed a boolean result; it has
    always returned a list of derivatives.)
    """
    if j2 is None or dt <= 0:
        return [0] * dofs
    # Distinct loop names: the original reused j1/j2 as the loop
    # variables, shadowing the parameters.
    return [(a - b) / dt for a, b in zip(j1, j2)]
# Previous-sample state for numerical differentiation.
joints_last = None
speeds_last = None
t_last = None  # NOTE(review): assigned but never read — candidate for removal

print("Saving joints to file: " + path_table_csv)
with open(path_table_csv, 'w') as fid:
    joints_header = ",".join(["Joint J" + str(i + 1) for i in range(ndofs)])
    speeds_header = ",".join(["Speed J" + str(i + 1) for i in range(ndofs)])
    accel_header = ",".join(["Accel J" + str(i + 1) for i in range(ndofs)])
    t_now = 0
    fid.write("Time (s)," + joints_header + ",,Error,Step (mm), Step (deg), Move ID,,Time (s)," + speeds_header + ",,Time (s)," + accel_header)
    fid.write("\n")
    for line in joint_list:
        # Row layout: ndofs joint values, then error, steps, move id, dt.
        joints = line[:ndofs]
        error = line[ndofs]
        step_mm = line[ndofs + 1]
        step_deg = line[ndofs + 2]
        move_id = line[ndofs + 3]
        t_delta = line[ndofs + 4]
        t_now += t_delta
        # Miscellaneous columns: Error, Step (mm), Step (deg), Move ID
        misc_str = "%.1f,%.3f,%.3f,%.0f" % (error, step_mm, step_deg, move_id)
        # Speeds: first derivative of the joint values
        speeds = diff(joints, joints_last, t_delta, ndofs)
        # Accelerations: derivative of the speeds
        accels = diff(speeds, speeds_last, t_delta, ndofs)
        print('Time +S: %.3f s' % t_now)
        joints_str = ",".join(["%.6f" % x for x in joints])
        speeds_str = ",".join(["%.6f" % x for x in speeds])
        accels_str = ",".join(["%.6f" % x for x in accels])
        time_str = ("%.6f," % t_now)
        fid.write(time_str + joints_str + ",," + misc_str + ",," + time_str + speeds_str + ",," + time_str + accels_str)
        fid.write("\n")
        t_last = t_now
        joints_last = joints
        speeds_last = speeds

print(error_msg)
|
"""
You are given a 2d grid of `"1"`s and `"0"`s that represents a "map". The
`"1"`s represent land and the `"0"s` represent water.
You need to write a function that, given a "map" as an argument, counts the
number of islands. Islands are defined as adjacent pieces of land that are
connected horizontally or vertically. You can also assume that the edges of the
map are surrounded by water.
Example 1:
Input: grid = [
["1","1","1","1","0"],
["1","1","0","1","0"],
["1","1","0","0","0"],
["0","0","0","0","0"]
]
Output: 1
Example 2:
Input: grid = [
["1","1","0","0","0"],
["1","1","0","0","0"],
["0","0","1","0","0"],
["0","0","0","1","1"]
]
Output: 3
"""
def numIslands(grid):
    """Count the islands in a grid of "1" (land) / "0" (water) strings.

    Islands are maximal groups of land cells connected horizontally or
    vertically. The grid is mutated in place (visited land is flooded to
    "0"); pass a copy if the caller needs the original.

    Time O(rows * cols); space O(rows * cols) worst case for the stack.
    """
    if not grid or not grid[0]:
        return 0
    rows, cols = len(grid), len(grid[0])
    count = 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == "1":
                count += 1
                # Iterative DFS flood-fill to sink this whole island.
                stack = [(r, c)]
                while stack:
                    i, j = stack.pop()
                    if 0 <= i < rows and 0 <= j < cols and grid[i][j] == "1":
                        grid[i][j] = "0"
                        stack.extend([(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)])
    return count
|
"""
This module is to define all application level events in one place
to avoid attempting to remember string event names
"""
class EventsMeta(type):
"""Class that defines what events are exposed at the bot level"""
@property
def on_example(self):
"""
This is an example event for the Example Service, this should not be
used under any circumstances
Args:
None
"""
return 'on_example'
@property
def on_guild_message_received(self):
"""
Published whenever a message is sent in a server
Args:
message (Message) – The deleted message.
"""
return '_on_guild_message_received'
@property
def on_dm_message_received(self):
"""
Published whenever a direct message is sent to ClemBot
Args:
message (Message) – The deleted message.
"""
return '_on_dm_message_received'
@property
def on_raw_message_edit(self):
"""
Published when a Message receives an update event and is not in the cache
Args:
payload (Edit Object) – The edit payload with the id of the edited message
"""
return '_on_raw_message_edit'
@property
def on_message_edit(self):
"""
Published when a Message receives an update event and is in the cache
Args:
before (Message) – The previous version of the message.
after (Message) – The current version of the message.
"""
return '_on_message_edit'
@property
def on_raw_message_delete(self):
"""
Published when a Message receives an update event and is not in the cache
Args:
payload (Edit Object) – The delete payload with the id of the edited message
"""
return '_on_raw_message_delete'
@property
def on_message_delete(self):
"""
Published whenever a message is deleted while it exists in the cache
Args:
message (Message) – The message that was deleted
"""
return '_on_message_delete'
def on_reaction_add(self):
"""
Published whenever a reaction is sent in a server, and that message is stored
in d.pys internal cache
Args:
reaction (Reaction) – The current state of the reaction.
user (Union[Member, User]) – The user who added the reaction.
"""
return '_on_reaction_add'
_on_raw_reaction_add = 'on_raw_reaction_add'
@property
def on_raw_reaction_add(self):
"""
Called when a message has a reaction added. regardless of cache state
Args:
payload (RawReactionActionEvent) – The raw event payload data.
"""
return '_on_raw_reaction_add'
@property
def on_reaction_remove(self):
"""
Published whenever a reaction is removed in a server, and that message is stored
in d.pys internal cache
Args:
reaction (Reaction) – The current state of the reaction.
user (Union[Member, User]) – The user who removed the reaction.
"""
return '_on_reaction_remove'
@property
def on_raw_reaction_remove(self):
"""
Called when a message has a reaction removeed. regardless of cache state
Args:
payload (RawReactionActionEvent) – The raw event payload data.
"""
return '_on_raw_reaction_remove'
@property
def on_guild_joined(self):
    """
    Published whenever the bot joins a new guild
    Args:
        guild (Guild) – The guild that was joined.
    """
    return '_on_guild_joined'

@property
def on_guild_leave(self):
    """
    Published whenever the bot leaves a guild
    Args:
        guild (Guild) – The guild that was left.
    """
    return '_on_guild_leave'

@property
def on_new_guild_initialized(self):
    """
    Published whenever the bot joins a new guild and that guild has been created
    in the bot's database. This is the event that should be used for all new-guild
    related services.
    Args:
        guild (Guild) – The guild that was joined.
    """
    return '_on_new_guild_initialized'
@property
def on_guild_role_create(self):
    """
    Published whenever a guild role is created in a guild
    Args:
        role (Role) – The role that was created.
    """
    return '_on_guild_role_create'

@property
def on_guild_role_update(self):
    """
    Published whenever a guild role is updated in a guild
    Args:
        before (Role) – The updated role’s old info.
        after (Role) – The updated role’s updated info.
    """
    return '_on_guild_role_update'

@property
def on_guild_role_delete(self):
    """
    Published whenever a guild role is deleted in a guild
    Args:
        role (Role) – The role that was deleted.
    """
    return '_on_guild_role_delete'
@property
def on_user_joined(self):
    """
    Published whenever a new user joins a guild
    Args:
        user (User) – The user who joined.
    """
    return '_on_user_joined'

@property
def on_user_removed(self):
    """
    Published whenever a user leaves a guild
    Args:
        user (User) – The user who was removed or left.
    """
    return '_on_user_removed'

@property
def on_user_update(self):
    """
    Published whenever a user updates themselves
    Args:
        before (User) – The updated user’s old info.
        after (User) – The updated user’s updated info.
    """
    return '_on_user_update'
@property
def on_add_designated_channel(self):
    """
    Published whenever a designated channel id is added to a designated channel slot
    Args:
        channel (Channel) the channel object that was added
    """
    return '_on_add_designated_channel'

@property
def on_send_in_designated_channel(self):
    """
    Published when a request to send a message in a designated channel is sent
    Args:
        channel_type (str) The designated channel to send the message in
        guild_id (int) The id of the guild to attempt to send a message in
        message (union[embed, str]) the message to be sent to the channel
        id [Optional] (int) Id to associate a sent dc message with sent message ids at the publish site
    """
    # NOTE(review): returns the name without the leading underscore, unlike
    # most sibling events — confirm this is intentional.
    return 'on_send_in_designated_channel'

@property
def on_designated_message_sent(self):
    """
    Published when an on_send_in_designated_channel event is published with an optional id parameter;
    this serves as a callback for that event to maintain separation of concerns
    Args:
        dc_id (int) The id of the dc send event that was given to the dc service
        message (Union[discord.Message, list[discord.Message]]) the message or the list of The messages sent in dc channels
    """
    return 'on_designated_message_sent'
@property
def on_broadcast_designated_channel(self):
    """
    Published when a request to broadcast a message to all registered channels
    in all servers is sent
    Args:
        channel_type (str) The designated channel to broadcast the message to
        message (union[embed, str]) the message to be sent to the channels
    """
    return '_on_broadcast_designated_channel'

@property
def on_set_custom_prefix(self):
    """
    Published when a new custom prefix is added in a guild
    Args:
        guild (discord.Guild): The guild object of the added prefix
        prefix (str): The prefix to be added
    """
    return 'on_set_custom_prefix'

@property
def on_assignable_role_add(self):
    """
    Published when a role is set to be marked as assignable
    Args:
        role (discord.Role) The role to mark as assignable
    """
    return 'on_assignable_role_add'

@property
def on_assignable_role_remove(self):
    """
    Published when a role is set to be removed as assignable
    Args:
        role (discord.Role) The role to remove as assignable
    """
    return 'on_assignable_role_remove'
@property
def on_set_deletable(self):
    """
    Published when a bot message is needed to be able to be deleted
    Args:
        messagesToDelete (List[discord.Message]) Messages to be deleted
        author (discord.Member) member who called the bot
        roles (str) Stores the roles needed to delete the message
    """
    return '_on_set_deletable'

@property
def on_guild_channel_create(self):
    """
    Published when a new text channel is created in a guild
    Args:
        channel (discord.TextChannel): The new channel
    """
    return 'on_guild_channel_create'

@property
def on_guild_channel_delete(self):
    """
    Published when a text channel is deleted in a guild
    Args:
        channel (discord.TextChannel): The deleted channel
    """
    return 'on_guild_channel_delete'

@property
def on_guild_channel_update(self):
    """
    Published when a text channel is edited
    Args:
        before (discord.TextChannel): The before of the channel
        after (discord.TextChannel): The after of the channel
    """
    return 'on_guild_channel_update'
@property
def on_set_pageable_text(self):
    """
    Published when a bot message is needed to be able to paginate
    Args:
        embed_name (str): name of the embed
        field_title (str): name for the field/page
        pages (list[str]): a list of every page/field for the embed
        author (discord.Member): member who called the bot
        channel (discord.TextChannel): the channel to send the embed
        timeout (int): optional arg, time(seconds) for paginate to timeout, default is 60s
    """
    return 'on_set_pageable_text'

@property
def on_set_pageable_embed(self):
    """
    Published when a list of embeds is needed to be able to paginate
    Args:
        pages (list[discord.Embed]): a list of embeds to scroll through
        author (discord.Member): member who called the bot
        channel (discord.TextChannel): the channel to send the embed
        timeout (int): optional arg, time(seconds) for paginate to timeout, default is 60s
    """
    return 'on_set_pageable_embed'

@property
def on_member_update(self):
    """
    Published when one or more of the following member attributes change:
        status
        activity
        nickname
        roles
        pending
    """
    return 'on_member_update'
@property
def on_set_reminder(self):
    """
    Published when a person sets a reminder
    Args:
        userId (int)
        wait (converters.Duration)
        message (str)
    """
    # NOTE(review): the property is named on_set_reminder but returns
    # 'on_reminder_set' — confirm which name subscribers actually use.
    return 'on_reminder_set'
@property
def on_bot_mute(self):
    """
    Published when a user is muted with clembot
    Args:
        guild (discord.Guild): Guild id where the mute happened
        author (discord.Member): member who called the bot
        subject (discord.Member): The moderated user
        reason (Optional[str]): The reason for the mute
    """
    # Fix: the @property decorator was missing, so this accessor returned a
    # bound method instead of the event-name string like every sibling event.
    return 'on_bot_mute'
@property
def on_bot_unmute(self):
    """
    Published when a user is unmuted by clembot
    Args:
        guild (discord.Guild): Guild id where the unmute happened
        subject (discord.Member): The moderated user
        reason (Optional[str]): The reason for the unmute
    """
    return 'on_bot_unmute'

@property
def on_bot_warn(self):
    """
    Published when a user is warned with clembot
    Args:
        guild (discord.Guild): Guild id where the warn happened
        author (discord.Member): member who called the bot
        subject (discord.Member): The moderated user
        reason (Optional[str]): The reason for the warn
    """
    return 'on_bot_warn'

@property
def on_bot_ban(self):
    """
    Published when a user is banned with clembot
    Args:
        guild (discord.Guild): Guild id where the ban happened
        author (discord.Member): member who called the bot
        subject (discord.Member): The moderated user
        reason (Optional[str]): The reason for the ban
    """
    return 'on_bot_ban'

@property
def on_member_ban(self):
    """
    Published when a user is banned from a guild
    Args:
        guild (discord.Guild): Guild id where the ban happened
        user (discord.Member): member who was banned
    """
    return 'on_member_ban'

@property
def on_after_command_invoke(self):
    """
    Published after a command has successfully completed
    Args:
        context (commands.Context): context of the command that was invoked
    """
    return 'on_after_command_invoke'
class Events(metaclass=EventsMeta):
    """Namespace of bot event names; the accessors are provided by EventsMeta."""
    pass
|
import sys
from typing import Sequence
import numpy as np
from pyroll.core import RollPass, Unit
from ..reporter import Reporter
from .. import utils
@Reporter.hookimpl
def sequence_plot(units: Sequence[Unit]):
    """Bar-plot the out-profile filling ratio of every roll pass in *units*.

    Non-RollPass units are skipped, but their indices are kept so the bars
    line up with the unit sequence.

    Returns:
        The matplotlib figure created by ``utils.create_sequence_plot``.
    """
    fig, ax = utils.create_sequence_plot(units)
    ax.set_ylabel(r"filling ratio $i$")
    ax.set_title("Filling Ratios")

    points = [
        (index, unit.out_profile.filling_ratio)
        for index, unit in enumerate(units)
        if isinstance(unit, RollPass)
    ]

    # Fix: the original guarded only on `len(units) > 0`, so a non-empty
    # sequence containing no RollPass instances crashed when unpacking
    # np.transpose([]) into x, y.
    if points:
        x, y = np.transpose(points)
        ax.bar(x=x, height=y, width=0.8)

    return fig


Reporter.plugin_manager.register(sys.modules[__name__])
|
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
# Load the loan dataset; `path` is assumed to be provided by the execution
# environment (e.g. the grading platform) — TODO confirm.
bank = pd.read_csv(path)
# Split columns by dtype for separate categorical/numerical handling.
categorical_var = bank.select_dtypes(include = 'object')
print (categorical_var)
numerical_var = bank.select_dtypes(include = 'number')
print (numerical_var)
# code ends here
# --------------
# code starts here
# Drop the identifier column (not a predictive feature).
banks = bank.drop(columns='Loan_ID')
print (banks.isnull().sum())
# Impute every remaining missing value with the column-wise mode
# (mode().iloc[0] takes the first mode of each column).
bank_mode = banks.mode().iloc[0]
banks.fillna(bank_mode, inplace=True)
print (banks.isnull().sum())
#code ends here
# --------------
# Code starts here
# Mean of the numeric columns grouped by gender / marital / self-employment status.
# NOTE(review): no `values=` is given, so *every* numeric column is averaged —
# confirm whether only LoanAmount was intended.
avg_loan_amount = pd.pivot_table(banks,index =['Gender', 'Married', 'Self_Employed'], aggfunc='mean')
# code ends here
# --------------
# code starts here
# Approval rate among self-employed vs. non-self-employed applicants.
loan_approved_se = banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')]
loan_approved_nse = banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')]
# Fix: use the actual row count instead of the hard-coded dataset size 614,
# so the percentages stay correct if the input CSV changes.
percentage_se = len(loan_approved_se) / len(banks) * 100
percentage_nse = len(loan_approved_nse) / len(banks) * 100
# code ends here
# --------------
# code starts here
# Convert the loan term from months to years; count loans running 25+ years.
loan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12 )
big_loan_term=len(loan_term[loan_term>=25])
print(big_loan_term)
# code ends here
# --------------
# code starts here
# Average ApplicantIncome and Credit_History for approved vs. rejected loans.
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
# code ends here
|
# Fix: import OrderedDict from the public `collections` package instead of
# reaching into its private `collections.__init__` module path.
from collections import OrderedDict
from typing import List, Optional, Tuple, Union

from .action import ActionArgsPack, ActionExecutor, ArgumentValue
from .path import NodePath
class Command:
    """Represents single command line typed in the shell"""

    def __init__(self, command_name):
        self.target = None
        self.name = command_name
        self.arguments = []

    def __str__(self):
        # Render name and arguments, wrapping nested commands in braces.
        rendered = [Command._to_string(part) for part in [self.name, *self.arguments]]
        representation = " ".join(rendered)
        if self.target is not None:
            representation = "{}: {}".format(Command._to_string(self.target), representation)
        return representation

    @staticmethod
    def _to_string(param):
        # Nested commands are rendered inside braces; anything else verbatim.
        if not isinstance(param, Command):
            return str(param)
        return "{{{}}}".format(str(param))
class CommandInterpreter:
    """Actually executes commands on provided backend tree"""
    def __init__(self, tree: ActionExecutor) -> None:
        # Backend tree the interpreter dispatches actions against.
        self.tree = tree
    def execute(self, command: Command):
        """Execute *command* against the backend tree and return its result.

        Nested Command objects in the target, name or arguments are
        evaluated first (depth-first) via _evaluate.

        Raises:
            ValueError: if *command* is None.
            RuntimeError: if the command has no target.
        """
        if command is None:
            raise ValueError("No command to execute provided")
        target_path = self._evaluate(command.target)
        if target_path is None:
            raise RuntimeError("No action target specified")
        target_path = NodePath.cast(target_path)
        action_path = NodePath.cast(self._evaluate(command.name))
        arguments = list(map(self._evaluate, command.arguments))
        packed_arguments = parse_argument_tree(arguments)
        return self.tree.execute(target_path, action_path, packed_arguments)
    def _evaluate(self, part):
        # Sub-commands are executed recursively; plain values pass through.
        if isinstance(part, Command):
            return self.execute(part)
        return part
def convert_token_type(token):
    """Convert *token* to int or float when possible; otherwise return it unchanged."""
    for converter in (int, float):
        try:
            return converter(token)
        except ValueError:
            continue
    return token


def tokenize(text: str) -> List[str]:
    """Split *text* into shell tokens.

    Quoted spans ('...' or "...") are taken verbatim; the characters
    {, }, : and # are emitted as single-character tokens; whitespace
    separates tokens. Each finished token is type-converted.
    """
    tokens = []
    current = []
    quote_char = None  # quote character that opened verbatim mode, if any

    def flush():
        # Emit the accumulated characters (if any) as one typed token.
        nonlocal current
        if current:
            tokens.append(convert_token_type("".join(current)))
            current = []

    for char in text:
        if quote_char is not None:
            if char == quote_char:
                flush()
                quote_char = None
            else:
                current.append(char)
        elif char in "'\"":
            flush()
            quote_char = char
        elif char in "{}:#":
            flush()
            tokens.append(char)
        elif char.isspace():
            flush()
        else:
            current.append(char)
    flush()
    return tokens
class CommandParser:
    """Turns a tokenized command line into a (possibly nested) Command."""

    def __init__(self):
        self._root_scope = None

    def parse(self, command_line: str) -> Optional[Command]:
        """Parse *command_line* into a Command; None for blank lines and comments."""
        tokens = tokenize(command_line)
        if not tokens or tokens[0] == "#":
            return None
        self._root_scope = self._parse_scope(iter(tokens))
        return self._root_scope

    def _parse_scope(self, token_iterator) -> Command:
        # Collect tokens until the scope closes; '{' opens a nested scope.
        parts = []
        for token in token_iterator:
            if token == "{":
                parts.append(self._parse_scope(token_iterator))
            elif token == "}":
                break
            else:
                parts.append(token)
        if ":" not in parts:
            command = Command(parts[0])
            command.arguments = parts[1:]
        else:
            # "target : name args..." form.
            command = Command(parts[2])
            command.target = parts[0]
            command.arguments = parts[3:]
        return command
def parse_argument_tree(raw_arguments: List[str]) -> ActionArgsPack:
    """Pack positional and `key=value` arguments into an ordered mapping.

    Positional arguments are keyed by their index; named arguments by a
    relative NodePath parsed from the key.

    Raises:
        ValueError: if a named argument's key path is absolute.
    """
    pack_list: List[Tuple[Union[NodePath, int], ArgumentValue]] = []
    for position, arg in enumerate(raw_arguments):
        if isinstance(arg, str) and "=" in arg:
            # Fix: split on the first '=' only; `arg.split("=")` raised
            # "too many values to unpack" for values containing '='.
            key, _, value = arg.partition("=")
            key_path = NodePath.from_python_name(key)
            if key_path.is_absolute:
                raise ValueError(f"Named argument path must be relative - {key_path}")
            typed_value = convert_token_type(value)
            pack_list.append((key_path, typed_value))
        else:
            pack_list.append((position, arg))
    return OrderedDict(pack_list)
|
from raptiformica.cli import install
from tests.testcase import TestCase
class TestInstall(TestCase):
    """Unit tests for the raptiformica.cli install entry point."""

    def setUp(self):
        # Patch the CLI collaborators so install() runs in isolation.
        for name in ('parse_install_arguments', 'unload_module', 'load_module'):
            setattr(self, name, self.set_up_patch('raptiformica.cli.' + name))

    def test_install_parses_install_arguments(self):
        install()
        self.parse_install_arguments.assert_called_once_with()

    def test_install_unloads_module_if_remove_is_specified(self):
        self.parse_install_arguments.return_value.remove = True
        install()
        expected_name = self.parse_install_arguments.return_value.name
        self.unload_module.assert_called_once_with(expected_name)
        self.assertFalse(self.load_module.called)

    def test_install_loads_module(self):
        self.parse_install_arguments.return_value.remove = False
        install()
        expected_name = self.parse_install_arguments.return_value.name
        self.load_module.assert_called_once_with(expected_name)
        self.assertFalse(self.unload_module.called)
|
# Copyright (c) 2019 Viosoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
import ipaddress
import json
import logging
import os
import re
import tempfile
import time
from collections import OrderedDict
from yardstick.common import constants
from yardstick.common import exceptions
from yardstick.network_services.helpers.cpu import CpuSysCores
from yardstick.network_services.vnf_generic.vnf.sample_vnf import \
DpdkVnfSetupEnvHelper
LOG = logging.getLogger(__name__)
class VppConfigGenerator(object):
    """Builds a VPP startup configuration from individual settings.

    Config items are collected into a nested dict via the ``add_*`` methods
    and rendered to VPP's brace-delimited startup.conf syntax by
    :meth:`dump_config`.
    """
    VPP_LOG_FILE = '/tmp/vpe.log'

    def __init__(self):
        self._nodeconfig = {}
        self._vpp_config = ''

    def add_config_item(self, config, value, path):
        """Store *value* in nested dict *config* under the key sequence *path*."""
        if len(path) == 1:
            config[path[0]] = value
            return
        if path[0] not in config:
            config[path[0]] = {}
        elif isinstance(config[path[0]], str):
            # A scalar already sits where a section is needed: promote it to
            # a dict, keeping the old value as a valueless entry.
            config[path[0]] = {} if config[path[0]] == '' \
                else {config[path[0]]: ''}
        self.add_config_item(config[path[0]], value, path[1:])

    def add_unix_log(self, value=None):
        """Set the unix log file (defaults to VPP_LOG_FILE)."""
        path = ['unix', 'log']
        if value is None:
            value = self.VPP_LOG_FILE
        self.add_config_item(self._nodeconfig, value, path)

    def add_unix_cli_listen(self, value='/run/vpp/cli.sock'):
        """Set the unix CLI listen socket."""
        path = ['unix', 'cli-listen']
        self.add_config_item(self._nodeconfig, value, path)

    def add_unix_nodaemon(self):
        """Add the valueless unix nodaemon flag."""
        path = ['unix', 'nodaemon']
        self.add_config_item(self._nodeconfig, '', path)

    def add_unix_coredump(self):
        """Add the valueless unix full-coredump flag."""
        path = ['unix', 'full-coredump']
        self.add_config_item(self._nodeconfig, '', path)

    def add_dpdk_dev(self, *devices):
        """Whitelist the given PCI devices in the dpdk section.

        Raises:
            ValueError: if any device address is not in xxxx:xx:xx.x form.
        """
        for device in devices:
            if VppConfigGenerator.pci_dev_check(device):
                path = ['dpdk', 'dev {0}'.format(device)]
                self.add_config_item(self._nodeconfig, '', path)

    def add_dpdk_cryptodev(self, count, cryptodev):
        """Add *count* QAT VF crypto devices derived from *cryptodev*'s PCI address."""
        for i in range(count):
            # Fix: escape the dot — the original pattern r'\d.\d$' let '.'
            # match any character, so addresses without a literal dot could
            # be rewritten incorrectly.
            cryptodev_config = 'dev {0}'.format(
                re.sub(r'\d\.\d$', '1.' + str(i), cryptodev))
            path = ['dpdk', cryptodev_config]
            self.add_config_item(self._nodeconfig, '', path)
        self.add_dpdk_uio_driver('igb_uio')

    def add_dpdk_sw_cryptodev(self, sw_pmd_type, socket_id, count):
        """Add *count* software crypto vdevs of the given PMD type."""
        for _ in range(count):
            cryptodev_config = 'vdev cryptodev_{0}_pmd,socket_id={1}'. \
                format(sw_pmd_type, str(socket_id))
            path = ['dpdk', cryptodev_config]
            self.add_config_item(self._nodeconfig, '', path)

    def add_dpdk_dev_default_rxq(self, value):
        """Set the default number of RX queues per dpdk device."""
        path = ['dpdk', 'dev default', 'num-rx-queues']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_dev_default_rxd(self, value):
        """Set the default number of RX descriptors per dpdk device."""
        path = ['dpdk', 'dev default', 'num-rx-desc']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_dev_default_txd(self, value):
        """Set the default number of TX descriptors per dpdk device."""
        path = ['dpdk', 'dev default', 'num-tx-desc']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_log_level(self, value):
        """Set the dpdk log level."""
        path = ['dpdk', 'log-level']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_socketmem(self, value):
        """Set the dpdk per-socket memory allocation."""
        path = ['dpdk', 'socket-mem']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_num_mbufs(self, value):
        """Set the number of dpdk mbufs."""
        path = ['dpdk', 'num-mbufs']
        self.add_config_item(self._nodeconfig, value, path)

    def add_dpdk_uio_driver(self, value=None):
        """Set the dpdk uio driver (e.g. 'igb_uio')."""
        path = ['dpdk', 'uio-driver']
        self.add_config_item(self._nodeconfig, value, path)

    def add_cpu_main_core(self, value):
        """Pin VPP's main thread to the given core."""
        path = ['cpu', 'main-core']
        self.add_config_item(self._nodeconfig, value, path)

    def add_cpu_corelist_workers(self, value):
        """Set the core list for VPP worker threads."""
        path = ['cpu', 'corelist-workers']
        self.add_config_item(self._nodeconfig, value, path)

    def add_heapsize(self, value):
        """Set the main heap size."""
        path = ['heapsize']
        self.add_config_item(self._nodeconfig, value, path)

    def add_ip6_hash_buckets(self, value):
        """Set the number of IPv6 forwarding table hash buckets."""
        path = ['ip6', 'hash-buckets']
        self.add_config_item(self._nodeconfig, value, path)

    def add_ip6_heap_size(self, value):
        """Set the IPv6 forwarding table heap size."""
        path = ['ip6', 'heap-size']
        self.add_config_item(self._nodeconfig, value, path)

    def add_ip_heap_size(self, value):
        """Set the IPv4 forwarding table heap size."""
        path = ['ip', 'heap-size']
        self.add_config_item(self._nodeconfig, value, path)

    def add_statseg_size(self, value):
        """Set the stats segment size."""
        path = ['statseg', 'size']
        self.add_config_item(self._nodeconfig, value, path)

    def add_plugin(self, state, *plugins):
        """Set *state* (e.g. 'enable'/'disable') for each named plugin."""
        for plugin in plugins:
            path = ['plugins', 'plugin {0}'.format(plugin), state]
            self.add_config_item(self._nodeconfig, ' ', path)

    def add_dpdk_no_multi_seg(self):
        """Add the valueless dpdk no-multi-seg flag."""
        path = ['dpdk', 'no-multi-seg']
        self.add_config_item(self._nodeconfig, '', path)

    def add_dpdk_no_tx_checksum_offload(self):
        """Add the valueless dpdk no-tx-checksum-offload flag."""
        path = ['dpdk', 'no-tx-checksum-offload']
        self.add_config_item(self._nodeconfig, '', path)

    def dump_config(self, obj=None, level=-1):
        """Render the collected settings into startup.conf text.

        NOTE: output accumulates in self._vpp_config across calls, so calling
        this twice on the same instance duplicates the text (kept for
        backward compatibility).
        """
        if obj is None:
            obj = self._nodeconfig
        obj = OrderedDict(sorted(obj.items()))
        indent = '  '
        if level >= 0:
            self._vpp_config += '{}{{\n'.format(level * indent)
        if isinstance(obj, dict):
            for key, val in obj.items():
                if hasattr(val, '__iter__') and not isinstance(val, str):
                    # Nested section: emit the header, then recurse in braces.
                    self._vpp_config += '{}{}\n'.format((level + 1) * indent,
                                                        key)
                    self.dump_config(val, level + 1)
                else:
                    self._vpp_config += '{}{} {}\n'.format(
                        (level + 1) * indent,
                        key, val)
        if level >= 0:
            self._vpp_config += '{}}}\n'.format(level * indent)
        return self._vpp_config

    @staticmethod
    def pci_dev_check(pci_dev):
        """Return True if *pci_dev* is a valid PCI address, else raise ValueError."""
        pattern = re.compile("^[0-9A-Fa-f]{4}:[0-9A-Fa-f]{2}:"
                             "[0-9A-Fa-f]{2}\\.[0-9A-Fa-f]$")
        if not pattern.match(pci_dev):
            raise ValueError('PCI address {addr} is not in valid format '
                             'xxxx:xx:xx.x'.format(addr=pci_dev))
        return True
class VppSetupEnvHelper(DpdkVnfSetupEnvHelper):
    """DPDK setup-environment helper specialised for VPP-based VNFs."""
    APP_NAME = "vpp"
    # VPP is configured through its startup config file, not a CLI script.
    CFG_CONFIG = "/etc/vpp/startup.conf"
    CFG_SCRIPT = ""
    PIPELINE_COMMAND = ""
    # Kernel driver used for Intel QuickAssist (QAT) crypto devices.
    QAT_DRIVER = "qat_dh895xcc"
    VNF_TYPE = "IPSEC"
    VAT_BIN_NAME = 'vpp_api_test'
def __init__(self, vnfd_helper, ssh_helper, scenario_helper):
    super(VppSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper,
                                            scenario_helper)
    # Helper used to inspect the CPU layout on the target node.
    self.sys_cores = CpuSysCores(self.ssh_helper)
def kill_vnf(self):
    """Stop the vpp service on the target host.

    Raises:
        RuntimeError: if the service stop command fails.
    """
    ret_code, _, _ = \
        self.ssh_helper.execute(
            'service {name} stop'.format(name=self.APP_NAME))
    if int(ret_code):
        raise RuntimeError(
            'Failed to stop service {name}'.format(name=self.APP_NAME))
def tear_down(self):
    # Nothing to clean up; kept to satisfy the setup-helper interface.
    pass
def start_vpp_service(self):
    """Restart the vpp service on the target host.

    Raises:
        RuntimeError: if the service restart command fails.
    """
    ret_code, _, _ = \
        self.ssh_helper.execute(
            'service {name} restart'.format(name=self.APP_NAME))
    if int(ret_code):
        raise RuntimeError(
            'Failed to start service {name}'.format(name=self.APP_NAME))
def _update_vnfd_helper(self, additional_data, iface_key=None):
    """Merge *additional_data* into the VNFD helper.

    With no *iface_key*, dict values update (or replace) top-level VNFD
    entries; with *iface_key*, the same merge is applied to that virtual
    interface's data instead.
    """
    for k, v in additional_data.items():
        if iface_key is None:
            if isinstance(v, dict) and k in self.vnfd_helper:
                self.vnfd_helper[k].update(v)
            else:
                self.vnfd_helper[k] = v
        else:
            if isinstance(v,
                          dict) and k in self.vnfd_helper.find_virtual_interface(
                    ifname=iface_key):
                self.vnfd_helper.find_virtual_interface(ifname=iface_key)[
                    k].update(v)
            else:
                self.vnfd_helper.find_virtual_interface(ifname=iface_key)[
                    k] = v
def get_value_by_interface_key(self, interface, key):
    """Return *key* from the named virtual interface, or None if not found."""
    try:
        return self.vnfd_helper.find_virtual_interface(
            ifname=interface).get(key)
    except (KeyError, ValueError):
        return None
def crypto_device_init(self, pci_addr, numvfs):
    """Initialise a QAT crypto device and create *numvfs* virtual functions."""
    # QAT device must be re-bound to kernel driver before initialization.
    self.dpdk_bind_helper.load_dpdk_driver(self.QAT_DRIVER)
    # Stop VPP to prevent deadlock.
    self.kill_vnf()
    current_driver = self.get_pci_dev_driver(pci_addr.replace(':', r'\:'))
    if current_driver is not None:
        self.pci_driver_unbind(pci_addr)
    # Bind to kernel driver.
    self.dpdk_bind_helper.bind(pci_addr, self.QAT_DRIVER.replace('qat_', ''))
    # Initialize QAT VFs.
    if numvfs > 0:
        self.set_sriov_numvfs(pci_addr, numvfs)
def get_sriov_numvfs(self, pf_pci_addr):
    """Return the configured SR-IOV VF count for *pf_pci_addr* (0 on error)."""
    command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'. \
        format(pci=pf_pci_addr.replace(':', r'\:'))
    _, stdout, _ = self.ssh_helper.execute(command)
    try:
        return int(stdout)
    except ValueError:
        LOG.debug('Reading sriov_numvfs info failed')
        return 0
def set_sriov_numvfs(self, pf_pci_addr, numvfs=0):
    """Set the SR-IOV VF count for physical function *pf_pci_addr*."""
    command = "sh -c 'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'". \
        format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
    self.ssh_helper.execute(command)
def pci_driver_unbind(self, pci_addr):
    """Unbind *pci_addr* from the kernel driver it is currently bound to."""
    command = "sh -c 'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'". \
        format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
    self.ssh_helper.execute(command)
def get_pci_dev_driver(self, pci_addr):
    """Return the kernel driver bound to *pci_addr*, or None if unbound.

    Runs ``lspci -vmmks`` on the remote host and scans the machine-readable
    output for the ``Driver:`` field.

    Raises:
        RuntimeError: if the remote lspci command fails.
    """
    cmd = 'lspci -vmmks {0}'.format(pci_addr)
    ret_code, stdout, _ = self.ssh_helper.execute(cmd)
    if int(ret_code):
        raise RuntimeError("'{0}' failed".format(cmd))
    for line in stdout.splitlines():
        if not line:
            continue
        try:
            name, value = line.split("\t", 1)
        except ValueError:
            # Field line without a value (no tab). A bare 'Driver:' means
            # no driver is bound. Fix: the original compared the stale
            # local `name` (always None here), making this check dead code.
            if line == "Driver:":
                return None
            continue
        if name == 'Driver:':
            return value
    return None
def vpp_create_ipsec_tunnels(self, if1_ip_addr, if2_ip_addr, if_name,
                             n_tunnels, n_connections, crypto_alg,
                             crypto_key, integ_alg, integ_key, addrs_ip,
                             spi_1=10000, spi_2=20000):
    """Create and bring up *n_tunnels* IPsec tunnel interfaces via VAT scripts.

    Two temporary VAT scripts are generated and executed remotely: one that
    creates the ipsec tunnel interfaces, and one that adds routes and brings
    the interfaces up. Connections are spread over tunnels; when there are
    more connections than tunnels, each tunnel routes a /mask covering
    `count` destination addresses starting at *addrs_ip*.
    """
    mask_length = 32
    if n_connections <= n_tunnels:
        count = 1
    else:
        count = int(n_connections / n_tunnels)
    addr_ip_i = int(ipaddress.ip_address(str(addrs_ip)))
    dst_start_ip = addr_ip_i
    tmp_fd, tmp_path = tempfile.mkstemp()
    vpp_ifname = self.get_value_by_interface_key(if_name, 'vpp_name')
    ckey = binascii.hexlify(crypto_key.encode())
    ikey = binascii.hexlify(integ_key.encode())
    integ = ''
    # AES-GCM is an AEAD cipher, so no separate integrity algorithm is set.
    if crypto_alg.alg_name != 'aes-gcm-128':
        integ = 'integ_alg {integ_alg} ' \
                'local_integ_key {local_integ_key} ' \
                'remote_integ_key {remote_integ_key} ' \
            .format(integ_alg=integ_alg.alg_name,
                    local_integ_key=ikey,
                    remote_integ_key=ikey)
    create_tunnels_cmds = 'ipsec_tunnel_if_add_del ' \
                          'local_spi {local_spi} ' \
                          'remote_spi {remote_spi} ' \
                          'crypto_alg {crypto_alg} ' \
                          'local_crypto_key {local_crypto_key} ' \
                          'remote_crypto_key {remote_crypto_key} ' \
                          '{integ} ' \
                          'local_ip {local_ip} ' \
                          'remote_ip {remote_ip}\n'
    start_tunnels_cmds = 'ip_add_del_route {raddr}/{mask} via {addr} ipsec{i}\n' \
                         'exec set interface unnumbered ipsec{i} use {uifc}\n' \
                         'sw_interface_set_flags ipsec{i} admin-up\n'
    with os.fdopen(tmp_fd, 'w') as tmp_file:
        for i in range(0, n_tunnels):
            create_tunnel = create_tunnels_cmds.format(local_spi=spi_1 + i,
                                                       remote_spi=spi_2 + i,
                                                       crypto_alg=crypto_alg.alg_name,
                                                       local_crypto_key=ckey,
                                                       remote_crypto_key=ckey,
                                                       integ=integ,
                                                       local_ip=if1_ip_addr,
                                                       remote_ip=if2_ip_addr)
            tmp_file.write(create_tunnel)
    self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
    os.remove(tmp_path)
    tmp_fd, tmp_path = tempfile.mkstemp()
    with os.fdopen(tmp_fd, 'w') as tmp_file:
        for i in range(0, n_tunnels):
            if count > 1:
                # Route a prefix wide enough to cover this tunnel's share
                # of destination addresses.
                dst_start_ip = addr_ip_i + i * count
                dst_end_ip = ipaddress.ip_address(dst_start_ip + count - 1)
                ips = [ipaddress.ip_address(ip) for ip in
                       [str(ipaddress.ip_address(dst_start_ip)),
                        str(dst_end_ip)]]
                lowest_ip, highest_ip = min(ips), max(ips)
                mask_length = self.get_prefix_length(int(lowest_ip),
                                                     int(highest_ip),
                                                     lowest_ip.max_prefixlen)
                # TODO check duplicate route for some IPs
            elif count == 1:
                dst_start_ip = addr_ip_i + i
            start_tunnel = start_tunnels_cmds.format(
                raddr=str(ipaddress.ip_address(dst_start_ip)),
                mask=mask_length,
                addr=if2_ip_addr,
                i=i, count=count,
                uifc=vpp_ifname)
            tmp_file.write(start_tunnel)
        # TODO add route for remain IPs
    self.execute_script(tmp_path, json_out=False, copy_on_execute=True)
    os.remove(tmp_path)
def apply_config(self, vpp_cfg, restart_vpp=True):
    """Write the rendered VPP startup config to CFG_CONFIG on the target.

    Args:
        vpp_cfg: VppConfigGenerator holding the desired configuration.
        restart_vpp: restart the vpp service after writing when True.

    Raises:
        RuntimeError: if writing the remote config file fails.
    """
    vpp_config = vpp_cfg.dump_config()
    # Fix: the command contained the literal '(unknown)' where the
    # '{filename}' placeholder belongs — the 'filename' format argument
    # was passed but never used, so the config was never written to
    # CFG_CONFIG.
    ret, _, _ = \
        self.ssh_helper.execute('echo "{config}" | sudo tee {filename}'.
                                format(config=vpp_config,
                                       filename=self.CFG_CONFIG))
    if ret != 0:
        raise RuntimeError('Writing config file failed')
    if restart_vpp:
        self.start_vpp_service()
def vpp_route_add(self, network, prefix_len, gateway=None, interface=None,
                  use_sw_index=True, resolve_attempts=10,
                  count=1, vrf=None, lookup_vrf=None, multipath=False,
                  weight=None, local=False):
    """Add a route on the DUT via the VAT 'add_route.vat' template.

    Optional keyword arguments are rendered into VAT command fragments;
    empty strings are passed for options that are not set.
    """
    if interface:
        if use_sw_index:
            int_cmd = ('sw_if_index {}'.format(
                self.get_value_by_interface_key(interface,
                                                'vpp_sw_index')))
        else:
            int_cmd = interface
    else:
        int_cmd = ''
    rap = 'resolve-attempts {}'.format(resolve_attempts) \
        if resolve_attempts else ''
    via = 'via {}'.format(gateway) if gateway else ''
    cnt = 'count {}'.format(count) \
        if count else ''
    vrf = 'vrf {}'.format(vrf) if vrf else ''
    lookup_vrf = 'lookup-in-vrf {}'.format(
        lookup_vrf) if lookup_vrf else ''
    multipath = 'multipath' if multipath else ''
    weight = 'weight {}'.format(weight) if weight else ''
    local = 'local' if local else ''
    with VatTerminal(self.ssh_helper, json_param=False) as vat:
        vat.vat_terminal_exec_cmd_from_template('add_route.vat',
                                                network=network,
                                                prefix_length=prefix_len,
                                                via=via,
                                                vrf=vrf,
                                                interface=int_cmd,
                                                resolve_attempts=rap,
                                                count=cnt,
                                                lookup_vrf=lookup_vrf,
                                                multipath=multipath,
                                                weight=weight,
                                                local=local)
def add_arp_on_dut(self, iface_key, ip_address, mac_address):
    """Add a static IP neighbor (ARP entry) on the given interface."""
    with VatTerminal(self.ssh_helper) as vat:
        return vat.vat_terminal_exec_cmd_from_template(
            'add_ip_neighbor.vat',
            sw_if_index=self.get_value_by_interface_key(iface_key,
                                                        'vpp_sw_index'),
            ip_address=ip_address, mac_address=mac_address)
def set_ip(self, interface, address, prefix_length):
    """Assign *address*/*prefix_length* to the given interface."""
    with VatTerminal(self.ssh_helper) as vat:
        return vat.vat_terminal_exec_cmd_from_template(
            'add_ip_address.vat',
            sw_if_index=self.get_value_by_interface_key(interface,
                                                        'vpp_sw_index'),
            address=address, prefix_length=prefix_length)
def set_interface_state(self, interface, state):
    """Set the interface admin/link state to 'up' or 'down'.

    Raises:
        ValueError: for any state other than 'up' or 'down'.
    """
    sw_if_index = self.get_value_by_interface_key(interface,
                                                  'vpp_sw_index')
    if state == 'up':
        state = 'admin-up link-up'
    elif state == 'down':
        state = 'admin-down link-down'
    else:
        raise ValueError('Unexpected interface state: {}'.format(state))
    with VatTerminal(self.ssh_helper) as vat:
        return vat.vat_terminal_exec_cmd_from_template(
            'set_if_state.vat', sw_if_index=sw_if_index, state=state)
def vpp_set_interface_mtu(self, interface, mtu=9200):
    """Set the hardware MTU of the given interface (no-op if it has no sw index)."""
    sw_if_index = self.get_value_by_interface_key(interface,
                                                  'vpp_sw_index')
    if sw_if_index:
        with VatTerminal(self.ssh_helper, json_param=False) as vat:
            vat.vat_terminal_exec_cmd_from_template(
                "hw_interface_set_mtu.vat", sw_if_index=sw_if_index,
                mtu=mtu)
def vpp_interfaces_ready_wait(self, timeout=30):
    """Poll until every admin-up interface reports link-up.

    Args:
        timeout: seconds to wait before giving up.

    Raises:
        RuntimeError: if some interfaces are still link-down at timeout.
    """
    if_ready = False
    not_ready = []
    start = time.time()
    while not if_ready:
        out = self.vpp_get_interface_data()
        if time.time() - start > timeout:
            for interface in out:
                if interface.get('admin_up_down') == 1:
                    if interface.get('link_up_down') != 1:
                        LOG.debug('%s link-down',
                                  interface.get('interface_name'))
            raise RuntimeError('timeout, not up {0}'.format(not_ready))
        not_ready = []
        for interface in out:
            if interface.get('admin_up_down') == 1:
                if interface.get('link_up_down') != 1:
                    not_ready.append(interface.get('interface_name'))
        if not not_ready:
            if_ready = True
        else:
            LOG.debug('Interfaces still in link-down state: %s, '
                      'waiting...', not_ready)
            time.sleep(1)
def vpp_get_interface_data(self, interface=None):
    """Dump interface data from VPP.

    With *interface* as a name (str) or sw_if_index (int), return that
    interface's dict ({} if not found); with None, return the full list.

    Raises:
        TypeError: if *interface* is neither str nor int.
    """
    with VatTerminal(self.ssh_helper) as vat:
        response = vat.vat_terminal_exec_cmd_from_template(
            "interface_dump.vat")
    data = response[0]
    if interface is not None:
        if isinstance(interface, str):
            param = "interface_name"
        elif isinstance(interface, int):
            param = "sw_if_index"
        else:
            raise TypeError
        for data_if in data:
            if data_if[param] == interface:
                return data_if
        return dict()
    return data
def update_vpp_interface_data(self):
    """Match VPP interfaces to VNFD interfaces by MAC and record their
    VPP names and sw_if_index values in the VNFD helper."""
    data = {}
    interface_dump_json = self.execute_script_json_out(
        "dump_interfaces.vat")
    interface_list = json.loads(interface_dump_json)
    for interface in self.vnfd_helper.interfaces:
        if_mac = interface['virtual-interface']['local_mac']
        interface_dict = VppSetupEnvHelper.get_vpp_interface_by_mac(
            interface_list, if_mac)
        if not interface_dict:
            LOG.debug('Interface %s not found by MAC %s', interface,
                      if_mac)
            continue
        data[interface['virtual-interface']['ifname']] = {
            'vpp_name': interface_dict["interface_name"],
            'vpp_sw_index': interface_dict["sw_if_index"]
        }
    for iface_key, updated_vnfd in data.items():
        self._update_vnfd_helper(updated_vnfd, iface_key)
def iface_update_numa(self):
    """Read each interface's NUMA node from sysfs and store it in the VNFD."""
    iface_numa = {}
    for interface in self.vnfd_helper.interfaces:
        cmd = "cat /sys/bus/pci/devices/{}/numa_node".format(
            interface["virtual-interface"]["vpci"])
        ret, out, _ = self.ssh_helper.execute(cmd)
        if ret == 0:
            try:
                numa_node = int(out)
                if numa_node < 0:
                    # sysfs reports -1 when NUMA is not available.
                    # NOTE(review): this cpuinfo check presumably detects a
                    # single-node system and defaults to node 0 — confirm.
                    if self.vnfd_helper["cpuinfo"][-1][3] + 1 == 1:
                        iface_numa[
                            interface['virtual-interface']['ifname']] = {
                            'numa_node': 0
                        }
                    else:
                        raise ValueError
                else:
                    iface_numa[
                        interface['virtual-interface']['ifname']] = {
                        'numa_node': numa_node
                    }
            except ValueError:
                LOG.debug(
                    'Reading numa location failed for: %s',
                    interface["virtual-interface"]["vpci"])
    for iface_key, updated_vnfd in iface_numa.items():
        self._update_vnfd_helper(updated_vnfd, iface_key)
def execute_script(self, vat_name, json_out=True, copy_on_execute=False):
    """Run a VAT script on the target through vpp_api_test.

    Args:
        vat_name: script name (template) or local path (with copy_on_execute).
        json_out: request JSON output from VAT.
        copy_on_execute: upload *vat_name* to the target before running.

    Raises:
        RuntimeError: if the remote execution raises.
    """
    if copy_on_execute:
        self.ssh_helper.put_file(vat_name, vat_name)
        remote_file_path = vat_name
    else:
        vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
        remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
    cmd = "{vat_bin} {json} in {vat_path} script".format(
        vat_bin=self.VAT_BIN_NAME,
        json="json" if json_out is True else "",
        vat_path=remote_file_path)
    try:
        return self.ssh_helper.execute(cmd=cmd)
    except Exception:
        raise RuntimeError("VAT script execution failed: {0}".format(cmd))
def execute_script_json_out(self, vat_name):
    """Run a VAT template with JSON output and return the cleaned stdout."""
    vat_path = self.ssh_helper.join_bin_path("vpp", "templates")
    remote_file_path = '{0}/{1}'.format(vat_path, vat_name)
    _, stdout, _ = self.execute_script(vat_name, json_out=True)
    return self.cleanup_vat_json_output(stdout, vat_file=remote_file_path)
@staticmethod
def cleanup_vat_json_output(json_output, vat_file=None):
    """Strip VAT prompt text and known warning clutter from JSON output."""
    retval = json_output
    clutter = ['vat#', 'dump_interface_table error: Misc',
               'dump_interface_table:6019: JSON output supported only ' \
               'for VPE API calls and dump_stats_table']
    if vat_file:
        clutter.append("{0}(2):".format(vat_file))
    for garbage in clutter:
        retval = retval.replace(garbage, '')
    return retval.strip()
@staticmethod
def _convert_mac_to_number_list(mac_address):
    """Convert 'aa:bb:...' MAC text into a list of byte values."""
    list_mac = []
    for num in mac_address.split(":"):
        list_mac.append(int(num, 16))
    return list_mac
@staticmethod
def get_vpp_interface_by_mac(interfaces_list, mac_address):
    """Return the VPP interface dict whose l2_address matches *mac_address*.

    Returns {} when no interface matches.

    Raises:
        KeyError: if the dump entries lack the l2_address fields.
        ValueError: if a matching entry has an unexpected address length.
    """
    interface_dict = {}
    list_mac_address = VppSetupEnvHelper._convert_mac_to_number_list(
        mac_address)
    LOG.debug("MAC address %s converted to list %s.", mac_address,
              list_mac_address)
    for interface in interfaces_list:
        # TODO: create vat json integrity checking and move there
        if "l2_address" not in interface:
            raise KeyError(
                "key l2_address not found in interface dict."
                "Probably input list is not parsed from correct VAT "
                "json output.")
        if "l2_address_length" not in interface:
            raise KeyError(
                "key l2_address_length not found in interface "
                "dict. Probably input list is not parsed from correct "
                "VAT json output.")
        mac_from_json = interface["l2_address"][:6]
        if mac_from_json == list_mac_address:
            if interface["l2_address_length"] != 6:
                raise ValueError("l2_address_length value is not 6.")
            interface_dict = interface
            break
    return interface_dict
@staticmethod
def get_prefix_length(number1, number2, bits):
    """Return the longest common prefix length (out of *bits*) of two ints."""
    for i in range(bits):
        if number1 >> i == number2 >> i:
            return bits - i
    return 0
class VatTerminal(object):
    """Interactive VAT (VPP API Test) console driven over an SSH channel.

    Opens a persistent ``vat#`` terminal on the remote node, executes
    commands against it and (optionally) extracts JSON from the replies.
    Usable as a context manager so the console is always closed on exit.
    """
    # Prompt tuples used to detect when the remote console is ready.
    __VAT_PROMPT = ("vat# ",)
    __LINUX_PROMPT = (":~# ", ":~$ ", "~]$ ", "~]# ")
    def __init__(self, ssh_helper, json_param=True):
        """Open the VAT console on the remote node.

        :param ssh_helper: helper providing interactive terminal access.
        :param json_param: start VAT in JSON output mode when True.
        :raises RuntimeError: when the interactive terminal cannot open.
        """
        json_text = ' json' if json_param else ''
        self.json = json_param
        self.ssh_helper = ssh_helper
        EXEC_RETRY = 3
        try:
            self._tty = self.ssh_helper.interactive_terminal_open()
        except Exception:
            raise RuntimeError("Cannot open interactive terminal")
        # Retry launching the VAT binary a few times on SSH timeouts.
        # NOTE(review): if every attempt times out, construction still
        # succeeds silently -- confirm that is intended.
        for _ in range(EXEC_RETRY):
            try:
                self.ssh_helper.interactive_terminal_exec_command(
                    self._tty,
                    'sudo -S {0}{1}'.format(VppSetupEnvHelper.VAT_BIN_NAME,
                                            json_text),
                    self.__VAT_PROMPT)
            except exceptions.SSHTimeout:
                continue
            else:
                break
        self._exec_failure = False
        self.vat_stdout = None
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the console regardless of exceptions in the with-body.
        self.vat_terminal_close()
    def vat_terminal_exec_cmd(self, cmd):
        """Execute ``cmd`` in the open VAT console.

        :param cmd: VAT command string.
        :return: parsed JSON output when running in JSON mode, otherwise
            None (the raw output is kept in ``self.vat_stdout``).
        :raises RuntimeError: on SSH timeout or, in JSON mode, when the
            output contains no JSON data.
        """
        try:
            out = self.ssh_helper.interactive_terminal_exec_command(self._tty,
                                                                    cmd,
                                                                    self.__VAT_PROMPT)
            self.vat_stdout = out
        except exceptions.SSHTimeout:
            # Remember the failure so close() skips the 'quit' handshake.
            self._exec_failure = True
            raise RuntimeError(
                "VPP is not running on node. VAT command {0} execution failed".
                format(cmd))
        if self.json:
            # Slice out the outermost JSON object or array, whichever
            # starts first in the captured output.
            obj_start = out.find('{')
            obj_end = out.rfind('}')
            array_start = out.find('[')
            array_end = out.rfind(']')
            if obj_start == -1 and array_start == -1:
                raise RuntimeError(
                    "VAT command {0}: no JSON data.".format(cmd))
            if obj_start < array_start or array_start == -1:
                start = obj_start
                end = obj_end + 1
            else:
                start = array_start
                end = array_end + 1
            out = out[start:end]
            json_out = json.loads(out)
            return json_out
        else:
            return None
    def vat_terminal_close(self):
        """Quit VAT (unless a command already failed) and close the tty."""
        if not self._exec_failure:
            try:
                self.ssh_helper.interactive_terminal_exec_command(self._tty,
                                                                  'quit',
                                                                  self.__LINUX_PROMPT)
            except exceptions.SSHTimeout:
                raise RuntimeError("Failed to close VAT console")
        try:
            self.ssh_helper.interactive_terminal_close(self._tty)
        except Exception:
            raise RuntimeError("Cannot close interactive terminal")
    def vat_terminal_exec_cmd_from_template(self, vat_template_file, **args):
        """Execute each line of a VAT template file, formatted with **args.

        :param vat_template_file: file name under the yardstick templates dir.
        :return: list with one result (per vat_terminal_exec_cmd) per line.
        """
        file_path = os.path.join(constants.YARDSTICK_ROOT_PATH,
                                 'yardstick/resources/templates/',
                                 vat_template_file)
        with open(file_path, 'r') as template_file:
            cmd_template = template_file.readlines()
        ret = []
        for line_tmpl in cmd_template:
            vat_cmd = line_tmpl.format(**args)
            ret.append(self.vat_terminal_exec_cmd(vat_cmd.replace('\n', '')))
        return ret
|
from flask import Flask, request, jsonify, make_response
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
import uuid
import jwt
import datetime
from functools import wraps
import os
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
# SECURITY: the secret key was hard-coded in source; allow an environment
# override so deployments do not all share the published development key.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'Th1s1ss3cr3t')
#app.config['SQLALCHEMY_DATABASE_URI'] = 'library.db'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'library.db')
# Track-modifications adds overhead; kept True to preserve existing behaviour.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
class Users(db.Model):
    """Application user account (SQLAlchemy model)."""
    id = db.Column(db.Integer, primary_key=True)
    # Public identifier exposed in JWTs instead of the primary key.
    # NOTE(review): declared Integer but assigned str(uuid.uuid4()) at
    # registration -- SQLite stores it anyway; confirm the intended type.
    public_id = db.Column(db.Integer)
    username = db.Column(db.String(50))
    # Stores the werkzeug password hash, never the plain password.
    password = db.Column(db.String(50))
    userEmail = db.Column(db.String(50))
    avatarName = db.Column(db.String(50))
    avatarColor = db.Column(db.String(50))
    admin = db.Column(db.Boolean)
class Authors(db.Model):
    """Author record, loosely associated with a user (SQLAlchemy model)."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), unique=True, nullable=False)
    # NOTE(review): unique=True prevents two authors sharing a book title
    # -- confirm that is intended.
    book = db.Column(db.String(20), unique=True, nullable=False)
    country = db.Column(db.String(50), nullable=False)
    book_prize = db.Column(db.Boolean)
    # Loose reference to Users.id (no ForeignKey constraint declared).
    user_id = db.Column(db.Integer)
def token_required(f):
    """Decorator enforcing a JWT in the 'x-access-tokens' request header.

    On success the authenticated ``Users`` row is passed to the wrapped
    view as its first positional argument; otherwise a JSON error with
    HTTP 401 is returned.
    """
    @wraps(f)
    def decorator(*args, **kwargs):
        token = request.headers.get('x-access-tokens')
        if not token:
            return jsonify({'message': 'a valid token is missing'}), 401
        try:
            # Pin the algorithm explicitly: PyJWT >= 2 requires the
            # 'algorithms' argument, and trusting the token header's
            # algorithm enables algorithm-confusion attacks.
            data = jwt.decode(token, app.config['SECRET_KEY'],
                              algorithms=['HS256'])
            current_user = Users.query.filter_by(
                public_id=data['public_id']).first()
            return f(current_user, *args, **kwargs)
        except Exception as e:
            print(str(e))
            return jsonify({'message': 'token is invalid'}), 401
    return decorator
@app.route('/register', methods=['GET', 'POST'])
def signup_user():
    """Register a new user from the posted JSON body.

    The password is stored hashed; only non-sensitive profile fields are
    echoed back to the client.
    """
    data = request.get_json()
    hashed_password = generate_password_hash(data['password'], method='sha256')
    new_user = Users(public_id=str(uuid.uuid4()), username=data['username'],
                     password=hashed_password, admin=False,
                     userEmail=data['userEmail'], avatarName=data['avatarName'],
                     avatarColor=data['avatarColor'])
    db.session.add(new_user)
    db.session.commit()
    # SECURITY: the previous version returned the password hash to the
    # client; never echo credentials (even hashed) in API responses.
    return jsonify({'username': data['username'],
                    'userEmail': data['userEmail'],
                    'avatarName': data['avatarName'],
                    'avatarColor': data['avatarColor']})
@app.route('/login', methods=['GET', 'POST'])
def login_user():
    """Authenticate a user from JSON credentials and issue a 30-minute JWT."""
    auth = request.json
    username = auth.get('username') if auth else None
    password = auth.get('password') if auth else None
    if not auth or not username or not password:
        return make_response('1could not verify', 401, {'WWW.Authentication': 'Basic realm: "login required"'})
    user = Users.query.filter_by(username=username).first()
    # Guard against unknown users: the original dereferenced user.password
    # unconditionally and crashed with a 500 on a bad username. It also
    # printed the submitted password to the console (security leak).
    if user and check_password_hash(user.password, password):
        token = jwt.encode(
            {'public_id': user.public_id, 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=30)},
            app.config['SECRET_KEY'])
        # PyJWT < 2 returns bytes, >= 2 returns str; normalise to str.
        if isinstance(token, bytes):
            token = token.decode('UTF-8')
        return jsonify({'token': token})
    return make_response('2could not verify', 401, {'WWW.Authentication': 'Basic realm: "login required"'})
@app.route('/login/byEmail/<userEmail>',methods=['GET'])
def loginbyUsername(userEmail):
    """Look up a user's public profile by e-mail address.

    Returns a not-found message (text kept for API compatibility even
    though it says 'author') or the profile fields. The original also had
    an unreachable make_response after the if/else -- removed.
    """
    user = Users.query.filter_by(userEmail=userEmail).first()
    if not user:
        return jsonify({'message': 'author does not exist'})
    return jsonify({'avatarColor': user.avatarColor, 'avatarName': user.avatarName,
                    'email': user.userEmail, 'username': user.username})
#>>> encoded = jwt.encode({'some': 'payload'}, 'secret', algorithm='HS256')
#'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb21lIjoicGF5bG9hZCJ9.4twFt5NiznN84AWoo1d7KO1T_yoc0Z6XOpOVswacPZg'
#>>> jwt.decode(encoded, 'secret', algorithms=['HS256'])
#{'some': 'payload'}
@app.route('/user', methods=['GET'])
def get_all_users():
    """List all users.

    SECURITY: the previous version returned each user's password hash on
    an unauthenticated endpoint; hashes are no longer exposed.
    """
    result = []
    for user in Users.query.all():
        result.append({
            'public_id': user.public_id,
            'username': user.username,
            'admin': user.admin,
        })
    return jsonify({'users': result})
@app.route('/authors', methods=['GET'])
@token_required
def get_authors(current_user):
    """Return all authors as a JSON list (valid token required)."""
    authors = Authors.query.all()
    print(current_user)
    output = [
        {'name': author.name, 'book': author.book,
         'country': author.country, 'book_prize': author.book_prize}
        for author in authors
    ]
    return jsonify({'list_of_authors': output})
@app.route('/authorsPost', methods=['POST'])
@token_required
def create_author(current_user):
    """Create an author from the posted JSON (valid token required).

    Accepts an optional 'book_prize' flag in the payload; defaults to True
    to stay backward-compatible with the previous hard-coded value.
    """
    data = request.get_json()
    print(current_user)
    new_author = Authors(name=data['name'], country=data['country'],
                         book=data['book'],
                         book_prize=data.get('book_prize', True))
    db.session.add(new_author)
    db.session.commit()
    return jsonify({'message': 'new author created'})
@app.route('/authors/<name>', methods=['DELETE'])
@token_required
def delete_author(current_user, name):
    """Delete the author with the given name (valid token required)."""
    target = Authors.query.filter_by(name=name).first()
    if target is None:
        return jsonify({'message': 'author does not exist'})
    db.session.delete(target)
    db.session.commit()
    return jsonify({'message': 'Author deleted'})
if __name__ == '__main__':
    # Development server only; debug=True must never be used in production.
    app.run(debug=True)
|
from abc import ABC, abstractmethod
from .connections import BaseConnection
class BaseEndpoint(ABC):
    """
    This forces all endpoint objects to have a connection method and an
    endpoint attribute.
    """
    @abstractmethod
    def __init__(self, connection, endpoint):
        """Store the connection and endpoint after validating the former.

        :param connection: a BaseConnection instance used for requests.
        :param endpoint: endpoint path/identifier for this resource.
        :raises TypeError: when connection is not a BaseConnection.
        """
        # Validate BEFORE assigning so a failed construction leaves no
        # partially-initialised attributes behind (the original assigned
        # both attributes first and only then type-checked).
        if not isinstance(connection, BaseConnection):
            raise TypeError('connection must be a BaseConnection object.')
        self.connection = connection
        self.endpoint = endpoint
|
# Given the sides of a rectangle, find its perimeter and diagonal length.
import math
a = float(input("Введите а: "))  # side a (prompt text is user-facing; kept as-is)
b = float(input("Введите b: "))  # side b
P = 2 * (a + b)  # perimeter
l = math.sqrt(a**2 + b**2)  # diagonal via the Pythagorean theorem
print("Периметр: ", P,",", "длина диагонали", l)
# Generated by Django 3.1.7 on 2021-03-14 19:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.1.7): adds created/modified audit
    fields to Suptech and tightens several existing field definitions."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tools', '0007_suptech'),
    ]
    operations = [
        migrations.AddField(
            model_name='suptech',
            name='created_at',
            field=models.DateTimeField(editable=False, null=True, verbose_name='ajouté le'),
        ),
        migrations.AddField(
            model_name='suptech',
            name='created_by',
            field=models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='suptechs_created', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='suptech',
            name='modified_at',
            field=models.DateTimeField(null=True, verbose_name='modifié le'),
        ),
        migrations.AddField(
            model_name='suptech',
            name='modified_by',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='suptechs_modified', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='suptech',
            name='action',
            field=models.TextField(blank=True, max_length=2000, verbose_name='ACTION/RETOUR'),
        ),
        migrations.AlterField(
            model_name='suptech',
            name='info',
            field=models.TextField(max_length=2000, verbose_name='INFO'),
        ),
        migrations.AlterField(
            model_name='suptech',
            name='item',
            field=models.CharField(choices=[('Hot Line Tech', 'Hot Line Tech'), ('Support Admin', 'Support Admin'), ('R.M.', 'R.M.'), ('Temps Annexe', 'Temps Annexe'), ('Validation Tech', 'Validation Tech'), ('Retour Autotronik', 'Retour Autotronik'), ('Probleme process', 'Probleme process'), ('Informatique/Reseau', 'Informatique/Reseau'), ('Inter Maintenance(AF/YM)', 'Inter Maintenance(AF/YM)'), ('Autres... (Avec resumé)', 'Autres... (Avec resumé)')], max_length=100, verbose_name='ITEM'),
        ),
        migrations.AlterField(
            model_name='suptech',
            name='rmq',
            field=models.TextField(blank=True, max_length=2000, verbose_name='RMQ'),
        ),
        migrations.AlterField(
            model_name='suptech',
            name='time',
            field=models.CharField(max_length=10, verbose_name='TIME'),
        ),
    ]
|
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Oliver J. Backhouse <olbackhouse@gmail.com>
# George H. Booth <george.booth@kcl.ac.uk>
#
'''
Auxiliary second-order Green's function perturbation theory for
unrestricted references with density fitting
'''
import numpy as np
import ctypes
from pyscf import lib
from pyscf.lib import logger
from pyscf import __config__
from pyscf import ao2mo, df
from pyscf.agf2 import uagf2, dfragf2, mpi_helper, _agf2
from pyscf.agf2 import aux_space as aux
# Minimum block size for blocked loops; overridable via pyscf's __config__.
BLKMIN = getattr(__config__, 'agf2_blkmin', 100)
def build_se_part(agf2, eri, gf_occ, gf_vir, os_factor=1.0, ss_factor=1.0):
    ''' Builds either the auxiliaries of the occupied self-energy,
        or virtual if :attr:`gf_occ` and :attr:`gf_vir` are swapped.

    Args:
        eri : _ChemistsERIs
            Electronic repulsion integrals
        gf_occ : tuple of GreensFunction
            Occupied Green's function (alpha, beta)
        gf_vir : tuple of GreensFunction
            Virtual Green's function (alpha, beta)

    Kwargs:
        os_factor : float
            Opposite-spin factor for spin-component-scaled (SCS)
            calculations. Default 1.0
        ss_factor : float
            Same-spin factor for spin-component-scaled (SCS)
            calculations. Default 1.0

    Returns:
        tuple of :class:`SelfEnergy` (alpha, beta)
    '''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(agf2.stdout, agf2.verbose)
    assert type(gf_occ[0]) is aux.GreensFunction
    assert type(gf_occ[1]) is aux.GreensFunction
    assert type(gf_vir[0]) is aux.GreensFunction
    assert type(gf_vir[1]) is aux.GreensFunction
    nmoa, nmob = eri.nmo
    nocca, nvira = gf_occ[0].naux, gf_vir[0].naux
    noccb, nvirb = gf_occ[1].naux, gf_vir[1].naux
    naux = agf2.with_df.get_naoaux()
    tol = agf2.weight_tol
    facs = dict(os_factor=os_factor, ss_factor=ss_factor)
    ci_a, ei_a = gf_occ[0].coupling, gf_occ[0].energy
    ci_b, ei_b = gf_occ[1].coupling, gf_occ[1].energy
    ca_a, ea_a = gf_vir[0].coupling, gf_vir[0].energy
    ca_b, ea_b = gf_vir[1].coupling, gf_vir[1].energy
    qeri = _make_qmo_eris_incore(agf2, eri, (ci_a, ci_a, ca_a), (ci_b, ci_b, ca_b))
    (qxi_a, qja_a), (qxi_b, qja_b) = qeri
    qxi = (qxi_a, qxi_b)
    qja = (qja_a, qja_b)
    # Estimate the per-thread memory overhead (MB) of the in-core build and
    # fall back to the slower low-memory kernel if it would not fit.
    himem_required = naux*(nvira+nmoa) + (nocca*nvira+noccb*nvirb)*(1+2*nmoa) + (2*nmoa**2)
    himem_required *= 8e-6
    himem_required *= lib.num_threads()
    if ((himem_required*1.05 + lib.current_memory()[0]) > agf2.max_memory
            and agf2.allow_lowmem_build) or agf2.allow_lowmem_build == 'force':
        log.debug('Thread-private memory overhead %.3f exceeds max_memory, using '
                  'low-memory version.', himem_required)
        build_mats_dfuagf2 = _agf2.build_mats_dfuagf2_lowmem
    else:
        build_mats_dfuagf2 = _agf2.build_mats_dfuagf2_incore
    vv, vev = build_mats_dfuagf2(qxi, qja, (ei_a, ei_b), (ea_a, ea_b), **facs)
    e, c = _agf2.cholesky_build(vv, vev)
    se_a = aux.SelfEnergy(e, c, chempot=gf_occ[0].chempot)
    se_a.remove_uncoupled(tol=tol)
    if not (agf2.frozen is None or agf2.frozen == 0):
        # Re-expand the coupling into the full (unfrozen) alpha MO space.
        mask = uagf2.get_frozen_mask(agf2)
        coupling = np.zeros((nmoa, se_a.naux))
        coupling[mask[0]] = se_a.coupling
        se_a = aux.SelfEnergy(se_a.energy, coupling, chempot=se_a.chempot)
    cput0 = log.timer('se part (alpha)', *cput0)
    himem_required = naux*(nvirb+nmob) + (noccb*nvirb+nocca*nvira)*(1+2*nmob) + (2*nmob**2)
    himem_required *= 8e-6
    himem_required *= lib.num_threads()
    if ((himem_required*1.05 + lib.current_memory()[0]) > agf2.max_memory
            and agf2.allow_lowmem_build) or agf2.allow_lowmem_build == 'force':
        log.debug('Thread-private memory overhead %.3f exceeds max_memory, using '
                  'low-memory version.', himem_required)
        build_mats_dfuagf2 = _agf2.build_mats_dfuagf2_lowmem
    else:
        build_mats_dfuagf2 = _agf2.build_mats_dfuagf2_incore
    # Reverse the spin ordering of the integrals for the beta build.
    rv = np.s_[::-1]
    vv, vev = build_mats_dfuagf2(qxi[rv], qja[rv], (ei_b, ei_a), (ea_b, ea_a), **facs)
    e, c = _agf2.cholesky_build(vv, vev)
    se_b = aux.SelfEnergy(e, c, chempot=gf_occ[1].chempot)
    se_b.remove_uncoupled(tol=tol)
    if not (agf2.frozen is None or agf2.frozen == 0):
        mask = uagf2.get_frozen_mask(agf2)
        # FIX: expand into the beta MO dimension (nmob); the original used
        # nmoa here, which is wrong whenever the spin spaces differ.
        coupling = np.zeros((nmob, se_b.naux))
        coupling[mask[1]] = se_b.coupling
        se_b = aux.SelfEnergy(se_b.energy, coupling, chempot=se_b.chempot)
    cput0 = log.timer('se part (beta)', *cput0)
    return (se_a, se_b)
class DFUAGF2(uagf2.UAGF2):
    ''' Unrestricted AGF2 with canonical HF reference with density fitting

    Attributes:
        verbose : int
            Print level. Default value equals to :class:`Mole.verbose`
        max_memory : float or int
            Allowed memory in MB. Default value equals to :class:`Mole.max_memory`
        incore_complete : bool
            Avoid all I/O. Default is False.
        allow_lowmem_build : bool
            Allow the self-energy build to switch to a serially slower
            code with lower thread-private memory overhead if needed. One
            of True, False or 'force'. Default value is True.
        conv_tol : float
            Convergence threshold for AGF2 energy. Default value is 1e-7
        conv_tol_rdm1 : float
            Convergence threshold for first-order reduced density matrix.
            Default value is 1e-8.
        conv_tol_nelec : float
            Convergence threshold for the number of electrons. Default
            value is 1e-6.
        max_cycle : int
            Maximum number of AGF2 iterations. Default value is 50.
        max_cycle_outer : int
            Maximum number of outer Fock loop iterations. Default
            value is 20.
        max_cycle_inner : int
            Maximum number of inner Fock loop iterations. Default
            value is 50.
        weight_tol : float
            Threshold in spectral weight of auxiliaries to be considered
            zero. Default 1e-11.
        diis : bool or lib.diis.DIIS
            Whether to use DIIS, can also be a lib.diis.DIIS object. Default
            value is True.
        diis_space : int
            DIIS space size. Default value is 8.
        diis_min_space : int
            Minimum space of DIIS. Default value is 1.
        fock_diis_space : int
            DIIS space size for Fock loop iterations. Default value is 6.
        fock_diis_min_space :
            Minimum space of DIIS. Default value is 1.
        os_factor : float
            Opposite-spin factor for spin-component-scaled (SCS)
            calculations. Default 1.0
        ss_factor : float
            Same-spin factor for spin-component-scaled (SCS)
            calculations. Default 1.0
        damping : float
            Damping factor for the self-energy. Default value is 0.0

    Saved results
        e_corr : float
            AGF2 correlation energy
        e_tot : float
            Total energy (HF + correlation)
        e_1b : float
            One-body part of :attr:`e_tot`
        e_2b : float
            Two-body part of :attr:`e_tot`
        e_init : float
            Initial correlation energy (truncated MP2)
        converged : bool
            Whether convergence was successful
        se : tuple of SelfEnergy
            Auxiliaries of the self-energy for each spin
        gf : tuple of GreensFunction
            Auxiliaries of the Green's function for each spin
    '''
    def __init__(self, mf, frozen=None, mo_energy=None, mo_coeff=None, mo_occ=None):
        uagf2.UAGF2.__init__(self, mf, frozen=frozen, mo_energy=mo_energy,
                             mo_coeff=mo_coeff, mo_occ=mo_occ)
        # Reuse the mean-field DF object when available, otherwise build a
        # fresh one with an MP2-fitted auxiliary basis.
        if getattr(mf, 'with_df', None) is not None:
            self.with_df = mf.with_df
        else:
            self.with_df = df.DF(mf.mol)
            self.with_df.auxbasis = df.make_auxbasis(mf.mol, mp2fit=True)
        self.allow_lowmem_build = True
        self._keys.update(['_with_df', 'allow_lowmem_build'])
    # Module-level functions bound as methods.
    build_se_part = build_se_part
    get_jk = dfragf2.get_jk
    def ao2mo(self, mo_coeff=None):
        ''' Get the density-fitted electronic repulsion integrals in
            MO basis.
        '''
        eri = _make_mo_eris_incore(self, mo_coeff)
        return eri
    def reset(self, mol=None):
        # Reset the DF tables alongside the base-class state.
        self.with_df.reset(mol)
        return uagf2.UAGF2.reset(self, mol)
    @property
    def with_df(self):
        # DF object; coerced to the AGF2-aware subclass on assignment.
        return self._with_df
    @with_df.setter
    def with_df(self, val):
        self._with_df = val
        self._with_df.__class__ = dfragf2.DF
class _ChemistsERIs(uagf2._ChemistsERIs):
    ''' (pq|rs) as (pq|J)(J|rs)

    MO tensors are stored in tril form; we only need QMO tensors
    in low-symmetry
    '''
    pass
def _make_mo_eris_incore(agf2, mo_coeff=None):
    ''' Returns _ChemistsERIs

    Transforms the density-fitted three-centre integrals into the alpha
    and beta MO bases, keeping the s2 (tril-packed) pair index.
    '''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(agf2.stdout, agf2.verbose)
    eris = _ChemistsERIs()
    eris._common_init_(agf2, mo_coeff)
    with_df = agf2.with_df
    moa, mob = eris.mo_coeff
    nmoa, nmob = moa.shape[1], mob.shape[1]
    # Number of tril-packed MO pairs per spin.
    npaira, npairb = nmoa*(nmoa+1)//2, nmob*(nmob+1)//2
    naux = with_df.get_naoaux()
    qxy_a = np.zeros((naux, npaira))
    qxy_b = np.zeros((naux, npairb))
    moa = np.asarray(moa, order='F')
    mob = np.asarray(mob, order='F')
    sija = (0, nmoa, 0, nmoa)
    sijb = (0, nmob, 0, nmob)
    sym = dict(aosym='s2', mosym='s2')
    # Transform each block of auxiliary functions; partial results are
    # combined across ranks by the allreduce below.
    for p0, p1 in with_df.prange():
        eri0 = with_df._cderi[p0:p1]
        qxy_a[p0:p1] = ao2mo._ao2mo.nr_e2(eri0, moa, sija, out=qxy_a[p0:p1], **sym)
        qxy_b[p0:p1] = ao2mo._ao2mo.nr_e2(eri0, mob, sijb, out=qxy_b[p0:p1], **sym)
    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(qxy_a)
    mpi_helper.allreduce_safe_inplace(qxy_b)
    eris.eri_a = qxy_a
    eris.eri_b = qxy_b
    # Spin-pair attributes are views over the same two arrays (no copies).
    eris.eri_aa = (eris.eri_a, eris.eri_a)
    eris.eri_ab = (eris.eri_a, eris.eri_b)
    eris.eri_ba = (eris.eri_b, eris.eri_a)
    eris.eri_bb = (eris.eri_b, eris.eri_b)
    eris.eri = (eris.eri_a, eris.eri_b)
    log.timer('MO integral transformation', *cput0)
    return eris
def _make_qmo_eris_incore(agf2, eri, coeffs_a, coeffs_b):
    ''' Returns nested tuple of ndarray

    Transforms the DF integrals into the (x,i) and (j,a) QMO bases for
    each spin, where the coefficient triplets are (i, j, a) couplings.
    '''
    cput0 = (logger.process_clock(), logger.perf_counter())
    log = logger.Logger(agf2.stdout, agf2.verbose)
    # x-space: identity over the active MOs (frozen columns masked out).
    cxa, cxb = np.eye(agf2.nmo[0]), np.eye(agf2.nmo[1])
    if not (agf2.frozen is None or agf2.frozen == 0):
        mask = uagf2.get_frozen_mask(agf2)
        cxa = cxa[:,mask[0]]
        cxb = cxb[:,mask[1]]
    nmoa, nmob = agf2.nmo
    npaira, npairb = nmoa*(nmoa+1)//2, nmob*(nmob+1)//2
    with_df = agf2.with_df
    naux = with_df.get_naoaux()
    cia, cja, caa = coeffs_a
    cib, cjb, cab = coeffs_b
    xisym_a, nxi_a, cxi_a, sxi_a = ao2mo.incore._conc_mos(cxa, cia, compact=False)
    jasym_a, nja_a, cja_a, sja_a = ao2mo.incore._conc_mos(cja, caa, compact=False)
    xisym_b, nxi_b, cxi_b, sxi_b = ao2mo.incore._conc_mos(cxb, cib, compact=False)
    jasym_b, nja_b, cja_b, sja_b = ao2mo.incore._conc_mos(cjb, cab, compact=False)
    sym = dict(aosym='s2', mosym='s1')
    qxi_a = np.zeros((naux, nxi_a))
    qxi_b = np.zeros((naux, nxi_b))
    qja_a = np.zeros((naux, nja_a))
    qja_b = np.zeros((naux, nja_b))
    # Reusable per-block buffers for the tril-packed MO integrals.
    buf = (np.zeros((with_df.blockdim, npaira)), np.zeros((with_df.blockdim, npairb)))
    for p0, p1 in mpi_helper.prange(0, naux, with_df.blockdim):
        naux0 = p1 - p0
        bufa0 = buf[0][:naux0]
        bufb0 = buf[1][:naux0]
        bufa0[:] = eri.eri[0][p0:p1]
        bufb0[:] = eri.eri[1][p0:p1]
        qxi_a[p0:p1] = ao2mo._ao2mo.nr_e2(bufa0, cxi_a, sxi_a, out=qxi_a[p0:p1], **sym)
        qxi_b[p0:p1] = ao2mo._ao2mo.nr_e2(bufb0, cxi_b, sxi_b, out=qxi_b[p0:p1], **sym)
        qja_a[p0:p1] = ao2mo._ao2mo.nr_e2(bufa0, cja_a, sja_a, out=qja_a[p0:p1], **sym)
        qja_b[p0:p1] = ao2mo._ao2mo.nr_e2(bufb0, cja_b, sja_b, out=qja_b[p0:p1], **sym)
    mpi_helper.barrier()
    mpi_helper.allreduce_safe_inplace(qxi_a)
    mpi_helper.allreduce_safe_inplace(qxi_b)
    mpi_helper.allreduce_safe_inplace(qja_a)
    mpi_helper.allreduce_safe_inplace(qja_b)
    qxi_a = qxi_a.reshape(naux, -1)
    qxi_b = qxi_b.reshape(naux, -1)
    qja_a = qja_a.reshape(naux, -1)
    qja_b = qja_b.reshape(naux, -1)
    log.timer('QMO integral transformation', *cput0)
    return ((qxi_a, qja_a), (qxi_b, qja_b))
if __name__ == '__main__':
    # Demo: DF-UHF on an open-shell anion followed by DF-UAGF2.
    from pyscf import gto, scf, mp
    import pyscf.scf.stability
    mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='cc-pvdz', charge=-1, spin=1, verbose=3)
    uhf = scf.UHF(mol).density_fit()
    uhf.conv_tol = 1e-11
    uhf.run()
    # Follow internal-stability rotations until the UHF solution is stable
    # (at most 10 attempts).
    for niter in range(1, 11):
        stability = scf.stability.uhf_stability(uhf)
        if isinstance(stability, tuple):
            sint, sext = stability
        else:
            sint = stability
        if np.allclose(sint, uhf.mo_coeff):
            break
        else:
            rdm1 = uhf.make_rdm1(sint, uhf.mo_occ)
            uhf.scf(dm0=rdm1)
    uagf2 = DFUAGF2(uhf)
    uagf2.run()
    # Print the first few ionization potentials and electron affinities.
    uagf2.ipagf2(nroots=5)
    uagf2.eaagf2(nroots=5)
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""
Module for the workflow parameter type
"""
from .identifier import IdentifierParamType
class WorkflowParamType(IdentifierParamType):
    """Parameter type that identifies WorkflowNode entities (or
    subclasses) from a user-supplied identifier.
    """
    name = 'WorkflowNode'
    @property
    def orm_class_loader(self):
        """Return the orm entity loader class, a subclass of
        OrmEntityLoader, used to load the entity for a given identifier.

        :return: the orm entity loader class for this ParamType
        """
        # Imported lazily to avoid pulling in the ORM at module load time.
        from aiida.orm.utils.loaders import WorkflowEntityLoader
        return WorkflowEntityLoader
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: utils
:platform: Unix
:synopsis: Utilities for plugin management
.. moduleauthor:: Mark Basham <scientificsoftware@diamond.ac.uk>
"""
import os
import re
import sys
import ast
import logging
import savu
import importlib
import inspect
import itertools
from collections import OrderedDict
import numpy as np
from savu.plugins.loaders.utils.my_safe_constructor import MySafeConstructor
# can I remove these from here?
load_tools = {}  # cache of plugin tools classes
plugins = {}  # central register of plugin classes, keyed by class name
plugins_path = {}  # module paths for plugins living outside the 'savu' package
dawn_plugins = {}  # plugins flagged as DAWN-compatible, with path/output details
# NOTE(review): 'count' is read by parse_array_index_as_string but never
# updated anywhere visible -- confirm it is intentional.
count = 0
OUTPUT_TYPE_DATA_ONLY = 0
OUTPUT_TYPE_METADATA_ONLY = 1
OUTPUT_TYPE_METADATA_AND_DATA = 2
def register_plugin(clazz):
    """Decorator that records *clazz* in the central plugin register."""
    plugins[clazz.__name__] = clazz
    top_package = clazz.__module__.split(".")[0]
    if top_package != "savu":
        # Remember where externally-provided plugins live.
        plugins_path[clazz.__name__] = clazz.__module__
    return clazz
def dawn_compatible(plugin_output_type=OUTPUT_TYPE_METADATA_AND_DATA):
    """Decorator (usable with or without brackets) that registers
    DAWN-compatible plugins and their output type in dawn_plugins.
    """
    def _dawn_compatible(clazz):
        """
        decorator to add dawn compatible plugins and details to a central
        register
        """
        dawn_plugins[clazz.__name__] = {}
        try:
            plugin_path = sys.modules[clazz.__module__].__file__
            # looks out for .pyc files
            dawn_plugins[clazz.__name__]['path2plugin'] = plugin_path.split('.py')[0] + '.py'
            # _plugin_output_type resolves from the enclosing scope at call
            # time (late binding), so the assignment below this def is fine.
            dawn_plugins[clazz.__name__]['plugin_output_type'] = _plugin_output_type
        except Exception as e:
            print(e)
        return clazz
    # for backwards compatibility, if decorator is invoked without brackets...
    if inspect.isclass(plugin_output_type):
        # Used bare as @dawn_compatible: the 'argument' is really the class.
        _plugin_output_type = OUTPUT_TYPE_METADATA_AND_DATA
        return _dawn_compatible(plugin_output_type)
    else:
        _plugin_output_type = plugin_output_type
        return _dawn_compatible
def get_plugin(plugin_name, params, exp, check=False):
    """Instantiate the named plugin and initialise it with parameters.

    :param plugin_name: Name of the plugin to import
    :type plugin_name: str.
    :param params: parameter dict handed to the instance's initialise().
    :param exp: experiment object handed to the plugin.
    :param check: forwarded to initialise() for validation-only runs.
    :returns: An instance of the class described by the named plugin.
    """
    logging.debug("Importing the module %s", plugin_name)
    plugin = load_class(plugin_name)()
    plugin.initialise(params, exp, check=check)
    return plugin
def _get_cls_name(name):
return "".join(x.capitalize() for x in name.split(".")[-1].split("_"))
def load_class(name, cls_name=None):
    """Return the class associated with a module name or module file path.

    :param name: Module name or path to a module file
    :param cls_name: optional explicit class name; derived from the module
        name when omitted.
    :returns: the class object (note: NOT an instance) found in the module.
    """
    path = name if os.path.dirname(name) else None
    name = os.path.basename(os.path.splitext(name)[0]) if path else name
    cls_name = _get_cls_name(name) if not cls_name else cls_name
    # Serve from the central register when the plugin is already known.
    if cls_name in plugins.keys():
        return plugins[cls_name]
    if path:
        # SourceFileLoader.load_module() is deprecated and removed in
        # Python 3.12; use the documented spec/exec_module replacement.
        import importlib.util
        spec = importlib.util.spec_from_file_location(name, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
    else:
        mod = importlib.import_module(name)
    return getattr(mod, cls_name)
def plugin_loader(exp, plugin_dict, check=False):
    """Load and initialise the plugin described by plugin_dict.

    :param exp: experiment object passed through to the plugin.
    :param plugin_dict: dict with 'id' (plugin name) and 'data' (params).
    :param check: validation-only flag forwarded to the plugin.
    :return: the initialised plugin instance.
    """
    logging.debug("Running plugin loader")
    try:
        plugin = get_plugin(plugin_dict['id'], plugin_dict['data'], exp,
                            check=check)
    except Exception as e:
        logging.error("failed to load the plugin")
        logging.error(e)
        # re-raise the original error
        raise
    if check:
        exp.meta_data.plugin_list._set_datasets_list(plugin)
    logging.debug("finished plugin loader")
    return plugin
def get_tools_class(plugin_tools_id, cls=None):
    """Load the plugin tools class

    :param plugin_tools_id: plugin tools module name
    :param cls: Class to initialise
    :return: the tools class, or an instance of it when cls is given
    """
    # Legacy alias kept for backwards compatibility.
    if plugin_tools_id == "savu.plugins.plugin_tools":
        plugin_tools_id = "savu.plugins.base_tools"
    tools_cls = load_class(plugin_tools_id)
    return tools_cls(cls) if cls else tools_cls
def get_plugins_paths(examples=True):
    """Return an ordered mapping of plugin directories to module prefixes.

    Collects the built-in savu plugin folders first (so user folders can
    override them), then user, environment and optional example paths,
    appending any missing locations to sys.path.
    """
    plugins_paths = OrderedDict()
    # Add the savu plugins paths first so it is overridden by user folders
    savu_plugins_path = os.path.join(savu.__path__[0], 'plugins')
    subdirs = [d for d in next(os.walk(savu_plugins_path))[1]
               if d != "__pycache__"]
    for sub in subdirs:
        plugins_paths[os.path.join(savu_plugins_path, sub)] = \
            ''.join(['savu.plugins.', sub, '.'])
    # get user, environment and example plugin paths
    user_path = [os.path.join(os.path.expanduser("~"), "savu_plugins")]
    env_paths = os.getenv("SAVU_PLUGINS_PATH", "").replace(" ", "").split(":")
    templates = "../examples/plugin_examples/plugin_templates"
    eg_path = [os.path.join(savu.__path__[0], templates)] if examples else []
    for ppath in env_paths + user_path + eg_path:
        if not os.path.exists(ppath):
            continue
        plugins_paths[ppath] = os.path.basename(ppath) + "."
        if ppath not in sys.path:
            sys.path.append(os.path.dirname(ppath))
    return plugins_paths
def is_template_param(param):
    """Identifies if the parameter should be included in an input template
    and returns the default value of the parameter if it exists.

    :param param: raw parameter value; template markers look like
        '<default>' or 'global<default>'.
    :return: [ptype, value] when param is a template marker, else False.
    """
    start = 0
    ptype = "local"
    if isinstance(param, str):
        param = param.strip()
        if not param.split("global")[0]:
            ptype = "global"
            start = 6
    try:
        first, last = param[start], param[-1]
    except (TypeError, IndexError, KeyError):
        # Non-subscriptable or too-short values can never be templates.
        # (The original crashed here on e.g. ints or empty strings.)
        return False
    if first == "<" and last == ">":
        param = param[start + 1 : -1]
        param = None if not param else param
        try:
            # NOTE(security): eval of a template-file value; assumed to
            # come from a trusted local configuration file.
            param = eval(param)
        except:
            pass
        return [ptype, param]
    return False
def blockPrint():
    """ Disable printing to stdout """
    import tempfile
    # Redirect stdout into a throwaway file in a fresh temp directory.
    sink_path = tempfile.mkdtemp() + "/unwanted_prints.txt"
    sys.stdout = open(sink_path, "w")
def enablePrint():
    """ Re-enable printing by restoring the original stdout """
    sys.stdout = sys.__stdout__
def parse_config_string(string):
    """Parse a loosely-formatted config string into a Python literal.

    Splits on brackets/commas/spaces, re-quotes each bare token, then
    literal-evaluates the reassembled string, falling back to the
    array-index repair when plain evaluation fails.
    """
    sep_regex = r"[\[\]\, ]+"
    tokens = [tok for tok in re.split(sep_regex, string) if tok]
    separators = re.findall(sep_regex, string)
    quoted = [repr(tok.strip()) for tok in tokens]
    interleaved = itertools.zip_longest(separators, quoted)
    rebuilt = "".join(part for pair in interleaved for part in pair
                      if part is not None)
    try:
        return ast.literal_eval(rebuilt)
    except ValueError:
        return ast.literal_eval(parse_array_index_as_string(rebuilt))
def parse_array_index_as_string(string):
    """Repair quoting around array-index strings so literal_eval accepts
    them, rewriting occurrences of "'['" into a single quoted token.
    """
    p = re.compile(r"'\['")
    for m in p.finditer(string):
        # NOTE(review): 'count' is the module-level global (0, never
        # updated), so this is effectively m.start() + 3; confirm the
        # offset arithmetic is intentional.
        offset = m.start() - count + 3
        end = string[offset:].index("']") + offset
        string = string[:end] + "]'" + string[end + 2 :]
    string = string.replace("'['", "[")
    return string
def param_to_str(param_name, keys):
    """Check the parameter is within the provided list and
    return the string name.

    :param param_name: a parameter name, or a 1-based position given as a
        digit string.
    :param keys: list of valid parameter names.
    :return: the resolved parameter name.
    :raises ValueError: when a numeric selection is out of range.
    :raises Exception: when a named parameter is unknown.
    """
    if param_name.isdigit():
        index = int(param_name)
        # 1-based selection. FIX: the original accepted "0" and silently
        # returned the LAST key via keys[-1]; reject out-of-range indices.
        if 1 <= index <= len(keys):
            return keys[index - 1]
        raise ValueError(
            "This parameter number is not valid for this plugin"
        )
    if param_name not in keys:
        raise Exception("This parameter is not present in this plug in.")
    return param_name
def set_order_by_visibility(parameters, level=False):
    """Return an ordered list of parameters depending on the
    visibility level

    :param parameters: The dictionary of parameters
    :param level: The visibility level
    :return: An ordered list of parameters
    """
    buckets = {"datasets": [], "basic": [], "intermediate": [], "advanced": []}
    for name, info in parameters.items():
        if info["display"] == "on" and info["visibility"] in buckets:
            buckets[info["visibility"]].append(name)
    data_keys = buckets["datasets"]
    basic_keys = buckets["basic"]
    interm_keys = buckets["intermediate"]
    adv_keys = buckets["advanced"]
    if level == "datasets":
        return data_keys
    if level == "basic":
        return basic_keys
    if level == "intermediate":
        return basic_keys + interm_keys + data_keys
    # 'advanced', any other truthy level, and level=False all yield the
    # full ordering (matching the original's fall-through behaviour).
    return basic_keys + interm_keys + adv_keys + data_keys
def convert_multi_params(param_name, value):
    """Check if value is a multi parameter and check if each item is valid.
    Change from the input multi parameter string to a list.

    :param param_name: Name of the parameter
    :param value: Parameter value
    :return: (value, error_str) - value becomes a list for multi
        parameters; error_str is non-empty when validation failed
    """
    error_str = ""
    is_multi = (
        isinstance(value, str) and (";" in value) and param_name != "preview"
    )
    if is_multi:
        value = value.split(";")
        isdict = re.findall(r"[\{\}]+", value[0])
        if ":" in value[0] and not isdict:
            # start:stop[:step] tuning syntax -> expand to a value range.
            seq = value[0].split(":")
            try:
                seq = [ast.literal_eval(s) for s in seq]
                if len(seq) == 2:
                    value = list(np.arange(seq[0], seq[1]))
                elif len(seq) > 2:
                    value = list(np.arange(seq[0], seq[1], seq[2]))
                else:
                    error_str = "Ensure start:stop:step; values are valid."
                if not value:
                    # Don't allow an empty list
                    raise ValueError
            except Exception:
                # Was a bare ``except:``, which also swallowed SystemExit
                # and KeyboardInterrupt.
                error_str = "Ensure start:stop:step; values are valid."
        # Remove blank list entries and convert each entry to int, float
        # or str. (A dead pre-computation of this list via
        # parse_config_string was removed: split() never returns a string
        # here, and the result was immediately overwritten.)
        value = [_dumps(val) for val in value if val]
    return value, error_str
def _dumps(val):
    """Replace any missing quotes around variables.
    Change the string to an integer, float, tuple, list, str, dict.
    """
    import yaml
    # NOTE(review): this mutates the shared yaml.SafeLoader on every call;
    # registering the constructor once at import time would be cleaner.
    # Prevent conversion from on/off to boolean
    yaml.SafeLoader.add_constructor(
        "tag:yaml.org,2002:bool", MySafeConstructor.add_bool
    )
    if isinstance(val, str):
        # Attempt 1: plain Python literal.
        try:
            # Safely evaluate an expression node or a string containing
            # a Python literal or container display
            value = ast.literal_eval(val)
            return value
        except Exception:
            pass
        # Attempt 2: YAML value, with colons escaped first so sexagesimal
        # (base-60) interpretation is avoided.
        try:
            isdict = re.findall(r"[\{\}]+", val)
            val = _sexagesimal_check(val, isdict, remove=False)
            value = yaml.safe_load(val)
            return _sexagesimal_check(value, isdict)
        except Exception:
            # Undo the colon escaping before the next attempt.
            val = _sexagesimal_check(val, isdict)
            pass
        # Attempt 3: quote brackets and retry, or fall back to the
        # config-string parser.
        try:
            isdict = re.findall(r"[\{\}]+", val)
            # Matches { } between one and unlimited number of times
            if isdict:
                # NOTE(review): ``val`` is a str in this branch, so this
                # isinstance(val, dict) arm looks unreachable - TODO
                # confirm before removing.
                if isinstance(val, dict):
                    value_dict = {}
                    for k, v in val.items():
                        v = v.replace("[", "'[").replace("]", "]'")
                        value_dict[k] = _dumps(
                            yaml.safe_load(v)
                        )
                    return value_dict
                else:
                    value = val.replace("[", "'[").replace("]", "]'")
                    return _dumps(yaml.safe_load(value))
            else:
                value = parse_config_string(val)
                return value
        except Exception:
            # A ';'-separated multi-value string is returned untouched.
            if len(val.split(";")) > 1:
                value = val
                return value
            else:
                raise Exception("Invalid string %s" % val)
    else:
        # Non-strings are already typed; pass through unchanged.
        value = val
        return value
def _sexagesimal_check(val, isdict, remove=True):
"""To avoid sexagesimal values being evaluated, replace colon
values temporarily
:param val:
:param isdict: True if braces {} found
:return: value
"""
if isinstance(val, str) and not isdict:
if remove:
val = val.replace(":?", ":")
else:
val = val.replace(":", ":?")
return val
def check_valid_dimension(dim, prev_list):
    """Validate a dimension number.

    The dimension must lie in 1..20 and, when a preview list is supplied,
    must not exceed its length.
    """
    if dim <= 0 or dim >= 21:
        raise Exception("Please use a dimension between 1 and 20.")
    if prev_list and dim > len(prev_list):
        raise Exception(
            "You have not specified enough dimensions "
            "inside the preview parameter."
        )
    return True
def is_slice_notation(value):
    """Return True when ``value`` is a string containing slice notation."""
    return isinstance(value, str) and ":" in value
def create_dir(file_path):
    """Ensure the directory portion of ``file_path`` exists.

    Creates any missing intermediate directories. A bare filename (no
    directory part) is a no-op - the original crashed on ``makedirs('')``.

    :param file_path: Path to a file whose parent directories are needed.
    """
    directory = os.path.dirname(file_path)
    if directory:
        # exist_ok avoids the check-then-create race of the original
        # ``if not os.path.exists(...)`` guard.
        os.makedirs(directory, exist_ok=True)
def indent_multi_line_str(text, indent_level=1, justify=False):
    """Indent every line of ``text`` by 4 spaces per indent level.

    With ``justify=True``, existing left padding is stripped first so
    that all lines end up aligned.
    """
    pad = " " * 4 * indent_level
    lines = text.split("\n")
    if justify:
        indented = [pad + line.lstrip() for line in lines]
    else:
        indented = [pad + line for line in lines]
    return "\n".join(indented)
def indent(text, indent_level=1):
    """Prefix ``text`` with 4 spaces per indent level."""
    return " " * 4 * indent_level + text
def sort_alphanum(_list):
    """Sort a list naturally (numbers by value, text alphabetically)
    while keeping the original element types.

    :param _list: Input list to be sorted
    :return: A new list sorted by number and letter alphabetically
    """
    return sorted(_list, key=_alphanum)
def _str_to_int(_str):
"""Convert the input str to an int if possible
:param _str: input string
:return: integer if text is a digit, else string
"""
return int(_str) if _str.isdigit() else _str
def _alphanum(_str):
    """Split a string into alternating text and number chunks, numbers
    converted to int, for natural-order sorting.

    :param _str: string to split
    :return: list of ints and strings
    """
    return [_str_to_int(chunk) for chunk in re.split("([0-9]+)", _str)]
|
from Kaspa.modules.abstract_modules.abstractBriefingSubmodule import AbstractBriefingSubmodule
class NewsModuleEn(AbstractBriefingSubmodule):
    """English news submodule that reads New York Times world headlines."""

    module_name = "News"
    language = "en"
    key_regexes = dict()

    def __init__(self):
        # Trigger on any query mentioning "news" (case-insensitive).
        self.key_regexes = {'(?i).*?(?=news)+.': self.action}

    def action(self, query):
        """Speak the NYT world RSS headlines via the query's communicator."""
        speaker = query.get_communicator()
        feed_text = self.main_module.read_rss(
            "http://rss.nytimes.com/services/xml/rss/nyt/World.xml")
        speaker.say(
            "These are the New York Times headlines: \n" + feed_text)

    def briefing_action(self, query):
        """A briefing simply reuses the regular news action."""
        self.action(query)
import requests
from lxml import html
import sys
import lxml.etree
import lxml._elementpath
def ratings(handle, result):
    """Fetch the CodeChef ratings text for ``handle``.

    :param handle: CodeChef username.
    :param result: Message prefix used when reporting an invalid handle.
    :return: The first scraped rating string, minus its last two
        characters.
    """
    # valid username url
    url = 'https://www.codechef.com/users/' + handle
    page = requests.get(url)
    tree = html.fromstring(page.content)
    data = tree.xpath("//div/section/div/div/div/a/text()")
    if len(data) == 0:
        result = result + "Handle: " + handle + " is invalid."
        print("Wrong Handle: user_ratings_py")
        # NOTE(review): sys.exit() inside a library function kills the
        # whole process; raising an exception would be friendlier to
        # callers. Kept for backward compatibility.
        sys.exit()
    # Drop the trailing two characters (was a char-by-char loop that
    # shadowed the builtin ``str``).
    return data[0][:-2]
|
import decouple
class EnvConfig:
    """AWS credentials loaded from the environment via python-decouple."""

    def __init__(self):
        # Both values must be present in the environment (or a .env
        # file); decouple.config raises UndefinedValueError otherwise.
        self.aws_access_key_id = decouple.config("AWS_ACCESS_KEY_ID")
        self.aws_secret_access_key = decouple.config("AWS_SECRET_ACCESS_KEY")

# Module-level singleton, read once at import time.
env_config = EnvConfig()
|
from flask import Flask
from flask_testing import TestCase
class TestFoo(TestCase):
    """Minimal flask-testing smoke test."""

    def create_app(self):
        # Required factory hook for flask_testing.TestCase.
        return Flask(__name__)

    def test_add(self):
        self.assertEqual(1 + 2, 3)
|
import shutil
import os
import global_vars as var
print('hi') |
'''
This module is aimed at reading JSON data files, either locally or from a remote
host. The data files are not exactly JSON, they're files in which each line is a
JSON object, thus making up a row of data, and in which each key of the JSON
strings refers to a column.
'''
import os
import gzip
import logging
import paramiko
import pandas as pd
import numpy as np
LOGGER = logging.getLogger(__name__)
def yield_tweets_access(tweets_files_paths, tweets_res=None):
    '''
    Yields an "access" to a tweets' DataFrame: either the DataFrame
    itself when a list `tweets_res` is supplied, or the argument tuple
    for `read_json_wrapper`. Consumers receive a separate "get_df"
    function to turn each yielded item into a DataFrame; that function
    cannot be yielded here because it must stay pickable (so declared
    globally) for later use in a multiprocessing context.
    '''
    if tweets_res is not None:
        # In this case get_df = lambda x: x is to be used.
        yield from tweets_res
        return
    # Here get_df = lambda x: read_json_wrapper(*x).
    for file_path in tweets_files_paths:
        for chunk_start, chunk_size in chunkify(file_path, size=1e9):
            yield (file_path, chunk_start, chunk_size)
def filter_df(raw_tweets_df, cols=None, dfs_to_join=None):
    '''
    Inner-joins `raw_tweets_df` with each DataFrame in `dfs_to_join`
    (whose index names must match columns of `raw_tweets_df`), then
    optionally restricts the result to the columns listed in `cols`.
    '''
    joined = raw_tweets_df.copy()
    # Default to the original columns, captured before the joins add any.
    keep = joined.columns.values if cols is None else cols
    for other in (dfs_to_join or []):
        joined = joined.join(other, on=other.index.name, how='inner')
    LOGGER.info(f'{joined.shape[0]} tweets remaining after filters.')
    return joined.loc[:, keep]
def read_data(tweets_file_path, chunk_start, chunk_size, dfs_to_join=None,
              cols=None, ssh_domain=None, ssh_username=None):
    '''
    Loads the chunk of the JSON file at `tweets_file_path` delimited by
    `chunk_start`/`chunk_size` into a DataFrame, then filters it through
    inner joins with the DataFrames in `dfs_to_join`.
    '''
    raw_tweets_df = read_json_wrapper(
        tweets_file_path, chunk_start, chunk_size, ssh_domain=ssh_domain,
        ssh_username=ssh_username)
    return filter_df(
        raw_tweets_df, cols=cols,
        dfs_to_join=[] if dfs_to_join is None else dfs_to_join)
def yield_sftp(file_path, ssh_domain, ssh_username):
    '''
    Yields a SFTP file handler of the file located in 'file_path' on the
    server with domain 'ssh_domain', to which you connect with your user
    name 'ssh_username'. It is assumed you have done ssh-copy-id on the
    remote host (so `load_system_host_keys` actually loads something).
    '''
    with paramiko.client.SSHClient() as ssh_client:
        ssh_client.load_system_host_keys()
        ssh_client.connect(ssh_domain, username=ssh_username)
        sftp_client = ssh_client.open_sftp()
        # Yielding inside both contexts keeps the SSH session and the
        # remote file open while the caller consumes the handle.
        with sftp_client.file(file_path, mode='r') as f:
            yield f
# Better to separate generators (functions with yield) and regular functions
# (terminating with return).
def return_json(file_path, ssh_domain=None, ssh_username=None,
                compression='infer'):
    '''
    Returns a DataFrame read from a local or remote (SFTP)
    line-delimited JSON file. Not recommended for large data files, as
    everything is loaded at once.
    '''
    if os.path.exists(file_path):
        return pd.read_json(file_path, lines=True, compression=compression)
    # The generator wraps the SFTP file's context: it yields exactly one
    # handle, and closes the file when the loop completes. Since read_json
    # returns a materialized DataFrame, returning after the loop is safe.
    for f in yield_sftp(file_path, ssh_domain, ssh_username):
        data = pd.read_json(f, lines=True, compression=compression)
    return data
def yield_json(file_path, ssh_domain=None, ssh_username=None, chunk_size=1000,
               compression='infer'):
    '''
    Yields DataFrames of at most `chunk_size` rows from a local or
    remote line-delimited JSON file. Better suited to large files than
    `return_json`, but unusable with `multiprocessing`: the underlying
    file handles cannot be serialized.
    '''
    if os.path.exists(file_path):
        reader = pd.read_json(file_path, lines=True, chunksize=chunk_size,
                              compression=compression)
        yield from reader
    else:
        # The generator wraps the SFTP file's context; it closes the file
        # once the loop finishes.
        for f in yield_sftp(file_path, ssh_domain, ssh_username):
            reader = pd.read_json(f, lines=True, chunksize=chunk_size,
                                  compression=compression)
            # The JsonReader reads lazily, so consumption must stay
            # inside the file context or the handle would already be
            # closed.
            yield from reader
def yield_gzip(file_path, ssh_domain=None, ssh_username=None):
    '''
    Yields a decompressing file handle for a local or remote gzip file.
    '''
    if os.path.exists(file_path):
        with gzip.open(file_path, 'rb') as unzipped_f:
            yield unzipped_f
    else:
        for remote_f in yield_sftp(file_path, ssh_domain, ssh_username):
            with gzip.open(remote_f, 'rb') as unzipped_f:
                yield unzipped_f
def read_json_wrapper(file_path, chunk_start, chunk_size, ssh_domain=None,
                      ssh_username=None):
    '''
    Reads `chunk_size` bytes starting at byte `chunk_start` of the
    gzipped JSON file in `file_path` and unpacks them into a DataFrame.
    '''
    for f in yield_gzip(file_path, ssh_domain=ssh_domain,
                        ssh_username=ssh_username):
        f.seek(chunk_start)
        raw_tweets_df = pd.read_json(f.read(chunk_size), lines=True)
        nr_tweets = len(raw_tweets_df)
        LOGGER.info(f'{chunk_size*10**-6:.4g}MB read, {nr_tweets} tweets '
                    'unpacked.')
        return raw_tweets_df
def chunkify(file_path, size=5e8, ssh_domain=None, ssh_username=None):
    '''
    Generator going through a gzipped json file located in 'file_path',
    and yielding the chunk start and size of (approximate byte) size
    'size'. Since we want to read whole lines of data, the function
    ensures that the end of each chunk, 'chunk_end', is at the end of a
    line.
    '''
    for f in yield_gzip(file_path, ssh_domain=ssh_domain,
                        ssh_username=ssh_username):
        chunk_end = f.tell()
        while True:
            chunk_start = chunk_end
            # Seek 'size' bytes ahead, relatively to where we are now
            # (second argument = 1).
            f.seek(int(size), 1)
            # Read a line at this point, that is, read until a '\n' is
            # encountered:
            f.readline()
            # Put the end of the chunk at the end of this line:
            chunk_end = f.tell()
            # If the end of the file is reached, f.tell() returns
            # the last byte, even if we keep seeking forward.
            yield chunk_start, chunk_end-chunk_start
            # Because of readline, we'll always read some bytes more than
            # 'size'; when we didn't, it means we've reached the end of
            # the file.
            if chunk_end - chunk_start < size:
                break
# Might be better on larger files, but it's not on CH (compressed 2.4GB)
def test_chunkify(file_path, size=5e8, uncompressed_size=None, ssh_domain=None,
                  ssh_username=None):
    '''
    Alternative chunking generator yielding fixed-size
    (chunk_start, size) pairs covering the uncompressed file. Unlike
    `chunkify`, chunk boundaries here are NOT aligned to line ends.
    '''
    for f in yield_gzip(file_path, ssh_domain=ssh_domain,
                        ssh_username=ssh_username):
        if not uncompressed_size:
            # Seek to the end (whence=2) to measure the uncompressed size.
            uncompressed_size = f.seek(0, 2)
        yield from ((chunk_start, size)
                    for chunk_start in np.arange(0, uncompressed_size, size))
def test_read_json_wrapper(file_path, chunk_start, chunk_size, ssh_domain=None,
                           ssh_username=None):
    '''
    Reads a DataFrame from the gzipped json file in 'file_path', starting
    at byte 'chunk_start' and reading roughly 'chunk_size' bytes, always
    stopping at a line boundary.
    '''
    for f in yield_gzip(file_path, ssh_domain=ssh_domain,
                        ssh_username=ssh_username):
        f.seek(int(chunk_start))
        # When starting mid-file, finish the partial line first so that
        # readlines begins on a full tweet. Caveat: the chunking
        # generator does not know we skipped those bytes, so adjacent
        # chunks may produce duplicate lines.
        if chunk_start > 0:
            f.readline()
        # readlines reads at least chunk_size bytes, rounded up to whole
        # lines.
        payload = b''.join(f.readlines(int(chunk_size)))
        raw_tweets_df = pd.read_json(payload, lines=True)
        nr_tweets = len(raw_tweets_df)
        print(f'{chunk_size*10**-6:.4g}MB read, {nr_tweets} tweets unpacked.')
        return raw_tweets_df
|
"""
This script creates monoT5 input files by taking corpus,
queries and the retrieval run file for the queries and then
create files for monoT5 input. Each line in the monoT5 input
file follows the format:
f'Query: {query} Document: {document} Relevant:\n')
"""
import collections
from tqdm import tqdm
import argparse
# Command-line interface: every path argument is required.
parser = argparse.ArgumentParser()
parser.add_argument("--queries", type=str, required=True,
                    help="tsv file with two columns, <query_id> and <query_text>")
parser.add_argument("--run", type=str, required=True,
                    help="tsv file with three columns <query_id>, <doc_id> and <rank>")
parser.add_argument("--corpus", type=str, required=True)
parser.add_argument("--t5_input", type=str, required=True,
                    help="path to store t5_input, txt format")
parser.add_argument("--t5_input_ids", type=str, required=True,
                    help="path to store the query-doc ids of t5_input, tsv format")
args = parser.parse_args()
def load_corpus(path):
    """Load a tsv of <doc_id>\\t<doc_text> lines into a dict keyed by
    doc_id."""
    print('Loading corpus...')
    with open(path) as f:
        pairs = (line.rstrip().split('\t') for line in tqdm(f))
        return {doc_id: doc for doc_id, doc in pairs}
def load_queries(path):
    """Loads queries into a dict of key: query_id, value: query text."""
    print('Loading queries...')
    with open(path) as f:
        pairs = (line.rstrip().split('\t') for line in tqdm(f))
        return {query_id: query for query_id, query in pairs}
def load_run(path):
    """Loads run into a dict of key: query_id, value: list of candidate
    doc ids, ordered by ascending rank."""
    # An OrderedDict preserves the run order so the run file can be
    # paired with the TFRecord file later.
    print('Loading run...')
    run = collections.OrderedDict()
    with open(path) as f:
        for line in tqdm(f):
            query_id, doc_title, rank = line.split('\t')
            run.setdefault(query_id, []).append((doc_title, int(rank)))
    print('Sorting candidate docs by rank...')
    sorted_run = collections.OrderedDict()
    for query_id, titles_and_ranks in tqdm(run.items()):
        ordered = sorted(titles_and_ranks, key=lambda pair: pair[1])
        sorted_run[query_id] = [title for title, _ in ordered]
    return sorted_run
corpus = load_corpus(path=args.corpus)
queries = load_queries(path=args.queries)
run = load_run(path=args.run)

print("Writing t5 input and ids")
with open(args.t5_input, 'w') as fout_t5, open(args.t5_input_ids, 'w') as fout_tsv:
    # NOTE: num_examples counts query groups (not written lines) and is
    # otherwise unused.
    for num_examples, (query_id, candidate_doc_ids) in enumerate(
            tqdm(run.items(), total=len(run))):
        query = queries[query_id]
        for candidate_doc_id in candidate_doc_ids:
            # monoT5 expects exactly this prompt format, one pair per line.
            fout_t5.write(
                f'Query: {query} Document: {corpus[candidate_doc_id]} Relevant:\n')
            # Parallel id file so predictions can be mapped back.
            fout_tsv.write(f'{query_id}\t{candidate_doc_id}\n')
|
from .triplet_sampler import RandomIdentitySampler
|
# Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Test configuration setup for djangolg."""
from __future__ import print_function
from __future__ import unicode_literals
def pytest_configure():
    """Configure minimal in-memory Django settings for the test run."""
    from django.conf import settings

    # The same middleware tuple is passed under both the post-1.10
    # (MIDDLEWARE) and pre-1.10 (MIDDLEWARE_CLASSES) setting names for
    # cross-version compatibility.
    MIDDLEWARE = (
        'django.middleware.common.CommonMiddleware',
        'django.contrib.sessions.middleware.SessionMiddleware',
        'django.contrib.auth.middleware.AuthenticationMiddleware',
        'django.contrib.messages.middleware.MessageMiddleware',
    )
    settings.configure(
        DEBUG_PROPAGATE_EXCEPTIONS=True,
        # Throwaway sqlite database living in memory only.
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:'
            }
        },
        SITE_ID=1,
        SECRET_KEY='not very secret in tests',
        USE_I18N=True,
        USE_L10N=True,
        STATIC_URL='/static/',
        ROOT_URLCONF='tests.urls',
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'APP_DIRS': True,
            },
        ],
        MIDDLEWARE=MIDDLEWARE,
        MIDDLEWARE_CLASSES=MIDDLEWARE,
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.contenttypes',
            'django.contrib.sessions',
            'django.contrib.sites',
            'django.contrib.staticfiles',
            'djangolg',
            'tests',
        ),
        # MD5 hashing keeps test-user creation fast.
        PASSWORD_HASHERS=(
            'django.contrib.auth.hashers.MD5PasswordHasher',
        ),
    )
    # django.setup() only exists on Django >= 1.7; older versions raise
    # AttributeError, which is deliberately ignored.
    try:
        import django
        django.setup()
    except AttributeError:
        pass
|
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, Mapping, NamedTuple, Optional
from requests import Response
from ambra_sdk.exceptions.storage import AmbraResponseException
from ambra_sdk.storage.response import check_response
from ambra_sdk.types import RequestsFileType
if TYPE_CHECKING:
from ambra_sdk.storage.storage import Storage # NOQA:WPS433
class StorageMethod(Enum):
    """HTTP verbs supported by the storage API."""

    get = 'GET'
    post = 'POST'
    delete = 'DELETE'
class PreparedRequest(NamedTuple):
    """Prepared request.

    Bundles everything needed to issue (and re-issue) one HTTP call
    against the storage API.
    """

    # This is a sort of private field.
    # Users should not have direct access to it, but NamedTuple
    # attributes cannot start with an underscore, hence the trailing one.
    storage_: 'Storage'  # NOQA WPS1120
    url: str
    method: StorageMethod
    # Mapping is used because it is covariant in its value type.
    errors_mapping: Optional[Mapping[int, AmbraResponseException]] = None
    params: Optional[Dict[str, Any]] = None  # NOQA:WPS110
    files: Optional[Dict[str, RequestsFileType]] = None
    headers: Optional[Dict[str, str]] = None
    data: Optional[Any] = None  # NOQA:WPS110
    stream: Optional[bool] = None

    def execute(self) -> Response:
        """Execute prepared request.

        If sid problems we try to get new sid
        and retry request.

        :return: response object
        """
        response: Response = self.storage_.retry_with_new_sid(
            self.execute_once,
        )
        return response  # NOQA:WPS331

    def execute_once(self) -> Response:
        """Execute prepared request exactly once (no sid retry).

        :return: response object
        :raises RuntimeError: Unknown request method
        """
        # Only forward the kwargs that were actually provided.
        request_kwargs: Dict[str, Any] = {}
        if self.params is not None:
            request_kwargs['params'] = self.params
        if self.data is not None:
            request_kwargs['data'] = self.data
        if self.headers is not None:
            request_kwargs['headers'] = self.headers
        if self.files is not None:
            request_kwargs['files'] = self.files
        if self.stream is not None:
            request_kwargs['stream'] = self.stream
        if self.method == StorageMethod.get:
            response = self.storage_.get(self.url, **request_kwargs)
        elif self.method == StorageMethod.post:
            response = self.storage_.post(self.url, **request_kwargs)
        elif self.method == StorageMethod.delete:
            response = self.storage_.delete(self.url, **request_kwargs)
        else:
            raise RuntimeError(
                'Unknown storage request method: {method}'.format(
                    method=self.method,
                ),
            )
        # Map HTTP error statuses to ambra exceptions before returning.
        return check_response(
            response,
            self.url,
            errors_mapping=self.errors_mapping,
        )
|
from flask import request,Flask,jsonify #pylint: disable=F0401
from flask_cors import CORS
import secrets
import PikuAi as bot
from time import gmtime, strftime
import json
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
# Allow cross-origin requests from the web front end.
CORS(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///dbpiku.sqlite3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class User(db.Model):
    """API consumer account."""

    id = db.Column(db.Integer, primary_key=True)
    # Opaque token generated with secrets.token_urlsafe at registration.
    api_key = db.Column(db.String(50), unique=True)
    username = db.Column(db.String(50))
    # NOTE(review): passwords are stored and compared in plain text, and
    # even returned by the /info and /getapi endpoints - they should be
    # hashed (e.g. werkzeug.security) before this goes anywhere public.
    password = db.Column(db.String(80))
def verifyApi(key):
    """Return True when an account with this API key exists."""
    return User.query.filter_by(api_key=key).first() is not None
def userExist(username):
    """Return True when an account with this username exists."""
    return User.query.filter_by(username=username).first() is not None
@app.route('/conv/<key>',methods=['GET'])
def replyBypiku(key):
    """Return the bot's reply to the ?getReply= query for a valid API key."""
    if verifyApi(key):
        q = str(request.args['getReply'])
        res = bot.PikuAi.getBot().response(q)
        return jsonify({'status':'ok',"reply":res})
    return jsonify({'status':'nok',"reply":"Invalid api key! visit website to get one."})
@app.route('/')
def index():
    """Health-check endpoint."""
    return "server running fine"
@app.route('/info/<key>',methods=['GET'])
def userinfo(key):
    """Look up the account belonging to an API key.

    NOTE(review): this endpoint leaks the plain-text password in the
    response; consider dropping that field.
    """
    user = User.query.filter_by(api_key=key).first()
    if not user:
        return jsonify({'status':'nok','message' : 'No user found!'})
    user_data = {}
    user_data['status'] = 'ok'
    user_data['api_key'] = user.api_key
    user_data['username'] = user.username
    user_data['password'] = user.password
    return jsonify(user_data)
@app.route('/reg',methods=['POST'])
def register():
    """Create a new account and return its freshly generated API key.

    Rejects the request when the username is shorter than 4 characters
    OR the password is shorter than 6. (The original used ``and``, so it
    only rejected when BOTH were too short - contradicting its own error
    message.)
    """
    uname = str(request.args['uname'])
    pwd = str(request.args['password'])
    if len(uname) < 4 or len(pwd) < 6:
        return jsonify({'status':'nok','error':'909','message' : 'failed! Please check length of username is min 4 char and length of password is min 6 char.'})
    api_predict = str(secrets.token_urlsafe(20))
    # Extremely unlikely, but ensure the generated key is unused. (The
    # original regenerated a key here and then discarded it - dead code.)
    if User.query.filter_by(api_key=api_predict).first():
        return jsonify({'status':'nok','message' : 'failed! Please check parameters or contact support'})
    if userExist(uname):
        return jsonify({'status':'nok','message' : 'username already exist'})
    new_user = User(api_key=api_predict, username=uname, password=pwd)
    db.session.add(new_user)
    db.session.commit()
    return jsonify({'status':'ok','message' : 'user created!','api':api_predict})
@app.route('/getapi',methods=['GET'])
def getInfoByidpass():
    """Look up an account by username/password and return its API key.

    NOTE(review): credentials arrive as query-string args, and the
    password is compared and echoed back in plain text - this should use
    hashed passwords and a POST body.
    """
    uname = str(request.args['uname'])
    pwd = str(request.args['password'])
    user = User.query.filter_by(username=uname,password=pwd).first()
    if not user:
        return jsonify({'status':'nok','message' : 'No user found!'})
    user_data = {}
    user_data['status'] = 'ok'
    user_data['api_key'] = user.api_key
    user_data['username'] = user.username
    user_data['password'] = user.password
    return jsonify(user_data)
if __name__ == '__main__':
    # Listen on all interfaces (default Flask port 5000).
    app.run(host='0.0.0.0')
from .settings import * # noqa
# Local development overrides: debug on, plain-HTTP cookies, sqlite DB.
DEBUG = True
SECRET_KEY = 'dev_secret_key'
SEND_GA_EVENTS = False

SCHEME = 'http://'
HOST = '127.0.0.1'
ALLOWED_HOSTS = [HOST]
PORT = 8000

# Braintree SANDBOX (not production) credentials from the secrets store.
BRAINTREE_MERCHANT_ID = get_secret('bt_sandbox.merchant_id')
BRAINTREE_PUBLIC_KEY = get_secret('bt_sandbox.public_key')
BRAINTREE_PRIVATE_KEY = get_secret('bt_sandbox.private_key')
braintree.Configuration.configure(braintree.Environment.Sandbox,
                                  merchant_id=BRAINTREE_MERCHANT_ID,
                                  public_key=BRAINTREE_PUBLIC_KEY,
                                  private_key=BRAINTREE_PRIVATE_KEY)

OAUTH_REDIRECT_URI = "%s%s:%s/%s" % (SCHEME, HOST, PORT, 'autorespond/oauth2callback/')

# No HTTPS locally, so secure cookies must be off.
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'gchat_db.sqlite3')
    }
}

# Print outgoing mail to the console instead of sending it.
DJMAIL_REAL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Disable Sentry reporting in development.
RAVEN_CONFIG.pop('dsn')
|
import csv
import sys
if __name__ == "__main__":
    print(sys.argv)
    # Minimal flag parsing: -i <input tsv>, -o <output tsv>.
    inLoc = outLoc = None
    for i in range(len(sys.argv)):
        if sys.argv[i] == "-i":
            inLoc = sys.argv[i+1]
        if sys.argv[i] == "-o":
            outLoc = sys.argv[i+1]
    # The original crashed with NameError when a flag was missing.
    if inLoc is None or outLoc is None:
        sys.exit("Usage: -i <input.tsv> -o <output.tsv>")
    csv.register_dialect("tsv", delimiter="\t")
    with open(inLoc, 'r') as inFile, open(outLoc, 'w') as outFile:
        reader = csv.reader(inFile, delimiter="\t")
        writer = csv.writer(outFile, delimiter="\t")
        # Expand each [start, stop) interval into single-base rows.
        for row in reader:
            chrom, start, stop = row[:3]  # was ``chr``, shadowing builtin
            the_rest = row[3:]
            # range() also guards against the infinite loop the original
            # ``while start != stop`` hit whenever start > stop.
            for pos in range(int(start), int(stop)):
                writer.writerow([chrom, str(pos), str(pos + 1)] + the_rest)
|
from django.db import models
class CompanyManager(models.Manager):
    """Manager adding lookup by the JSON source index."""

    def get_for_index(self, index):
        """Return the Company imported from JSON record ``index``."""
        return self.get(index=index)
class Company(models.Model):
    """
    A Paranuaran company.
    """
    # The index of the company record in the JSON source data
    index = models.PositiveIntegerField(unique=True)
    # Referred to as 'company' in the JSON source data
    company_name = models.CharField(unique=True, max_length=100)

    objects = CompanyManager()

    # A current employee isn't dead yet! ;-)
    @property
    def current_employees(self):
        """Queryset of this company's employees who are still alive."""
        return self.employees.is_alive()

    def __str__(self):
        return self.company_name

    class Meta:
        ordering = ['company_name']
        verbose_name_plural = 'Companies'
class FoodstuffQuerySet(models.QuerySet):
    """Chainable filters by foodstuff type."""

    def fruit(self):
        return self.filter(type=Foodstuff.FRUIT)

    def vegetables(self):
        return self.filter(type=Foodstuff.VEGETABLE)
class Foodstuff(models.Model):
    """
    A kind of food - initially either a fruit or a vegetable.
    """
    FRUIT = 'f'
    VEGETABLE = 'v'
    TYPE_CHOICES = (
        (FRUIT, 'Fruit'),
        (VEGETABLE, 'Vegetable'),
    )
    name = models.CharField(unique=True, max_length=100)
    type = models.CharField(max_length=1, choices=TYPE_CHOICES)

    objects = FoodstuffQuerySet.as_manager()

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['name']
        verbose_name_plural = 'Foodstuffs'
class Tag(models.Model):
    """
    A tag which can be linked to a Person.
    """
    label = models.CharField(unique=True, max_length=100)

    def __str__(self):
        return self.label

    class Meta:
        ordering = ['label']
        verbose_name_plural = 'Tags'
class PersonQuerySet(models.QuerySet):
    """Chainable Person filters, also mixed into PersonManager."""

    def is_alive(self):
        return self.filter(has_died=False)

    def has_brown_eyes(self):
        return self.filter(eyecolor=Person.EYE_COLOR_BROWN)

    def has_friend(self, friend):
        """People whose own friends list contains ``friend``."""
        return self.filter(friends=friend)

    def friend_of(self, friend):
        """People whom ``friend`` lists as friends (reverse relation)."""
        return self.filter(friend_of=friend)
class PersonManager(models.Manager):
    """Manager for Person; combined with PersonQuerySet via
    ``from_queryset``, so the queryset filters are available on it."""

    def get_for_index(self, index):
        """Return the Person imported from JSON record ``index``."""
        return self.get(index=index)

    # TODO: Determine what 'friendship' actually means in this context!
    # Is a friendship define just by the 'has friend' (forward) relationship,
    # or also by the 'friend of' (reverse) relationship.
    #
    # Consider:
    #   Jack:
    #     friends: Jill, Simon
    #
    #   Jill:
    #     friends: Jack, Simon
    #
    #   Simon:
    #     friends: (none)
    #
    #   Susan:
    #     friends: Jack
    #
    # There are a range of reasonable answers to the question "who are Jack's
    # friends":
    # 1) Just the friends Jack lists: Jill & Simon
    # 2) (1) plus the people who list Jack as a friend: Jill, Simon, & Susan
    # 3) Only those who also consider Jack a friend: Jill (only)
    #
    # For the purposes of this exercise, we'll choose the easy option - 1!
    def mutual_friends_alive_with_brown_eyes(self, person, friend):
        """Living, brown-eyed people listed as friends by both ``person``
        and ``friend`` (definition 1 above)."""
        # Select people who:
        #   'person' considers a friend and
        #   'friend' considers a friend and
        #   are still alive and
        #   have brown eyes
        return (self.friend_of(person).friend_of(friend).
                is_alive().has_brown_eyes())
class Person(models.Model):
    """
    A Paranuaran Person, imported from the JSON source data.
    """
    EYE_COLOR_BLUE = 'bl'
    EYE_COLOR_BROWN = 'br'
    EYE_COLOR_CHOICES = (
        (EYE_COLOR_BLUE, 'Blue'),
        (EYE_COLOR_BROWN, 'Brown'),
    )
    GENDER_MALE = 'm'
    GENDER_FEMALE = 'f'
    GENDER_CHOICES = (
        (GENDER_MALE, 'Male'),
        (GENDER_FEMALE, 'Female'),
    )
    # The _id field from the JSON source file
    json_id = models.CharField(unique=True, max_length=24)
    # The index of the Person record in the JSON file
    index = models.PositiveIntegerField(unique=True)
    guid = models.CharField(unique=True, max_length=36)
    has_died = models.BooleanField()
    balance = models.DecimalField(max_digits=8, decimal_places=2)
    picture = models.URLField()
    age = models.PositiveIntegerField()
    eyecolor = models.CharField(max_length=2, choices=EYE_COLOR_CHOICES)
    name = models.CharField(max_length=100)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
    # NOTE(review): no on_delete argument, so this is pre-Django-2.0
    # style (implicitly CASCADE) - confirm the project's Django version.
    company = models.ForeignKey(Company, null=True, blank=True,
                                related_name='employees')
    email = models.EmailField(unique=True)
    phone = models.CharField(max_length=30)
    address = models.CharField(max_length=200)
    about = models.TextField()
    registered = models.DateTimeField()
    tags = models.ManyToManyField(Tag, blank=True)
    # Asymmetric relation: 'friends' is the forward (has-friend) side,
    # 'friend_of' the reverse.
    friends = models.ManyToManyField('Person', blank=True,
                                     related_name='friend_of')
    greeting = models.CharField(max_length=100)
    favourite_food = models.ManyToManyField(Foodstuff)

    # Manager combining PersonManager methods with PersonQuerySet filters.
    objects = PersonManager.from_queryset(PersonQuerySet)()

    @property
    def favourite_fruit(self):
        """Only the fruit among this person's favourite food."""
        return self.favourite_food.fruit()

    @property
    def favourite_vegetables(self):
        """Only the vegetables among this person's favourite food."""
        return self.favourite_food.vegetables()

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['name']
        verbose_name_plural = 'People'
|
# add a new test file
# test other thing
|
import math
import random
import pickle
import sys
from ..math.matrix import Matrix
from ..math.text2matrix import Text2Matrix
from ..nlp.segmenter import Segmenter
from ..common.global_info import GlobalInfo
from ..common.configuration import Configuration
class Kmeans:
    """
    kmeans is a single-pass work, so don't have store model

    K-means clustering over sparse (CSR-style) document vectors: the
    input Matrix exposes .rows/.cols/.vals index arrays and .nRow/.nCol.

    NOTE: this module is Python 2 code (print statements, ``<>``,
    ``xrange``, ``sys.maxint``) and is kept byte-compatible here.
    """
    def __init__(self):
        #self.curNode = config.GetChild(nodeName)
        self.__means = None       # k dense mean vectors (k x nCol)
        self.__output = None      # cluster id assigned to each document
        self.__meansNorm2 = None  # squared L2 norm of each mean vector

    """
    input x, and output a vector of every document belongs
    program runs pesudo-code:
        random-means-vector
        while (not converage):
            calculate for means
            check converage
        output result
    @k for k-output-cluster
    @return a vector y, len(y) = len(x)
    """
    def Cluster(self, x, k):
        """
        first, using twc-naive_bayes's tech
        assign x's value
        """
        # In-place tf-idf transform and L2 normalisation of each row.
        for r in range(len(x.rows) - 1):
            sampleSum = 0.0
            for c in range(x.rows[r], x.rows[r + 1]):
                termId = x.cols[c]
                # log-tf weighting, then idf scaling.
                x.vals[c] = math.log(x.vals[c] + 1)
                x.vals[c] = x.vals[c] * GlobalInfo.idToIdf[termId]
                sampleSum += x.vals[c] * x.vals[c]
            #normalize it
            sampleSum = math.sqrt(sampleSum)
            for c in range(x.rows[r], x.rows[r + 1]):
                x.vals[c] = float(x.vals[c]) / sampleSum
        """
        second, runs kmeans clustering
        """
        #random-means-vector
        self.__InitMeans(x, k)
        #iterate until the assignment step reports convergence
        converged = False
        while (not converged):
            converged = self.__CalculateMeans(x, k)
        #output
        return self.__output

    def __InitMeans(self, x, k):
        # Seed each mean with a randomly chosen document vector.
        self.__means = [[0 for i in range(x.nCol)] for j in range(k)]
        self.__output = [0 for i in range(x.nRow)]
        self.__meansNorm2 = [0 for i in range(k)]
        for i in xrange(k):
            docId = random.randint(0, x.nRow - 1)
            for c in range(x.rows[docId], x.rows[docId + 1]):
                self.__means[i][x.cols[c]] = x.vals[c]
                self.__meansNorm2[i] += self.__means[i][x.cols[c]]**2

    def __CalculateMeans(self, x, k):
        """One k-means iteration: assign every document to the nearest
        mean, then recompute the means. Returns True once fewer than 1%
        of the assignments changed."""
        meansSum = [[0 for i in range(x.nCol)] for j in range(k)]
        meansCount = [0 for i in range(k)]
        changed = 0
        #assign samples to means
        for r in range(len(x.rows) - 1):
            belongs = -1
            minCost = float(sys.maxint)
            #debug
            #print "new doc"
            for kk in range(k):
                cost = self.__CalculateCost(kk, x, r)
                if (cost < minCost):
                    minCost = cost
                    belongs = kk
            if self.__output[r] <> belongs:
                changed += 1
            self.__output[r] = belongs
            for c in range(x.rows[r], x.rows[r + 1]):
                meansSum[belongs][x.cols[c]] += x.vals[c]
            meansCount[belongs] += 1
        print "meansCount:", meansCount
        #calculate new means point
        # NOTE(review): an empty cluster (meansCount[i] == 0) raises
        # ZeroDivisionError here.
        for i in xrange(k):
            self.__meansNorm2[i] = 0
            for j in xrange(x.nCol):
                self.__means[i][j] = meansSum[i][j] / meansCount[i]
                self.__meansNorm2[i] += self.__means[i][j]**2
        if float(changed) / x.nRow <= 0.01:
            return True
        else:
            return False

    """
    using Euclidean distance and a simple trick:
    when calculate dist of dense vector(means vector) and
    sparse vector(sample vector):
    dist(dV, sV) = sqrt(dV[0]^2 + dV[1]^2..dV[n]^2) +
                   (dV[k0] - sV[0])^2 + (dV[k1] - sV[1])^2 ... -
                   (dV[k0]^2 + dV[k1]^2 ... dV[km]^2))
    """
    def __CalculateCost(self, kk, x, r):
        # Start from the cached squared norm of the mean, then correct it
        # term-by-term over the sample's non-zero entries only.
        cost = self.__meansNorm2[kk]
        #print "meansNorm:", cost
        for c in range(x.rows[r], x.rows[r + 1]):
            termId = x.cols[c]
            #debug
            #print x.vals[c], " ", self.__means[kk][termId]
            cost += (x.vals[c] - self.__means[kk][termId]) * (x.vals[c] - self.__means[kk][termId]) - self.__means[kk][termId] * self.__means[kk][termId]
        #print "cost:",cost
        # Epsilon guards sqrt against tiny negative rounding error.
        return math.sqrt(cost + 1e-8)
|
import base64
import simplejson as json
import os
import pprint
from io import BytesIO
from IPython.core.display import display, HTML
from matplotlib import pyplot as plt
from optimus.infer import is_str
def output_image(fig, path):
    """
    Save a matplotlib figure as a png file.

    :param fig: Matplotlib figure
    :param path: Output file path
    :return: None
    """
    fig.savefig(path, format='png')
    # close the figure that was saved -- a bare plt.close() would close
    # whatever figure happens to be "current", which may not be fig
    plt.close(fig)
def output_base64(fig):
    """
    Output a matplotlib figure as a base64-encoded png string.

    :param fig: Matplotlib figure
    :return: Base64 encode image (str)
    """
    fig_file = BytesIO()
    # save the figure that was passed in -- plt.savefig would save the
    # implicit "current" figure, which may not be fig
    fig.savefig(fig_file, format='png')
    # rewind to beginning of file
    fig_file.seek(0)
    fig_png = base64.b64encode(fig_file.getvalue())
    plt.close(fig)
    return fig_png.decode('utf8')
def print_html(html):
    """
    Display() helper to print html code.

    :param html: html code to be printed
    :return: None
    """
    if "DATABRICKS_RUNTIME_VERSION" in os.environ:
        # Databricks notebooks expose displayHTML as a builtin; the
        # original passed an undefined name `result` here (NameError)
        displayHTML(html)
    else:
        display(HTML(html))
def print_json(value):
    """
    Pretty-print a json value in human-readable form.

    :param value: json (dict, or json-like string) to be printed
    :return: None
    """
    printer = pprint.PrettyPrinter(indent=2)
    if is_str(value):
        # tolerate single-quoted pseudo-json by swapping quote characters
        # before parsing
        value = json.loads(value.replace("'", "\""))
    printer.pprint(value)
|
"""
dmim_analysis/analysis.py
Dylan H. Ross
2019/10/30
description:
tools for analyzing data
"""
from numpy import array
from dmim_analysis.util import remove_counter_ions, get_and_fit_chrom
from dmim_analysis.scraping.pubchem import get_pchem_monoiso, get_pchem_smi
from dmim_analysis.scraping.cactus import get_cactus_img
from dmim_analysis.data import Compound, Adduct, Metabolite
from dhrmasslynxapi.reader import MassLynxReader
from biotrans.pybiotransformer.wrapper import PBTWrapper
def setup_compound(cmpd_id, name):
    """
    setup_compound
    description:
        Gathers metadata on a compound -- name with counterions removed,
        monoisotopic mass and SMILES structure (from PubChem), and a png
        image of the structure (from cactus) -- and stores it all in a
        Compound object. Returns None if the monoisotopic mass or SMILES
        structure fails to be retrieved, since both are needed by later
        steps in the data processing.
    parameters:
        cmpd_id (int) -- integer identifier for the compound
        name (str) -- compound name
    returns:
        (dmim_analysis.data.Compound or None) -- initial Compound instance (or None if anything goes wrong)
    """
    clean_name = remove_counter_ions(name)
    # monoisotopic mass from PubChem is required to proceed
    monoiso = get_pchem_monoiso(clean_name)
    if not monoiso:
        return None
    # a SMILES structure from PubChem is also required
    smi = get_pchem_smi(clean_name)
    if not smi:
        return None
    # structure image is optional (may be None)
    structure_png = get_cactus_img(smi)
    return Compound(cmpd_id, clean_name, monoiso, smi, png=structure_png)
def find_adducts(raw_file, neutral_monoiso, tolerance, esi_mode, ccs_calibration, lc_func, im_func,
                 is_metab=False, ctrl_raw_file=None, filter_by_dt=True):
    """
    find_adducts
    description:
        Try extracting data for masses corresponding to multiple possible MS adducts ([M]+, [M+Na]+, ...).
        For each adduct the ATD is fitted and checked against rough acceptance criteria; only adducts with
        an acceptable ATD fit are yielded.
    parameters:
        raw_file (str) -- file name for raw data file
        neutral_monoiso (float) -- monoisotopic mass of neutral species
        tolerance (float) -- tolerance to use when filtering on mass
        esi_mode (str) -- ionization mode, may be 'pos' or 'neg'
        ccs_calibration (dhrmasslynxapi.ccs_calibration.CCSCalibrationRaw) -- CCS calibration instance
        lc_func (int) -- function number for LC data
        im_func (int) -- function number for IM data
        [is_metab (bool)] -- flag for specifying looking for metabolite adducts instead of parent compound adducts,
                                in which case the control data file is also checked for cofactor dependence
                                [optional, default=False]
        [ctrl_raw_file (str)] -- if is_metab is set to True, file name of the control raw file [optional, default=None]
        [filter_by_dt (bool)] -- filter the LC chromatogram using the fitted drift time [optional, default=True]
    yields:
        (DMIM_data.Adduct) -- data for the MS adduct (if fit is acceptable)
    """
    # define mass shifts for various MS adducts
    adducts = {
        'pos': {
            '[M]+': 0.,
            '[M+H]+': 1.0078,
            '[M+Na]+': 22.9898,
            '[M+K]+': 38.9637,
            '[M+H-H2O]+': -17.0028
        },
        'neg': {
            '[M-H]-': -1.0078
        }
    }
    # initialize a MassLynxReader using the raw file
    rdr = MassLynxReader(raw_file)
    if is_metab:
        # a control data file is required when looking for metabolite adducts
        if not ctrl_raw_file:
            m = 'find_adducts: is_metab was set to True but no ctrl_raw_file was provided'
            raise ValueError(m)
        ctrl_rdr = MassLynxReader(ctrl_raw_file)
    for adduct in adducts[esi_mode]:
        # MS adduct m/z
        adduct_mz = neutral_monoiso + adducts[esi_mode][adduct]
        # attempt to find an observed m/z
        #mz_obs = get_mz_obs(rdr, adduct_mz, tolerance)
        # NOTE(review): observed m/z search is disabled above; the
        # theoretical adduct m/z is used directly instead
        mz_obs = adduct_mz
        # attempt a fit on ATD
        atd, atd_fit_params = get_and_fit_chrom(rdr, im_func, mz_obs, tolerance)
        # apply rough acceptance criteria
        if atd_fit_params is not None:
            # fit params appear to be (intensity, center, width) -- TODO
            # confirm against get_and_fit_chrom
            low_intensity = atd_fit_params[0] < 1000
            peak_too_broad = atd_fit_params[2] > 0.75
            peak_too_narrow = atd_fit_params[2] < 0.025
            if low_intensity or peak_too_broad or peak_too_narrow:
                atd_fit_params = None  # do not accept the fit
        # only proceed if ATD fit was successful
        if atd_fit_params is not None:
            # get the drift time and calibrated CCS
            dt = atd_fit_params[1]
            ccs = ccs_calibration.calibrated_ccs(mz_obs, dt)
            # attempt to fit LC chromatogram
            if filter_by_dt:
                # restrict the LC extraction to +/- one fitted peak width
                # around the drift time
                dt_tol = atd_fit_params[2]
                dt_min, dt_max = dt - dt_tol, dt + dt_tol
                lc, lc_fit_params = get_and_fit_chrom(rdr, lc_func, mz_obs, tolerance, dt_bounds=(dt_min, dt_max))
            else:
                lc, lc_fit_params = get_and_fit_chrom(rdr, lc_func, mz_obs, tolerance)
            # a retention time of -1 signals a failed LC fit
            rt = lc_fit_params[1] if lc_fit_params is not None else -1
            # finally, if it is a metabolite, get the no-cofactor ATD
            ctrl_atd = None
            if is_metab:
                # ctrl_rdr is guaranteed bound here: is_metab was validated
                # (with ctrl_raw_file) before the loop started
                # noinspection PyUnboundLocalVariable
                _, ctrl_dti = array(ctrl_rdr.get_chrom(im_func, mz_obs, tolerance))
                yield Adduct(adduct, adduct_mz, mz_obs, tolerance, atd, atd_fit_params, lc, lc_fit_params, dt, rt, ccs,
                             raw_file, ctrl_d_file=ctrl_raw_file, no_cofac_dti=ctrl_dti)
            else:
                yield Adduct(adduct, adduct_mz, mz_obs, tolerance, atd, atd_fit_params, lc, lc_fit_params, dt, rt, ccs,
                             raw_file)
def predict_metabolites(cmpd):
    """
    predict_metabolites
    description:
        predicts metabolites from the parent compound and yields them (as long as the mass difference from the parent
        compound is > 1 Da)
    parameters:
        cmpd (dmim_analysis.data.Compound) -- parent Compound instance
    yields:
        (dmim_analysis.data.Metabolite) -- data structure for a predicted metabolite
    """
    # initialize the Biotransformer wrapper
    wrapper = PBTWrapper()
    metab_id = 0
    for predicted in wrapper.predict_metabolites(cmpd.SMILES, n_steps=2, unique=True):
        # skip predictions whose mass is within 1 Da of the parent
        if abs(predicted.mass - cmpd.monoiso) <= 1.:
            continue
        metab_id += 1
        # grab an image for the metabolite structure
        structure_png = get_cactus_img(predicted.InChI)
        yield Metabolite(metab_id, predicted.reaction, predicted.mass, predicted.InChI, png=structure_png)
def analyze_compound(cmpd_id, name, raw_file, lc_func, im_func, tolerance, esi_mode, ccs_calibration, check_ctrl=False,
                     ctrl_raw_file=None):
    """
    analyze_compound
    description:
        Performs a complete analysis on a single compound:
            - set up Compound instance with metadata
            - find MS adducts of the parent compound
            - predict metabolites of the parent compound
            - look for MS adducts of predicted metabolites
            - return the Compound instance
        Returns None when compound setup fails or when no MS adducts of the
        parent compound are observed.
    parameters:
        cmpd_id (str) -- parent compound identifier
        name (str) -- parent compound name
        raw_file (str) -- file name for raw data file
        lc_func (int) -- function number for LC data
        im_func (int) -- function number for IM data
        tolerance (float) -- tolerance to use when filtering on mass
        esi_mode (str) -- ionization mode, may be 'pos' or 'neg'
        ccs_calibration (dhrmasslynxapi.ccs_calibration.CCSCalibrationRaw) -- CCS calibration instance
        [check_ctrl (bool)] -- check the control data file for metabolite signals [optional, default=False]
        [ctrl_raw_file (str)] -- file name of the control raw file [optional, default=None]
    returns:
        (dmim_analysis.data.Compound or None) -- fully analyzed Compound instance, or None on failure
    """
    # setup Compound instance
    cmpd = setup_compound(cmpd_id, name)
    if not cmpd:
        # compound setup did not work
        return None
    # find MS adducts (list(...) instead of the [_ for _ in ...] idiom)
    cmpd.adducts = list(find_adducts(raw_file, cmpd.monoiso, tolerance, esi_mode, ccs_calibration, lc_func,
                                     im_func))
    if not cmpd.adducts:
        # no MS adducts were observed for the parent compound
        return None
    # predict metabolites
    cmpd.metabolites = list(predict_metabolites(cmpd))
    # look for MS adducts of the predicted metabolites
    for metab in cmpd.metabolites:
        if check_ctrl:
            metab.adducts = list(find_adducts(raw_file, metab.monoiso, tolerance, esi_mode, ccs_calibration,
                                              lc_func, im_func, is_metab=True, ctrl_raw_file=ctrl_raw_file))
        else:
            metab.adducts = list(find_adducts(raw_file, metab.monoiso, tolerance, esi_mode, ccs_calibration,
                                              lc_func, im_func))
    # filter the list of metabolites down to those that were actually observed in the MS data
    cmpd.metabolites = [metab for metab in cmpd.metabolites if metab.adducts]
    # return the analyzed compound data
    return cmpd
def analyze_compound_targeted(name_adduct, mz, raw_file, lc_func, im_func, tolerance, ccs_calibration,
                              filter_by_dt=True):
    """
    analyze_compound_targeted
    description:
        Attempts to obtain a CCS value for an already-known compound MS adduct,
        using its expected m/z directly (targeted analysis: no adduct search is
        performed)
    parameters:
        name_adduct (str) -- parent compound name and MS adduct
        mz (float) -- m/z
        raw_file (str) -- file name for raw data file
        lc_func (int) -- function number for LC data
        im_func (int) -- function number for IM data
        tolerance (float) -- tolerance to use when filtering on mass
        ccs_calibration (dhrmasslynxapi.ccs_calibration.CCSCalibrationRaw) -- CCS calibration instance
        [filter_by_dt (bool)] -- filter the LC chromatogram using the fitted drift time [optional, default=True]
    returns:
        (dict(...) or None) -- compound data and associated metadata or None if unsuccessful
    """
    # NOTE(review): name_adduct is currently unused inside this function
    # initialize a MassLynxReader using the raw file
    rdr = MassLynxReader(raw_file)
    # attempt to find an observed m/z
    #mz_obs = get_mz_obs(rdr, mz, tolerance)
    # NOTE(review): observed m/z search is disabled above; the supplied m/z
    # is used directly instead
    mz_obs = mz
    # attempt a fit on ATD
    atd, atd_fit_params = get_and_fit_chrom(rdr, im_func, mz_obs, tolerance)
    # apply rough acceptance criteria
    if atd_fit_params is not None:
        # fit params appear to be (intensity, center, width) -- TODO confirm
        # against get_and_fit_chrom
        low_intensity = atd_fit_params[0] < 1000
        peak_too_broad = atd_fit_params[2] > 0.75
        peak_too_narrow = atd_fit_params[2] < 0.025
        if low_intensity or peak_too_broad or peak_too_narrow:
            atd_fit_params = None  # do not accept the fit
    # only proceed if ATD fit was successful
    if atd_fit_params is not None:
        # get the drift time and calibrated CCS
        dt = atd_fit_params[1]
        ccs = ccs_calibration.calibrated_ccs(mz_obs, dt)
        # attempt to fit LC chromatogram
        if filter_by_dt:
            # restrict the LC extraction to +/- one fitted peak width around
            # the drift time
            dt_tol = atd_fit_params[2]
            dt_min, dt_max = dt - dt_tol, dt + dt_tol
            lc, lc_fit_params = get_and_fit_chrom(rdr, lc_func, mz_obs, tolerance, dt_bounds=(dt_min, dt_max))
        else:
            lc, lc_fit_params = get_and_fit_chrom(rdr, lc_func, mz_obs, tolerance)
        # a retention time of -1 signals a failed LC fit
        rt = lc_fit_params[1] if lc_fit_params is not None else -1
        # assemble the data and metadata to return
        data = {
            'dt': dt,
            'ccs': ccs,
            'rt': rt,
            'meta': {
                'mz_obs': mz_obs,
                'atd': atd,
                'atd_fit_params': atd_fit_params,
                'lc': lc,
                'lc_fit_params': lc_fit_params
            }
        }
        return data
    else:
        return None
|
import nj
def test_various_001(data):
    """Query with in_: documents whose 'a' is one of (1, 2)."""
    matched = data.find(nj.q(a=nj.in_(1, 2)))
    found_ids = set()
    for doc in matched:
        found_ids.add(doc['_id'])
    assert found_ids == {'11', '22', '14', '25'}
def test_various_002(data):
    """Query with mod_: documents whose 'b' satisfies b % 3 == 1."""
    matched = data.find(nj.q(b=nj.mod_(3, 1)))
    found_ids = set()
    for doc in matched:
        found_ids.add(doc['_id'])
    assert found_ids == {'11', '14'}
def test_various_003(data):
    """Combined query: 'b' not greater than 2 AND 'b' != 1."""
    query = nj.q(b=nj.not_(nj.gt_(2))) & nj.q(b=nj.ne_(1))
    found_ids = set()
    for doc in data.find(query):
        found_ids.add(doc['_id'])
    assert found_ids == {'22'}
|
from django import template
register = template.Library()
@register.filter(name='isboolean')
def isboolean(value):
    """Template filter: return True when the value is a Python bool."""
    return isinstance(value, bool)
@register.filter(name='vartypename')
def vartypename(value):
    """Template filter: return the name of the value's Python type
    (e.g. 'int', 'str', 'QuerySet')."""
    return type(value).__name__
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata and dependencies for the cronen micro cron library.
setup(name='cronen',
      version='1.0',
      description='Micro cron library',
      author='Barak Schiller',
      author_email='bschiller@gmail.com',
      packages=find_packages(),
      # bottle is the only runtime dependency
      install_requires=[
          'bottle',
      ]
      )
|
import logging
from src.utils.configurer import config
class Logger:
    """Thin wrapper around the stdlib logging module that appends records to
    a file and echoes every message to stdout."""

    def __init__(self, filename: str) -> None:
        """Configure root logging to append to ``filename``.

        :param filename: path of the log file
        """
        logging.basicConfig(
            filename=filename,
            filemode='a',
            format='%(asctime)s - %(levelname)s - %(message)s',
            datefmt='%d-%b-%y %H:%M:%S',
        )
        # messages below WARNING are filtered out by the root logger
        logging.getLogger().setLevel(logging.WARNING)
        # map level names to the stdlib numeric levels (same values as the
        # previous hand-written 50/40/30/20/10 constants)
        self.level_int_mapping = {
            "critical": logging.CRITICAL,
            "error": logging.ERROR,
            "warning": logging.WARNING,
            "info": logging.INFO,
            "debug": logging.DEBUG
        }

    def log(self, level: str, message: str) -> bool:
        """Log ``message`` at the named ``level`` and echo it to stdout.

        :param level: level name, case-insensitive (critical/error/warning/info/debug)
        :param message: text to record
        :raises KeyError: if the level name is unknown
        :return: always True
        """
        print(message)
        logging.log(self.level_int_mapping[level.lower()], message)
        return True
logger = Logger(config.get_configuration("log_filename", "SYSTEM"))
|
#######################################################
#
# RequestCOTController.py
# Python implementation of the Class RequestCOTController
# Generated by Enterprise Architect
# Created on: 26-Mar-2020 6:32:34 PM
# Original author: Giu Platania
#
#######################################################
from Model.Event import Event as Event
class RequestCOTController:
    """This controller manages all the different types of COTs, including the
    geochat messages.
    """

    def __init__(self):
        # no state to initialize yet
        pass

    def dropPin(self, lat=123, lon=123):
        """Create and return a 'dropPin' event at the given coordinates.

        Defaults preserve the original hard-coded placeholder values.
        """
        return Event('dropPin', lat, lon)

    def sendGeoChatToAllChatRooms(self, messagetext=None, callsign=None):
        """Create and return a geochat event addressed to all chat rooms.

        messagetext/callsign were previously read from undefined globals
        (raising NameError); they are now explicit parameters.
        """
        return Event('GeoToAllRooms', messagetext, callsign)

    def sendGeoChatToGroup(self, messagetext=None, callsign=None):
        """Create and return a geochat event addressed to a group."""
        return Event('GeoToGroup', messagetext, callsign)

    def sendGeoChatToTeam(self, messagetext=None, callsign=None):
        """Create and return a geochat event addressed to the team."""
        return Event('GeoToTeam', messagetext, callsign)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.